From ab7cb9a7346bd693b1867700173131bea76e3ebf Mon Sep 17 00:00:00 2001 From: yumoqing Date: Tue, 31 Mar 2026 16:11:29 +0800 Subject: [PATCH] bugfix --- llmage/llmclient.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/llmage/llmclient.py b/llmage/llmclient.py index 6be5ff4..08cf662 100644 --- a/llmage/llmclient.py +++ b/llmage/llmclient.py @@ -174,14 +174,14 @@ async def _inference_generator(request, callerid, callerorgid, return if not params_kw.model: params_kw.model = llm.model - if params_kw.nostream and llm.stream == 'stream': + if not params_kw.stream and llm.stream == 'stream': llm.stream = 'sync' if llm.stream == 'async': if llm.callbackurl: cb_url = env.entire_url(llm.callbackurl) params_kw.callbackurl = cb_url f = partial(async_uapi_request, request, llm, sor, callerid, callerorgid, params_kw=params_kw) - elif llm.stream == 'sync': + elif not params_kw.stream: f = partial(sync_uapi_request, request, llm, sor, callerid, callerorgid, params_kw=params_kw) # env.update(llm) else: