diff --git a/wwwroot/chat/completions/index.dspy b/wwwroot/chat/completions/index.dspy
new file mode 100644
index 0000000..428d7e0
--- /dev/null
+++ b/wwwroot/chat/completions/index.dspy
@@ -0,0 +1,46 @@
+# Chat-completion endpoint: authenticates the caller, resolves the requested
+# model against the llm/llmcatelog tables, checks the customer's balance,
+# then hands off to the shared inference() handler.
+debug(f'{params_kw=}')
+# Catalog name for text-to-text models (bound into the SQL query below).
+lctype='文生文'
+if params_kw.off_peak:
+    # Normalize the off_peak flag to a real boolean; accepted truthy
+    # spellings are True, "Y", "y", 1 and "1".
+    # BUG FIX: the original wrote [True, "Y" "y", 1, "1"] — implicit string
+    # concatenation made that "Yy", so "Y" and "y" never matched.
+    params_kw.off_peak = params_kw.off_peak in (True, "Y", "y", 1, "1")
+userid = await get_user()
+userorgid = await get_userorgid()
+if userid is None:
+    # Not authenticated.
+    return openai_403()
+
+if not params_kw.prompt:
+    d = return_error('Missing need data(prompt)')
+    return json_response(d, status=400)
+env = request._run_ns
+async with get_sor_context(env, 'llmage') as sor:
+    # Look up the requested model inside the text-to-text catalog.
+    sql = """select a.* from llm a, llmcatelog b
+where a.llmcatelogid=b.id
+	and a.model=${model}$
+	and b.name = ${lctype}$"""
+    recs = await sor.sqlExe(sql, {
+        'lctype': lctype,
+        'model': params_kw.model or 'qwen3-max'
+    })
+    if not recs:
+        # Unknown model for this catalog.
+        return openai_400()
+    params_kw.llmid = recs[0].id
+
+f = await checkCustomerBalance(params_kw.llmid, userorgid)
+if not f:
+    # Customer has insufficient balance / quota.
+    return openai_429()
+
+# Expose this module's namespace to the shared inference() handler.
+env = DictObject(**globals())
+return await inference(request, env=env)
+