This commit is contained in:
yumoqing 2026-03-31 15:46:26 +08:00
parent b487bc9c34
commit c457e3bc33
4 changed files with 48 additions and 0 deletions

View File

@ -3,6 +3,9 @@ from appPublic.registerfunction import RegisterFunction
from ahserver.serverenv import ServerEnv from ahserver.serverenv import ServerEnv
from .keling import keling_token from .keling import keling_token
from .jimeng import jimeng_auth_headers from .jimeng import jimeng_auth_headers
from .utils import (
get_llm_by_model
)
from .llmclient import ( from .llmclient import (
b64media2url, b64media2url,
@ -29,6 +32,7 @@ from .asyncinference import (
def load_llmage(): def load_llmage():
env = ServerEnv() env = ServerEnv()
env.get_llm_by_model = get_llm_by_model env.get_llm_by_model = get_llm_by_model
env.llm_charging = llm_charging env.llm_charging = llm_charging
env.llm_accounting = llm_accounting env.llm_accounting = llm_accounting
env.get_today_asynctask_list = get_today_asynctask_list env.get_today_asynctask_list = get_today_asynctask_list

View File

@ -14,6 +14,13 @@ from uapi.appapi import UAPI, sor_get_callerid, sor_get_uapi
from ahserver.serverenv import get_serverenv, ServerEnv from ahserver.serverenv import get_serverenv, ServerEnv
from ahserver.filestorage import FileStorage from ahserver.filestorage import FileStorage
async def get_llm_by_model(id, lctype=None):
	"""Look up records in the ``llm`` table whose ``model`` column matches.

	Args:
		id: the model name to match (NOTE(review): the parameter is named
			``id`` but actually carries a model name — the caller passes
			``params_kw.model``; name kept for backward compatibility,
			though it shadows the builtin ``id``).
		lctype: accepted but unused here — presumably a media/content type
			filter; TODO confirm intended use.

	Returns:
		Whatever ``sor.R`` returns for the matching rows (a record set,
		or its empty/None result when nothing matches).
	"""
	env = ServerEnv()
	async with get_sor_context(env, 'llmage') as sor:
		# BUG FIX: original body referenced the undefined name ``model``
		# (NameError at runtime); use the ``id`` argument instead.
		# Also dropped the unused local ``sql`` — sor.R('llm', ...) builds
		# the query itself.
		recs = await sor.R('llm', {'model': id})
		return recs
def erase_apikey(e): def erase_apikey(e):
e = str(e) e = str(e)
ss = e.split('Bearer ') ss = e.split('Bearer ')

12
wwwroot/tasks/index.dspy Normal file
View File

@ -0,0 +1,12 @@
taskid = params_kw.taskid
userid = await get_user()
if userid is None:
return openai_403
s = await get_asynctask_status(taskid)
return {
'status': 'ok',
'data': {
'resposne': s
}
}

25
wwwroot/video/index.dspy Normal file
View File

@ -0,0 +1,25 @@
params_kw.action = 't2v'
lctype =
userid = await get_user()
if userid is None:
return openai_403
if params_kw.model is None:
return openai_400()
llms = await get_llm_by_model(params_kw.model)
if llms is None:
debug(f'{params_kw.model=} not found in llm with error')
return openai_400()
if len(llms) == 0:
debug(f'{params_kw.model=} not found in llm')
return openai_400()
f = await checkCustomerBalance(params_kw.llmid, userorgid)
if not f:
return openai_429()
model = params_kw.model
env = request._run_ns
async with get_sor.context(env, 'llmage') as sor: