#!/usr/bin/env python3
|
|
# -*- coding: utf-8 -*-
|
|
"""Save reasoning configuration for current user (includes model config)"""
|
|
import json, uuid, time
|
|
|
|
result = {'widgettype': 'Message', 'options': {'title': 'Error', 'message': 'Invalid request', 'type': 'error'}}
|
|
|
|
try:
|
|
dbname = get_module_dbname('harnessed_reasoning')
|
|
user_id = await get_user()
|
|
now = time.strftime('%Y-%m-%d %H:%M:%S')
|
|
|
|
# Reasoning engine params
|
|
max_reasoning_steps = int(params_kw.get('max_reasoning_steps', 10))
|
|
max_tool_calls_per_step = int(params_kw.get('max_tool_calls_per_step', 5))
|
|
enable_cross_session_search = '1' if params_kw.get('enable_cross_session_search') == '1' else '0'
|
|
enable_skill_auto_loading = '1' if params_kw.get('enable_skill_auto_loading') == '1' else '0'
|
|
safety_mode = params_kw.get('safety_mode', 'strict').strip()
|
|
max_context_tokens = int(params_kw.get('max_context_tokens', 4000))
|
|
enable_error_recovery = '1' if params_kw.get('enable_error_recovery') == '1' else '0'
|
|
max_recovery_attempts = int(params_kw.get('max_recovery_attempts', 3))
|
|
|
|
# Model configuration
|
|
model_name = params_kw.get('model_name', 'qwen3-max').strip()
|
|
model_provider = params_kw.get('model_provider', 'openrouter').strip()
|
|
api_key = params_kw.get('api_key', '').strip()
|
|
api_endpoint = params_kw.get('api_endpoint', '').strip()
|
|
temperature = float(params_kw.get('temperature', 0.7))
|
|
max_output_tokens = int(params_kw.get('max_output_tokens', 4096))
|
|
top_p = float(params_kw.get('top_p', 0.9))
|
|
system_prompt = params_kw.get('system_prompt', '').strip()
|
|
|
|
async with DBPools().sqlorContext(dbname) as sor:
|
|
rows = await sor.sqlExe("SELECT id FROM harnessed_reasoning_config WHERE user_id = ${user_id}$", {'user_id': user_id})
|
|
|
|
if rows and len(rows) > 0:
|
|
config_id = rows[0]['id']
|
|
await sor.sqlExe("""UPDATE harnessed_reasoning_config SET
|
|
max_reasoning_steps = ${max_reasoning_steps}$,
|
|
max_tool_calls_per_step = ${max_tool_calls_per_step}$,
|
|
enable_cross_session_search = ${enable_cross_session_search}$,
|
|
enable_skill_auto_loading = ${enable_skill_auto_loading}$,
|
|
safety_mode = ${safety_mode}$,
|
|
max_context_tokens = ${max_context_tokens}$,
|
|
enable_error_recovery = ${enable_error_recovery}$,
|
|
max_recovery_attempts = ${max_recovery_attempts}$,
|
|
model_name = ${model_name}$,
|
|
model_provider = ${model_provider}$,
|
|
api_key = ${api_key}$,
|
|
api_endpoint = ${api_endpoint}$,
|
|
temperature = ${temperature}$,
|
|
max_output_tokens = ${max_output_tokens}$,
|
|
top_p = ${top_p}$,
|
|
system_prompt = ${system_prompt}$,
|
|
updated_at = ${updated_at}$
|
|
WHERE id = ${id}$""", {
|
|
'id': config_id,
|
|
'max_reasoning_steps': max_reasoning_steps,
|
|
'max_tool_calls_per_step': max_tool_calls_per_step,
|
|
'enable_cross_session_search': enable_cross_session_search,
|
|
'enable_skill_auto_loading': enable_skill_auto_loading,
|
|
'safety_mode': safety_mode,
|
|
'max_context_tokens': max_context_tokens,
|
|
'enable_error_recovery': enable_error_recovery,
|
|
'max_recovery_attempts': max_recovery_attempts,
|
|
'model_name': model_name,
|
|
'model_provider': model_provider,
|
|
'api_key': api_key,
|
|
'api_endpoint': api_endpoint,
|
|
'temperature': temperature,
|
|
'max_output_tokens': max_output_tokens,
|
|
'top_p': top_p,
|
|
'system_prompt': system_prompt,
|
|
'updated_at': now
|
|
})
|
|
else:
|
|
config_id = str(uuid.uuid4()).replace('-', '')[:32]
|
|
await sor.sqlExe("""INSERT INTO harnessed_reasoning_config
|
|
(id, user_id, max_reasoning_steps, max_tool_calls_per_step,
|
|
enable_cross_session_search, enable_skill_auto_loading, safety_mode,
|
|
max_context_tokens, enable_error_recovery, max_recovery_attempts,
|
|
model_name, model_provider, api_key, api_endpoint,
|
|
temperature, max_output_tokens, top_p, system_prompt,
|
|
created_at, updated_at)
|
|
VALUES (${id}$, ${user_id}$, ${max_reasoning_steps}$, ${max_tool_calls_per_step}$,
|
|
${enable_cross_session_search}$, ${enable_skill_auto_loading}$, ${safety_mode}$,
|
|
${max_context_tokens}$, ${enable_error_recovery}$, ${max_recovery_attempts}$,
|
|
${model_name}$, ${model_provider}$, ${api_key}$, ${api_endpoint}$,
|
|
${temperature}$, ${max_output_tokens}$, ${top_p}$, ${system_prompt}$,
|
|
${created_at}$, ${updated_at}$)""", {
|
|
'id': config_id,
|
|
'user_id': user_id,
|
|
'max_reasoning_steps': max_reasoning_steps,
|
|
'max_tool_calls_per_step': max_tool_calls_per_step,
|
|
'enable_cross_session_search': enable_cross_session_search,
|
|
'enable_skill_auto_loading': enable_skill_auto_loading,
|
|
'safety_mode': safety_mode,
|
|
'max_context_tokens': max_context_tokens,
|
|
'enable_error_recovery': enable_error_recovery,
|
|
'max_recovery_attempts': max_recovery_attempts,
|
|
'model_name': model_name,
|
|
'model_provider': model_provider,
|
|
'api_key': api_key,
|
|
'api_endpoint': api_endpoint,
|
|
'temperature': temperature,
|
|
'max_output_tokens': max_output_tokens,
|
|
'top_p': top_p,
|
|
'system_prompt': system_prompt,
|
|
'created_at': now,
|
|
'updated_at': now
|
|
})
|
|
|
|
result = {'widgettype': 'Message', 'options': {'title': 'Success', 'message': '配置保存成功', 'type': 'success'}}
|
|
|
|
except Exception as e:
|
|
result['options'] = {'title': 'Error', 'message': '保存失败: ' + str(e), 'type': 'error'}
|
|
|
|
return json.dumps(result, ensure_ascii=False)
|