fix: improve LLM call error handling and plan parsing

- Add detailed logging to _llm_call method
- Improve _parse_plan_json to handle more LLM response formats
- Show LLM error messages in reasoning_submit.dspy
- Better error handling and fallback for JSON parsing
Author: yumoqing
Date: 2026-05-07 14:51:44 +08:00
Parent: 8108e86ab5
Commit: c36477c9cb

2 changed files with 60 additions and 16 deletions


@@ -258,17 +258,23 @@ class HermesReasoningEngine:
         # Use harnessed_agent's llm_chat if available
         env = ServerEnv()
         if hasattr(env, 'llm_chat'):
-            return await env.llm_chat(
-                messages=messages,
-                model=model,
-                temperature=temperature,
-                max_tokens=max_tokens,
-                **extra,
-            )
-
-        # Fallback: direct config-based call (shouldn't happen in production)
-        error("llm_chat not available on ServerEnv")
-        return {'error': {'message': 'LLM client not available'}}
+            info(f"Calling llm_chat: model={model}, temp={temperature}, max_tokens={max_tokens}")
+            try:
+                result = await env.llm_chat(
+                    messages=messages,
+                    model=model,
+                    temperature=temperature,
+                    max_tokens=max_tokens,
+                    **extra,
+                )
+                info(f"llm_chat returned: success={'error' not in result}")
+                return result
+            except Exception as e:
+                error(f"llm_chat exception: {e}")
+                return {'error': {'message': f'llm_chat exception: {str(e)}', 'type': 'exception'}}
+        else:
+            error("llm_chat not available on ServerEnv")
+            return {'error': {'message': 'LLM client not available (llm_chat not registered)', 'type': 'configuration_error'}}
 
     async def _generate_plan(self, request: str, context: Dict[str, Any],
                              config: Dict[str, Any]) -> Dict[str, Any]:
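For context, both error paths above now return a dict of the shape {'error': {'message': ..., 'type': ...}}, while the success path passes the llm_chat result through unchanged. A minimal sketch of how a caller might branch on that contract (the helper name and the 'content' key on the success payload are assumptions for illustration, not part of this commit):

    from typing import Any, Dict

    def unpack_llm_result(result: Dict[str, Any]) -> str:
        """Return model text, or raise using the structured error dict."""
        err = result.get('error')
        if err:
            # 'type' separates transient exceptions from configuration errors
            raise RuntimeError(f"LLM call failed ({err.get('type')}): {err.get('message')}")
        return result.get('content', '')  # assumed success key

    # Hypothetical result dicts:
    print(unpack_llm_result({'content': 'hello'}))  # -> hello
    try:
        unpack_llm_result({'error': {'message': 'timeout', 'type': 'exception'}})
    except RuntimeError as e:
        print(e)  # -> LLM call failed (exception): timeout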
@@ -321,8 +327,9 @@
     def _parse_plan_json(self, text: str) -> Dict[str, Any]:
         """Extract and parse JSON plan from LLM response."""
-        # Try to find JSON block
         import re
+        # Try to find JSON block in code fence
         json_match = re.search(r'```json\s*\n(.*?)\n```', text, re.DOTALL)
         if json_match:
             text = json_match.group(1)
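A quick self-contained check of the fence-extraction regex above; the sample response string is invented:

    import re

    def extract_fenced_json(text: str) -> str:
        """Return the body of a ```json fenced block, or the text unchanged."""
        match = re.search(r'```json\s*\n(.*?)\n```', text, re.DOTALL)
        return match.group(1) if match else text

    sample = 'Here is the plan:\n```json\n{"steps": []}\n```\nDone.'
    print(extract_fenced_json(sample))  # -> {"steps": []}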
@@ -340,10 +347,37 @@
             plan.setdefault('analysis', '')
             plan.setdefault('steps', [])
             plan.setdefault('safety_notes', [])
+
+            # Validate steps structure
+            valid_steps = []
+            for i, step in enumerate(plan.get('steps', [])):
+                if isinstance(step, dict):
+                    step.setdefault('step_number', i + 1)
+                    step.setdefault('description', '')
+                    step.setdefault('actions', [])
+                    valid_steps.append(step)
+                elif isinstance(step, str):
+                    # Handle case where LLM returns list of strings
+                    valid_steps.append({
+                        'step_number': i + 1,
+                        'description': step,
+                        'actions': []
+                    })
+            plan['steps'] = valid_steps
             return plan
         except json.JSONDecodeError:
-            error(f"Failed to parse LLM plan JSON: {text[:200]}")
-            return {'analysis': '无法解析 LLM 返回的计划', 'steps': [], 'safety_notes': []}
+            error(f"Failed to parse LLM plan JSON: {text[:500]}")
+            # Fallback: create a simple plan from the response text
+            return {
+                'analysis': text[:200],
+                'steps': [{
+                    'step_number': 1,
+                    'description': text[:100],
+                    'actions': []
+                }],
+                'safety_notes': []
+            }
 
     # --------------------------------------------------------
     # Tool execution
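The step normalization above tolerates both shapes the model may emit: a list of dicts or a bare list of strings. The same logic as a standalone sketch (the function name is ours, for illustration):

    from typing import Any, Dict, List

    def normalize_steps(steps: List[Any]) -> List[Dict[str, Any]]:
        """Coerce each step into {'step_number', 'description', 'actions'}."""
        valid_steps = []
        for i, step in enumerate(steps):
            if isinstance(step, dict):
                step.setdefault('step_number', i + 1)
                step.setdefault('description', '')
                step.setdefault('actions', [])
                valid_steps.append(step)
            elif isinstance(step, str):
                # the LLM sometimes returns plain strings instead of dicts
                valid_steps.append({'step_number': i + 1, 'description': step, 'actions': []})
        return valid_steps

    print(normalize_steps(['check disk', {'description': 'restart service'}]))
    # -> [{'step_number': 1, 'description': 'check disk', 'actions': []},
    #     {'description': 'restart service', 'step_number': 2, 'actions': []}]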

reasoning_submit.dspy

@@ -21,31 +21,41 @@ try:
     if reasoning_result.get('success'):
         # Build result widget showing reasoning output
+        analysis = reasoning_result.get('analysis', '无')
+        llm_error = reasoning_result.get('llm_error', '')
         plan_items = []
         for step in reasoning_result.get('execution_plan', []):
             plan_items.append(f"步骤{step.get('step_number', '?')}: {step.get('description', '')}")
         plan_text = '\n'.join(plan_items) if plan_items else '无执行计划'
         safety_text = '\n'.join(reasoning_result.get('safety_violations', [])) or '无安全风险'
+        status = reasoning_result.get('status', 'unknown')
         summary = (
             f"请求: {request_text}\n\n"
+            f"分析: {analysis}\n\n"
             f"上下文: {reasoning_result.get('context_summary', '无')}\n\n"
             f"置信度: {reasoning_result.get('confidence_score', 0):.0%}\n\n"
             f"执行计划:\n{plan_text}\n\n"
             f"安全检查:\n{safety_text}\n\n"
-            f"状态: {reasoning_result.get('status', 'unknown')}"
+            f"状态: {status}"
         )
+        # Show LLM error if any
+        if llm_error:
+            summary += f"\n\nLLM 错误: {llm_error}"
         if reasoning_result.get('execution_results'):
             summary += f"\n\n执行结果:\n{json.dumps(reasoning_result['execution_results'], ensure_ascii=False, indent=2)}"
+        msg_type = 'success' if not llm_error else 'warning'
         result = {
             'widgettype': 'Message',
             'options': {
                 'title': '推理完成',
                 'message': summary,
-                'type': 'success'
+                'type': msg_type
             }
         }
     else:
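Taken together with the engine changes, a failed LLM call now surfaces as an llm_error field on the result rather than a hard failure, and the widget downgrades from success to warning. A minimal sketch of that selection logic; the values below are invented:

    llm_error = 'llm_chat exception: timeout'  # hypothetical value from reasoning_result
    summary = '请求: ...\n\n状态: completed'
    if llm_error:
        summary += f'\n\nLLM 错误: {llm_error}'

    result = {
        'widgettype': 'Message',
        'options': {
            'title': '推理完成',
            'message': summary,
            # degrade to a warning when the LLM call failed but the fallback plan ran
            'type': 'success' if not llm_error else 'warning',
        },
    }
    print(result['options']['type'])  # -> warning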