{
  "summary": [
    {
      "name": "harnessed_reasoning_config",
      "title": "Hermes Reasoning module configuration settings",
      "primary": "id"
    }
  ],
  "fields": [
    {
      "name": "id",
      "title": "Unique configuration identifier",
      "type": "str",
      "length": 32,
      "nullable": "no"
    },
    {
      "name": "user_id",
      "title": "User ID for multi-user isolation",
      "type": "str",
      "length": 32,
      "nullable": "no"
    },
    {
      "name": "max_reasoning_steps",
      "title": "Maximum reasoning steps per task",
      "type": "int",
      "nullable": "no",
      "default": "10"
    },
    {
      "name": "max_tool_calls_per_step",
      "title": "Maximum tool calls per reasoning step",
      "type": "int",
      "nullable": "no",
      "default": "5"
    },
    {
      "name": "enable_cross_session_search",
      "title": "Enable automatic session search",
      "type": "str",
      "length": "1",
      "nullable": "no",
      "default": "1"
    },
    {
      "name": "enable_skill_auto_loading",
      "title": "Enable automatic skill loading",
      "type": "str",
      "length": "1",
      "nullable": "no",
      "default": "1"
    },
    {
      "name": "safety_mode",
      "title": "Safety mode: strict, moderate, lenient",
      "type": "str",
      "length": "20",
      "nullable": "no",
      "default": "strict"
    },
    {
      "name": "max_context_tokens",
      "title": "Maximum tokens for reasoning context",
      "type": "int",
      "nullable": "no",
      "default": "4000"
    },
    {
      "name": "enable_error_recovery",
      "title": "Enable automatic error recovery",
      "type": "str",
      "length": "1",
      "nullable": "no",
      "default": "1"
    },
    {
      "name": "max_recovery_attempts",
      "title": "Maximum recovery attempts per error",
      "type": "int",
      "nullable": "no",
      "default": "3"
    },
    {
      "name": "model_name",
      "title": "LLM model name (e.g. qwen3-max, claude-sonnet-4)",
      "type": "str",
      "length": 64,
      "nullable": "yes",
      "default": "qwen3-max"
    },
    {
      "name": "model_provider",
      "title": "Model provider (e.g. openrouter, anthropic, dashscope)",
      "type": "str",
      "length": 32,
      "nullable": "yes",
      "default": "openrouter"
    },
    {
      "name": "api_key",
      "title": "API key for LLM provider",
      "type": "text",
      "nullable": "yes",
      "default": ""
    },
    {
      "name": "api_endpoint",
      "title": "API endpoint URL (optional, uses default if empty)",
      "type": "str",
      "length": 255,
      "nullable": "yes",
      "default": ""
    },
    {
      "name": "temperature",
      "title": "LLM temperature (0.0-1.0)",
      "type": "float",
      "nullable": "no",
      "default": "0.7"
    },
    {
      "name": "max_output_tokens",
      "title": "Maximum output tokens",
      "type": "int",
      "nullable": "no",
      "default": "4096"
    },
    {
      "name": "top_p",
      "title": "Top-p sampling parameter",
      "type": "float",
      "nullable": "no",
      "default": "0.9"
    },
    {
      "name": "system_prompt",
      "title": "System prompt template for reasoning",
      "type": "text",
      "nullable": "yes",
      "default": ""
    },
    {
      "name": "created_at",
      "title": "Creation timestamp",
      "type": "timestamp",
      "nullable": "no"
    },
    {
      "name": "updated_at",
      "title": "Last update timestamp",
      "type": "timestamp",
      "nullable": "no"
    }
  ],
  "indexes": [
    {
      "name": "idx_user_config",
      "idxtype": "index",
      "idxfields": ["user_id"]
    }
  ],
  "codes": []
}