# Source: base_agent/config.yaml (exported from repository web view, 2026-03-09)
# ════════════════════════════════════════════════════════════════
# config/config.yaml
# Agent 系统全局配置文件
# ════════════════════════════════════════════════════════════════
# ── LLM model configuration ────────────────────────────────────
llm:
  provider: "openai"                # Provider: openai | anthropic | ollama | local
  model_name: "gpt-4o"              # Model name
  # SECURITY: never commit a real key to VCS. Leave empty here; the
  # LLM_API_KEY environment variable is read with priority.
  api_key: ""
  api_base_url: "https://openapi.monica.im/v1"  # Custom API base URL (OpenAI-compatible proxies)
  max_tokens: 4096                  # Maximum output tokens
  temperature: 0.7                  # Sampling temperature, 0.0–1.0
  timeout: 60                       # Request timeout (seconds)
  max_retries: 3                    # Automatic retries on failure
  # OpenAI-specific
  function_calling: true            # Enable Function Calling (core of tool planning)
  stream: false                     # Enable streaming output
  # Ollama / local-model specific
  model_path: ""                    # Local model path, e.g. /models/llama3
  ollama_host: "http://localhost:11434"
# ── MCP server configuration ───────────────────────────────────
mcp:
  server_name: "DemoMCPServer"
  transport: "stdio"                # Transport mechanism (host/port presumably apply to network transports — TODO confirm in server code)
  host: "localhost"
  port: 3000
  # Tools exposed by the server; names must match the `tools:` section below.
  enabled_tools:
    - calculator
    - web_search
    - file_reader
    - code_executor
# ── Tool configuration ─────────────────────────────────────────
# Per-tool settings, keyed by tool name (see mcp.enabled_tools).
tools:
  web_search:
    max_results: 5                  # Maximum results returned per query
    timeout: 10                     # Request timeout (seconds)
    api_key: ""                     # Search engine API key, if required
    engine: "mock"                  # Search backend ("mock" for offline testing)
  file_reader:
    allowed_root: "./workspace"     # Reads are restricted to this directory tree
    max_file_size_kb: 512           # Reject files larger than this (KiB)
  code_executor:
    timeout: 5                      # Execution time limit (seconds)
    sandbox: true                   # Run code in a sandbox
  calculator:
    precision: 10                   # Numeric precision (digits)
# ── Memory configuration ───────────────────────────────────────
memory:
  max_history: 20                   # Conversation turns kept in short-term history
  enable_long_term: false           # Enable long-term (vector) memory
  vector_db_url: ""                 # Vector DB endpoint; empty when long-term memory is off
# ── Logging configuration ──────────────────────────────────────
logging:
  level: "DEBUG"                    # Log level: DEBUG | INFO | WARNING | ERROR
  enable_file: true                 # Also write logs to a file
  log_dir: "./logs"                 # Directory for log files
  log_file: "agent.log"             # Log file name inside log_dir
# ── Agent behavior configuration ───────────────────────────────
agent:
  max_chain_steps: 10               # Maximum steps in a single tool chain
  enable_multi_step: true           # Allow multi-step tool planning
  session_timeout: 3600             # Session timeout (seconds)
  fallback_to_rules: true           # Fall back to the rule engine when API calls fail