This commit is contained in:
sontolau 2026-03-09 13:37:29 +08:00
parent 4ab494aae5
commit 169666443f
20 changed files with 2597 additions and 692 deletions

View File

@ -1,160 +1,386 @@
"""客户端:用户交互 & 会话管理"""
"""
client/agent_client.py
Agent 客户端协调 LLM 引擎MCP 服务器记忆模块驱动完整 Agent 执行流程
Agent 客户端驱动完整 OpenAI Function Calling + Tool Chain 执行流程
新增: OpenAI 格式消息序列管理支持多轮工具调用上下文传递
"""
from dataclasses import dataclass
import uuid
from dataclasses import dataclass, field
from config.settings import settings
from llm.llm_engine import LLMEngine
from mcp.mcp_protocol import MCPMethod, MCPRequest
from mcp.mcp_protocol import (
ChainPlan, ChainResult, MCPRequest, MCPResponse,
StepResult, ToolStep,
)
from mcp.mcp_server import MCPServer
from memory.memory_store import MemoryStore
from utils.logger import get_logger
# ── 单轮执行结果 ───────────────────────────────────────────────
@dataclass
class AgentResponse:
"""一次完整 Agent 调用的结果"""
user_input: str
final_reply: str
tool_used: str | None = None
tool_output: str | None = None
success: bool = True
error: str | None = None
user_input: str
final_reply: str
chain_result: ChainResult | None = None
tool_used: str | None = None
tool_output: str | None = None
success: bool = True
error: str | None = None
token_usage: dict = field(default_factory=dict)
@property
def is_multi_step(self) -> bool:
return self.chain_result is not None and self.chain_result.total_steps > 1
# ── Agent 客户端 ───────────────────────────────────────────────
class AgentClient:
"""
Agent 客户端实现完整的 ReAct 执行循环
Agent 客户端OpenAI Function Calling + Multi-Step Tool Chain
执行流程 (5):
执行流程:
1. [CLIENT] 接收用户输入写入 Memory
2. [LLM] 分析意图决策是否调用工具
3. [MCP] 构造 JSON-RPC 请求发送给 MCP Server
4. [TOOL] MCP Server 执行工具返回结果
5. [LLM] 整合结果生成最终回复写入 Memory
2. [LLM] plan_tool_chain() OpenAI Function Calling ChainPlan
3. [CHAIN] 串行执行每个 ToolStep:
a. 构造 MCPRequest MCPServer 执行工具
b. 将工具结果追加到 OpenAI 消息序列tool role
c. 记录 StepResult更新链路上下文占位符替换
4. [LLM] generate_chain_reply() OpenAI 整合结果 最终回复
5. [MEMORY] 写入完整调用链记录
使用示例:
client = AgentClient(llm=llm, mcp_server=mcp, memory=memory)
response = client.chat("帮我计算 100 * 200")
print(response.final_reply)
OpenAI 消息序列示例多步骤:
{"role": "system", "content": "规划器提示"}
{"role": "user", "content": "搜索天气然后计算..."}
{"role": "assistant", "tool_calls": [{web_search}, {calculator}]}
{"role": "tool", "content": "web_search 结果", "tool_call_id": "call_1"}
{"role": "tool", "content": "calculator 结果", "tool_call_id": "call_2"}
generate_reply() 最终自然语言回复
"""
def __init__(
self,
llm: LLMEngine,
mcp_server: MCPServer,
memory: MemoryStore,
prompt: str = ""
self,
llm: LLMEngine,
mcp_server: MCPServer,
memory: MemoryStore,
):
self.llm = llm
self.llm = llm
self.mcp_server = mcp_server
self.memory = memory
self.agent_prompt = prompt
self.logger = get_logger("CLIENT")
self.logger.info("💻 Agent Client 初始化完成")
self.memory = memory
self.logger = get_logger("CLIENT")
# OpenAI 格式的结构化对话历史(跨轮次保持上下文)
self._openai_history: list[dict] = []
self.logger.info("💻 Agent Client 初始化完成OpenAI Function Calling 模式)")
# ── 主入口 ──────────────────────────────────────────────────
# ════════════════════════════════════════════════════════════
# 主入口
# ════════════════════════════════════════════════════════════
def chat(self, user_input: str) -> AgentResponse:
"""
处理一轮用户对话执行完整 Agent 流程
"""处理一轮用户对话"""
sep = "" * 60
self.logger.info(sep)
self.logger.info(f"📨 收到用户输入: {user_input}")
self.logger.info(sep)
Args:
user_input: 用户输入的自然语言文本
Returns:
AgentResponse 实例
"""
self.logger.info(f"{'=' * 55}")
self.logger.info(f"📨 Step 1 [CLIENT] 收到用户输入: {user_input}")
# ── Step 1: 记录用户消息 ────────────────────────────────
# Step 1: 记录用户消息
self.memory.add_user_message(user_input)
context = self.memory.get_context_summary()
# ── Step 2: LLM 推理决策 ────────────────────────────────
self.logger.info("🧠 Step 2 [LLM] 开始推理,分析意图...")
# Step 2: LLM 规划工具调用链
self.logger.info("🗺 Step 2 [LLM] 规划工具调用链...")
tool_schemas = self.mcp_server.get_tool_schemas()
decision = self.llm.think_and_decide(user_input, tool_schemas, context, self.agent_prompt)
plan: ChainPlan = self.llm.plan_tool_chain(
user_input=user_input,
tool_schemas=tool_schemas,
context=context,
history=self._openai_history,
)
# ── 分支:是否需要工具 ──────────────────────────────────
if not decision.need_tool:
# 无需工具:直接回复
if not plan.steps:
return self._handle_direct_reply(user_input, context)
return self._handle_tool_call(user_input, decision, context)
# Step 3~4: 执行工具调用链,构造 OpenAI 消息序列
chain_result, tool_messages = self._execute_chain(plan, user_input)
# ── 私有流程方法 ────────────────────────────────────────────
# Step 5: 调用 OpenAI 整合结果,生成最终回复
return self._generate_response(user_input, chain_result, tool_messages, context)
def _handle_direct_reply(self, user_input: str, context: str) -> AgentResponse:
"""无需工具时直接生成回复"""
self.logger.info("💬 无需工具,直接生成回复")
reply = self.llm.generate_direct_reply(user_input, context)
self.memory.add_assistant_message(reply)
return AgentResponse(user_input=user_input, final_reply=reply)
# ════════════════════════════════════════════════════════════
# 串行执行引擎
# ════════════════════════════════════════════════════════════
def _handle_tool_call(
self,
user_input: str,
decision,
context: str,
) -> AgentResponse:
"""执行工具调用的完整流程Step 3 → 4 → 5"""
def _execute_chain(
self,
plan: ChainPlan,
user_input: str,
) -> tuple[ChainResult, list[dict]]:
"""
串行执行工具调用链同步构造 OpenAI 消息序列
# ── Step 3: 构造 MCP 请求 ───────────────────────────────
mcp_request: MCPRequest = decision.to_mcp_request()
Returns:
(ChainResult, tool_messages)
tool_messages OpenAI 格式的工具调用消息列表
用于后续 generate_reply() 调用
"""
self.logger.info(
f"📡 Step 3 [MCP] 发送工具调用请求\n"
f" 方法: {mcp_request.method}\n"
f" 工具: {decision.tool_name}\n"
f" 参数: {decision.arguments}\n"
f" 请求体: {mcp_request.to_dict()}"
f"\n{'' * 60}\n"
f" 🔗 开始执行工具调用链\n"
f" 目标: {plan.goal}\n"
f" 步骤: {plan.step_count}\n"
f"{'' * 60}"
)
# ── Step 4: MCP Server 执行工具 ─────────────────────────
self.logger.info(f"🔧 Step 4 [TOOL] MCP Server 执行工具 [{decision.tool_name}]...")
mcp_response = self.mcp_server.handle_request(mcp_request)
step_results: list[StepResult] = []
chain_context: dict[str, str] = {}
tool_messages: list[dict] = []
failed_step: int | None = None
if not mcp_response.success:
error_msg = f"工具调用失败: {mcp_response.error}"
self.logger.error(f"{error_msg}")
return AgentResponse(
user_input=user_input,
final_reply=f"抱歉,工具调用失败:{mcp_response.error.get('message')}",
tool_used=decision.tool_name,
# 构造 assistant 消息(含 tool_calls 声明)
assistant_tool_calls = self._build_assistant_tool_calls(plan)
tool_messages.append({
"role": "assistant",
"content": None,
"tool_calls": assistant_tool_calls,
})
for step in plan.steps:
# 检查前置依赖
if self._has_failed_dependency(step, failed_step):
self.logger.warning(
f"⏭ Step {step.step_id} [{step.tool_name}] 跳过"
f"(依赖步骤 {failed_step} 失败)"
)
step_results.append(StepResult(
step_id=step.step_id,
tool_name=step.tool_name,
success=False,
output="",
error=f"跳过:依赖步骤 {failed_step} 失败",
))
# 向 OpenAI 消息序列写入失败占位
tool_messages.append({
"role": "tool",
"content": f"步骤跳过:依赖步骤 {failed_step} 执行失败",
"tool_call_id": assistant_tool_calls[step.step_id - 1]["id"],
})
continue
# 执行单步
result, tool_call_id = self._execute_single_step(
step, chain_context, assistant_tool_calls
)
step_results.append(result)
# 追加 tool 消息到 OpenAI 序列
tool_messages.append({
"role": "tool",
"content": result.output if result.success else f"执行失败: {result.error}",
"tool_call_id": tool_call_id,
})
if result.success:
chain_context[result.context_key] = result.output
self.memory.add_tool_result(step.tool_name, result.output)
else:
failed_step = step.step_id
overall_success = failed_step is None
chain_result = ChainResult(
goal=plan.goal,
step_results=step_results,
success=overall_success,
failed_step=failed_step,
)
self.logger.info(
f"{'' * 60}\n"
f" {'✅ 调用链执行完成' if overall_success else '⚠️ 调用链部分失败'}\n"
f" 完成: {chain_result.completed_steps}/{chain_result.total_steps}\n"
f"{'' * 60}"
)
return chain_result, tool_messages
def _execute_single_step(
self,
step: ToolStep,
chain_context: dict[str, str],
assistant_tool_calls: list[dict],
) -> tuple[StepResult, str]:
"""
执行单个步骤返回 (StepResult, tool_call_id)
Returns:
StepResult: 步骤执行结果
tool_call_id: 对应的 OpenAI tool_call_id用于消息序列关联
"""
# 注入前步上下文(占位符替换)
resolved_step = step.inject_context(chain_context)
tool_call_id = assistant_tool_calls[step.step_id - 1]["id"]
self.logger.info(
f"\n ▶ Step {step.step_id} 执行中\n"
f" 工具: [{resolved_step.tool_name}]\n"
f" 说明: {resolved_step.description}\n"
f" 参数: {resolved_step.arguments}\n"
f" call_id: {tool_call_id}"
)
# 构造并发送 MCP 请求
mcp_request: MCPRequest = resolved_step.to_mcp_request()
mcp_response: MCPResponse = self.mcp_server.handle_request(mcp_request)
if mcp_response.success:
output = mcp_response.content
self.logger.info(f" ✅ Step {step.step_id} 成功: {output[:80]}...")
return StepResult(
step_id=step.step_id,
tool_name=step.tool_name,
success=True,
output=output,
), tool_call_id
else:
error_msg = mcp_response.error.get("message", "未知错误")
self.logger.error(f" ❌ Step {step.step_id} 失败: {error_msg}")
return StepResult(
step_id=step.step_id,
tool_name=step.tool_name,
success=False,
output="",
error=error_msg,
), tool_call_id
# ════════════════════════════════════════════════════════════
# 回复生成
# ════════════════════════════════════════════════════════════
def _generate_response(
self,
user_input: str,
chain_result: ChainResult,
tool_messages: list[dict],
context: str,
) -> AgentResponse:
"""调用 OpenAI 整合工具结果,生成最终 AgentResponse"""
self.logger.info("✍️ Step 5 [LLM] 调用 OpenAI 生成最终回复...")
chain_summary = self._build_chain_summary(chain_result)
# 单步走简洁路径
if chain_result.total_steps == 1:
r = chain_result.step_results[0]
final_reply = self.llm.generate_final_reply(
user_input=user_input,
tool_name=r.tool_name,
tool_output=r.output,
context=context,
tool_call_id=tool_messages[-1].get("tool_call_id", "") if tool_messages else "",
)
else:
# 多步走完整 OpenAI 消息序列路径
final_reply = self.llm.generate_chain_reply(
user_input=user_input,
chain_summary=chain_summary,
context=context,
tool_messages=tool_messages,
)
tool_output = mcp_response.content
self.logger.info(f"✅ 工具执行成功,输出: {tool_output[:80]}...")
self.memory.add_tool_result(decision.tool_name, tool_output)
chain_result.final_reply = final_reply
# ── Step 5: LLM 整合结果,生成最终回复 ──────────────────
self.logger.info("✍️ Step 5 [LLM] 整合工具结果,生成最终回复...")
final_reply = self.llm.generate_final_reply(
user_input, decision.tool_name, tool_output, context
)
self.memory.add_assistant_message(final_reply)
# 更新 OpenAI 结构化历史(供下一轮使用)
self._openai_history.append({"role": "user", "content": user_input})
self._openai_history.append({"role": "assistant", "content": final_reply})
# 保留最近 10 轮
if len(self._openai_history) > 20:
self._openai_history = self._openai_history[-20:]
self.logger.info(f"🎉 [CLIENT] 流程完成,回复已返回")
# 写入 Memory
if chain_result.total_steps > 1:
self.memory.add_chain_result(chain_result)
else:
self.memory.add_assistant_message(final_reply)
self.logger.info("🎉 [CLIENT] 流程完成,回复已返回")
return AgentResponse(
user_input=user_input,
final_reply=final_reply,
tool_used=decision.tool_name,
tool_output=tool_output,
chain_result=chain_result,
tool_used=chain_result.step_results[0].tool_name if chain_result.total_steps == 1 else None,
tool_output=chain_result.step_results[0].output if chain_result.total_steps == 1 else None,
success=chain_result.success,
)
# ── 工具方法 ────────────────────────────────────────────────
def _handle_direct_reply(self, user_input: str, context: str) -> AgentResponse:
"""无需工具时直接调用 OpenAI 生成回复"""
self.logger.info("💬 无需工具,直接调用 OpenAI 生成回复")
reply = self.llm.generate_direct_reply(user_input, context)
self.memory.add_assistant_message(reply)
self._openai_history.append({"role": "user", "content": user_input})
self._openai_history.append({"role": "assistant", "content": reply})
return AgentResponse(user_input=user_input, final_reply=reply)
# ════════════════════════════════════════════════════════════
# 工具方法
# ════════════════════════════════════════════════════════════
@staticmethod
def _build_assistant_tool_calls(plan: ChainPlan) -> list[dict]:
"""
构造 OpenAI assistant 消息中的 tool_calls 字段
格式:
[
{
"id": "call_abc123",
"type": "function",
"function": {
"name": "calculator",
"arguments": '{"expression": "1+2"}'
}
}
]
"""
import json
tool_calls = []
for step in plan.steps:
tool_calls.append({
"id": f"call_{uuid.uuid4().hex[:8]}",
"type": "function",
"function": {
"name": step.tool_name,
"arguments": json.dumps(step.arguments, ensure_ascii=False),
},
})
return tool_calls
@staticmethod
def _build_chain_summary(chain_result: ChainResult) -> str:
"""将调用链结果格式化为 LLM 可读的摘要"""
lines = []
for r in chain_result.step_results:
if r.success:
lines.append(
f"**Step {r.step_id} [{r.tool_name}]** ✅\n"
f"```\n{r.output[:300]}\n```"
)
else:
lines.append(
f"**Step {r.step_id} [{r.tool_name}]** ❌\n"
f"错误: {r.error}"
)
return "\n\n".join(lines)
@staticmethod
def _has_failed_dependency(step: ToolStep, failed_step: int | None) -> bool:
return failed_step is not None and failed_step in step.depends_on
def get_memory_stats(self) -> dict:
"""获取当前记忆统计"""
return self.memory.stats()
stats = self.memory.stats()
stats["openai_history_len"] = len(self._openai_history)
return stats
def clear_session(self) -> None:
"""清空当前会话"""
self.memory.clear_history()
self.logger.info("🗑 会话已清空")
self._openai_history.clear()
self.logger.info("🗑 会话已清空(含 OpenAI 消息历史)")

72
config.yaml Normal file
View File

@ -0,0 +1,72 @@
# ════════════════════════════════════════════════════════════════
# config/config.yaml
# Agent 系统全局配置文件
# ════════════════════════════════════════════════════════════════
# ── LLM 模型配置 ───────────────────────────────────────────────
llm:
provider: "openai" # 模型提供商: openai | anthropic | ollama | local
model_name: "gpt-4o" # 模型名称
  api_key: ""  # API Key,优先读取环境变量 LLM_API_KEY — SECURITY: a real API key was committed on this line; it has been removed and MUST be rotated. Supply the key via the LLM_API_KEY environment variable, never in version control.
api_base_url: "https://openapi.monica.im/v1" # 自定义 API 地址(兼容第三方 OpenAI 代理)
max_tokens: 4096 # 最大输出 Token 数
temperature: 0.7 # 生成温度 0.0~1.0
timeout: 60 # 请求超时(秒)
max_retries: 3 # 失败自动重试次数
# OpenAI 专用
function_calling: true # 是否启用 Function Calling工具规划核心
stream: false # 是否启用流式输出
# Ollama / 本地模型专用
model_path: "" # 本地模型路径,例如 /models/llama3
ollama_host: "http://localhost:11434"
# ── MCP Server 配置 ────────────────────────────────────────────
mcp:
server_name: "DemoMCPServer"
transport: "stdio"
host: "localhost"
port: 3000
enabled_tools:
- calculator
- web_search
- file_reader
- code_executor
# ── 工具配置 ───────────────────────────────────────────────────
tools:
web_search:
max_results: 5
timeout: 10
api_key: ""
engine: "mock"
file_reader:
allowed_root: "./workspace"
max_file_size_kb: 512
code_executor:
timeout: 5
sandbox: true
calculator:
precision: 10
# ── 记忆配置 ───────────────────────────────────────────────────
memory:
max_history: 20
enable_long_term: false
vector_db_url: ""
# ── 日志配置 ───────────────────────────────────────────────────
logging:
level: "DEBUG"
enable_file: true
log_dir: "./logs"
log_file: "agent.log"
# ── Agent 行为配置 ─────────────────────────────────────────────
agent:
max_chain_steps: 10
enable_multi_step: true
session_timeout: 3600
fallback_to_rules: true # API 调用失败时是否降级到规则引擎

0
config/__init__.py Normal file
View File

278
config/settings.py Normal file
View File

@ -0,0 +1,278 @@
"""
config/settings.py
配置加载与管理模块新增 OpenAI 专用字段
"""
import os
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any
try:
import yaml
_YAML_AVAILABLE = True
except ImportError:
_YAML_AVAILABLE = False
# ════════════════════════════════════════════════════════════════
# 配置数据类
# ════════════════════════════════════════════════════════════════
@dataclass
class LLMConfig:
    """LLM model configuration, including OpenAI-specific switches.

    Environment variables LLM_API_KEY / LLM_API_BASE_URL / LLM_MODEL_NAME /
    LLM_MODEL_PATH take precedence over values supplied by file or constructor.
    """
    provider: str = "openai"
    model_name: str = "gpt-4o"
    api_key: str = ""
    api_base_url: str = ""
    max_tokens: int = 4096
    temperature: float = 0.7
    timeout: int = 60
    max_retries: int = 3
    # OpenAI-specific switches
    function_calling: bool = True
    stream: bool = False
    # Ollama / local-model settings
    model_path: str = ""
    ollama_host: str = "http://localhost:11434"

    def __post_init__(self):
        # Environment variables win over file/constructor values.
        for env_name, attr in (
            ("LLM_API_KEY", "api_key"),
            ("LLM_API_BASE_URL", "api_base_url"),
            ("LLM_MODEL_NAME", "model_name"),
            ("LLM_MODEL_PATH", "model_path"),
        ):
            setattr(self, attr, os.getenv(env_name, getattr(self, attr)))
@dataclass
class MCPConfig:
    """MCP server configuration: identity, transport and the enabled-tool list."""
    server_name: str = "DemoMCPServer"
    transport: str = "stdio"
    host: str = "localhost"
    port: int = 3000
    # Bound `.copy` of a literal: every instance gets its own fresh list.
    enabled_tools: list[str] = field(
        default_factory=["calculator", "web_search", "file_reader", "code_executor"].copy
    )
@dataclass
class WebSearchToolConfig:
    """web_search tool settings; SEARCH_API_KEY env var overrides `api_key`."""
    max_results: int = 5
    timeout: int = 10
    api_key: str = ""
    engine: str = "mock"

    def __post_init__(self):
        # Prefer a key supplied via the environment when one is present.
        env_key = os.getenv("SEARCH_API_KEY", self.api_key)
        self.api_key = env_key
@dataclass
class FileReaderToolConfig:
    """file_reader tool settings."""
    # Presumably the root directory the tool is allowed to read from —
    # confirm against the tool implementation.
    allowed_root: str = "./workspace"
    max_file_size_kb: int = 512
@dataclass
class CodeExecutorToolConfig:
    """code_executor tool settings."""
    timeout: int = 5      # seconds, per config.yaml comment
    sandbox: bool = True
@dataclass
class CalculatorToolConfig:
    """calculator tool settings."""
    precision: int = 10
@dataclass
class ToolsConfig:
    """Per-tool configuration bundle — one attribute per built-in tool."""
    web_search: WebSearchToolConfig = field(default_factory=WebSearchToolConfig)
    file_reader: FileReaderToolConfig = field(default_factory=FileReaderToolConfig)
    code_executor: CodeExecutorToolConfig = field(default_factory=CodeExecutorToolConfig)
    calculator: CalculatorToolConfig = field(default_factory=CalculatorToolConfig)
@dataclass
class MemoryConfig:
    """Conversation-memory settings."""
    max_history: int = 20          # max retained conversation entries
    enable_long_term: bool = False # long-term (vector) memory toggle
    vector_db_url: str = ""
@dataclass
class LoggingConfig:
    """Logging settings; LOG_LEVEL env var overrides `level` (upper-cased)."""
    level: str = "DEBUG"
    enable_file: bool = True
    log_dir: str = "./logs"
    log_file: str = "agent.log"

    def __post_init__(self):
        # Normalise so "debug" from the environment or file also works.
        chosen = os.getenv("LOG_LEVEL", self.level)
        self.level = chosen.upper()
@dataclass
class AgentConfig:
    """Agent behaviour knobs."""
    max_chain_steps: int = 10
    enable_multi_step: bool = True
    session_timeout: int = 3600
    # When True, fall back to the rule engine if the LLM API call fails.
    fallback_to_rules: bool = True
@dataclass
class AppConfig:
    """Top-level application configuration — one attribute per subsystem."""
    llm: LLMConfig = field(default_factory=LLMConfig)
    mcp: MCPConfig = field(default_factory=MCPConfig)
    tools: ToolsConfig = field(default_factory=ToolsConfig)
    memory: MemoryConfig = field(default_factory=MemoryConfig)
    logging: LoggingConfig = field(default_factory=LoggingConfig)
    agent: AgentConfig = field(default_factory=AgentConfig)

    def display(self) -> str:
        """Return a human-readable multi-line summary of the active config."""
        # NOTE(review): the separator literal appears to have lost its character
        # during extraction; original was likely a box-drawing rule — confirm.
        bar = "" * 52
        key_shown = '***' if self.llm.api_key else '(未设置)'
        base_shown = self.llm.api_base_url or '(默认)'
        rows = [
            bar,
            " 📋 当前配置",
            bar,
            f" [LLM] provider = {self.llm.provider}",
            f" [LLM] model_name = {self.llm.model_name}",
            f" [LLM] api_key = {key_shown}",
            f" [LLM] api_base_url = {base_shown}",
            f" [LLM] temperature = {self.llm.temperature}",
            f" [LLM] max_tokens = {self.llm.max_tokens}",
            f" [LLM] function_calling = {self.llm.function_calling}",
            f" [LLM] stream = {self.llm.stream}",
            f" [LLM] max_retries = {self.llm.max_retries}",
            f" [MCP] server_name = {self.mcp.server_name}",
            f" [MCP] enabled_tools = {self.mcp.enabled_tools}",
            f" [MEMORY] max_history = {self.memory.max_history}",
            f" [AGENT] multi_step = {self.agent.enable_multi_step}",
            f" [AGENT] fallback_rules = {self.agent.fallback_to_rules}",
            f" [LOG] level = {self.logging.level}",
            bar,
        ]
        return "\n".join(rows)
# ════════════════════════════════════════════════════════════════
# 配置加载器
# ════════════════════════════════════════════════════════════════
class ConfigLoader:
    """Locates, reads and parses the YAML config into an AppConfig.

    Search order: $AGENT_CONFIG_PATH (default ./config.yaml), then
    config/config.yaml, then ./config.yaml. The first readable file wins;
    when no file is usable (missing, unreadable or malformed) the pure
    dataclass defaults are returned.
    """

    _CONFIG_SEARCH_PATHS = [
        Path(os.getenv("AGENT_CONFIG_PATH", "./config.yaml")),
        Path("config") / "config.yaml",
        Path("config.yaml"),
    ]

    @classmethod
    def load(cls) -> AppConfig:
        """Entry point: return the parsed config, or defaults if none found."""
        raw = cls._read_yaml()
        return cls._parse(raw) if raw else AppConfig()

    @classmethod
    def _read_yaml(cls) -> dict[str, Any] | None:
        """Return the raw dict from the first usable YAML file, else None."""
        if not _YAML_AVAILABLE:
            print("⚠️ PyYAML 未安装pip install pyyaml使用默认配置")
            return None
        for path in cls._CONFIG_SEARCH_PATHS:
            if not (path and path.exists()):
                continue
            try:
                with open(path, encoding="utf-8") as f:
                    data = yaml.safe_load(f)
            except (OSError, yaml.YAMLError) as exc:
                # Robustness fix: previously a malformed or unreadable file
                # crashed startup. Log and fall through to the next candidate.
                print(f"⚠️ 配置文件读取失败 {path}: {exc}")
                continue
            print(f"✅ 已加载配置文件: {path.resolve()}")
            return data or {}
        print(" 未找到配置文件使用默认配置")
        return None

    @classmethod
    def _parse(cls, raw: dict[str, Any]) -> AppConfig:
        """Map the raw YAML dict onto the typed config sections."""
        return AppConfig(
            llm=cls._parse_llm(raw.get("llm", {})),
            mcp=cls._parse_mcp(raw.get("mcp", {})),
            tools=cls._parse_tools(raw.get("tools", {})),
            memory=cls._parse_memory(raw.get("memory", {})),
            logging=cls._parse_logging(raw.get("logging", {})),
            agent=cls._parse_agent(raw.get("agent", {})),
        )

    @staticmethod
    def _parse_llm(d: dict) -> LLMConfig:
        """Parse the `llm:` section, coercing numeric/boolean types."""
        return LLMConfig(
            provider=d.get("provider", "openai"),
            model_name=d.get("model_name", "gpt-4o"),
            api_key=d.get("api_key", ""),
            api_base_url=d.get("api_base_url", ""),
            max_tokens=int(d.get("max_tokens", 4096)),
            temperature=float(d.get("temperature", 0.7)),
            timeout=int(d.get("timeout", 60)),
            max_retries=int(d.get("max_retries", 3)),
            function_calling=bool(d.get("function_calling", True)),
            stream=bool(d.get("stream", False)),
            model_path=d.get("model_path", ""),
            ollama_host=d.get("ollama_host", "http://localhost:11434"),
        )

    @staticmethod
    def _parse_mcp(d: dict) -> MCPConfig:
        """Parse the `mcp:` section."""
        return MCPConfig(
            server_name=d.get("server_name", "DemoMCPServer"),
            transport=d.get("transport", "stdio"),
            host=d.get("host", "localhost"),
            port=int(d.get("port", 3000)),
            enabled_tools=d.get("enabled_tools", [
                "calculator", "web_search", "file_reader", "code_executor"
            ]),
        )

    @staticmethod
    def _parse_tools(d: dict) -> ToolsConfig:
        """Parse the `tools:` section, one sub-dict per tool."""
        ws = d.get("web_search", {})
        fr = d.get("file_reader", {})
        ce = d.get("code_executor", {})
        ca = d.get("calculator", {})
        return ToolsConfig(
            web_search=WebSearchToolConfig(
                max_results=int(ws.get("max_results", 5)),
                timeout=int(ws.get("timeout", 10)),
                api_key=ws.get("api_key", ""),
                engine=ws.get("engine", "mock"),
            ),
            file_reader=FileReaderToolConfig(
                allowed_root=fr.get("allowed_root", "./workspace"),
                max_file_size_kb=int(fr.get("max_file_size_kb", 512)),
            ),
            code_executor=CodeExecutorToolConfig(
                timeout=int(ce.get("timeout", 5)),
                sandbox=bool(ce.get("sandbox", True)),
            ),
            calculator=CalculatorToolConfig(
                precision=int(ca.get("precision", 10)),
            ),
        )

    @staticmethod
    def _parse_memory(d: dict) -> MemoryConfig:
        """Parse the `memory:` section."""
        return MemoryConfig(
            max_history=int(d.get("max_history", 20)),
            enable_long_term=bool(d.get("enable_long_term", False)),
            vector_db_url=d.get("vector_db_url", ""),
        )

    @staticmethod
    def _parse_logging(d: dict) -> LoggingConfig:
        """Parse the `logging:` section."""
        return LoggingConfig(
            level=d.get("level", "DEBUG"),
            enable_file=bool(d.get("enable_file", True)),
            log_dir=d.get("log_dir", "./logs"),
            log_file=d.get("log_file", "agent.log"),
        )

    @staticmethod
    def _parse_agent(d: dict) -> AgentConfig:
        """Parse the `agent:` section."""
        return AgentConfig(
            max_chain_steps=int(d.get("max_chain_steps", 10)),
            enable_multi_step=bool(d.get("enable_multi_step", True)),
            session_timeout=int(d.get("session_timeout", 3600)),
            fallback_to_rules=bool(d.get("fallback_to_rules", True)),
        )
# Global singleton — config is parsed once at import time; other modules
# should `from config.settings import settings` rather than re-loading.
settings: AppConfig = ConfigLoader.load()

View File

@ -1,36 +1,30 @@
"""LLM 引擎:意图理解 & 工具决策"""
"""
llm/llm_engine.py
LLM 引擎负责意图理解工具选择决策最终回复生成
生产环境可替换 _call_llm_api() 为真实 API 调用OpenAI / Anthropic
LLM 引擎通过 Provider 接口调用真实 OpenAI API
支持 Function Calling 多步骤规划 + 工具结果整合回复
"""
import json
import re
from dataclasses import dataclass
from mcp.mcp_protocol import MCPRequest, MCPMethod, ToolSchema
from config.settings import LLMConfig, settings
from llm.provider_factory import create_provider
from llm.providers.base_provider import BaseProvider
from mcp.mcp_protocol import ChainPlan, MCPMethod, MCPRequest, ToolSchema, ToolStep
from utils.logger import get_logger
from openai import OpenAI
import os
os.environ["PYTHONIOENCODING"] = "utf-8"
# ── 工具调用决策结果 ───────────────────────────────────────────
@dataclass
class ToolDecision:
"""LLM 决策是否调用工具及调用参数"""
need_tool: bool
tool_name: str = ""
tool_name: str = ""
arguments: dict = None
reasoning: str = "" # 推理过程说明
reasoning: str = ""
def __post_init__(self):
self.arguments = self.arguments or {}
def to_mcp_request(self) -> MCPRequest | None:
"""将工具决策转换为 MCP 请求"""
if not self.need_tool:
return None
return MCPRequest(
@ -39,246 +33,347 @@ class ToolDecision:
)
class MonicaClient:
BASE_URL = "https://openapi.monica.im/v1"
def __init__(self, api_key):
self.client = OpenAI(base_url=self.BASE_URL,
api_key=api_key)
self.logger = get_logger("Monica")
def create(self, model_name: str, tool_schemas, user_input: str, agent_prompt: str = "") -> ToolDecision:
tools = [{
"name": s.name,
"description": s.description,
"parameters": s.parameters} for s in tool_schemas]
messages = []
if agent_prompt:
messages.append({
"role": "system",
"content": agent_prompt,
})
messages.append({
"role": "user",
"content": [{
"type": "text",
"text": user_input
}]
})
completion = self.client.chat.completions.create(
model=model_name,
functions=tools,
messages=messages
)
self.logger.info(completion.choices[0].message.content)
response = json.loads(completion.choices[0].message.content)
return ToolDecision(need_tool=response['need_tool'],
tool_name=response['tool_name'],
arguments=response['arguments'],
reasoning=response['reasoning'])
def chat(self, model_name: str, user_input: str, context: str = '') -> str:
message = f"""##用户输入\n{user_input}\n\n"""\
f"""##历史消息\n{context}\n\n"""
messages = [{
"role": "user",
"content": [{
"type": "text",
"text": message
}]
}]
completion = self.client.chat.completions.create(
model=model_name,
messages=messages
)
self.logger.info(completion.choices[0].message.content)
return completion.choices[0].message.content
# ── LLM 引擎 ──────────────────────────────────────────────────
class LLMEngine:
"""
LLM 推理引擎ReAct 模式
LLM 推理引擎Provider 模式
执行流程:
1. 接收用户输入 + 工具列表
2. 分析意图决策是否调用工具think
3. 若需要工具生成 MCPRequestact
4. 接收工具结果生成最终回复observe
核心流程:
1. plan_tool_chain()
构造 OpenAI 格式消息 + tools
Provider.plan_with_tools()
解析 tool_calls ChainPlan
生产环境替换:
_call_llm_api() 替换为真实 LLM API 调用即可
其余流程控制逻辑保持不变
2. generate_chain_reply()
构造含工具结果的完整消息历史
Provider.generate_reply()
最终自然语言回复
降级策略:
API 调用失败 fallback_to_rules=true
自动切换到规则引擎保证系统可用性
"""
API_KEY = ""  # SECURITY(review): a hard-coded API key was committed here — rotate it immediately; load credentials from settings.llm.api_key / the LLM_API_KEY environment variable instead.
# 规则引擎关键词(降级时使用)
_MULTI_STEP_KEYWORDS = [
"然后", "接着", "", "并且", "同时", "之后",
"先.*再", "首先.*然后", "搜索.*计算", "读取.*执行",
"多个", "分别", "依次",
]
def __init__(self, model_name: str = "claude-sonnet-4-6"):
self.model_name = model_name
self.logger = get_logger("LLM")
self.logger.info(f"🧠 LLM 引擎初始化,模型: {model_name}")
self.client = MonicaClient(api_key=self.API_KEY)
def __init__(self, cfg: LLMConfig | None = None):
self.cfg = cfg or settings.llm
self.logger = get_logger("LLM")
self.provider: BaseProvider = create_provider(self.cfg)
self._log_init()
# ── 核心推理流程 ────────────────────────────────────────────
def _log_init(self) -> None:
self.logger.info("🧠 LLM 引擎初始化完成")
self.logger.info(f" provider = {self.cfg.provider}")
self.logger.info(f" model_name = {self.cfg.model_name}")
self.logger.info(f" function_calling = {self.cfg.function_calling}")
self.logger.info(f" temperature = {self.cfg.temperature}")
self.logger.info(f" fallback_rules = {settings.agent.fallback_to_rules}")
def think_and_decide(
self,
user_input: str,
tool_schemas: list[ToolSchema],
context: str = "",
agent_prompt: str = ""
) -> ToolDecision:
def reconfigure(self, cfg: LLMConfig) -> None:
"""热更新配置并重建 Provider"""
self.cfg = cfg
self.provider = create_provider(cfg)
self.logger.info(f"🔄 LLM 配置已更新: model={cfg.model_name}")
# ════════════════════════════════════════════════════════════
# 核心接口
# ════════════════════════════════════════════════════════════
def plan_tool_chain(
self,
user_input: str,
tool_schemas: list[ToolSchema],
context: str = "",
history: list[dict] | None = None,
) -> ChainPlan:
"""
Step 1 & 2: 理解意图决策工具调用Think 阶段
使用 OpenAI Function Calling 规划工具调用链
消息构造策略:
system 规划器系统提示
history 历史对话可选
user 当前用户输入
Args:
user_input: 用户输入文本
tool_schemas: 可用工具的 Schema 列表
context: 对话历史上下文摘要
agent_prompt: 智能体提示词
tool_schemas: 可用工具列表
context: 对话历史摘要文本格式用于无 history
history: 结构化对话历史OpenAI 消息格式优先使用
Returns:
ToolDecision 实例
ChainPlan 实例
"""
self.logger.info(f"💭 分析意图: {user_input[:50]}...")
self.logger.info(f"🗺 规划工具调用链: {user_input[:60]}...")
# 构造 Prompt生产环境发送给真实 LLM
prompt = self._build_decision_prompt(user_input, tool_schemas, context)
self.logger.debug(f"📝 Prompt 已构造 ({len(prompt)} chars)")
# 构造消息列表
messages = self._build_plan_messages(user_input, context, history)
# 调用 LLMDemo 中使用规则模拟)
# decision = self._call_llm_api(user_input, tool_schemas)
decision = self._call_llm_api(prompt, tool_schemas, agent_prompt=agent_prompt)
if self.cfg.function_calling:
# ── 真实 OpenAI Function Calling ──────────────────
result = self.provider.plan_with_tools(messages, tool_schemas)
self.logger.info(
f"🎯 决策结果: {'调用工具 [' + decision.tool_name + ']' if decision.need_tool else '直接回复'}"
if result.success and result.plan is not None:
plan = result.plan
# 补充 goal 字段
if not plan.goal:
plan.goal = user_input
self.logger.info(f"📋 OpenAI 规划完成: {plan.step_count}")
for step in plan.steps:
self.logger.info(
f" Step {step.step_id}: [{step.tool_name}] "
f"args={step.arguments}"
)
return plan
# API 失败处理
self.logger.warning(f"⚠️ OpenAI 规划失败: {result.error}")
if settings.agent.fallback_to_rules:
self.logger.info("🔄 降级到规则引擎...")
return self._rule_based_plan(user_input)
return ChainPlan(goal=user_input, steps=[])
else:
# function_calling=false 时直接使用规则引擎
self.logger.info("⚙️ function_calling=false使用规则引擎")
return self._rule_based_plan(user_input)
def think_and_decide(
self,
user_input: str,
tool_schemas: list[ToolSchema],
context: str = "",
) -> ToolDecision:
"""单步工具决策(代理到 plan_tool_chain"""
plan = self.plan_tool_chain(user_input, tool_schemas, context)
if not plan.steps:
return ToolDecision(need_tool=False, reasoning="无需工具,直接回复")
first = plan.steps[0]
return ToolDecision(
need_tool=True,
tool_name=first.tool_name,
arguments=first.arguments,
reasoning=first.description,
)
self.logger.debug(f"💡 推理: {decision.reasoning}")
return decision
def generate_final_reply(
self,
user_input: str,
tool_name: str,
tool_output: str,
context: str = "",
def generate_chain_reply(
self,
user_input: str,
chain_summary: str,
context: str = "",
tool_messages: list[dict] | None = None,
) -> str:
"""
Step 5: 整合工具结果生成最终自然语言回复Observe 阶段
整合多步骤执行结果调用 OpenAI 生成最终自然语言回复
消息构造含工具执行结果:
system 回复生成系统提示
user 原始用户输入
assistant 工具调用决策tool_calls
tool 工具执行结果
...多轮工具调用
Args:
user_input: 原始用户输入
tool_name: 被调用的工具名称
tool_output: 工具返回的原始输出
context: 对话历史上下文
user_input: 原始用户输入
chain_summary: 步骤摘要API 失败时的降级内容
context: 对话历史
tool_messages: 完整的工具调用消息序列OpenAI 格式
Returns:
最终回复字符串
"""
self.logger.info("✍️ 整合工具结果,生成最终回复...")
self.logger.info("✍️ 生成最终回复...")
# 生产环境:将 tool_output 注入 Prompt调用 LLM 生成回复
reply = self._synthesize_reply(user_input, tool_name, tool_output)
self.logger.info(f"💬 回复已生成 ({len(reply)} chars)")
return reply
if tool_messages:
# 构造含工具结果的完整消息历史
messages = self._build_reply_messages(user_input, tool_messages)
result = self.provider.generate_reply(messages)
if result.success and result.content:
self.logger.info(
f"✅ OpenAI 回复生成成功 ({len(result.content)} chars)"
)
return result.content
self.logger.warning(f"⚠️ OpenAI 回复生成失败: {result.error}")
# 降级:使用模板回复
return self._fallback_chain_reply(user_input, chain_summary)
def generate_final_reply(
self,
user_input: str,
tool_name: str,
tool_output: str,
context: str = "",
tool_call_id: str = "",
) -> str:
"""单步工具结果整合(调用 OpenAI 生成自然语言回复)"""
self.logger.info(f"✍️ 整合单步工具结果 [{tool_name}]...")
# 构造单步工具消息
tool_messages = []
if tool_call_id:
tool_messages = [
{
"role": "tool",
"content": tool_output,
"tool_call_id": tool_call_id,
}
]
return self.generate_chain_reply(
user_input=user_input,
chain_summary=tool_output,
context=context,
tool_messages=tool_messages,
)
def generate_direct_reply(self, user_input: str, context: str = "") -> str:
"""无需工具时直接生成回复"""
self.logger.info("💬 直接生成回复(无需工具)")
return self.client.chat(self.model_name, user_input, context=context)
# ── Prompt 构造 ─────────────────────────────────────────────
def _build_decision_prompt(
self,
user_input: str,
tool_schemas: list[ToolSchema],
context: str,
) -> str:
"""构造工具决策 PromptReAct 格式)"""
tools_desc = "\n".join(
f"- {s.name}: {s.description}" for s in tool_schemas
)
"""无需工具时直接调用 OpenAI 生成回复"""
self.logger.info("💬 直接生成回复(无需工具)...")
messages = [
{"role": "system", "content": "你是一个友好、专业的 AI 助手,请简洁准确地回答用户问题。"},
{"role": "user", "content": user_input},
]
result = self.provider.generate_reply(messages)
if result.success and result.content:
return result.content
# 降级
return (
f"你是一个智能助手,请分析用户输入并决定是否需要调用工具。\n\n"
f"## 可用工具\n{tools_desc}\n\n"
f"## 对话历史\n{context or '(无)'}\n\n"
f"## 用户输入\n{user_input}\n\n"
f"## 指令\n"
f"以纯 JSON 格式回复,不要嵌入到其他对象中,如下:\n"
f'{{"need_tool": true/false, "tool_name": "...", "arguments": {{...}}, "reasoning": "..."}}'
f"[{self.cfg.model_name}] 您好!\n"
f"关于「{user_input}」,我已收到您的问题。\n"
f"API 暂时不可用,请检查 API Key 配置)"
)
# ── 模拟 LLM APIDemo 用规则引擎替代)────────────────────
# ════════════════════════════════════════════════════════════
# 消息构造
# ════════════════════════════════════════════════════════════
def _call_llm_api(self, user_input: str, tool_schemas: list[ToolSchema], agent_prompt: str = "") -> ToolDecision:
"""
模拟 LLM API 调用Demo 版本使用关键词规则
@staticmethod
def _build_plan_messages(
    user_input: str,
    context: str,
    history: list[dict] | None,
) -> list[dict]:
    """Assemble the OpenAI-format message list for the planning call.

    Structured history (``history``) takes precedence over the plain-text
    summary (``context``); the user's input is always appended last.
    """
    # Imported locally to avoid a circular import with the provider module.
    from llm.providers.openai_provider import OpenAIProvider

    msgs: list[dict] = [
        {"role": "system", "content": OpenAIProvider._PLANNER_SYSTEM_PROMPT},
    ]
    if history:
        # Keep only the most recent 3 turns (6 messages).
        msgs += history[-6:]
    elif context and context != "(暂无对话历史)":
        msgs.append({
            "role": "system",
            "content": f"## 对话历史\n{context}",
        })
    msgs.append({"role": "user", "content": user_input})
    return msgs
生产环境替换示例:
import anthropic
client = anthropic.Anthropic()
response = client.messages.create(
model=self.model_name,
tools=[s.to_dict() for s in tool_schemas],
messages=[{"role": "user", "content": user_input}]
@staticmethod
def _build_reply_messages(
    user_input: str,
    tool_messages: list[dict],
) -> list[dict]:
    """Assemble the reply-generation message list (tool results included)."""
    # Local import avoids a circular dependency on the provider module.
    from llm.providers.openai_provider import OpenAIProvider

    base: list[dict] = [
        {"role": "system", "content": OpenAIProvider._REPLY_SYSTEM_PROMPT},
        {"role": "user", "content": user_input},
    ]
    return base + list(tool_messages)
# ════════════════════════════════════════════════════════════
# 降级规则引擎
# ════════════════════════════════════════════════════════════
def _rule_based_plan(self, user_input: str) -> ChainPlan:
"""规则引擎API 不可用时的降级方案)"""
self.logger.info("⚙️ 使用规则引擎规划...")
text = user_input.lower()
# 搜索 + 计算
if (any(k in text for k in ["搜索", "查询", "查一下"]) and
any(k in text for k in ["计算", "", "等于", "结果"])):
return ChainPlan(
goal=user_input,
steps=[
ToolStep(1, "web_search",
{"query": user_input,
"max_results": settings.tools.web_search.max_results},
"搜索相关信息", []),
ToolStep(2, "calculator",
{"expression": self._extract_expression(user_input)},
"进行计算", [1]),
],
)
# 解析 response.content 中的 tool_use block
"""
if self.client:
return self.client.create(self.model_name,
user_input=user_input,
tool_schemas=tool_schemas,
agent_prompt=agent_prompt)
else:
text = user_input.lower()
# 规则匹配:计算器
calc_pattern = re.search(r"[\d\s\+\-\*\/\(\)\^]+[=?]?", user_input)
if any(kw in text for kw in ["计算", "等于", "多少", "×", "÷"]) and calc_pattern:
expr = re.sub(r"[^0-9+\-*/().**]", "", user_input.replace("×", "*").replace("÷", "/"))
return ToolDecision(
need_tool=True, tool_name="calculator",
arguments={"expression": expr or "1+1"},
reasoning="用户请求数学计算,调用 calculator 工具",
)
# 规则匹配:搜索
if any(kw in text for kw in ["搜索", "查询", "天气", "新闻", "查一下", "search"]):
return ToolDecision(
need_tool=True, tool_name="web_search",
arguments={"query": user_input, "max_results": 3},
reasoning="用户需要实时信息,调用 web_search 工具",
)
# 规则匹配:文件读取
if any(kw in text for kw in ["文件", "读取", "file", "config", "json", "txt"]):
filename = re.search(r"[\w\-\.]+\.\w+", user_input)
return ToolDecision(
need_tool=True, tool_name="file_reader",
arguments={"path": filename.group() if filename else "config.json"},
reasoning="用户请求读取文件,调用 file_reader 工具",
)
# 规则匹配:代码执行
if any(kw in text for kw in ["执行", "运行", "代码", "python", "print", "code"]):
code_match = re.search(r'[`\'"](.+?)[`\'"]', user_input)
code = code_match.group(1) if code_match else 'print("Hello, Agent!")'
return ToolDecision(
need_tool=True, tool_name="code_executor",
arguments={"code": code, "timeout": 5},
reasoning="用户请求执行代码,调用 code_executor 工具",
)
# 默认:直接回复
return ToolDecision(
need_tool=False,
reasoning="问题可直接回答,无需工具",
# 读取文件 + 执行代码
if (any(k in text for k in ["读取", "文件", "file"]) and
any(k in text for k in ["执行", "运行", "run"])):
fname = re.search(r"[\w\-\.]+\.\w+", user_input)
return ChainPlan(
goal=user_input,
steps=[
ToolStep(1, "file_reader",
{"path": fname.group() if fname else "script.py"},
"读取文件", []),
ToolStep(2, "code_executor",
{"code": "{{STEP_1_OUTPUT}}",
"timeout": settings.tools.code_executor.timeout},
"执行代码", [1]),
],
)
return self._rule_single_step(user_input)
def _synthesize_reply(self, user_input: str, tool_name: str, tool_output: str) -> str:
"""基于工具输出合成最终回复Demo 版本)"""
def _rule_single_step(self, user_input: str) -> ChainPlan:
    """Keyword-based fallback: map the input to at most one tool step."""
    lowered = user_input.lower()

    def single(step: ToolStep) -> ChainPlan:
        # All branches produce a single-step plan over the same goal.
        return ChainPlan(goal=user_input, is_single=True, steps=[step])

    if any(kw in lowered for kw in ["计算", "等于", "×", "÷", "+", "-", "*", "/"]):
        expr = self._extract_expression(user_input)
        return single(ToolStep(1, "calculator",
                               {"expression": expr}, "数学计算"))

    if any(kw in lowered for kw in ["搜索", "查询", "天气", "新闻"]):
        return single(ToolStep(1, "web_search",
                               {"query": user_input,
                                "max_results": settings.tools.web_search.max_results},
                               "网络搜索"))

    if any(kw in lowered for kw in ["文件", "读取", "file"]):
        fname = re.search(r"[\w\-\.]+\.\w+", user_input)
        path = fname.group() if fname else "config.json"
        return single(ToolStep(1, "file_reader", {"path": path}, "读取文件"))

    if any(kw in lowered for kw in ["执行", "运行", "代码", "python"]):
        code_m = re.search(r"[`'\"](.+?)[`'\"]", user_input)
        snippet = code_m.group(1) if code_m else 'print("Hello, Agent!")'
        return single(ToolStep(1, "code_executor",
                               {"code": snippet,
                                "timeout": settings.tools.code_executor.timeout},
                               "执行代码"))

    # Nothing matched: empty single plan → caller replies without tools.
    return ChainPlan(goal=user_input, is_single=True, steps=[])
@staticmethod
def _fallback_chain_reply(user_input: str, chain_summary: str) -> str:
"""API 不可用时的模板回复"""
return (
f"✅ 已通过 [{tool_name}] 工具处理您的请求。\n\n"
f"**执行结果:**\n{tool_output}\n\n"
f"---\n*由 {self.model_name} 生成 · 工具: {tool_name}*"
f"**任务已完成**\n\n"
f"针对您的需求「{user_input}」,执行结果如下:\n\n"
f"{chain_summary}"
)
@staticmethod
def _extract_expression(text: str) -> str:
    """Pull an arithmetic expression out of free-form text.

    Normalizes ×/÷ to */ and returns "1+1" when nothing usable (or only
    a single character) is found.
    """
    # NOTE(review): the third replace has an empty search string, which is
    # a no-op — a character (e.g. "＝" or "等") was likely lost; confirm.
    cleaned = text.replace("×", "*").replace("÷", "/").replace("", "")
    # First run of digits/operators/whitespace/parens/dots in the text.
    match = re.search(r"[\d\s\+\-\*\/\(\)\.]+", cleaned)
    expr = match.group().strip() if match else "1+1"
    # Guard against degenerate single-character matches like "5" or "-".
    return expr if len(expr) > 1 else "1+1"

60
llm/provider_factory.py Normal file
View File

@ -0,0 +1,60 @@
"""
llm/provider_factory.py
Provider 工厂根据 settings.llm.provider 自动实例化对应 Provider
"""
from config.settings import LLMConfig, settings
from llm.providers.base_provider import BaseProvider
from utils.logger import get_logger
_logger = get_logger("LLM")
def create_provider(cfg: LLMConfig | None = None) -> BaseProvider:
"""
工厂函数根据配置创建对应的 LLM Provider
Args:
cfg: LLMConfig 实例None 时从全局 settings 读取
Returns:
BaseProvider 子类实例
支持的 provider:
- "openai" OpenAIProvider含兼容 OpenAI 协议的代理
- "anthropic" AnthropicProvider预留
- "ollama" OllamaProvider预留
Raises:
ValueError: provider 名称不支持时
"""
cfg = cfg or settings.llm
_logger.info(f"🏭 Provider 工厂: 创建 [{cfg.provider}] Provider")
match cfg.provider.lower():
case "openai":
from llm.providers.openai_provider import OpenAIProvider
return OpenAIProvider(cfg)
case "anthropic":
# 预留Anthropic Provider
# from llm.providers.anthropic_provider import AnthropicProvider
# return AnthropicProvider(cfg)
_logger.warning("⚠️ Anthropic Provider 尚未实现,回退到 OpenAI")
from llm.providers.openai_provider import OpenAIProvider
return OpenAIProvider(cfg)
case "ollama":
# 预留Ollama 本地模型 Provider
# from llm.providers.ollama_provider import OllamaProvider
# return OllamaProvider(cfg)
_logger.warning("⚠️ Ollama Provider 尚未实现,回退到 OpenAI")
from llm.providers.openai_provider import OpenAIProvider
return OpenAIProvider(cfg)
case _:
raise ValueError(
f"不支持的 provider: '{cfg.provider}'"
f"可选值: openai / anthropic / ollama"
)

View File

View File

@ -0,0 +1,125 @@
"""
llm/providers/base_provider.py
LLM Provider 抽象基类定义所有 Provider 必须实现的统一接口
"""
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Any
from mcp.mcp_protocol import ChainPlan, ToolSchema
# ════════════════════════════════════════════════════════════════
# Provider 返回数据结构
# ════════════════════════════════════════════════════════════════
@dataclass
class PlanResult:
    """Result of a tool-chain planning call.

    Attributes:
        plan:         Parsed ChainPlan (present on success).
        raw_response: Raw API response, kept for debugging.
        usage:        Token accounting as reported by the API.
        success:      Whether planning succeeded.
        error:        Failure reason, empty on success.
    """

    plan: ChainPlan | None
    raw_response: Any = None
    usage: dict[str, int] = field(default_factory=dict)
    success: bool = True
    error: str = ""

    @property
    def prompt_tokens(self) -> int:
        """Prompt-side token count; 0 when usage is absent."""
        return self.usage.get("prompt_tokens", 0)

    @property
    def completion_tokens(self) -> int:
        """Completion-side token count; 0 when usage is absent."""
        return self.usage.get("completion_tokens", 0)
@dataclass
class ReplyResult:
    """Result of a final-reply generation call.

    Attributes:
        content: Generated natural-language reply ("" on failure).
        usage:   Token accounting as reported by the API.
        success: Whether the call succeeded.
        error:   Failure reason, empty on success.
    """

    content: str
    usage: dict[str, int] = field(default_factory=dict)
    success: bool = True
    error: str = ""
# ════════════════════════════════════════════════════════════════
# 抽象基类
# ════════════════════════════════════════════════════════════════
class BaseProvider(ABC):
    """Abstract interface every LLM provider must implement.

    Concrete providers (OpenAI / Anthropic / Ollama) subclass this and
    supply the two core operations:

      - ``plan_with_tools()``  — tool-chain planning via function calling
      - ``generate_reply()``   — final natural-language reply synthesis
    """

    @property
    @abstractmethod
    def provider_name(self) -> str:
        """Short provider identifier, e.g. 'openai' / 'anthropic'."""
        ...

    @abstractmethod
    def plan_with_tools(
        self,
        messages: list[dict],
        tool_schemas: list[ToolSchema],
    ) -> PlanResult:
        """Plan a tool-call chain using function calling.

        Args:
            messages: Conversation history (OpenAI message format).
            tool_schemas: Schemas of the tools available to the planner.

        Returns:
            A PlanResult instance.
        """
        ...

    @abstractmethod
    def generate_reply(
        self,
        messages: list[dict],
    ) -> ReplyResult:
        """Generate the final reply from a history that includes tool results.

        Args:
            messages: Full conversation, including tool-role messages.

        Returns:
            A ReplyResult instance.
        """
        ...

    def health_check(self) -> bool:
        """Optional connectivity probe; True means the API is reachable."""
        return True

    def __repr__(self) -> str:
        return "{}(provider={})".format(type(self).__name__, self.provider_name)

View File

@ -0,0 +1,391 @@
"""
llm/providers/openai_provider.py
OpenAI Provider使用 Function Calling 实现工具链规划与回复生成
核心流程:
1. plan_with_tools()
messages + tools OpenAI API
解析 tool_calls ChainPlan
2. generate_reply()
messages tool 结果 OpenAI API
最终自然语言回复
依赖:
pip install openai>=1.0.0
"""
import json
import time
from typing import Any
from config.settings import LLMConfig
from llm.providers.base_provider import BaseProvider, PlanResult, ReplyResult
from mcp.mcp_protocol import ChainPlan, ToolSchema, ToolStep
from utils.logger import get_logger
# OpenAI SDK运行时导入避免未安装时整体崩溃
try:
from openai import (
APIConnectionError,
APIStatusError,
APITimeoutError,
AuthenticationError,
OpenAI,
RateLimitError,
)
_OPENAI_AVAILABLE = True
except ImportError:
_OPENAI_AVAILABLE = False
class OpenAIProvider(BaseProvider):
    """OpenAI provider implementation.

    Supports:
      - the standard OpenAI API (gpt-4o / gpt-4-turbo / gpt-3.5-turbo)
      - OpenAI-compatible third-party proxies (configured via ``api_base_url``)
      - Function Calling with multi-tool (parallel/serial) planning
      - automatic retries (rate limit / network timeout, handled by the SDK
        through ``max_retries``)
      - graceful degradation: when the API is unavailable, callers fall
        back to the rule engine

    Config example (config.yaml):
        llm:
          provider: "openai"
          model_name: "gpt-4o"
          api_key: "sk-..."
          api_base_url: ""   # empty = official endpoint
          temperature: 0.2   # low temperature suits planning tasks
          function_calling: true
    """

    # System prompt steering the model toward multi-step tool planning.
    _PLANNER_SYSTEM_PROMPT = """\
你是一个智能任务规划助手擅长将用户需求分解为多个工具调用步骤
## 工作原则
1. 仔细分析用户需求判断是否需要调用工具
2. 如需多个工具按逻辑顺序依次调用
3. 当后续步骤依赖前步结果时先完成前步再继续
4. 每次只规划并调用当前最合适的工具
5. 所有工具执行完毕后整合结果给出最终回复
## 重要规则
- 数学计算必须使用 calculator 工具不要自行计算
- 需要实时信息时使用 web_search 工具
- 文件操作使用 file_reader 工具
- 代码执行使用 code_executor 工具
"""

    # System prompt for final-reply generation.
    _REPLY_SYSTEM_PROMPT = """\
你是一个友好专业的 AI 助手
请基于已执行的工具调用结果用清晰自然的语言回答用户的问题
回复要简洁明了重点突出工具执行的关键结果
"""

    def __init__(self, cfg: LLMConfig):
        """Store config and eagerly build the SDK client when the package exists."""
        self.cfg = cfg
        self.logger = get_logger("LLM")
        self._client: "OpenAI | None" = None
        if not _OPENAI_AVAILABLE:
            self.logger.warning("⚠️ openai 包未安装,请执行: pip install openai>=1.0.0")
        else:
            self._init_client()

    # ── provider identity ────────────────────────────────────────
    @property
    def provider_name(self) -> str:
        """Identifier used by the factory and logs."""
        return "openai"

    # ── client initialization ────────────────────────────────────
    def _init_client(self) -> None:
        """Initialize the OpenAI SDK client from LLMConfig.

        A placeholder key keeps construction from raising when LLM_API_KEY
        is unset; actual API calls will then fail with AuthenticationError.
        """
        if not self.cfg.api_key:
            self.logger.warning(
                "⚠️ LLM_API_KEY 未设置OpenAI API 调用将失败。\n"
                "   请设置环境变量: export LLM_API_KEY=sk-..."
            )
        kwargs: dict[str, Any] = {
            "api_key": self.cfg.api_key or "sk-placeholder",
            "timeout": self.cfg.timeout,
            "max_retries": self.cfg.max_retries,
        }
        if self.cfg.api_base_url:
            # Non-empty base URL switches to an OpenAI-compatible proxy.
            kwargs["base_url"] = self.cfg.api_base_url
            self.logger.info(f"🔗 使用自定义 API 地址: {self.cfg.api_base_url}")
        self._client = OpenAI(**kwargs)
        self.logger.info(
            f"✅ OpenAI 客户端初始化完成\n"
            f"   model      = {self.cfg.model_name}\n"
            f"   base_url   = {self.cfg.api_base_url or 'https://api.openai.com/v1'}\n"
            f"   max_retries= {self.cfg.max_retries}"
        )

    # ════════════════════════════════════════════════════════════
    # Core interface implementation
    # ════════════════════════════════════════════════════════════
    def plan_with_tools(
        self,
        messages: list[dict],
        tool_schemas: list[ToolSchema],
    ) -> PlanResult:
        """Plan a tool-call chain via OpenAI Function Calling.

        OpenAI message format:
            [
                {"role": "system", "content": "..."},
                {"role": "user", "content": "<user input>"},
            ]

        OpenAI ``tools`` parameter format:
            [
                {
                    "type": "function",
                    "function": {
                        "name": "calculator",
                        "description": "...",
                        "parameters": {"type": "object", "properties": {...}}
                    }
                }
            ]

        Returned ``tool_calls`` example:
            [
                {
                    "id": "call_abc123",
                    "type": "function",
                    "function": {"name": "calculator", "arguments": '{"expression":"1+1"}'}
                }
            ]

        Returns:
            PlanResult; ``plan`` is an empty ChainPlan when the model
            decided no tool is needed, or None on API failure.
        """
        if not self._client:
            return PlanResult(plan=None, success=False, error="OpenAI 客户端未初始化")
        # Translate our ToolSchema list into the OpenAI tools parameter.
        tools = self._build_openai_tools(tool_schemas)
        self.logger.debug(f"📤 发送规划请求tools 数量: {len(tools)}")
        self.logger.debug(f"📤 消息历史长度: {len(messages)}")
        try:
            response = self._client.chat.completions.create(
                model=self.cfg.model_name,
                messages=messages,
                tools=tools,
                tool_choice="auto",  # let the model decide whether to call tools
                temperature=self.cfg.temperature,
                max_tokens=self.cfg.max_tokens,
            )
            usage = self._extract_usage(response)
            self.logger.info(
                f"📊 Token 用量: prompt={usage.get('prompt_tokens', 0)}, "
                f"completion={usage.get('completion_tokens', 0)}"
            )
            # Parse tool_calls → ChainPlan.
            choice = response.choices[0]
            message = choice.message
            if not message.tool_calls:
                # Model chose to answer directly; signal with an empty plan.
                self.logger.info("💬 模型决策: 无需工具,直接回复")
                return PlanResult(
                    plan=ChainPlan(goal="", steps=[]),
                    raw_response=response,
                    usage=usage,
                )
            plan = self._parse_tool_calls(message.tool_calls)
            self.logger.info(f"📋 解析到 {plan.step_count} 个工具调用步骤")
            return PlanResult(
                plan=plan,
                raw_response=response,
                usage=usage,
            )
        except AuthenticationError as e:
            return self._handle_error("认证失败,请检查 API Key", e)
        except RateLimitError as e:
            return self._handle_error("请求频率超限,请稍后重试", e)
        except APITimeoutError as e:
            # NOTE(review): message appears to be missing a closing bracket.
            return self._handle_error(f"请求超时(>{self.cfg.timeout}s", e)
        except APIConnectionError as e:
            return self._handle_error("网络连接失败,请检查网络或 api_base_url", e)
        except APIStatusError as e:
            return self._handle_error(f"API 错误 HTTP {e.status_code}: {e.message}", e)
        except Exception as e:
            return self._handle_error(f"未知错误: {e}", e)

    def generate_reply(
        self,
        messages: list[dict],
    ) -> ReplyResult:
        """Generate the final reply from a history that includes tool results.

        Message format example (with tool results):
            [
                {"role": "system", "content": "..."},
                {"role": "user", "content": "search the weather then compute ..."},
                {"role": "assistant", "content": None,
                 "tool_calls": [{"id":"call_1","function":{"name":"web_search",...}}]},
                {"role": "tool", "content": "<search result>", "tool_call_id": "call_1"},
                {"role": "assistant", "content": None,
                 "tool_calls": [{"id":"call_2","function":{"name":"calculator",...}}]},
                {"role": "tool", "content": "result: 312", "tool_call_id": "call_2"},
            ]
        """
        if not self._client:
            return ReplyResult(content="", success=False, error="OpenAI 客户端未初始化")
        self.logger.debug(f"📤 发送回复生成请求,消息长度: {len(messages)}")
        try:
            response = self._client.chat.completions.create(
                model=self.cfg.model_name,
                messages=messages,
                temperature=self.cfg.temperature,
                max_tokens=self.cfg.max_tokens,
            )
            content = response.choices[0].message.content or ""
            usage = self._extract_usage(response)
            # NOTE(review): the two f-string fragments below concatenate
            # without a separator in the emitted log line.
            self.logger.info(
                f"✅ 回复生成成功,长度: {len(content)} chars"
                f"Token: {usage.get('completion_tokens', 0)}"
            )
            return ReplyResult(content=content, usage=usage)
        except AuthenticationError as e:
            return ReplyResult(content="", success=False,
                               error=f"认证失败: {e}")
        except RateLimitError as e:
            return ReplyResult(content="", success=False,
                               error=f"频率超限: {e}")
        except APITimeoutError as e:
            return ReplyResult(content="", success=False,
                               error=f"请求超时: {e}")
        except Exception as e:
            return ReplyResult(content="", success=False,
                               error=f"生成回复失败: {e}")

    def health_check(self) -> bool:
        """Probe API reachability with a minimal 1-token request."""
        if not self._client:
            return False
        try:
            self._client.chat.completions.create(
                model=self.cfg.model_name,
                messages=[{"role": "user", "content": "hi"}],
                max_tokens=1,
            )
            return True
        except Exception:
            # Any failure (auth, network, model) counts as "not healthy".
            return False

    # ════════════════════════════════════════════════════════════
    # Helpers
    # ════════════════════════════════════════════════════════════
    @staticmethod
    def _build_openai_tools(tool_schemas: list[ToolSchema]) -> list[dict]:
        """Convert ToolSchema objects into the OpenAI ``tools`` parameter.

        Target format:
            {
                "type": "function",
                "function": {
                    "name": "calculator",
                    "description": "...",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "expression": {"type": "string", "description": "..."}
                        },
                        "required": ["expression"]
                    }
                }
            }
        """
        tools = []
        for schema in tool_schemas:
            # NOTE(review): every parameter is marked required — confirm
            # ToolSchema never carries optional parameters.
            tools.append({
                "type": "function",
                "function": {
                    "name": schema.name,
                    "description": schema.description,
                    "parameters": {
                        "type": "object",
                        "properties": schema.parameters,
                        "required": list(schema.parameters.keys()),
                    },
                },
            })
        return tools

    @staticmethod
    def _parse_tool_calls(tool_calls: list) -> ChainPlan:
        """Parse OpenAI ``tool_calls`` into a ChainPlan.

        Input format:
            [
                {
                    "id": "call_abc123",
                    "type": "function",
                    "function": {
                        "name": "calculator",
                        "arguments": '{"expression": "1+2"}'  # JSON string
                    }
                }
            ]
        """
        steps: list[ToolStep] = []
        for idx, tc in enumerate(tool_calls):
            fn = tc.function
            tool_name = fn.name
            try:
                arguments = json.loads(fn.arguments)
            except json.JSONDecodeError:
                # Keep unparseable arguments verbatim for debugging.
                arguments = {"raw": fn.arguments}
            steps.append(ToolStep(
                step_id=idx + 1,
                tool_name=tool_name,
                arguments=arguments,
                description=f"调用 {tool_name}(由 OpenAI Function Calling 规划)",
                # Each step is assumed to depend on all previous ones.
                depends_on=list(range(1, idx + 1)) if idx > 0 else [],
            ))
        # NOTE(review): join separator renders empty here — possibly a
        # delimiter character lost in transit; confirm intended goal text.
        goal = "".join(s.tool_name for s in steps)
        return ChainPlan(
            goal=goal,
            steps=steps,
            is_single=len(steps) == 1,
        )

    @staticmethod
    def _extract_usage(response: Any) -> dict[str, int]:
        """Extract token usage from the API response; {} when absent."""
        if hasattr(response, "usage") and response.usage:
            return {
                "prompt_tokens": response.usage.prompt_tokens,
                "completion_tokens": response.usage.completion_tokens,
                "total_tokens": response.usage.total_tokens,
            }
        return {}

    def _handle_error(self, msg: str, exc: Exception) -> PlanResult:
        """Log an API failure and wrap it as a failed PlanResult."""
        self.logger.error(f"❌ OpenAI API 错误: {msg}")
        self.logger.debug(f"   原始异常: {exc}")
        return PlanResult(plan=None, success=False, error=msg)

View File

@ -1162,3 +1162,430 @@ The function `get_system_name()` uses `platform.system()` to determine the syste
[2026-03-02 14:27:44,134] [agent.MCP] INFO: 📨 收到请求 id=9118bd32 method=tools/call
[2026-03-02 14:27:44,134] [agent.TOOL] INFO: ▶ 执行工具 [tool_generator],参数: {'name': 'get_system_name', 'description': 'Returns the name of the operating system of the machine.', 'parameters': {}, 'code': 'import platform\ndef get_system_name():\n return platform.system()'}
[2026-03-02 14:27:44,134] [agent.TOOL] INFO: ✅ 工具 [tool_generator] 执行成功
[2026-03-09 13:11:25,019] [agent.SYSTEM] INFO: 🔧 开始组装 Agent 系统(配置文件驱动)...
[2026-03-09 13:11:25,019] [agent.SYSTEM] INFO: ──────────────────────────────────────────────────
📋 当前配置
──────────────────────────────────────────────────
[LLM] provider = anthropic
[LLM] model_name = claude-sonnet-4-6
[LLM] model_path = (未设置)
[LLM] api_key = (未设置)
[LLM] temperature = 0.7
[MCP] server_name = DemoMCPServer
[MCP] transport = stdio
[MCP] tools = ['calculator', 'web_search', 'file_reader', 'code_executor']
[MEMORY] max_history = 20
[AGENT] multi_step = True
[AGENT] max_steps = 10
[LOG] level = DEBUG
──────────────────────────────────────────────────
[2026-03-09 13:11:25,020] [agent.MCP] INFO: 🚀 MCP Server [DemoMCPServer] 启动
[2026-03-09 13:11:25,020] [agent.MCP] INFO: transport = stdio
[2026-03-09 13:11:25,020] [agent.MCP] INFO: enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor']
[2026-03-09 13:11:25,021] [agent.TOOL] DEBUG: ⚙️ Calculator 精度: 10
[2026-03-09 13:11:25,021] [agent.MCP] INFO: 📌 注册工具: [calculator] — 计算数学表达式,支持加减乘除、幂运算、括号等
[2026-03-09 13:11:25,021] [agent.TOOL] DEBUG: ⚙️ WebSearch engine=mock, max_results=5, api_key=(未设置)
[2026-03-09 13:11:25,022] [agent.MCP] INFO: 📌 注册工具: [web_search] — 在互联网上搜索信息,返回相关网页摘要
[2026-03-09 13:11:25,022] [agent.TOOL] DEBUG: ⚙️ FileReader root=workspace, max_size=512KB
[2026-03-09 13:11:25,022] [agent.MCP] INFO: 📌 注册工具: [file_reader] — 读取本地文件内容,仅限配置的 allowed_root 目录
[2026-03-09 13:11:25,022] [agent.TOOL] DEBUG: ⚙️ CodeExecutor timeout=5s, sandbox=True
[2026-03-09 13:11:25,022] [agent.MCP] INFO: 📌 注册工具: [code_executor] — 在沙箱环境中执行 Python 代码片段,返回标准输出
[2026-03-09 13:11:25,023] [agent.LLM] INFO: 🧠 LLM 引擎初始化
[2026-03-09 13:11:25,024] [agent.LLM] INFO: provider = anthropic
[2026-03-09 13:11:25,024] [agent.LLM] INFO: model_name = claude-sonnet-4-6
[2026-03-09 13:11:25,024] [agent.LLM] INFO: model_path = (未设置)
[2026-03-09 13:11:25,025] [agent.LLM] INFO: api_base = (默认)
[2026-03-09 13:11:25,025] [agent.LLM] INFO: temperature= 0.7
[2026-03-09 13:11:25,025] [agent.LLM] INFO: max_tokens = 4096
[2026-03-09 13:11:25,026] [agent.MEMORY] INFO: 💾 Memory 初始化,最大历史: 20 条
[2026-03-09 13:11:25,027] [agent.CLIENT] INFO: 💻 Agent Client 初始化完成(支持多步串行调用)
[2026-03-09 13:11:25,027] [agent.SYSTEM] INFO: ✅ Agent 组装完成,已注册工具: ['calculator', 'web_search', 'file_reader', 'code_executor']
[2026-03-09 13:14:47,765] [agent.SYSTEM] INFO: 🔧 开始组装 Agent 系统(配置文件驱动)...
[2026-03-09 13:14:47,768] [agent.SYSTEM] INFO: ──────────────────────────────────────────────────
📋 当前配置
──────────────────────────────────────────────────
[LLM] provider = anthropic
[LLM] model_name = claude-sonnet-4-6
[LLM] model_path = (未设置)
[LLM] api_key = (未设置)
[LLM] temperature = 0.7
[MCP] server_name = DemoMCPServer
[MCP] transport = stdio
[MCP] tools = ['calculator', 'web_search', 'file_reader', 'code_executor']
[MEMORY] max_history = 20
[AGENT] multi_step = True
[AGENT] max_steps = 10
[LOG] level = DEBUG
──────────────────────────────────────────────────
[2026-03-09 13:14:47,769] [agent.MCP] INFO: 🚀 MCP Server [DemoMCPServer] 启动
[2026-03-09 13:14:47,769] [agent.MCP] INFO: transport = stdio
[2026-03-09 13:14:47,769] [agent.MCP] INFO: enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor']
[2026-03-09 13:14:47,770] [agent.TOOL] DEBUG: ⚙️ Calculator 精度: 10
[2026-03-09 13:14:47,770] [agent.MCP] INFO: 📌 注册工具: [calculator] — 计算数学表达式,支持加减乘除、幂运算、括号等
[2026-03-09 13:14:47,770] [agent.TOOL] DEBUG: ⚙️ WebSearch engine=mock, max_results=5, api_key=(未设置)
[2026-03-09 13:14:47,770] [agent.MCP] INFO: 📌 注册工具: [web_search] — 在互联网上搜索信息,返回相关网页摘要
[2026-03-09 13:14:47,771] [agent.TOOL] DEBUG: ⚙️ FileReader root=workspace, max_size=512KB
[2026-03-09 13:14:47,771] [agent.MCP] INFO: 📌 注册工具: [file_reader] — 读取本地文件内容,仅限配置的 allowed_root 目录
[2026-03-09 13:14:47,771] [agent.TOOL] DEBUG: ⚙️ CodeExecutor timeout=5s, sandbox=True
[2026-03-09 13:14:47,771] [agent.MCP] INFO: 📌 注册工具: [code_executor] — 在沙箱环境中执行 Python 代码片段,返回标准输出
[2026-03-09 13:14:47,772] [agent.LLM] INFO: 🧠 LLM 引擎初始化
[2026-03-09 13:14:47,773] [agent.LLM] INFO: provider = anthropic
[2026-03-09 13:14:47,773] [agent.LLM] INFO: model_name = claude-sonnet-4-6
[2026-03-09 13:14:47,773] [agent.LLM] INFO: model_path = (未设置)
[2026-03-09 13:14:47,773] [agent.LLM] INFO: api_base = (默认)
[2026-03-09 13:14:47,773] [agent.LLM] INFO: temperature= 0.7
[2026-03-09 13:14:47,774] [agent.LLM] INFO: max_tokens = 4096
[2026-03-09 13:14:47,775] [agent.MEMORY] INFO: 💾 Memory 初始化,最大历史: 20 条
[2026-03-09 13:14:47,776] [agent.CLIENT] INFO: 💻 Agent Client 初始化完成(支持多步串行调用)
[2026-03-09 13:14:47,777] [agent.SYSTEM] INFO: ✅ Agent 组装完成,已注册工具: ['calculator', 'web_search', 'file_reader', 'code_executor']
[2026-03-09 13:15:29,737] [agent.SYSTEM] INFO: 🔧 开始组装 Agent 系统(配置文件驱动)...
[2026-03-09 13:15:29,739] [agent.SYSTEM] INFO: ──────────────────────────────────────────────────
📋 当前配置
──────────────────────────────────────────────────
[LLM] provider = monica
[LLM] model_name = gpt-4o
[LLM] model_path = (未设置)
[LLM] api_key = ***
[LLM] temperature = 0.7
[MCP] server_name = MCPServer
[MCP] transport = stdio
[MCP] tools = ['calculator', 'web_search', 'file_reader', 'code_executor']
[MEMORY] max_history = 20
[AGENT] multi_step = True
[AGENT] max_steps = 10
[LOG] level = DEBUG
──────────────────────────────────────────────────
[2026-03-09 13:15:29,742] [agent.MCP] INFO: 🚀 MCP Server [MCPServer] 启动
[2026-03-09 13:15:29,743] [agent.MCP] INFO: transport = stdio
[2026-03-09 13:15:29,743] [agent.MCP] INFO: enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor']
[2026-03-09 13:15:29,745] [agent.TOOL] DEBUG: ⚙️ Calculator 精度: 10
[2026-03-09 13:15:29,745] [agent.MCP] INFO: 📌 注册工具: [calculator] — 计算数学表达式,支持加减乘除、幂运算、括号等
[2026-03-09 13:15:29,745] [agent.TOOL] DEBUG: ⚙️ WebSearch engine=mock, max_results=5, api_key=(未设置)
[2026-03-09 13:15:29,745] [agent.MCP] INFO: 📌 注册工具: [web_search] — 在互联网上搜索信息,返回相关网页摘要
[2026-03-09 13:15:29,746] [agent.TOOL] DEBUG: ⚙️ FileReader root=workspace, max_size=512KB
[2026-03-09 13:15:29,746] [agent.MCP] INFO: 📌 注册工具: [file_reader] — 读取本地文件内容,仅限配置的 allowed_root 目录
[2026-03-09 13:15:29,747] [agent.TOOL] DEBUG: ⚙️ CodeExecutor timeout=5s, sandbox=True
[2026-03-09 13:15:29,748] [agent.MCP] INFO: 📌 注册工具: [code_executor] — 在沙箱环境中执行 Python 代码片段,返回标准输出
[2026-03-09 13:15:29,748] [agent.LLM] INFO: 🧠 LLM 引擎初始化
[2026-03-09 13:15:29,749] [agent.LLM] INFO: provider = monica
[2026-03-09 13:15:29,749] [agent.LLM] INFO: model_name = gpt-4o
[2026-03-09 13:15:29,749] [agent.LLM] INFO: model_path = (未设置)
[2026-03-09 13:15:29,749] [agent.LLM] INFO: api_base = https://openapi.monica.im/v1
[2026-03-09 13:15:29,750] [agent.LLM] INFO: temperature= 0.7
[2026-03-09 13:15:29,750] [agent.LLM] INFO: max_tokens = 4096
[2026-03-09 13:15:29,751] [agent.MEMORY] INFO: 💾 Memory 初始化,最大历史: 20 条
[2026-03-09 13:15:29,752] [agent.CLIENT] INFO: 💻 Agent Client 初始化完成(支持多步串行调用)
[2026-03-09 13:15:29,752] [agent.SYSTEM] INFO: ✅ Agent 组装完成,已注册工具: ['calculator', 'web_search', 'file_reader', 'code_executor']
[2026-03-09 13:17:27,993] [agent.CLIENT] INFO: ══════════════════════════════════════════════════════════
[2026-03-09 13:17:27,995] [agent.CLIENT] INFO: 📨 Step 1 [CLIENT] 收到用户输入: 从从网络搜索获取系统名称的python代码并进行执行
[2026-03-09 13:17:27,996] [agent.CLIENT] INFO: ══════════════════════════════════════════════════════════
[2026-03-09 13:17:27,996] [agent.MEMORY] DEBUG: 💬 [USER] 从从网络搜索获取系统名称的python代码并进行执行...
[2026-03-09 13:17:27,997] [agent.CLIENT] INFO: 🗺 Step 2 [LLM] 分析任务,规划工具调用链...
[2026-03-09 13:17:27,997] [agent.LLM] INFO: 🗺 规划工具调用链 [gpt-4o]: 从从网络搜索获取系统名称的python代码并进行执行...
[2026-03-09 13:17:27,998] [agent.LLM] DEBUG: 📝 Prompt 已构造 (436 chars)
[2026-03-09 13:17:27,998] [agent.LLM] INFO: 📋 规划完成: ChainPlan(goal='从从网络搜索获取系统名称的python代码并进行执行', chain=[1]web_search)
[2026-03-09 13:17:27,998] [agent.LLM] INFO: Step 1: [web_search] depends_on=[] — 网络搜索
[2026-03-09 13:17:27,998] [agent.CLIENT] INFO:
──────────────────────────────────────────────────────────
🔗 开始执行工具调用链
目标: 从从网络搜索获取系统名称的python代码并进行执行
步骤: 1 步
──────────────────────────────────────────────────────────
[2026-03-09 13:17:27,999] [agent.CLIENT] INFO:
▶ Step 1/1 执行中
工具: [web_search]
说明: 网络搜索
参数: {'query': '从从网络搜索获取系统名称的python代码并进行执行', 'max_results': 5}
[2026-03-09 13:17:28,002] [agent.CLIENT] INFO: 📡 [MCP] 发送请求: {'jsonrpc': '2.0', 'id': 'dc801fda', 'method': 'tools/call', 'params': {'name': 'web_search', 'arguments': {'query': '从从网络搜索获取系统名称的python代码并进行执行', 'max_results': 5}}}
[2026-03-09 13:17:28,003] [agent.MCP] INFO: 📨 收到请求 id=dc801fda method=tools/call transport=stdio
[2026-03-09 13:17:28,003] [agent.TOOL] INFO: ▶ 执行工具 [web_search],参数: {'query': '从从网络搜索获取系统名称的python代码并进行执行', 'max_results': 5}
[2026-03-09 13:17:28,108] [agent.TOOL] INFO: ✅ 工具 [web_search] 执行成功
[2026-03-09 13:17:28,109] [agent.CLIENT] INFO: ✅ Step 1 成功: 搜索「从从网络搜索获取系统名称的python代码并进行执行」(mock),共 2 条:
[1] Python 官方文档
Python 3.12 新特性:...
[2026-03-09 13:17:28,109] [agent.MEMORY] DEBUG: 💬 [TOOL] 搜索「从从网络搜索获取系统名称的python代码并进行执行」(mock),共 2 条:
[1] Python 官方文档
...
[2026-03-09 13:17:28,111] [agent.CLIENT] INFO: ──────────────────────────────────────────────────────────
✅ 调用链执行完成
完成: 1/1 步
──────────────────────────────────────────────────────────
[2026-03-09 13:17:28,111] [agent.CLIENT] INFO: ✍️ Step 5 [LLM] 整合所有步骤结果,生成最终回复...
[2026-03-09 13:17:28,111] [agent.LLM] INFO: ✍️ 整合单步工具结果 [gpt-4o]...
[2026-03-09 13:17:28,111] [agent.MEMORY] DEBUG: 💬 [ASSISTANT] ✅ 已通过 [web_search] 工具处理您的请求。
**执行结果:**
搜索「从从网络搜索获取系统名称的pyth...
[2026-03-09 13:18:21,524] [agent.CLIENT] INFO: ══════════════════════════════════════════════════════════
[2026-03-09 13:18:21,524] [agent.CLIENT] INFO: 📨 Step 1 [CLIENT] 收到用户输入: 计算1+1等于多少
[2026-03-09 13:18:21,524] [agent.CLIENT] INFO: ══════════════════════════════════════════════════════════
[2026-03-09 13:18:21,525] [agent.MEMORY] DEBUG: 💬 [USER] 计算1+1等于多少...
[2026-03-09 13:18:21,525] [agent.CLIENT] INFO: 🗺 Step 2 [LLM] 分析任务,规划工具调用链...
[2026-03-09 13:18:21,526] [agent.LLM] INFO: 🗺 规划工具调用链 [gpt-4o]: 计算1+1等于多少...
[2026-03-09 13:18:21,526] [agent.LLM] DEBUG: 📝 Prompt 已构造 (617 chars)
[2026-03-09 13:18:21,528] [agent.LLM] INFO: 📋 规划完成: ChainPlan(goal='计算1+1等于多少', chain=[1]calculator)
[2026-03-09 13:18:21,528] [agent.LLM] INFO: Step 1: [calculator] depends_on=[] — 数学计算
[2026-03-09 13:18:21,529] [agent.CLIENT] INFO:
──────────────────────────────────────────────────────────
🔗 开始执行工具调用链
目标: 计算1+1等于多少
步骤: 1 步
──────────────────────────────────────────────────────────
[2026-03-09 13:18:21,529] [agent.CLIENT] INFO:
▶ Step 1/1 执行中
工具: [calculator]
说明: 数学计算
参数: {'expression': '1+1'}
[2026-03-09 13:18:21,530] [agent.CLIENT] INFO: 📡 [MCP] 发送请求: {'jsonrpc': '2.0', 'id': 'd6becfe5', 'method': 'tools/call', 'params': {'name': 'calculator', 'arguments': {'expression': '1+1'}}}
[2026-03-09 13:18:21,530] [agent.MCP] INFO: 📨 收到请求 id=d6becfe5 method=tools/call transport=stdio
[2026-03-09 13:18:21,530] [agent.TOOL] INFO: ▶ 执行工具 [calculator],参数: {'expression': '1+1'}
[2026-03-09 13:18:21,532] [agent.TOOL] INFO: ✅ 工具 [calculator] 执行成功
[2026-03-09 13:18:21,533] [agent.CLIENT] INFO: ✅ Step 1 成功: 1+1 = 2...
[2026-03-09 13:18:21,533] [agent.MEMORY] DEBUG: 💬 [TOOL] 1+1 = 2...
[2026-03-09 13:18:21,533] [agent.CLIENT] INFO: ──────────────────────────────────────────────────────────
✅ 调用链执行完成
完成: 1/1 步
──────────────────────────────────────────────────────────
[2026-03-09 13:18:21,534] [agent.CLIENT] INFO: ✍️ Step 5 [LLM] 整合所有步骤结果,生成最终回复...
[2026-03-09 13:18:21,534] [agent.LLM] INFO: ✍️ 整合单步工具结果 [gpt-4o]...
[2026-03-09 13:18:21,534] [agent.MEMORY] DEBUG: 💬 [ASSISTANT] ✅ 已通过 [calculator] 工具处理您的请求。
**执行结果:**
1+1 = 2
---
*由 gpt-...
[2026-03-09 13:29:29,657] [agent.SYSTEM] INFO: 🔧 开始组装 Agent 系统OpenAI Function Calling 模式)...
[2026-03-09 13:29:29,659] [agent.SYSTEM] INFO: ────────────────────────────────────────────────────
📋 当前配置
────────────────────────────────────────────────────
[LLM] provider = openai
[LLM] model_name = gpt-4o
[LLM] api_key = (未设置)
[LLM] api_base_url = (默认)
[LLM] temperature = 0.7
[LLM] max_tokens = 4096
[LLM] function_calling = True
[LLM] stream = False
[LLM] max_retries = 3
[MCP] server_name = DemoMCPServer
[MCP] enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor']
[MEMORY] max_history = 20
[AGENT] multi_step = True
[AGENT] fallback_rules = True
[LOG] level = DEBUG
────────────────────────────────────────────────────
[2026-03-09 13:29:29,661] [agent.MCP] INFO: 🚀 MCP Server [DemoMCPServer] 启动
[2026-03-09 13:29:29,662] [agent.MCP] INFO: transport = stdio
[2026-03-09 13:29:29,662] [agent.MCP] INFO: enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor']
[2026-03-09 13:29:29,663] [agent.TOOL] DEBUG: ⚙️ Calculator 精度: 10
[2026-03-09 13:29:29,663] [agent.MCP] INFO: 📌 注册工具: [calculator] — 计算数学表达式,支持加减乘除、幂运算、括号等
[2026-03-09 13:29:29,664] [agent.TOOL] DEBUG: ⚙️ WebSearch engine=mock, max_results=5, api_key=(未设置)
[2026-03-09 13:29:29,664] [agent.MCP] INFO: 📌 注册工具: [web_search] — 在互联网上搜索信息,返回相关网页摘要
[2026-03-09 13:29:29,664] [agent.TOOL] DEBUG: ⚙️ FileReader root=workspace, max_size=512KB
[2026-03-09 13:29:29,664] [agent.MCP] INFO: 📌 注册工具: [file_reader] — 读取本地文件内容,仅限配置的 allowed_root 目录
[2026-03-09 13:29:29,665] [agent.TOOL] DEBUG: ⚙️ CodeExecutor timeout=5s, sandbox=True
[2026-03-09 13:29:29,665] [agent.MCP] INFO: 📌 注册工具: [code_executor] — 在沙箱环境中执行 Python 代码片段,返回标准输出
[2026-03-09 13:29:29,665] [agent.LLM] INFO: 🏭 Provider 工厂: 创建 [openai] Provider
[2026-03-09 13:29:33,124] [agent.LLM] WARNING: ⚠️ LLM_API_KEY 未设置OpenAI API 调用将失败。
请设置环境变量: export LLM_API_KEY=sk-...
[2026-03-09 13:29:33,315] [agent.LLM] INFO: ✅ OpenAI 客户端初始化完成
model = gpt-4o
base_url = https://api.openai.com/v1
max_retries= 3
[2026-03-09 13:29:33,315] [agent.LLM] INFO: 🧠 LLM 引擎初始化完成
[2026-03-09 13:29:33,315] [agent.LLM] INFO: provider = openai
[2026-03-09 13:29:33,315] [agent.LLM] INFO: model_name = gpt-4o
[2026-03-09 13:29:33,315] [agent.LLM] INFO: function_calling = True
[2026-03-09 13:29:33,315] [agent.LLM] INFO: temperature = 0.7
[2026-03-09 13:29:33,316] [agent.LLM] INFO: fallback_rules = True
[2026-03-09 13:29:33,316] [agent.MEMORY] INFO: 💾 Memory 初始化,最大历史: 20 条
[2026-03-09 13:29:33,317] [agent.CLIENT] INFO: 💻 Agent Client 初始化完成OpenAI Function Calling 模式)
[2026-03-09 13:29:33,317] [agent.SYSTEM] INFO: ✅ Agent 组装完成,已注册工具: ['calculator', 'web_search', 'file_reader', 'code_executor']
[2026-03-09 13:30:14,954] [agent.SYSTEM] INFO: 🔧 开始组装 Agent 系统OpenAI Function Calling 模式)...
[2026-03-09 13:30:14,954] [agent.SYSTEM] INFO: ────────────────────────────────────────────────────
📋 当前配置
────────────────────────────────────────────────────
[LLM] provider = openai
[LLM] model_name = gpt-4o
[LLM] api_key = ***
[LLM] api_base_url = https://openapi.monica.im/v1
[LLM] temperature = 0.7
[LLM] max_tokens = 4096
[LLM] function_calling = True
[LLM] stream = False
[LLM] max_retries = 3
[MCP] server_name = DemoMCPServer
[MCP] enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor']
[MEMORY] max_history = 20
[AGENT] multi_step = True
[AGENT] fallback_rules = True
[LOG] level = DEBUG
────────────────────────────────────────────────────
[2026-03-09 13:30:14,955] [agent.MCP] INFO: 🚀 MCP Server [DemoMCPServer] 启动
[2026-03-09 13:30:14,956] [agent.MCP] INFO: transport = stdio
[2026-03-09 13:30:14,956] [agent.MCP] INFO: enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor']
[2026-03-09 13:30:14,957] [agent.TOOL] DEBUG: ⚙️ Calculator 精度: 10
[2026-03-09 13:30:14,957] [agent.MCP] INFO: 📌 注册工具: [calculator] — 计算数学表达式,支持加减乘除、幂运算、括号等
[2026-03-09 13:30:14,957] [agent.TOOL] DEBUG: ⚙️ WebSearch engine=mock, max_results=5, api_key=(未设置)
[2026-03-09 13:30:14,958] [agent.MCP] INFO: 📌 注册工具: [web_search] — 在互联网上搜索信息,返回相关网页摘要
[2026-03-09 13:30:14,958] [agent.TOOL] DEBUG: ⚙️ FileReader root=workspace, max_size=512KB
[2026-03-09 13:30:14,959] [agent.MCP] INFO: 📌 注册工具: [file_reader] — 读取本地文件内容,仅限配置的 allowed_root 目录
[2026-03-09 13:30:14,960] [agent.TOOL] DEBUG: ⚙️ CodeExecutor timeout=5s, sandbox=True
[2026-03-09 13:30:14,960] [agent.MCP] INFO: 📌 注册工具: [code_executor] — 在沙箱环境中执行 Python 代码片段,返回标准输出
[2026-03-09 13:30:14,961] [agent.LLM] INFO: 🏭 Provider 工厂: 创建 [openai] Provider
[2026-03-09 13:30:17,075] [agent.LLM] INFO: 🔗 使用自定义 API 地址: https://openapi.monica.im/v1
[2026-03-09 13:30:17,245] [agent.LLM] INFO: ✅ OpenAI 客户端初始化完成
model = gpt-4o
base_url = https://openapi.monica.im/v1
max_retries= 3
[2026-03-09 13:30:17,246] [agent.LLM] INFO: 🧠 LLM 引擎初始化完成
[2026-03-09 13:30:17,246] [agent.LLM] INFO: provider = openai
[2026-03-09 13:30:17,246] [agent.LLM] INFO: model_name = gpt-4o
[2026-03-09 13:30:17,246] [agent.LLM] INFO: function_calling = True
[2026-03-09 13:30:17,247] [agent.LLM] INFO: temperature = 0.7
[2026-03-09 13:30:17,247] [agent.LLM] INFO: fallback_rules = True
[2026-03-09 13:30:17,248] [agent.MEMORY] INFO: 💾 Memory 初始化,最大历史: 20 条
[2026-03-09 13:30:17,249] [agent.CLIENT] INFO: 💻 Agent Client 初始化完成OpenAI Function Calling 模式)
[2026-03-09 13:30:17,249] [agent.SYSTEM] INFO: ✅ Agent 组装完成,已注册工具: ['calculator', 'web_search', 'file_reader', 'code_executor']
[2026-03-09 13:30:39,084] [agent.CLIENT] INFO: ════════════════════════════════════════════════════════════
[2026-03-09 13:30:39,085] [agent.CLIENT] INFO: 📨 收到用户输入: 234乘以123等于多少
[2026-03-09 13:30:39,085] [agent.CLIENT] INFO: ════════════════════════════════════════════════════════════
[2026-03-09 13:30:39,086] [agent.MEMORY] DEBUG: 💬 [USER] 234乘以123等于多少...
[2026-03-09 13:30:39,086] [agent.CLIENT] INFO: 🗺 Step 2 [LLM] 规划工具调用链...
[2026-03-09 13:30:39,086] [agent.LLM] INFO: 🗺 规划工具调用链: 234乘以123等于多少...
[2026-03-09 13:30:39,087] [agent.LLM] DEBUG: 📤 发送规划请求tools 数量: 4
[2026-03-09 13:30:39,087] [agent.LLM] DEBUG: 📤 消息历史长度: 3
[2026-03-09 13:30:42,294] [agent.LLM] INFO: 📊 Token 用量: prompt=395, completion=17
[2026-03-09 13:30:42,294] [agent.LLM] INFO: 📋 解析到 1 个工具调用步骤
[2026-03-09 13:30:42,294] [agent.LLM] INFO: 📋 OpenAI 规划完成: 1 步
[2026-03-09 13:30:42,294] [agent.LLM] INFO: Step 1: [calculator] args={'expression': '234*123'}
[2026-03-09 13:30:42,294] [agent.CLIENT] INFO:
────────────────────────────────────────────────────────────
🔗 开始执行工具调用链
目标: calculator
步骤: 1 步
────────────────────────────────────────────────────────────
[2026-03-09 13:30:42,295] [agent.CLIENT] INFO:
▶ Step 1 执行中
工具: [calculator]
说明: 调用 calculator由 OpenAI Function Calling 规划)
参数: {'expression': '234*123'}
call_id: call_6b8b5745
[2026-03-09 13:30:42,295] [agent.MCP] INFO: 📨 收到请求 id=11862afa method=tools/call transport=stdio
[2026-03-09 13:30:42,295] [agent.TOOL] INFO: ▶ 执行工具 [calculator],参数: {'expression': '234*123'}
[2026-03-09 13:30:42,297] [agent.TOOL] INFO: ✅ 工具 [calculator] 执行成功
[2026-03-09 13:30:42,297] [agent.CLIENT] INFO: ✅ Step 1 成功: 234*123 = 28782...
[2026-03-09 13:30:42,297] [agent.MEMORY] DEBUG: 💬 [TOOL] 234*123 = 28782...
[2026-03-09 13:30:42,297] [agent.CLIENT] INFO: ────────────────────────────────────────────────────────────
✅ 调用链执行完成
完成: 1/1 步
────────────────────────────────────────────────────────────
[2026-03-09 13:30:42,297] [agent.CLIENT] INFO: ✍️ Step 5 [LLM] 调用 OpenAI 生成最终回复...
[2026-03-09 13:30:42,298] [agent.LLM] INFO: ✍️ 整合单步工具结果 [calculator]...
[2026-03-09 13:30:42,298] [agent.LLM] INFO: ✍️ 生成最终回复...
[2026-03-09 13:30:42,298] [agent.LLM] DEBUG: 📤 发送回复生成请求,消息长度: 3
[2026-03-09 13:30:42,924] [agent.LLM] WARNING: ⚠️ OpenAI 回复生成失败: 生成回复失败: Error code: 400 - {'error': {'code': '11111', 'message': "StatusCode: 400: API returned unexpected status code: 400: Invalid parameter: messages with role 'tool' must be a response to a preceeding message with 'tool_calls'.", 'type': 'invalid_request_error'}}
[2026-03-09 13:30:42,924] [agent.MEMORY] DEBUG: 💬 [ASSISTANT] ✅ **任务已完成**
针对您的需求「234乘以123等于多少」执行结果如下
234*123 = 28782...
[2026-03-09 13:30:42,924] [agent.CLIENT] INFO: 🎉 [CLIENT] 流程完成,回复已返回
[2026-03-09 13:34:35,843] [agent.CLIENT] INFO: ════════════════════════════════════════════════════════════
[2026-03-09 13:34:35,845] [agent.CLIENT] INFO: 📨 收到用户输入: 先计算34乘以45再计算12乘以23
[2026-03-09 13:34:35,845] [agent.CLIENT] INFO: ════════════════════════════════════════════════════════════
[2026-03-09 13:34:35,846] [agent.MEMORY] DEBUG: 💬 [USER] 先计算34乘以45再计算12乘以23...
[2026-03-09 13:34:35,846] [agent.CLIENT] INFO: 🗺 Step 2 [LLM] 规划工具调用链...
[2026-03-09 13:34:35,846] [agent.LLM] INFO: 🗺 规划工具调用链: 先计算34乘以45再计算12乘以23...
[2026-03-09 13:34:35,846] [agent.LLM] DEBUG: 📤 发送规划请求tools 数量: 4
[2026-03-09 13:34:35,847] [agent.LLM] DEBUG: 📤 消息历史长度: 4
[2026-03-09 13:34:37,936] [agent.LLM] INFO: 📊 Token 用量: prompt=424, completion=49
[2026-03-09 13:34:37,936] [agent.LLM] INFO: 📋 解析到 2 个工具调用步骤
[2026-03-09 13:34:37,937] [agent.LLM] INFO: 📋 OpenAI 规划完成: 2 步
[2026-03-09 13:34:37,937] [agent.LLM] INFO: Step 1: [calculator] args={'expression': '34*45'}
[2026-03-09 13:34:37,937] [agent.LLM] INFO: Step 2: [calculator] args={'expression': '12*23'}
[2026-03-09 13:34:37,937] [agent.CLIENT] INFO:
────────────────────────────────────────────────────────────
🔗 开始执行工具调用链
目标: calculator → calculator
步骤: 2 步
────────────────────────────────────────────────────────────
[2026-03-09 13:34:37,938] [agent.CLIENT] INFO:
▶ Step 1 执行中
工具: [calculator]
说明: 调用 calculator由 OpenAI Function Calling 规划)
参数: {'expression': '34*45'}
call_id: call_78edf968
[2026-03-09 13:34:37,938] [agent.MCP] INFO: 📨 收到请求 id=ec8a2d23 method=tools/call transport=stdio
[2026-03-09 13:34:37,938] [agent.TOOL] INFO: ▶ 执行工具 [calculator],参数: {'expression': '34*45'}
[2026-03-09 13:34:37,938] [agent.TOOL] INFO: ✅ 工具 [calculator] 执行成功
[2026-03-09 13:34:37,938] [agent.CLIENT] INFO: ✅ Step 1 成功: 34*45 = 1530...
[2026-03-09 13:34:37,938] [agent.MEMORY] DEBUG: 💬 [TOOL] 34*45 = 1530...
[2026-03-09 13:34:37,939] [agent.CLIENT] INFO:
▶ Step 2 执行中
工具: [calculator]
说明: 调用 calculator由 OpenAI Function Calling 规划)
参数: {'expression': '12*23'}
call_id: call_f3174090
[2026-03-09 13:34:37,939] [agent.MCP] INFO: 📨 收到请求 id=ce5f9e24 method=tools/call transport=stdio
[2026-03-09 13:34:37,939] [agent.TOOL] INFO: ▶ 执行工具 [calculator],参数: {'expression': '12*23'}
[2026-03-09 13:34:37,939] [agent.TOOL] INFO: ✅ 工具 [calculator] 执行成功
[2026-03-09 13:34:37,939] [agent.CLIENT] INFO: ✅ Step 2 成功: 12*23 = 276...
[2026-03-09 13:34:37,940] [agent.MEMORY] DEBUG: 💬 [TOOL] 12*23 = 276...
[2026-03-09 13:34:37,940] [agent.CLIENT] INFO: ────────────────────────────────────────────────────────────
✅ 调用链执行完成
完成: 2/2 步
────────────────────────────────────────────────────────────
[2026-03-09 13:34:37,940] [agent.CLIENT] INFO: ✍️ Step 5 [LLM] 调用 OpenAI 生成最终回复...
[2026-03-09 13:34:37,940] [agent.LLM] INFO: ✍️ 生成最终回复...
[2026-03-09 13:34:37,940] [agent.LLM] DEBUG: 📤 发送回复生成请求,消息长度: 5
[2026-03-09 13:34:39,468] [agent.LLM] INFO: ✅ 回复生成成功,长度: 25 charsToken: 18
[2026-03-09 13:34:39,469] [agent.LLM] INFO: ✅ OpenAI 回复生成成功 (25 chars)
[2026-03-09 13:34:39,469] [agent.MEMORY] DEBUG: 💬 [CHAIN] 34乘以45等于153012乘以23等于276。...
[2026-03-09 13:34:39,469] [agent.MEMORY] INFO: 🔗 调用链已记录: 2/2 步成功
[2026-03-09 13:34:39,470] [agent.CLIENT] INFO: 🎉 [CLIENT] 流程完成,回复已返回
[2026-03-09 13:35:13,266] [agent.CLIENT] INFO: ════════════════════════════════════════════════════════════
[2026-03-09 13:35:13,267] [agent.CLIENT] INFO: 📨 收到用户输入: 先计算34乘以56再将结果乘以12
[2026-03-09 13:35:13,268] [agent.CLIENT] INFO: ════════════════════════════════════════════════════════════
[2026-03-09 13:35:13,268] [agent.MEMORY] DEBUG: 💬 [USER] 先计算34乘以56再将结果乘以12...
[2026-03-09 13:35:13,269] [agent.CLIENT] INFO: 🗺 Step 2 [LLM] 规划工具调用链...
[2026-03-09 13:35:13,269] [agent.LLM] INFO: 🗺 规划工具调用链: 先计算34乘以56再将结果乘以12...
[2026-03-09 13:35:13,269] [agent.LLM] DEBUG: 📤 发送规划请求tools 数量: 4
[2026-03-09 13:35:13,270] [agent.LLM] DEBUG: 📤 消息历史长度: 6
[2026-03-09 13:35:15,298] [agent.LLM] INFO: 📊 Token 用量: prompt=461, completion=17
[2026-03-09 13:35:15,299] [agent.LLM] INFO: 📋 解析到 1 个工具调用步骤
[2026-03-09 13:35:15,299] [agent.LLM] INFO: 📋 OpenAI 规划完成: 1 步
[2026-03-09 13:35:15,299] [agent.LLM] INFO: Step 1: [calculator] args={'expression': '34*56'}
[2026-03-09 13:35:15,299] [agent.CLIENT] INFO:
────────────────────────────────────────────────────────────
🔗 开始执行工具调用链
目标: calculator
步骤: 1 步
────────────────────────────────────────────────────────────
[2026-03-09 13:35:15,300] [agent.CLIENT] INFO:
▶ Step 1 执行中
工具: [calculator]
说明: 调用 calculator由 OpenAI Function Calling 规划)
参数: {'expression': '34*56'}
call_id: call_6e1cc584
[2026-03-09 13:35:15,300] [agent.MCP] INFO: 📨 收到请求 id=a2b93b20 method=tools/call transport=stdio
[2026-03-09 13:35:15,300] [agent.TOOL] INFO: ▶ 执行工具 [calculator],参数: {'expression': '34*56'}
[2026-03-09 13:35:15,300] [agent.TOOL] INFO: ✅ 工具 [calculator] 执行成功
[2026-03-09 13:35:15,301] [agent.CLIENT] INFO: ✅ Step 1 成功: 34*56 = 1904...
[2026-03-09 13:35:15,301] [agent.MEMORY] DEBUG: 💬 [TOOL] 34*56 = 1904...
[2026-03-09 13:35:15,301] [agent.CLIENT] INFO: ────────────────────────────────────────────────────────────
✅ 调用链执行完成
完成: 1/1 步
────────────────────────────────────────────────────────────
[2026-03-09 13:35:15,301] [agent.CLIENT] INFO: ✍️ Step 5 [LLM] 调用 OpenAI 生成最终回复...
[2026-03-09 13:35:15,301] [agent.LLM] INFO: ✍️ 整合单步工具结果 [calculator]...
[2026-03-09 13:35:15,302] [agent.LLM] INFO: ✍️ 生成最终回复...
[2026-03-09 13:35:15,302] [agent.LLM] DEBUG: 📤 发送回复生成请求,消息长度: 3
[2026-03-09 13:35:15,929] [agent.LLM] WARNING: ⚠️ OpenAI 回复生成失败: 生成回复失败: Error code: 400 - {'error': {'code': '11111', 'message': "StatusCode: 400: API returned unexpected status code: 400: Invalid parameter: messages with role 'tool' must be a response to a preceeding message with 'tool_calls'.", 'type': 'invalid_request_error'}}
[2026-03-09 13:35:15,929] [agent.MEMORY] DEBUG: 💬 [ASSISTANT] ✅ **任务已完成**
针对您的需求「先计算34乘以56再将结果乘以12」执行结果如下
34*56 = 190...
[2026-03-09 13:35:15,930] [agent.CLIENT] INFO: 🎉 [CLIENT] 流程完成,回复已返回

281
main.py
View File

@ -1,14 +1,21 @@
"""程序入口"""
"""
main.py
智能体 Demo 程序入口
组装所有模块启动交互式对话循环
智能体 Demo 程序入口OpenAI Function Calling 驱动
运行模式:
python main.py 交互模式
python main.py demo 演示模式
python main.py config 打印当前配置
python main.py health 检测 OpenAI API 连通性
LLM_API_KEY=sk-xxx python main.py 指定 API Key
LLM_MODEL_NAME=gpt-4-turbo python main.py 指定模型
AGENT_CONFIG_PATH=my.yaml python main.py 指定配置文件
"""
import sys
import argparse
# ── 导入各模块 ─────────────────────────────────────────────────
from client.agent_client import AgentClient
from client.agent_client import AgentClient, AgentResponse
from config.settings import settings
from llm.llm_engine import LLMEngine
from mcp.mcp_server import MCPServer
from memory.memory_store import MemoryStore
@ -16,92 +23,142 @@ from tools.calculator import CalculatorTool
from tools.code_executor import CodeExecutorTool
from tools.file_reader import FileReaderTool
from tools.web_search import WebSearchTool
from tools.tool_generator import ToolGeneratorTool
from utils.logger import get_logger
logger = get_logger("SYSTEM")
_ALL_TOOLS = {
"calculator": CalculatorTool,
"web_search": WebSearchTool,
"file_reader": FileReaderTool,
"code_executor": CodeExecutorTool,
}
# ── 系统组装 ───────────────────────────────────────────────────
def build_agent(agent_prompt) -> AgentClient:
"""
工厂函数组装并返回完整的 Agent 实例
def build_agent() -> AgentClient:
"""工厂函数:由 settings 驱动的 Agent 组装"""
logger.info("🔧 开始组装 Agent 系统OpenAI Function Calling 模式)...")
logger.info(settings.display())
组装顺序:
1. 初始化 MCP Server注册所有工具
2. 初始化 LLM 引擎
3. 初始化 Memory 模块
4. 组装 AgentClient
"""
logger.info("🔧 开始组装 Agent 系统...")
mcp_server = MCPServer()
for tool_cls in _ALL_TOOLS.values():
mcp_server.register_tool(tool_cls)
# 1. MCP Server注册所有工具
mcp_server = MCPServer(server_name="DemoMCPServer")
mcp_server.register_tools(
CalculatorTool,
WebSearchTool,
FileReaderTool,
CodeExecutorTool,
ToolGeneratorTool
)
# 2. LLM 引擎
llm = LLMEngine(model_name="gpt-4o")
# 3. 记忆模块
memory = MemoryStore(max_history=20)
# 4. 组装客户端
client = AgentClient(llm=llm, mcp_server=mcp_server, memory=memory, prompt=agent_prompt)
llm = LLMEngine()
memory = MemoryStore(max_history=settings.memory.max_history)
client = AgentClient(llm=llm, mcp_server=mcp_server, memory=memory)
logger.info(f"✅ Agent 组装完成,已注册工具: {mcp_server.list_tools()}")
return client
# ── 演示场景 ───────────────────────────────────────────────────
def run_demo(client: AgentClient) -> None:
"""运行预设演示场景,展示各工具的完整调用链路"""
demo_cases = [
("🔢 数学计算", "计算 (100 + 200) × 3 等于多少"),
("🌐 网络搜索", "搜索 Python 最新版本的新特性"),
("📄 文件读取", "读取文件 config.json 的内容"),
("🐍 代码执行", '执行代码 `print("Hello, Agent!")`'),
]
# ── 结果打印 ───────────────────────────────────────────────────
def print_response(response: AgentResponse) -> None:
"""格式化打印 AgentResponse"""
print(f"\n{'' * 62}")
print(f"👤 用户: {response.user_input}")
print(f"{'' * 62}")
logger.info("\n" + "" * 60)
logger.info("🎬 开始演示模式,共 4 个场景")
logger.info("" * 60)
for title, question in demo_cases:
logger.info(f"\n{'' * 55}")
logger.info(f"📌 场景: {title}")
logger.info(f"{'' * 55}")
response = client.chat(question)
print(f"\n{'' * 55}")
print(f"👤 用户: {response.user_input}")
if response.tool_used:
print(f"🔧 工具: {response.tool_used}")
print(f"📤 输出: {response.tool_output[:120]}...")
print(f"🤖 回复:\n{response.final_reply}")
if response.chain_result:
cr = response.chain_result
tag = "🔗 多步串行" if response.is_multi_step else "🔧 单步调用"
status = "✅ 全部成功" if cr.success else f"⚠️ 步骤 {cr.failed_step} 失败"
print(f"{tag} | {cr.completed_steps}/{cr.total_steps} 步 | {status}")
print()
for r in cr.step_results:
icon = "" if r.success else ""
preview = r.output.replace("\n", " ")[:90]
print(f" {icon} Step {r.step_id} [{r.tool_name}]")
if r.success:
print(f" └─ {preview}...")
else:
print(f" └─ 错误: {r.error}")
print()
# 打印记忆统计
print(f"🤖 Agent 回复:\n{response.final_reply}")
print(f"{'' * 62}\n")
# ── API 健康检测 ───────────────────────────────────────────────
def run_health_check() -> None:
"""检测 OpenAI API 连通性"""
print(f"\n{'' * 50}")
print(f" 🏥 OpenAI API 健康检测")
print(f"{'' * 50}")
print(f" Provider : {settings.llm.provider}")
print(f" Model : {settings.llm.model_name}")
print(f" API Key : {'***' + settings.llm.api_key[-4:] if len(settings.llm.api_key) > 4 else '(未设置)'}")
print(f" Base URL : {settings.llm.api_base_url or 'https://api.openai.com/v1'}")
print(f"{'' * 50}")
if not settings.llm.api_key:
print(" ❌ API Key 未设置")
print(" 💡 请设置环境变量: export LLM_API_KEY=sk-...")
print(f"{'' * 50}\n")
return
print(" ⏳ 正在检测连通性...")
llm = LLMEngine()
ok = llm.provider.health_check()
if ok:
print(f" ✅ API 连通正常,模型 [{settings.llm.model_name}] 可用")
else:
print(f" ❌ API 连接失败,请检查网络或 API Key")
print(f" 💡 可尝试设置代理: export LLM_API_BASE_URL=https://your-proxy/v1")
print(f"{'' * 50}\n")
# ── 演示场景 ───────────────────────────────────────────────────
def run_demo(client: AgentClient) -> None:
"""运行预设演示场景"""
demo_cases = [
("🔢 单步: 数学计算",
"计算 (100 + 200) × 3 等于多少?"),
("🌐 单步: 网络搜索",
"搜索 Python 3.12 的主要新特性"),
("🔗 两步: 搜索 + 计算",
"搜索 Python 最新版本号,然后计算 3.12 × 100 的结果"),
("🔗 两步: 读取文件 + 执行代码",
"读取 script.py 文件然后执行里面的代码"),
("💬 无工具: 直接问答",
"你好,请介绍一下你自己"),
]
logger.info("\n" + "" * 62)
logger.info(f"🎬 演示模式 | 模型: {settings.llm.model_name} | "
f"Provider: {settings.llm.provider}")
logger.info("" * 62)
for title, question in demo_cases:
logger.info(f"\n📌 场景: {title}")
response = client.chat(question)
print_response(response)
stats = client.get_memory_stats()
logger.info(f"\n📊 Memory 统计: {stats}")
print(f"📊 Memory 统计: {stats}\n")
# ── 交互式对话循环 ─────────────────────────────────────────────
# ── 交互───────────────────────────────────────────────────
def run_interactive(client: AgentClient) -> None:
"""启动交互式命令行对话"""
print("\n" + "" * 60)
print(" 🤖 Agent Demo — 交互模式")
print(" 输入 'quit' → 退出程序")
print(" 输入 'clear' → 清空会话历史")
print(" 输入 'stats' → 查看 Memory 统计")
print(" 输入 'tools' → 查看已注册工具列表")
print("" * 60 + "\n")
print("\n" + "" * 62)
print(f" 🤖 Agent | {settings.llm.model_name} | {settings.llm.provider}")
print(f" Function Calling: {'✅ 开启' if settings.llm.function_calling else '❌ 关闭(规则引擎)'}")
print(f" Fallback Rules : {'✅ 开启' if settings.agent.fallback_to_rules else '❌ 关闭'}")
print("" * 62)
print(" 💡 示例:")
print(" 计算 (100+200) × 3")
print(" 搜索 Python 新特性,然后计算 3.12 × 100")
print(" 读取 config.json 文件然后执行代码")
print("" * 62)
print(" 🛠 命令: config / health / tools / chains / stats / clear / quit")
print("" * 62 + "\n")
while True:
try:
@ -113,48 +170,80 @@ def run_interactive(client: AgentClient) -> None:
if not user_input:
continue
# ── 内置命令 ──────────────────────────────────────────
match user_input.lower():
case "quit" | "exit":
print("👋 再见!")
break
case "config":
print(settings.display())
case "health":
run_health_check()
case "clear":
client.clear_session()
print("✅ 会话已清空\n")
continue
case "stats":
print(f"📊 Memory 统计: {client.get_memory_stats()}\n")
continue
print(f"📊 {client.get_memory_stats()}\n")
case "tools":
tools = client.mcp_server.list_tools()
print(f"🔧 已注册工具 ({len(tools)} 个): {', '.join(tools)}\n")
continue
schemas = client.mcp_server.get_tool_schemas()
print(f"🔧 已注册工具 ({len(schemas)} 个):")
for s in schemas:
print(f" • [{s.name}] {s.description}")
print()
case "chains":
chains = client.memory.get_chain_history()
if not chains:
print("🔗 暂无调用链历史\n")
else:
print(f"🔗 调用链历史 ({len(chains)} 条):")
for i, c in enumerate(chains, 1):
steps = "".join(s["tool_name"] for s in c["steps"])
ok_cnt = sum(1 for s in c["steps"] if s["success"])
total = len(c["steps"])
print(f" {i}. [{c['timestamp'][11:19]}] {c['goal'][:38]}...")
print(f" 链路: {steps} ({ok_cnt}/{total} 步成功)")
print()
case _:
response = client.chat(user_input)
print_response(response)
# ── 执行 Agent 完整流程 ───────────────────────────────
response = client.chat(user_input)
print(f"\n{'' * 55}")
if response.tool_used:
print(f" 🔧 调用工具: {response.tool_used}")
print(f"🤖 Agent:\n{response.final_reply}")
print(f"{'' * 55}\n")
# ── 配置打印 ───────────────────────────────────────────────────
def run_show_config() -> None:
print(settings.display())
print("\n📁 配置文件查找路径(按优先级):")
print(" 1. 环境变量 AGENT_CONFIG_PATH")
print(" 2. ./config/config.yaml")
print(" 3. ./config.yaml")
print("\n🌍 支持的环境变量覆盖:")
env_vars = [
("LLM_API_KEY", "OpenAI API 密钥sk-..."),
("LLM_MODEL_NAME", "模型名称,如 gpt-4o / gpt-4-turbo"),
("LLM_API_BASE_URL", "自定义 API 地址(兼容代理)"),
("LLM_MODEL_PATH", "本地模型路径"),
("SEARCH_API_KEY", "搜索 API 密钥"),
("LOG_LEVEL", "日志级别 DEBUG/INFO/WARNING/ERROR"),
("AGENT_CONFIG_PATH","配置文件路径"),
]
for var, desc in env_vars:
print(f" {var:<22}{desc}")
print()
# ── 主函数 ─────────────────────────────────────────────────────
def main() -> None:
"""
主函数入口支持两种运行模式:
mode = sys.argv[1] if len(sys.argv) > 1 else "interactive"
python main.py 交互模式默认
python main.py demo 演示模式自动执行预设场景
"""
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--daemon", help="服务模式", action="store_true")
parser.add_argument("-p", "--prompt", default="你是一个通用智能体,非常擅长将用户指令分解成可以执行的任务进行执行。", help="智能体提示此词, 例如你是一个XXXXX非常擅长……")
args = parser.parse_args(sys.argv[1:])
client = build_agent(args.prompt)
if mode == "config":
run_show_config()
return
if args.daemon:
if mode == "health":
run_health_check()
return
client = build_agent()
if mode == "demo":
run_demo(client)
else:
run_interactive(client)

View File

@ -1,8 +1,7 @@
"""MCP 协议JSON-RPC 消息定义"""
"""
mcp/mcp_protocol.py
MCP (Model Context Protocol) 协议数据结构定义
基于 JSON-RPC 2.0 规范封装请求/响应消息体
MCP 协议数据结构定义
新增: ToolStep / ChainPlan / StepResult / ChainResult 支持多工具串行调用
"""
import uuid
@ -12,32 +11,32 @@ from typing import Any
# ── MCP 方法常量 ───────────────────────────────────────────────
class MCPMethod:
TOOLS_LIST = "tools/list" # 列出所有可用工具
TOOLS_CALL = "tools/call" # 调用指定工具
RESOURCES_READ = "resources/read" # 读取资源
TOOLS_LIST = "tools/list"
TOOLS_CALL = "tools/call"
RESOURCES_READ = "resources/read"
# ── 请求消息 ───────────────────────────────────────────────────
# ════════════════════════════════════════════════════════════════
# JSON-RPC 基础消息
# ════════════════════════════════════════════════════════════════
@dataclass
class MCPRequest:
"""
MCP 工具调用请求JSON-RPC 2.0 格式
MCP 工具调用请求JSON-RPC 2.0
示例:
{
"jsonrpc": "2.0",
"id": "abc-123",
"method": "tools/call",
"params": {
"name": "calculator",
"arguments": {"expression": "1+1"}
}
"params": {"name": "calculator", "arguments": {"expression": "1+1"}}
}
"""
method: str
params: dict[str, Any] = field(default_factory=dict)
jsonrpc: str = "2.0"
id: str = field(default_factory=lambda: str(uuid.uuid4())[:8])
params: dict[str, Any] = field(default_factory=dict)
jsonrpc: str = "2.0"
id: str = field(default_factory=lambda: str(uuid.uuid4())[:8])
def to_dict(self) -> dict:
return {
@ -48,22 +47,15 @@ class MCPRequest:
}
# ── 响应消息 ───────────────────────────────────────────────────
@dataclass
class MCPResponse:
"""
MCP 工具调用响应JSON-RPC 2.0 格式
成功示例:
{"jsonrpc": "2.0", "id": "abc-123", "result": {"content": [...]}}
失败示例:
{"jsonrpc": "2.0", "id": "abc-123", "error": {"code": -32601, "message": "..."}}
MCP 工具调用响应JSON-RPC 2.0
"""
id: str
result: dict[str, Any] | None = None
error: dict[str, Any] | None = None
jsonrpc: str = "2.0"
result: dict[str, Any] | None = None
error: dict[str, Any] | None = None
jsonrpc: str = "2.0"
@property
def success(self) -> bool:
@ -75,7 +67,9 @@ class MCPResponse:
if not self.success or not self.result:
return self.error.get("message", "Unknown error") if self.error else ""
items = self.result.get("content", [])
return "\n".join(item.get("text", "") for item in items if item.get("type") == "text")
return "\n".join(
item.get("text", "") for item in items if item.get("type") == "text"
)
def to_dict(self) -> dict:
base = {"jsonrpc": self.jsonrpc, "id": self.id}
@ -86,15 +80,12 @@ class MCPResponse:
return base
# ── 工具描述 ───────────────────────────────────────────────────
@dataclass
class ToolSchema:
"""
工具的元数据描述用于 LLM 识别和选择工具
"""
"""工具元数据描述,供 LLM 识别和选择工具"""
name: str
description: str
parameters: dict[str, Any] # JSON Schema 格式的参数定义
parameters: dict[str, Any]
def to_dict(self) -> dict:
return {
@ -104,4 +95,147 @@ class ToolSchema:
"type": "object",
"properties": self.parameters,
},
}
}
# ════════════════════════════════════════════════════════════════
# 串行调用链数据结构(新增)
# ════════════════════════════════════════════════════════════════
@dataclass
class ToolStep:
    """A single executable step inside a tool-call chain.

    Attributes:
        step_id: 1-based step number.
        tool_name: Name of the tool to invoke.
        arguments: Tool arguments; string values may contain
            ``{{STEP_N_OUTPUT}}`` placeholders referencing earlier outputs.
        description: Natural-language description of this step.
        depends_on: Ids of prerequisite steps (used for context injection).

    Placeholder example:
        arguments = {"query": "{{STEP_1_OUTPUT}}"}
        is resolved at execution time to the output of step 1.
    """
    step_id: int
    tool_name: str
    arguments: dict[str, Any]
    description: str = ""
    depends_on: list[int] = field(default_factory=list)

    def to_mcp_request(self) -> "MCPRequest":
        """Convert this step into an MCP ``tools/call`` request."""
        return MCPRequest(
            method=MCPMethod.TOOLS_CALL,
            params={"name": self.tool_name, "arguments": self.arguments},
        )

    def inject_context(self, context: dict[str, str]) -> "ToolStep":
        """Return a copy of this step with placeholders resolved.

        Every ``{{KEY}}`` occurrence in a string argument value is replaced by
        ``context[KEY]`` truncated to 500 characters (keeps arguments bounded).
        Resolution also recurses into nested lists and dicts — the previous
        version only handled top-level string values and silently left nested
        placeholders unresolved. Non-string leaves are passed through as-is.

        Args:
            context: Mapping such as ``{"STEP_1_OUTPUT": "...", ...}``.

        Returns:
            A new ToolStep; the original object is not modified.
        """
        def _resolve(value: Any) -> Any:
            # Strings: substitute every known placeholder.
            if isinstance(value, str):
                for placeholder, replacement in context.items():
                    # Cap injected text at 500 chars to avoid oversized args.
                    value = value.replace(f"{{{{{placeholder}}}}}", replacement[:500])
                return value
            # Containers: resolve recursively so nested args also work.
            if isinstance(value, list):
                return [_resolve(item) for item in value]
            if isinstance(value, dict):
                return {k: _resolve(v) for k, v in value.items()}
            return value

        return ToolStep(
            step_id=self.step_id,
            tool_name=self.tool_name,
            arguments={k: _resolve(v) for k, v in self.arguments.items()},
            description=self.description,
            depends_on=self.depends_on,
        )
@dataclass
class ChainPlan:
    """An ordered execution plan for a tool-call chain.

    Attributes:
        steps: Steps in execution order.
        goal: Natural-language description of the overall goal.
        is_single: True when this is a single-step fast path.
    """
    steps: list[ToolStep]
    goal: str = ""
    is_single: bool = False

    @property
    def step_count(self) -> int:
        """Number of steps in the plan."""
        return len(self.steps)

    def __repr__(self) -> str:
        chain = " → ".join(f"[{step.step_id}]{step.tool_name}" for step in self.steps)
        return f"ChainPlan(goal={self.goal!r}, chain={chain})"
@dataclass
class StepResult:
    """Outcome of executing one chain step.

    Attributes:
        step_id: Step number this result belongs to.
        tool_name: Name of the tool that was executed.
        success: Whether execution succeeded.
        output: Tool output text.
        error: Error message when the step failed, else None.
    """
    step_id: int
    tool_name: str
    success: bool
    output: str
    error: str | None = None

    @property
    def context_key(self) -> str:
        """Placeholder key under which later steps reference this output."""
        return "STEP_{}_OUTPUT".format(self.step_id)
@dataclass
class ChainResult:
    """Aggregated result of a complete tool-call chain.

    Attributes:
        goal: The original goal text.
        step_results: Per-step execution results, in order.
        final_reply: LLM-composed final reply.
        success: Whether the chain as a whole succeeded.
        failed_step: Id of the first failed step, or None on success.
    """
    goal: str
    step_results: list[StepResult]
    final_reply: str = ""
    success: bool = True
    failed_step: int | None = None

    @property
    def completed_steps(self) -> int:
        """Count of steps that succeeded."""
        return len([result for result in self.step_results if result.success])

    @property
    def total_steps(self) -> int:
        """Total number of executed steps."""
        return len(self.step_results)

    def get_summary(self) -> str:
        """Build a human-readable execution summary."""
        summary = [
            f"📋 执行计划: {self.goal}",
            f"📊 完成步骤: {self.completed_steps}/{self.total_steps}",
        ]
        for result in self.step_results:
            marker = "✅" if result.success else "❌"
            summary.append(
                f" {marker} Step {result.step_id} [{result.tool_name}]: {result.output[:60]}..."
            )
        return "\n".join(summary)

View File

@ -1,13 +1,12 @@
"""MCP 服务器:工具注册 & 调度"""
"""
mcp/mcp_server.py
MCP Server工具注册中心与调度引擎
负责管理所有工具的生命周期处理 JSON-RPC 格式的工具调用请求
MCP Server从配置读取 server_nametransportenabled_tools
支持按配置动态过滤注册工具
"""
import json
from typing import Type
from config.settings import MCPConfig, settings
from mcp.mcp_protocol import MCPMethod, MCPRequest, MCPResponse, ToolSchema
from tools.base_tool import BaseTool, ToolResult
from utils.logger import get_logger
@ -15,40 +14,53 @@ from utils.logger import get_logger
class MCPServer:
"""
MCP 服务器核心类
MCP 服务器核心类配置驱动
职责:
1. 工具注册register_tool
2. 工具列表查询tools/list
3. 工具调用分发tools/call
4. JSON-RPC 协议封装/解析
配置项:
- server_name: 服务器名称
- transport: 通信方式 (stdio / http / websocket)
- enabled_tools: 白名单仅注册列表中的工具
使用示例:
server = MCPServer()
server = MCPServer() # 从 settings 读取配置
server = MCPServer(cfg=custom_cfg) # 使用自定义配置
server.register_tool(CalculatorTool)
response = server.handle_request(request)
"""
def __init__(self, server_name: str = "AgentMCPServer"):
self.server_name = server_name
def __init__(self, cfg: MCPConfig | None = None):
"""
Args:
cfg: MCPConfig 实例None 时从全局 settings 读取
"""
self.cfg = cfg or settings.mcp
self.logger = get_logger("MCP")
self._registry: dict[str, BaseTool] = {} # 工具名 → 工具实例
self._registry: dict[str, BaseTool] = {}
self.logger.info(f"🚀 MCP Server [{server_name}] 启动")
self.logger.info(f"🚀 MCP Server [{self.cfg.server_name}] 启动")
self.logger.info(f" transport = {self.cfg.transport}")
self.logger.info(f" enabled_tools = {self.cfg.enabled_tools}")
# ── 工具注册 ────────────────────────────────────────────────
def register_tool(self, tool_class: Type[BaseTool]) -> None:
"""
注册一个工具类到服务器
注册工具 enabled_tools 白名单过滤
Args:
tool_class: 继承自 BaseTool 的工具类传入类本身不是实例
tool_class: 继承自 BaseTool 的工具类
"""
instance = tool_class()
if not instance.name:
raise ValueError(f"工具类 {tool_class.__name__} 未设置 name 属性")
# 白名单过滤
if instance.name not in self.cfg.enabled_tools:
self.logger.warning(
f"⏭ 工具 [{instance.name}] 不在 enabled_tools 白名单中,跳过注册"
)
return
self._registry[instance.name] = instance
self.logger.info(f"📌 注册工具: [{instance.name}] — {instance.description}")
@ -57,87 +69,61 @@ class MCPServer:
for cls in tool_classes:
self.register_tool(cls)
# ── 请求处理入口 ────────────────────────────────────────────
# ── 请求处理 ────────────────────────────────────────────────
def handle_request(self, request: MCPRequest) -> MCPResponse:
"""
处理 MCP 请求的统一入口根据 method 分发到对应处理器
Args:
request: MCPRequest 实例
Returns:
MCPResponse 实例
"""
self.logger.info(f"📨 收到请求 id={request.id} method={request.method}")
"""处理 MCP 请求的统一入口"""
self.logger.info(
f"📨 收到请求 id={request.id} method={request.method} "
f"transport={self.cfg.transport}"
)
handlers = {
MCPMethod.TOOLS_LIST: self._handle_tools_list,
MCPMethod.TOOLS_CALL: self._handle_tools_call,
}
handler = handlers.get(request.method)
if handler is None:
return self._error_response(request.id, -32601, f"未知方法: {request.method}")
return handler(request)
# ── 私有处理器 ──────────────────────────────────────────────
def _handle_tools_list(self, request: MCPRequest) -> MCPResponse:
"""处理 tools/list 请求,返回所有已注册工具的 Schema"""
schemas = [tool.get_schema().to_dict() for tool in self._registry.values()]
self.logger.info(f"📋 返回工具列表,共 {len(schemas)} 个工具")
return MCPResponse(
id=request.id,
result={"tools": schemas},
)
self.logger.info(f"📋 返回工具列表,共 {len(schemas)}")
return MCPResponse(id=request.id, result={"tools": schemas})
def _handle_tools_call(self, request: MCPRequest) -> MCPResponse:
    """Handle `tools/call`: look up the named tool, execute it, wrap the result.

    Error codes:
        -32602 (invalid params) — unknown tool name;
        -32000 (server error)   — the tool itself reported failure.

    Fixes over the corrupted text: removed the duplicated registry lookup,
    the two implicitly-concatenated error f-strings, the duplicate `result=`
    keyword argument (a SyntaxError), and the trailing duplicate return.
    """
    tool_name = request.params.get("name")
    arguments = request.params.get("arguments", {})
    tool = self._registry.get(tool_name)
    if tool is None:
        return self._error_response(
            request.id, -32602,
            f"工具 [{tool_name}] 不存在,可用工具: {list(self._registry.keys())}"
        )
    # safe_execute reports failures via ToolResult.success rather than raising,
    # as evidenced by the success/failure branching below.
    result: ToolResult = tool.safe_execute(**arguments)
    if result.success:
        return MCPResponse(
            id=request.id,
            result={
                "content": [{"type": "text", "text": result.output}],
                "metadata": result.metadata,
            },
        )
    return self._error_response(request.id, -32000, result.output)
# ── 工具方法 ────────────────────────────────────────────────
def get_tool_schemas(self) -> list[ToolSchema]:
    """Collect every registered tool's schema (consumed by the LLM engine)."""
    schemas: list[ToolSchema] = []
    for registered_tool in self._registry.values():
        schemas.append(registered_tool.get_schema())
    return schemas
def list_tools(self) -> list[str]:
    """Names of all currently registered tools, in registration order."""
    return [*self._registry.keys()]
@staticmethod
def _error_response(req_id: str, code: int, message: str) -> MCPResponse:
    """Build a standard JSON-RPC error response (no `result` field).

    Fix: removed the unreachable duplicate single-line return the diff
    rendering left after the first `return`.
    """
    return MCPResponse(
        id=req_id,
        error={"code": code, "message": message},
    )
def __repr__(self) -> str:
    """Debug representation: server name, transport, and registered tool names.

    Fix: the text contained two return statements; the first referenced
    `self.server_name`, which is inconsistent with the `self.cfg.*` attribute
    access used throughout this class — keep the cfg-based form.
    """
    return (
        f"MCPServer(name={self.cfg.server_name!r}, "
        f"transport={self.cfg.transport!r}, "
        f"tools={self.list_tools()})"
    )

View File

@ -1,22 +1,25 @@
"""记忆模块:对话历史管理"""
"""
memory/memory_store.py
Agent 记忆模块管理对话历史短期记忆与关键信息摘要长期记忆
Agent 记忆模块管理对话历史短期与关键事实长期
新增: add_chain_result() 记录完整多步骤调用链
"""
from collections import deque
from dataclasses import dataclass, field
from datetime import datetime
from typing import Literal
from typing import TYPE_CHECKING, Literal
from utils.logger import get_logger
if TYPE_CHECKING:
from mcp.mcp_protocol import ChainResult
# ── 消息数据结构 ───────────────────────────────────────────────
@dataclass
class Message:
    """A single conversation message."""
    # Fix: the diff left two `role` annotations; keep the superset including
    # "chain", which add_chain_result() writes.
    role: Literal["user", "assistant", "tool", "chain"]
    content: str
    # Wall-clock creation time (HH:MM:SS) for display/logging.
    timestamp: str = field(default_factory=lambda: datetime.now().strftime("%H:%M:%S"))
    # Free-form extras, e.g. tool name or chain statistics.
    metadata: dict = field(default_factory=dict)
@ -34,74 +37,102 @@ class MemoryStore:
"""
对话记忆存储
短期记忆: 使用 deque 保存最近 N 轮对话自动滚动淘汰旧消息
长期记忆: 保存关键事实摘要生产环境可替换为向量数据库
短期记忆: deque 保存最近 N 轮对话自动滚动淘汰
长期记忆: 关键事实列表生产环境可替换为向量数据库
链路记录: 完整的多步骤调用链历史
使用示例:
memory = MemoryStore(max_history=10)
memory = MemoryStore(max_history=20)
memory.add_user_message("你好")
memory.add_assistant_message("你好!有什么可以帮你?")
history = memory.get_history()
memory.add_chain_result(chain_result)
"""
def __init__(self, max_history: int = 20):
    """
    Args:
        max_history: maximum number of messages kept in short-term history.

    Fix: the diff rendering duplicated every attribute assignment; each is
    now assigned exactly once (final values are unchanged).
    """
    self.logger = get_logger("MEMORY")
    self.max_history = max_history
    # deque(maxlen=...) silently evicts the oldest message once full.
    self._history: deque[Message] = deque(maxlen=max_history)
    self._facts: list[str] = []   # long-term memory: key facts
    self._chains: list[dict] = [] # multi-step tool-chain trace records
    self.logger.info(f"💾 Memory 初始化,最大历史: {max_history}")
# ── 写入接口 ────────────────────────────────────────────────
def add_user_message(self, content: str) -> None:
    """Append a user-authored message to the short-term history."""
    user_message = Message(role="user", content=content)
    self._add(user_message)
def add_assistant_message(self, content: str) -> None:
    """Append an agent reply to the short-term history."""
    reply = Message(role="assistant", content=content)
    self._add(reply)
def add_tool_result(self, tool_name: str, result: str) -> None:
    """Record one tool invocation's output, tagged with the tool's name."""
    entry = Message(role="tool", content=result, metadata={"tool": tool_name})
    self._add(entry)
def add_chain_result(self, chain_result: "ChainResult") -> None:
"""
记录完整的多步骤调用链结果
Args:
chain_result: ChainResult 实例
"""
# 写入对话历史assistant 角色)
self._add(Message(
role="chain",
content=chain_result.final_reply,
metadata={
"goal": chain_result.goal,
"total_steps": chain_result.total_steps,
"completed_steps": chain_result.completed_steps,
"success": chain_result.success,
"tools_used": [r.tool_name for r in chain_result.step_results],
},
))
# 写入链路追踪记录
chain_record = {
"timestamp": datetime.now().isoformat(),
"goal": chain_result.goal,
"steps": [
{
"step_id": r.step_id,
"tool_name": r.tool_name,
"success": r.success,
"output": r.output[:200],
"error": r.error,
}
for r in chain_result.step_results
],
"success": chain_result.success,
}
self._chains.append(chain_record)
self.logger.info(
f"🔗 调用链已记录: {chain_result.completed_steps}/{chain_result.total_steps} 步成功"
)
def add_fact(self, fact: str) -> None:
    """Store one key fact in long-term memory."""
    self._facts += [fact]
    self.logger.debug(f"📌 长期记忆新增: {fact}")
# ── 读取接口 ────────────────────────────────────────────────
def get_history(self, last_n: int | None = None) -> list[dict]:
"""
获取对话历史LLM 上下文格式
Args:
last_n: 仅返回最近 N None 表示全部
Returns:
消息字典列表格式: [{"role": ..., "content": ...}, ...]
"""
messages = list(self._history)
if last_n:
messages = messages[-last_n:]
return [m.to_dict() for m in messages]
def get_facts(self) -> list[str]:
    """Return a copy of all long-term facts (callers cannot mutate storage)."""
    return [*self._facts]
def get_chain_history(self) -> list[dict]:
    """Return a copy of every recorded tool-chain trace."""
    return self._chains.copy()
def get_context_summary(self) -> str:
    """Render the last few messages as a compact text block for LLM prompts.

    Fix: removed the stray duplicate docstring expression the diff rendering
    left inside the body.
    """
    history = self.get_history(last_n=6)
    # Each message is truncated to 80 chars to keep the prompt small.
    lines = [f"[{m['role'].upper()}] {m['content'][:80]}" for m in history]
    return "\n".join(lines) if lines else "(暂无对话历史)"
@ -109,20 +140,17 @@ class MemoryStore:
# ── 管理接口 ────────────────────────────────────────────────
def clear_history(self) -> None:
    """Wipe the short-term dialogue history; facts and chain logs are kept."""
    self._history.clear()
    self.logger.info("🗑 对话历史已清空")
def stats(self) -> dict:
    """Return counters describing current memory usage."""
    return dict(
        history_count=len(self._history),
        facts_count=len(self._facts),
        chain_count=len(self._chains),
        max_history=self.max_history,
    )
# ── 私有方法 ────────────────────────────────────────────────
def _add(self, message: Message) -> None:
    """Append to the rolling history and emit a trace line (60-char preview)."""
    self._history.append(message)
    preview = message.content[:60]
    self.logger.debug(f"💬 [{message.role.upper()}] {preview}...")

View File

@ -1 +1,2 @@
openai
pyyaml

View File

@ -1,14 +1,12 @@
"""计算器工具"""
# ════════════════════════════════════════════════════════════════
# tools/calculator.py — 数学计算工具
# tools/calculator.py
# ════════════════════════════════════════════════════════════════
"""
tools/calculator.py
安全的数学表达式计算工具使用 ast 模块避免 eval 注入风险
"""
"""安全的数学表达式计算工具AST 解析,防注入)"""
import ast
import operator
from config.settings import settings
from tools.base_tool import BaseTool, ToolResult
@ -16,27 +14,27 @@ class CalculatorTool(BaseTool):
name = "calculator"
description = "计算数学表达式,支持加减乘除、幂运算、括号等"
parameters = {
"expression": {
"type": "string",
"description": "数学表达式,例如 '(1+2)*3''2**10'",
}
"expression": {"type": "string", "description": "数学表达式,例如 '(1+2)*3'"},
}
# 允许的运算符白名单(防止注入)
_OPERATORS = {
ast.Add: operator.add,
ast.Sub: operator.sub,
ast.Mult: operator.mul,
ast.Div: operator.truediv,
ast.Pow: operator.pow,
ast.Mod: operator.mod,
ast.Add: operator.add, ast.Sub: operator.sub,
ast.Mult: operator.mul, ast.Div: operator.truediv,
ast.Pow: operator.pow, ast.Mod: operator.mod,
ast.USub: operator.neg,
}
def __init__(self):
    """Cache the configured rounding precision for result formatting."""
    super().__init__()
    configured_precision = settings.tools.calculator.precision
    self._precision = configured_precision
    self.logger.debug(f"⚙️ Calculator 精度: {self._precision}")
def execute(self, expression: str, **_) -> ToolResult:
try:
tree = ast.parse(expression, mode="eval")
result = self._eval_node(tree.body)
result = round(result, self._precision)
return ToolResult(
success=True,
output=f"{expression} = {result}",
@ -46,7 +44,6 @@ class CalculatorTool(BaseTool):
return ToolResult(success=False, output=f"计算错误: {exc}")
def _eval_node(self, node: ast.AST) -> float:
"""递归解析 AST 节点"""
match node:
case ast.Constant(value=v) if isinstance(v, (int, float)):
return v
@ -61,4 +58,4 @@ class CalculatorTool(BaseTool):
raise ValueError(f"不支持的一元运算符: {type(op).__name__}")
return fn(self._eval_node(operand))
case _:
raise ValueError(f"不支持的表达式节点: {type(node).__name__}")
raise ValueError(f"不支持的节点: {type(node).__name__}")

View File

@ -1,34 +1,23 @@
"""代码执行工具"""
# ════════════════════════════════════════════════════════════════
# tools/code_executor.py — 代码执行工具
# tools/code_executor.py
# ════════════════════════════════════════════════════════════════
"""
tools/code_executor.py
沙箱代码执行工具在受限环境中运行 Python 代码片段
"""
"""沙箱代码执行工具(从配置读取 timeout / sandbox"""
import io
import contextlib
import time
from tools.base_tool import BaseTool, ToolResult
from config.settings import settings
class CodeExecutorTool(BaseTool):
name = "code_executor"
description = "在沙箱环境中执行 Python 代码片段,返回标准输出"
parameters = {
"code": {
"type": "string",
"description": "要执行的 Python 代码",
},
"timeout": {
"type": "integer",
"description": "超时时间(秒),默认 5",
},
"code": {"type": "string", "description": "要执行的 Python 代码"},
"timeout": {"type": "integer", "description": "超时时间(秒)"},
}
# 沙箱:仅允许安全的内置函数
_SAFE_BUILTINS = {
"print": print, "range": range, "len": len,
"int": int, "float": float, "str": str, "list": list,
@ -38,24 +27,35 @@ class CodeExecutorTool(BaseTool):
"sorted": sorted, "reversed": reversed,
}
def __init__(self):
    """Load execution limits (timeout, sandbox flag) from global settings.

    Fix: removed the orphaned old `def execute(...)` header the diff rendering
    left directly above this constructor (the real execute follows below).
    """
    super().__init__()
    cfg = settings.tools.code_executor
    self._timeout = cfg.timeout
    self._sandbox = cfg.sandbox
    self.logger.debug(
        f"⚙️ CodeExecutor timeout={self._timeout}s, sandbox={self._sandbox}"
    )
def execute(self, code: str, timeout: int | None = None, **_) -> ToolResult:
    """Run a Python snippet, capturing its stdout.

    Args:
        code: Python source to execute.
        timeout: overrides the configured timeout when given.
            NOTE(review): the value is computed but not enforced here — no
            alarm/subprocess mechanism interrupts long-running code; confirm
            whether enforcement happens elsewhere.

    Returns:
        ToolResult with captured stdout on success, or the exception text.

    Fixes over the corrupted text: removed the interleaved old exec/elapsed
    lines (which referenced an undefined `start_time` and executed the code
    twice) and the trailing duplicate return.
    """
    timeout = timeout or self._timeout
    stdout_buf = io.StringIO()
    start = time.perf_counter()
    # Sandbox mode restricts builtins to the whitelist; otherwise full builtins.
    exec_globals = (
        {"__builtins__": self._SAFE_BUILTINS}
        if self._sandbox
        else {"__builtins__": __builtins__}
    )
    try:
        with contextlib.redirect_stdout(stdout_buf):
            exec(compile(code, "<agent_sandbox>", "exec"), exec_globals)  # noqa: S102
        elapsed = (time.perf_counter() - start) * 1000
        output = stdout_buf.getvalue() or "(无输出)"
        return ToolResult(
            success=True,
            output=f"执行成功 ({elapsed:.1f}ms) [sandbox={self._sandbox}]:\n{output}",
            metadata={"elapsed_ms": elapsed, "sandbox": self._sandbox},
        )
    except Exception as exc:
        return ToolResult(success=False, output=f"执行错误: {type(exc).__name__}: {exc}")

View File

@ -1,65 +1,63 @@
"""文件读取工具"""
# ════════════════════════════════════════════════════════════════
# tools/file_reader.py — 文件读取工具
# tools/file_reader.py
# ════════════════════════════════════════════════════════════════
"""
tools/file_reader.py
本地文件读取工具支持文本文件限制读取路径防止越权
"""
"""文件读取工具(从配置读取 allowed_root / max_file_size_kb"""
from pathlib import Path
from tools.base_tool import BaseTool, ToolResult
# 允许读取的根目录(沙箱限制)
_ALLOWED_ROOT = Path("./workspace")
from config.settings import settings
class FileReaderTool(BaseTool):
name = "file_reader"
description = "读取本地文件内容,仅限 workspace/ 目录下的文件"
description = "读取本地文件内容,仅限配置的 allowed_root 目录"
parameters = {
"path": {
"type": "string",
"description": "文件路径,相对于 workspace/ 目录",
},
"encoding": {
"type": "string",
"description": "文件编码,默认 utf-8",
},
"path": {"type": "string", "description": "文件路径(相对于 allowed_root"},
"encoding": {"type": "string", "description": "文件编码,默认 utf-8"},
}
def execute(self, path: str, encoding: str = "utf-8", **_) -> ToolResult:
_ALLOWED_ROOT.mkdir(exist_ok=True)
def __init__(self):
super().__init__()
cfg = settings.tools.file_reader
self._allowed_root = Path(cfg.allowed_root)
self._max_size_kb = cfg.max_file_size_kb
self.logger.debug(
f"⚙️ FileReader root={self._allowed_root}, "
f"max_size={self._max_size_kb}KB"
)
def execute(self, path: str, encoding: str = "utf-8", **_) -> ToolResult:
    """Read a text file under the configured sandbox root.

    Args:
        path: file path relative to `allowed_root`.
        encoding: text encoding (default utf-8).

    Returns:
        ToolResult with the file body, or a failure result for path-traversal,
        oversize, or I/O errors.

    Fixes over the corrupted text: removed the old remnant path-check lines
    referencing the removed `_ALLOWED_ROOT` global and the duplicated
    `output=`/`metadata=` keyword arguments (a SyntaxError).
    """
    self._allowed_root.mkdir(parents=True, exist_ok=True)
    # Resolve and prefix-check the target to block directory traversal ("../..").
    target = (self._allowed_root / path).resolve()
    if not str(target).startswith(str(self._allowed_root.resolve())):
        return ToolResult(success=False, output="❌ 拒绝访问: 路径超出允许范围")
    if not target.exists():
        # Demo mode: materialize a sample file instead of failing.
        self._create_demo_file(target)
    size_kb = target.stat().st_size / 1024
    if size_kb > self._max_size_kb:
        return ToolResult(
            success=False,
            output=f"❌ 文件过大: {size_kb:.1f}KB > 限制 {self._max_size_kb}KB",
        )
    try:
        content = target.read_text(encoding=encoding)
        return ToolResult(
            success=True,
            output=f"文件 [{path}] ({size_kb:.1f}KB):\n{content}",
            metadata={"path": str(target), "size_kb": size_kb},
        )
    except OSError as exc:
        return ToolResult(success=False, output=f"读取失败: {exc}")
@staticmethod
def _create_demo_file(path: Path) -> None:
    """Write a small sample JSON file at *path*, creating parent directories.

    Fix: the diff rendering left write_text() with two separate string
    arguments (the old and new variants of the second half) plus a duplicate
    closing paren, which would raise at runtime — keep a single literal.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(
        '{\n  "app": "AgentDemo",\n  "version": "1.0.0",\n'
        '  "llm": "claude-sonnet-4-6",\n  "tools": ["calculator","web_search"]\n}\n',
        encoding="utf-8",
    )

View File

@ -1,27 +1,17 @@
"""网络搜索工具"""
# ════════════════════════════════════════════════════════════════
# tools/web_search.py — 网络搜索工具(模拟)
# tools/web_search.py
# ════════════════════════════════════════════════════════════════
"""
tools/web_search.py
网络搜索工具Demo 中使用模拟数据生产环境可替换为真实 API
"""
"""网络搜索工具(从配置读取 max_results / engine / api_key"""
import time
from tools.base_tool import BaseTool, ToolResult
from config.settings import settings
# Mock search-result database, keyed by keyword.
# Fix: the diff left each key listed twice (old and new entries); Python keeps
# the later value, so deduplicating to the later entries preserves behavior.
_MOCK_RESULTS: dict[str, list[dict]] = {
    "天气": [
        {"title": "今日天气预报", "snippet": "晴转多云,气温 15°C ~ 24°C,东南风 3 级"},
        {"title": "未来 7 天天气", "snippet": "本周整体晴好,周末有小雨"},
    ],
    "python": [
        {"title": "Python 官方文档", "snippet": "Python 3.12 新特性:改进的错误提示"},
        {"title": "Python 教程", "snippet": "从零开始学 Python,包含 300+ 实战案例"},
    ],
}
_DEFAULT_RESULTS = [
{"title": "搜索结果 1", "snippet": "找到相关内容,请查看详情"},
@ -33,26 +23,37 @@ class WebSearchTool(BaseTool):
name = "web_search"
description = "在互联网上搜索信息,返回相关网页摘要"
parameters = {
"query": {
"type": "string",
"description": "搜索关键词或问题",
},
"max_results": {
"type": "integer",
"description": "返回结果数量,默认 3",
},
"query": {"type": "string", "description": "搜索关键词"},
"max_results": {"type": "integer", "description": "返回结果数量"},
}
def __init__(self):
    """Pull search configuration (engine, limits, credentials) from settings.

    Fix: removed the orphaned old `def execute(...)` header and its
    `time.sleep` line the diff rendering left above this constructor
    (the real execute follows below).
    """
    super().__init__()
    cfg = settings.tools.web_search
    self._default_max = cfg.max_results
    self._engine = cfg.engine
    self._api_key = cfg.api_key
    self._timeout = cfg.timeout
    self.logger.debug(
        f"⚙️ WebSearch engine={self._engine}, "
        f"max_results={self._default_max}, "
        f"api_key={'***' if self._api_key else '(未设置)'}"
    )
def execute(self, query: str, max_results: int | None = None, **_) -> ToolResult:
max_results = max_results or self._default_max
time.sleep(0.1)
if self._engine != "mock" and self._api_key:
# 生产环境:调用真实搜索 API
# results = self._call_real_api(query, max_results)
pass
# 关键词匹配模拟结果
results = _DEFAULT_RESULTS
for keyword, data in _MOCK_RESULTS.items():
if keyword in query:
for kw, data in _MOCK_RESULTS.items():
if kw in query:
results = data
break
results = results[:max_results]
formatted = "\n".join(
f"[{i+1}] {r['title']}\n {r['snippet']}"
@ -60,6 +61,6 @@ class WebSearchTool(BaseTool):
)
return ToolResult(
success=True,
output=f"搜索「{query},共 {len(results)}结果:\n{formatted}",
metadata={"query": query, "count": len(results)},
)
output=f"搜索「{query}({self._engine}),共 {len(results)}:\n{formatted}",
metadata={"query": query, "engine": self._engine, "count": len(results)},
)

View File

@ -1,12 +1,10 @@
"""日志工具"""
"""
utils/logger.py
统一日志模块支持彩色终端输出与文件记录
统一日志模块 settings 读取日志级别与文件路径配置
"""
import logging
import sys
from datetime import datetime
from pathlib import Path
@ -23,7 +21,6 @@ class Color:
GREY = "\033[90m"
# ── 自定义彩色 Formatter ───────────────────────────────────────
class ColorFormatter(logging.Formatter):
LEVEL_COLORS = {
logging.DEBUG: Color.GREY,
@ -32,65 +29,65 @@ class ColorFormatter(logging.Formatter):
logging.ERROR: Color.RED,
logging.CRITICAL: Color.MAGENTA,
}
# Per-component label colors for terminal output.
# Fix: the diff left every key listed twice with identical values; deduplicated
# (the "CONFIG" entry only appeared in the later set and is kept).
COMPONENT_COLORS = {
    "CLIENT": Color.BLUE,
    "LLM": Color.GREEN,
    "MCP": Color.YELLOW,
    "TOOL": Color.MAGENTA,
    "MEMORY": Color.CYAN,
    "SYSTEM": Color.GREY,
    "CONFIG": Color.GREEN,
}
def format(self, record: logging.LogRecord) -> str:
    """Render one log record with timestamp, colored component tag, and message.

    Fix: the diff rendering left `prefix = (` immediately followed by
    `return (` (a SyntaxError) plus a trailing `return prefix`; collapsed to
    a single return expression.
    """
    # Imported locally in the original; kept to avoid relying on module state.
    from datetime import datetime
    level_color = self.LEVEL_COLORS.get(record.levelno, Color.RESET)
    time_str = datetime.now().strftime("%H:%M:%S.%f")[:-3]
    # Component tag comes from the logger name, e.g. "agent.CLIENT" -> "CLIENT".
    component = record.name.split(".")[-1].upper()
    comp_color = self.COMPONENT_COLORS.get(component, Color.RESET)
    return (
        f"{Color.GREY}[{time_str}]{Color.RESET} "
        f"{comp_color}{Color.BOLD}[{component:6s}]{Color.RESET} "
        f"{level_color}{record.getMessage()}{Color.RESET}"
    )
# ── Logger 工厂函数 ────────────────────────────────────────────
def get_logger(component: str, level: int | None = None) -> logging.Logger:
    """Build (or fetch) the logger for one component.

    The level and file-logging options come from `settings.logging`; an
    explicit `level` argument overrides the configured one.

    Args:
        component: component tag such as "CLIENT", "LLM", "MCP".
        level: explicit log level; None means "read from settings".

    Returns:
        a configured Logger instance.

    Fix: the diff rendering left two `def` headers and both the old and new
    docstring/handler code interleaved; reconstructed as the settings-driven
    variant consistent with the `cfg.logging` accesses below.
    """
    # Lazy import: settings initialization itself uses this logger, so a
    # top-level import would create a circular dependency.
    from config.settings import settings as cfg

    if level is None:
        level = getattr(logging, cfg.logging.level, logging.DEBUG)
    logger = logging.getLogger(f"agent.{component}")
    logger.setLevel(level)
    # Handlers are attached once per logger; repeat calls reuse the instance.
    if logger.handlers:
        return logger
    # Terminal handler (colored).
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(ColorFormatter())
    logger.addHandler(console_handler)
    # Plain-text file handler, toggled by configuration.
    if cfg.logging.enable_file:
        log_dir = Path(cfg.logging.log_dir)
        log_dir.mkdir(parents=True, exist_ok=True)
        file_handler = logging.FileHandler(
            log_dir / cfg.logging.log_file, encoding="utf-8"
        )
        file_handler.setFormatter(
            logging.Formatter("[%(asctime)s] [%(name)s] %(levelname)s: %(message)s")
        )
        logger.addHandler(file_handler)
    # Stop propagation so records are not duplicated by the root logger.
    logger.propagate = False
    return logger