This commit is contained in:
sontolau 2026-03-09 14:10:07 +08:00
parent 3640f3c216
commit efd1d2fd34
10 changed files with 2008 additions and 154 deletions

View File

@ -31,6 +31,8 @@ mcp:
- web_search
- file_reader
- code_executor
- static_analyzer
- ssh_docker
# ── 工具配置 ───────────────────────────────────────────────────
tools:
@ -50,6 +52,51 @@ tools:
calculator:
precision: 10
# ── C/C++ 静态分析 ──────────────────────────────────────────
static_analyzer:
default_tool: "cppcheck" # cppcheck | clang-tidy | infer
default_std: "c++17" # c89 | c99 | c11 | c++11 | c++14 | c++17 | c++20
timeout: 120 # 分析超时(秒)
jobs: 4 # 并行线程数cppcheck -j 参数)
output_format: "summary" # summary | json | full
max_issues: 500 # 最多返回问题条数
# 允许分析的目录白名单,空列表表示不限制
allowed_roots: [ ]
# 各工具的额外默认参数
tool_extra_args:
cppcheck: "--suppress=missingIncludeSystem --suppress=unmatchedSuppression"
clang-tidy: "--checks=*,-fuchsia-*,-google-*,-zircon-*"
infer: ""
# ── SSH Docker 部署 ─────────────────────────────────────────
ssh_docker:
default_ssh_port: 22
default_username: "root"
connect_timeout: 30 # SSH 连接超时(秒)
cmd_timeout: 120 # 单条命令执行超时(秒)
deploy_timeout: 300 # 镜像拉取/部署超时(秒)
default_restart_policy: "unless-stopped"
default_tail_lines: 100
# 安全:允许操作的服务器白名单,空列表表示不限制
allowed_hosts: [ ]
# 安全:禁止使用的镜像前缀
blocked_images: [ ]
# 是否允许 --privileged 模式
allow_privileged: false
# 已知服务器预设(可选,避免每次传入认证信息)
servers: { }
# 示例:
# servers:
# prod:
# host: "192.168.1.100"
# port: 22
# username: "deploy"
# key_path: "/home/ci/.ssh/id_rsa"
# staging:
# host: "192.168.1.200"
# port: 22
# username: "ubuntu"
# password: "" # 留空则读取环境变量 SSH_STAGING_PASSWORD
# ── 记忆配置 ───────────────────────────────────────────────────
memory:

View File

@ -1,6 +1,6 @@
"""
config/settings.py
配置加载与管理模块新增 OpenAI 专用字段
配置加载与管理 使用纯字典存储工具配置通过 settings.tools['tool_name']['key'] 访问
"""
import os
@ -16,12 +16,147 @@ except ImportError:
# ════════════════════════════════════════════════════════════════
# 配置数据类
# 默认配置(与 config.yaml 结构完全对应,作为 fallback
# ════════════════════════════════════════════════════════════════
# Fallback configuration. The structure mirrors config.yaml 1:1; any key or
# section missing from the YAML file is taken from here by ConfigLoader.
_DEFAULTS: dict[str, Any] = {
    # LLM provider settings (api_key/base_url/model_name can be overridden by
    # env vars in LLMConfig.__post_init__).
    "llm": {
        "provider": "openai",
        "model_name": "gpt-4o",
        "api_key": "",
        "api_base_url": "",
        "max_tokens": 4096,
        "temperature": 0.7,
        "timeout": 60,          # request timeout, seconds
        "max_retries": 3,
        "function_calling": True,   # OpenAI-specific switches
        "stream": False,
        "model_path": "",           # local / Ollama models
        "ollama_host": "http://localhost:11434",
    },
    # MCP server settings.
    "mcp": {
        "server_name": "DemoMCPServer",
        "transport": "stdio",
        "host": "localhost",
        "port": 3000,
        "enabled_tools": [
            "calculator", "web_search", "file_reader",
            "code_executor", "static_analyzer", "ssh_docker",
        ],
    },
    # Per-tool settings, accessed as settings.tools['<name>']['<key>'].
    "tools": {
        "calculator": {
            "precision": 10,
        },
        "web_search": {
            "max_results": 5,
            "timeout": 10,          # per-request timeout, seconds
            "api_key": "",          # overridden by SEARCH_API_KEY env var
            "engine": "mock",
        },
        "file_reader": {
            "allowed_root": "./workspace",
            "max_file_size_kb": 512,
        },
        "code_executor": {
            "timeout": 5,
            "sandbox": True,
        },
        # C/C++ static analysis (cppcheck | clang-tidy | infer).
        "static_analyzer": {
            "default_tool": "cppcheck",
            "default_std": "c++17",
            "timeout": 120,         # analysis timeout, seconds
            "jobs": 4,              # parallel threads (cppcheck -j)
            "output_format": "summary",
            "max_issues": 500,      # cap on reported issues
            "allowed_roots": [],    # empty list = no restriction
            # Extra default CLI args per analyzer.
            "tool_extra_args": {
                "cppcheck": "--suppress=missingIncludeSystem --suppress=unmatchedSuppression",
                "clang-tidy": "--checks=*,-fuchsia-*,-google-*,-zircon-*",
                "infer": "",
            },
        },
        # SSH-based Docker deployment.
        "ssh_docker": {
            "default_ssh_port": 22,
            "default_username": "root",
            "connect_timeout": 30,  # SSH connect timeout, seconds
            "cmd_timeout": 120,     # per-command timeout, seconds
            "deploy_timeout": 300,  # image pull / deploy timeout, seconds
            "default_restart_policy": "unless-stopped",
            "default_tail_lines": 100,
            "allowed_hosts": [],    # empty list = no restriction
            "blocked_images": [],   # image-prefix blocklist
            "allow_privileged": False,
            "servers": {},          # optional named server presets
        },
    },
    # Conversation memory.
    "memory": {
        "max_history": 20,
        "enable_long_term": False,
        "vector_db_url": "",
    },
    # Logging.
    "logging": {
        "level": "DEBUG",
        "enable_file": True,
        "log_dir": "./logs",
        "log_file": "agent.log",
    },
    # Agent behavior.
    "agent": {
        "max_chain_steps": 10,
        "enable_multi_step": True,
        "session_timeout": 3600,
        "fallback_to_rules": True,
    },
}
# ════════════════════════════════════════════════════════════════
# 工具配置字典视图(支持 settings.tools['web_search']['timeout']
# ════════════════════════════════════════════════════════════════
class ToolsView:
    """Read-only, dict-like view over per-tool configuration.

    Usage:
        settings.tools['web_search']['timeout']   -> 10
        settings.tools['ssh_docker']['servers']   -> {...}
        'web_search' in settings.tools            -> True
        for name in settings.tools: ...
        len(settings.tools), .keys(), .values(), .items()
    """

    def __init__(self, data: dict[str, dict]):
        self._data = data

    def __getitem__(self, tool_name: str) -> dict[str, Any]:
        # Fail loudly with the list of known tools instead of a bare KeyError.
        if tool_name not in self._data:
            raise KeyError(
                f"工具 '{tool_name}' 未在配置中定义。"
                f"可用工具: {list(self._data.keys())}"
            )
        return self._data[tool_name]

    def __contains__(self, tool_name: str) -> bool:
        return tool_name in self._data

    def __iter__(self):
        # Added: iteration over tool names, completing the mapping protocol.
        return iter(self._data)

    def __len__(self) -> int:
        # Added: number of configured tools.
        return len(self._data)

    def __repr__(self) -> str:
        return f"ToolsView({list(self._data.keys())})"

    def get(self, tool_name: str, default: Any = None) -> Any:
        return self._data.get(tool_name, default)

    def keys(self):
        return self._data.keys()

    def values(self):
        # Added for symmetry with keys().
        return self._data.values()

    def items(self):
        # Added for symmetry with keys().
        return self._data.items()
# ════════════════════════════════════════════════════════════════
# LLM / MCP / Memory / Logging / Agent 轻量配置对象
# (保留 dataclass 方便属性访问,非工具类配置)
# ════════════════════════════════════════════════════════════════
@dataclass
class LLMConfig:
"""LLM 模型配置(含 OpenAI 专用字段)"""
provider: str = "openai"
model_name: str = "gpt-4o"
api_key: str = ""
@ -30,10 +165,8 @@ class LLMConfig:
temperature: float = 0.7
timeout: int = 60
max_retries: int = 3
# OpenAI 专用
function_calling: bool = True
stream: bool = False
# Ollama / 本地模型
model_path: str = ""
ollama_host: str = "http://localhost:11434"
@ -41,7 +174,6 @@ class LLMConfig:
self.api_key = os.getenv("LLM_API_KEY", self.api_key)
self.api_base_url = os.getenv("LLM_API_BASE_URL", self.api_base_url)
self.model_name = os.getenv("LLM_MODEL_NAME", self.model_name)
self.model_path = os.getenv("LLM_MODEL_PATH", self.model_path)
@dataclass
@ -51,46 +183,11 @@ class MCPConfig:
host: str = "localhost"
port: int = 3000
enabled_tools: list[str] = field(default_factory=lambda: [
"calculator", "web_search", "file_reader", "code_executor"
"calculator", "web_search", "file_reader",
"code_executor", "static_analyzer", "ssh_docker",
])
@dataclass
class WebSearchToolConfig:
    """Web-search tool settings."""
    max_results: int = 5  # max hits returned per query
    timeout: int = 10     # per-request timeout, seconds
    api_key: str = ""     # provider API key; env var wins (see __post_init__)
    engine: str = "mock"  # search backend identifier
    def __post_init__(self):
        # The SEARCH_API_KEY environment variable overrides the configured key.
        self.api_key = os.getenv("SEARCH_API_KEY", self.api_key)
@dataclass
class FileReaderToolConfig:
    """File-reader tool settings."""
    allowed_root: str = "./workspace"  # reads are restricted to this directory — enforcement presumably in the tool, confirm
    max_file_size_kb: int = 512
@dataclass
class CodeExecutorToolConfig:
    """Code-executor tool settings."""
    timeout: int = 5      # execution time limit, seconds
    sandbox: bool = True  # whether snippets run sandboxed
@dataclass
class CalculatorToolConfig:
    """Calculator tool settings."""
    precision: int = 10  # numeric precision
@dataclass
class ToolsConfig:
    """Aggregate of all per-tool configuration dataclasses."""
    web_search: WebSearchToolConfig = field(default_factory=WebSearchToolConfig)
    file_reader: FileReaderToolConfig = field(default_factory=FileReaderToolConfig)
    code_executor: CodeExecutorToolConfig = field(default_factory=CodeExecutorToolConfig)
    calculator: CalculatorToolConfig = field(default_factory=CalculatorToolConfig)
@dataclass
class MemoryConfig:
max_history: int = 20
@ -114,39 +211,82 @@ class AgentConfig:
max_chain_steps: int = 10
enable_multi_step: bool = True
session_timeout: int = 3600
fallback_to_rules: bool = True # API 失败时降级到规则引擎
fallback_to_rules: bool = True
# ════════════════════════════════════════════════════════════════
# Top-level AppConfig
# ════════════════════════════════════════════════════════════════
class AppConfig:
    """Global configuration aggregate (exposed as the `settings` singleton).

    Access patterns:
        settings.llm.model_name
        settings.mcp.enabled_tools
        settings.tools['web_search']['timeout']
        settings.tools['static_analyzer']['tool_extra_args']['cppcheck']
        settings.tools['ssh_docker']['servers']['prod']['host']
        settings.memory.max_history
        settings.agent.fallback_to_rules
        settings.logging.level
    """

    def __init__(
        self,
        llm: LLMConfig,
        mcp: MCPConfig,
        tools: ToolsView,
        memory: MemoryConfig,
        logging: LoggingConfig,
        agent: AgentConfig,
    ):
        self.llm = llm
        self.mcp = mcp
        self.tools = tools
        self.memory = memory
        self.logging = logging
        self.agent = agent

    def display(self) -> str:
        """Render a human-readable summary of the active configuration."""
        sa = self.tools['static_analyzer']
        ssh = self.tools['ssh_docker']
        ws = self.tools['web_search']
        fr = self.tools['file_reader']
        ce = self.tools['code_executor']
        calc = self.tools['calculator']
        lines = [
            # NOTE(review): separator glyph reconstructed from runtime logs — confirm
            "─" * 62,
            " 📋 当前配置",
            "─" * 62,
            f" [LLM] provider = {self.llm.provider}",
            f" [LLM] model_name = {self.llm.model_name}",
            # Show only the key's tail so the secret never lands in logs.
            f" [LLM] api_key = {'***' + self.llm.api_key[-4:] if len(self.llm.api_key) > 4 else '(未设置)'}",
            f" [LLM] api_base_url = {self.llm.api_base_url or '(默认)'}",
            f" [LLM] function_calling = {self.llm.function_calling}",
            f" [LLM] temperature = {self.llm.temperature}",
            f" [MCP] enabled_tools = {self.mcp.enabled_tools}",
            f" [TOOL] calculator.precision= {calc['precision']}",
            f" [TOOL] web_search.engine = {ws['engine']}",
            f" [TOOL] web_search.timeout = {ws['timeout']}s",
            f" [TOOL] file_reader.root = {fr['allowed_root']}",
            f" [TOOL] code_executor.timeout={ce['timeout']}s",
            f" [TOOL] static_analyzer.tool = {sa['default_tool']}",
            f" [TOOL] static_analyzer.std = {sa['default_std']}",
            f" [TOOL] static_analyzer.timeout = {sa['timeout']}s",
            f" [TOOL] static_analyzer.jobs = {sa['jobs']}",
            f" [TOOL] static_analyzer.roots = {sa['allowed_roots'] or '(不限制)'}",
            f" [TOOL] ssh_docker.port = {ssh['default_ssh_port']}",
            f" [TOOL] ssh_docker.user = {ssh['default_username']}",
            f" [TOOL] ssh_docker.conn_timeout = {ssh['connect_timeout']}s",
            f" [TOOL] ssh_docker.deploy_timeout= {ssh['deploy_timeout']}s",
            f" [TOOL] ssh_docker.allowed_hosts = {ssh['allowed_hosts'] or '(不限制)'}",
            f" [TOOL] ssh_docker.servers = {list(ssh['servers'].keys()) or '(无预设)'}",
            f" [MEM] max_history = {self.memory.max_history}",
            f" [AGT] fallback_rules = {self.agent.fallback_to_rules}",
            f" [LOG] level = {self.logging.level}",
            "─" * 62,
        ]
        return "\n".join(lines)
@ -156,8 +296,8 @@ class AppConfig:
# ════════════════════════════════════════════════════════════════
class ConfigLoader:
    """Locate and read config.yaml, then materialize it into AppConfig."""

    # Search order: env-specified path first, then conventional locations.
    # "__none__" is a sentinel that simply fails the later .exists() check
    # when AGENT_CONFIG_PATH is unset.
    _SEARCH_PATHS = [
        Path(os.getenv("AGENT_CONFIG_PATH", "__none__")),
        Path("config") / "config.yaml",
        Path("config.yaml"),
    ]
@ -165,15 +305,15 @@ class ConfigLoader:
@classmethod
def load(cls) -> AppConfig:
    """Read the YAML config (if any) and build the full AppConfig.

    A missing or empty YAML file yields a config built purely from
    _DEFAULTS.
    """
    raw = cls._read_yaml()
    # _read_yaml returns None when no file was found/parsed; an empty
    # dict makes every section fall back to its defaults in _build.
    return cls._build(raw if raw is not None else {})
@classmethod
def _read_yaml(cls) -> dict[str, Any] | None:
if not _YAML_AVAILABLE:
print("⚠️ PyYAML 未安装pip install pyyaml使用默认配置")
return None
for path in cls._CONFIG_SEARCH_PATHS:
if path and path.exists():
for path in cls._SEARCH_PATHS:
if path and path.exists() and path.suffix in (".yaml", ".yml"):
with open(path, encoding="utf-8") as f:
data = yaml.safe_load(f)
print(f"✅ 已加载配置文件: {path.resolve()}")
@ -182,97 +322,129 @@ class ConfigLoader:
return None
@classmethod
def _build(cls, raw: dict[str, Any]) -> AppConfig:
    """Assemble AppConfig from a raw YAML mapping.

    Each missing top-level section defaults to {} so the per-section
    builders fall back to _DEFAULTS.
    """
    return AppConfig(
        llm=cls._build_llm(raw.get("llm", {})),
        mcp=cls._build_mcp(raw.get("mcp", {})),
        tools=cls._build_tools(raw.get("tools", {})),
        memory=cls._build_memory(raw.get("memory", {})),
        logging=cls._build_logging(raw.get("logging", {})),
        agent=cls._build_agent(raw.get("agent", {})),
    )
# ── LLM ───────────────────────────────────────────────────
@staticmethod
def _build_llm(d: dict) -> LLMConfig:
    """Build LLMConfig; every missing key falls back to _DEFAULTS['llm']."""
    df = _DEFAULTS["llm"]
    return LLMConfig(
        provider=d.get("provider", df["provider"]),
        model_name=d.get("model_name", df["model_name"]),
        api_key=d.get("api_key", df["api_key"]),
        api_base_url=d.get("api_base_url", df["api_base_url"]),
        # Numeric fields are coerced so YAML strings still work.
        max_tokens=int(d.get("max_tokens", df["max_tokens"])),
        temperature=float(d.get("temperature", df["temperature"])),
        timeout=int(d.get("timeout", df["timeout"])),
        max_retries=int(d.get("max_retries", df["max_retries"])),
        function_calling=bool(d.get("function_calling", df["function_calling"])),
        stream=bool(d.get("stream", df["stream"])),
        model_path=d.get("model_path", df["model_path"]),
        ollama_host=d.get("ollama_host", df["ollama_host"]),
    )
# ── MCP ───────────────────────────────────────────────────
@staticmethod
def _build_mcp(d: dict) -> MCPConfig:
    """Build MCPConfig; every missing key falls back to _DEFAULTS['mcp']."""
    df = _DEFAULTS["mcp"]
    return MCPConfig(
        server_name=d.get("server_name", df["server_name"]),
        transport=d.get("transport", df["transport"]),
        host=d.get("host", df["host"]),
        port=int(d.get("port", df["port"])),
        enabled_tools=d.get("enabled_tools", df["enabled_tools"]),
    )
@staticmethod
def _parse_tools(d: dict) -> ToolsConfig:
    """Build ToolsConfig from the raw `tools:` YAML mapping.

    Each missing tool section/key falls back to the hard-coded default.
    """
    ws = d.get("web_search", {})
    fr = d.get("file_reader", {})
    ce = d.get("code_executor", {})
    ca = d.get("calculator", {})
    return ToolsConfig(
        web_search=WebSearchToolConfig(
            max_results=int(ws.get("max_results", 5)),
            timeout=int(ws.get("timeout", 10)),
            api_key=ws.get("api_key", ""),
            engine=ws.get("engine", "mock"),
        ),
        file_reader=FileReaderToolConfig(
            allowed_root=fr.get("allowed_root", "./workspace"),
            max_file_size_kb=int(fr.get("max_file_size_kb", 512)),
        ),
        code_executor=CodeExecutorToolConfig(
            timeout=int(ce.get("timeout", 5)),
            sandbox=bool(ce.get("sandbox", True)),
        ),
        calculator=CalculatorToolConfig(
            precision=int(ca.get("precision", 10)),
        ),
    )
# ── Tools: plain dicts, deep-merged over defaults ─────────────
@classmethod
def _build_tools(cls, d: dict) -> ToolsView:
    """Merge the raw `tools:` YAML mapping over _DEFAULTS['tools'].

    Known tools are deep-merged with their defaults; tools that appear
    only in the YAML are passed through as-is. Environment-variable
    overrides are applied last.
    """
    defaults = _DEFAULTS["tools"]
    merged: dict[str, dict] = {
        name: cls._deep_merge(base, d.get(name, {}))
        for name, base in defaults.items()
    }
    # Extra tools defined only in the YAML (non-dict values become {}).
    for name, cfg in d.items():
        if name not in merged:
            merged[name] = cfg if isinstance(cfg, dict) else {}
    cls._apply_env_overrides(merged)
    return ToolsView(merged)
@staticmethod
def _deep_merge(base: dict, override: dict) -> dict:
    """Deep-merge *override* onto *base* without mutating either input.

    Values from *override* win; nested dicts are merged recursively,
    any other value type replaces the base value wholesale.
    """
    result = dict(base)
    for key, val in override.items():
        if (
            key in result
            and isinstance(result[key], dict)
            and isinstance(val, dict)
        ):
            # Recurse via the class name — a bare name would not resolve
            # inside a staticmethod at call time.
            result[key] = ConfigLoader._deep_merge(result[key], val)
        else:
            result[key] = val
    return result
@staticmethod
def _apply_env_overrides(tools: dict[str, dict]) -> None:
"""从环境变量覆盖特定工具配置"""
# web_search.api_key
if api_key := os.getenv("SEARCH_API_KEY"):
tools["web_search"]["api_key"] = api_key
# ssh_docker servers 密码(格式: SSH_<SERVER_NAME>_PASSWORD
for server_name, srv in tools.get("ssh_docker", {}).get("servers", {}).items():
if isinstance(srv, dict) and not srv.get("password"):
env_key = f"SSH_{server_name.upper()}_PASSWORD"
if pw := os.getenv(env_key):
srv["password"] = pw
# ── Memory / Logging / Agent ──────────────────────────────
@staticmethod
def _build_memory(d: dict) -> MemoryConfig:
    """Build MemoryConfig; missing keys fall back to _DEFAULTS['memory']."""
    df = _DEFAULTS["memory"]
    return MemoryConfig(
        max_history=int(d.get("max_history", df["max_history"])),
        enable_long_term=bool(d.get("enable_long_term", df["enable_long_term"])),
        vector_db_url=d.get("vector_db_url", df["vector_db_url"]),
    )
@staticmethod
def _build_logging(d: dict) -> LoggingConfig:
    """Build LoggingConfig; missing keys fall back to _DEFAULTS['logging']."""
    df = _DEFAULTS["logging"]
    return LoggingConfig(
        level=d.get("level", df["level"]),
        enable_file=bool(d.get("enable_file", df["enable_file"])),
        log_dir=d.get("log_dir", df["log_dir"]),
        log_file=d.get("log_file", df["log_file"]),
    )
@staticmethod
def _build_agent(d: dict) -> AgentConfig:
    """Build AgentConfig; missing keys fall back to _DEFAULTS['agent']."""
    df = _DEFAULTS["agent"]
    return AgentConfig(
        max_chain_steps=int(d.get("max_chain_steps", df["max_chain_steps"])),
        enable_multi_step=bool(d.get("enable_multi_step", df["enable_multi_step"])),
        session_timeout=int(d.get("session_timeout", df["session_timeout"])),
        fallback_to_rules=bool(d.get("fallback_to_rules", df["fallback_to_rules"])),
    )
# ── Global singleton (built once at module import time) ──────────
settings: AppConfig = ConfigLoader.load()

View File

@ -1589,3 +1589,420 @@ The function `get_system_name()` uses `platform.system()` to determine the syste
34*56 = 190...
[2026-03-09 13:35:15,930] [agent.CLIENT] INFO: 🎉 [CLIENT] 流程完成,回复已返回
[2026-03-09 13:39:06,494] [agent.SYSTEM] INFO: 🔧 开始组装 Agent 系统OpenAI Function Calling 模式)...
[2026-03-09 13:39:06,494] [agent.SYSTEM] INFO: ────────────────────────────────────────────────────
📋 当前配置
────────────────────────────────────────────────────
[LLM] provider = openai
[LLM] model_name = gpt-4o
[LLM] api_key = ***
[LLM] api_base_url = https://openapi.monica.im/v1
[LLM] temperature = 0.7
[LLM] max_tokens = 4096
[LLM] function_calling = True
[LLM] stream = False
[LLM] max_retries = 3
[MCP] server_name = DemoMCPServer
[MCP] enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor']
[MEMORY] max_history = 20
[AGENT] multi_step = True
[AGENT] fallback_rules = True
[LOG] level = DEBUG
────────────────────────────────────────────────────
[2026-03-09 13:39:06,495] [agent.MCP] INFO: 🚀 MCP Server [DemoMCPServer] 启动
[2026-03-09 13:39:06,495] [agent.MCP] INFO: transport = stdio
[2026-03-09 13:39:06,496] [agent.MCP] INFO: enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor']
[2026-03-09 13:39:06,497] [agent.TOOL] DEBUG: ⚙️ Calculator 精度: 10
[2026-03-09 13:39:06,498] [agent.MCP] INFO: 📌 注册工具: [calculator] — 计算数学表达式,支持加减乘除、幂运算、括号等
[2026-03-09 13:39:06,498] [agent.TOOL] DEBUG: ⚙️ WebSearch engine=mock, max_results=5, api_key=(未设置)
[2026-03-09 13:39:06,498] [agent.MCP] INFO: 📌 注册工具: [web_search] — 在互联网上搜索信息,返回相关网页摘要
[2026-03-09 13:39:06,498] [agent.TOOL] DEBUG: ⚙️ FileReader root=workspace, max_size=512KB
[2026-03-09 13:39:06,498] [agent.MCP] INFO: 📌 注册工具: [file_reader] — 读取本地文件内容,仅限配置的 allowed_root 目录
[2026-03-09 13:39:06,499] [agent.TOOL] DEBUG: ⚙️ CodeExecutor timeout=5s, sandbox=True
[2026-03-09 13:39:06,499] [agent.MCP] INFO: 📌 注册工具: [code_executor] — 在沙箱环境中执行 Python 代码片段,返回标准输出
[2026-03-09 13:39:06,499] [agent.LLM] INFO: 🏭 Provider 工厂: 创建 [openai] Provider
[2026-03-09 13:39:08,884] [agent.LLM] INFO: 🔗 使用自定义 API 地址: https://openapi.monica.im/v1
[2026-03-09 13:39:09,057] [agent.LLM] INFO: ✅ OpenAI 客户端初始化完成
model = gpt-4o
base_url = https://openapi.monica.im/v1
max_retries= 3
[2026-03-09 13:39:09,058] [agent.LLM] INFO: 🧠 LLM 引擎初始化完成
[2026-03-09 13:39:09,058] [agent.LLM] INFO: provider = openai
[2026-03-09 13:39:09,058] [agent.LLM] INFO: model_name = gpt-4o
[2026-03-09 13:39:09,058] [agent.LLM] INFO: function_calling = True
[2026-03-09 13:39:09,058] [agent.LLM] INFO: temperature = 0.7
[2026-03-09 13:39:09,059] [agent.LLM] INFO: fallback_rules = True
[2026-03-09 13:39:09,059] [agent.MEMORY] INFO: 💾 Memory 初始化,最大历史: 20 条
[2026-03-09 13:39:09,060] [agent.CLIENT] INFO: 💻 Agent Client 初始化完成OpenAI Function Calling 模式)
[2026-03-09 13:39:09,061] [agent.SYSTEM] INFO: ✅ Agent 组装完成,已注册工具: ['calculator', 'web_search', 'file_reader', 'code_executor']
[2026-03-09 13:39:25,884] [agent.CLIENT] INFO: ════════════════════════════════════════════════════════════
[2026-03-09 13:39:25,884] [agent.CLIENT] INFO: 📨 收到用户输入: 先计算34乘以12再将结果乘以56
[2026-03-09 13:39:25,884] [agent.CLIENT] INFO: ════════════════════════════════════════════════════════════
[2026-03-09 13:39:25,885] [agent.MEMORY] DEBUG: 💬 [USER] 先计算34乘以12再将结果乘以56...
[2026-03-09 13:39:25,885] [agent.CLIENT] INFO: 🗺 [LLM] 规划工具调用链...
[2026-03-09 13:39:25,885] [agent.LLM] INFO: 🗺 规划工具调用链: 先计算34乘以12再将结果乘以56...
[2026-03-09 13:39:25,886] [agent.LLM] DEBUG: 📤 发送规划请求tools 数量: 4
[2026-03-09 13:39:25,886] [agent.LLM] DEBUG: 📤 消息历史长度: 3
[2026-03-09 13:39:28,752] [agent.LLM] INFO: 📊 Token 用量: prompt=405, completion=17
[2026-03-09 13:39:28,752] [agent.LLM] INFO: 📋 解析到 1 个工具调用步骤
[2026-03-09 13:39:28,753] [agent.LLM] INFO: 📋 OpenAI 规划完成: 1 步
[2026-03-09 13:39:28,753] [agent.LLM] INFO: Step 1: [calculator] args={'expression': '34*12'}
[2026-03-09 13:39:28,753] [agent.CLIENT] INFO:
────────────────────────────────────────────────────────────
🔗 开始执行工具调用链
目标: calculator
步骤: 1 步
────────────────────────────────────────────────────────────
[2026-03-09 13:39:28,753] [agent.CLIENT] DEBUG: 🔑 预生成 tool_call_ids: {1: 'call_d656a0cfe9ab'}
[2026-03-09 13:39:28,753] [agent.CLIENT] INFO:
▶ Step 1 执行中
工具 : [calculator]
说明 : 调用 calculator由 OpenAI Function Calling 规划)
参数 : {'expression': '34*12'}
call_id : call_d656a0cfe9ab
[2026-03-09 13:39:28,754] [agent.MCP] INFO: 📨 收到请求 id=df72b664 method=tools/call transport=stdio
[2026-03-09 13:39:28,754] [agent.TOOL] INFO: ▶ 执行工具 [calculator],参数: {'expression': '34*12'}
[2026-03-09 13:39:28,754] [agent.TOOL] INFO: ✅ 工具 [calculator] 执行成功
[2026-03-09 13:39:28,754] [agent.CLIENT] INFO: ✅ Step 1 成功: 34*12 = 408...
[2026-03-09 13:39:28,754] [agent.MEMORY] DEBUG: 💬 [TOOL] 34*12 = 408...
[2026-03-09 13:39:28,754] [agent.CLIENT] DEBUG: 📦 OpenAI 消息块结构:
[2026-03-09 13:39:28,754] [agent.CLIENT] DEBUG: [0] assistant tool_calls.ids = ['call_d656a0cfe9ab']
[2026-03-09 13:39:28,755] [agent.CLIENT] DEBUG: [1] tool tool_call_id = call_d656a0cfe9ab content = 34*12 = 408...
[2026-03-09 13:39:28,755] [agent.CLIENT] INFO: ────────────────────────────────────────────────────────────
✅ 调用链执行完成
完成: 1/1 步
────────────────────────────────────────────────────────────
[2026-03-09 13:39:28,755] [agent.CLIENT] INFO: ✍️ [LLM] 调用 OpenAI 生成最终回复...
[2026-03-09 13:39:28,755] [agent.LLM] INFO: ✍️ 生成最终回复(工具调用链模式)...
[2026-03-09 13:39:28,755] [agent.LLM] DEBUG: 📤 发送回复请求,消息数: 4
[2026-03-09 13:39:28,755] [agent.LLM] DEBUG: 📋 消息序列结构:
[2026-03-09 13:39:28,755] [agent.LLM] DEBUG: [0] system 你是一个友好、专业的 AI 助手。
请基于已执行的工具调用结果,用清晰、自然的语言回答用户的问题。
...
[2026-03-09 13:39:28,755] [agent.LLM] DEBUG: [1] user 先计算34乘以12再将结果乘以56...
[2026-03-09 13:39:28,755] [agent.LLM] DEBUG: [2] assistant tool_calls=['calculator'] ids=['call_d656a0cfe9ab']
[2026-03-09 13:39:28,755] [agent.LLM] DEBUG: [3] tool tool_call_id=call_d656a0cfe9ab content=34*12 = 408...
[2026-03-09 13:39:28,756] [agent.LLM] DEBUG: 📤 发送回复生成请求,消息长度: 4
[2026-03-09 13:39:30,293] [agent.LLM] INFO: ✅ 回复生成成功,长度: 27 charsToken: 18
[2026-03-09 13:39:30,294] [agent.LLM] INFO: ✅ OpenAI 回复生成成功 (27 chars)
[2026-03-09 13:39:30,294] [agent.MEMORY] DEBUG: 💬 [ASSISTANT] 34乘以12的结果是408。接下来计算408乘以56。...
[2026-03-09 13:39:30,294] [agent.CLIENT] INFO: 🎉 流程完成,回复已返回
[2026-03-09 14:03:41,261] [agent.TOOL.SSHDocker] WARNING: ⚠️ paramiko 未安装,请执行: pip install paramiko>=3.0.0
[2026-03-09 14:03:41,271] [agent.SYSTEM] INFO: 🔧 开始组装 Agent 系统OpenAI Function Calling 模式)...
[2026-03-09 14:03:41,271] [agent.SYSTEM] INFO: ──────────────────────────────────────────────────────────────
📋 当前配置
──────────────────────────────────────────────────────────────
[LLM] provider = openai
[LLM] model_name = gpt-4o
[LLM] api_key = ***ACjR
[LLM] api_base_url = https://openapi.monica.im/v1
[LLM] function_calling = True
[LLM] temperature = 0.7
[MCP] enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor', 'static_analyzer', 'ssh_docker']
[TOOL] calculator.precision= 10
[TOOL] web_search.engine = mock
[TOOL] web_search.timeout = 10s
[TOOL] file_reader.root = ./workspace
[TOOL] code_executor.timeout=5s
[TOOL] static_analyzer.tool = cppcheck
[TOOL] static_analyzer.std = c++17
[TOOL] static_analyzer.timeout = 120s
[TOOL] static_analyzer.jobs = 4
[TOOL] static_analyzer.roots = (不限制)
[TOOL] ssh_docker.port = 22
[TOOL] ssh_docker.user = root
[TOOL] ssh_docker.conn_timeout = 30s
[TOOL] ssh_docker.deploy_timeout= 300s
[TOOL] ssh_docker.allowed_hosts = (不限制)
[TOOL] ssh_docker.servers = (无预设)
[MEM] max_history = 20
[AGT] fallback_rules = True
[LOG] level = DEBUG
──────────────────────────────────────────────────────────────
[2026-03-09 14:03:41,276] [agent.MCP] INFO: 🚀 MCP Server [DemoMCPServer] 启动
[2026-03-09 14:03:41,278] [agent.MCP] INFO: transport = stdio
[2026-03-09 14:03:41,278] [agent.MCP] INFO: enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor', 'static_analyzer', 'ssh_docker']
[2026-03-09 14:05:16,524] [agent.TOOL.SSHDocker] WARNING: ⚠️ paramiko 未安装,请执行: pip install paramiko>=3.0.0
[2026-03-09 14:05:16,529] [agent.SYSTEM] INFO: 🔧 开始组装 Agent 系统OpenAI Function Calling 模式)...
[2026-03-09 14:05:16,529] [agent.SYSTEM] INFO: ──────────────────────────────────────────────────────────────
📋 当前配置
──────────────────────────────────────────────────────────────
[LLM] provider = openai
[LLM] model_name = gpt-4o
[LLM] api_key = ***ACjR
[LLM] api_base_url = https://openapi.monica.im/v1
[LLM] function_calling = True
[LLM] temperature = 0.7
[MCP] enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor', 'static_analyzer', 'ssh_docker']
[TOOL] calculator.precision= 10
[TOOL] web_search.engine = mock
[TOOL] web_search.timeout = 10s
[TOOL] file_reader.root = ./workspace
[TOOL] code_executor.timeout=5s
[TOOL] static_analyzer.tool = cppcheck
[TOOL] static_analyzer.std = c++17
[TOOL] static_analyzer.timeout = 120s
[TOOL] static_analyzer.jobs = 4
[TOOL] static_analyzer.roots = (不限制)
[TOOL] ssh_docker.port = 22
[TOOL] ssh_docker.user = root
[TOOL] ssh_docker.conn_timeout = 30s
[TOOL] ssh_docker.deploy_timeout= 300s
[TOOL] ssh_docker.allowed_hosts = (不限制)
[TOOL] ssh_docker.servers = (无预设)
[MEM] max_history = 20
[AGT] fallback_rules = True
[LOG] level = DEBUG
──────────────────────────────────────────────────────────────
[2026-03-09 14:05:16,530] [agent.MCP] INFO: 🚀 MCP Server [DemoMCPServer] 启动
[2026-03-09 14:05:16,530] [agent.MCP] INFO: transport = stdio
[2026-03-09 14:05:16,531] [agent.MCP] INFO: enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor', 'static_analyzer', 'ssh_docker']
[2026-03-09 14:06:01,148] [agent.TOOL.SSHDocker] WARNING: ⚠️ paramiko 未安装,请执行: pip install paramiko>=3.0.0
[2026-03-09 14:06:01,156] [agent.SYSTEM] INFO: 🔧 开始组装 Agent 系统OpenAI Function Calling 模式)...
[2026-03-09 14:06:01,157] [agent.SYSTEM] INFO: ──────────────────────────────────────────────────────────────
📋 当前配置
──────────────────────────────────────────────────────────────
[LLM] provider = openai
[LLM] model_name = gpt-4o
[LLM] api_key = ***ACjR
[LLM] api_base_url = https://openapi.monica.im/v1
[LLM] function_calling = True
[LLM] temperature = 0.7
[MCP] enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor', 'static_analyzer', 'ssh_docker']
[TOOL] calculator.precision= 10
[TOOL] web_search.engine = mock
[TOOL] web_search.timeout = 10s
[TOOL] file_reader.root = ./workspace
[TOOL] code_executor.timeout=5s
[TOOL] static_analyzer.tool = cppcheck
[TOOL] static_analyzer.std = c++17
[TOOL] static_analyzer.timeout = 120s
[TOOL] static_analyzer.jobs = 4
[TOOL] static_analyzer.roots = (不限制)
[TOOL] ssh_docker.port = 22
[TOOL] ssh_docker.user = root
[TOOL] ssh_docker.conn_timeout = 30s
[TOOL] ssh_docker.deploy_timeout= 300s
[TOOL] ssh_docker.allowed_hosts = (不限制)
[TOOL] ssh_docker.servers = (无预设)
[MEM] max_history = 20
[AGT] fallback_rules = True
[LOG] level = DEBUG
──────────────────────────────────────────────────────────────
[2026-03-09 14:06:01,161] [agent.MCP] INFO: 🚀 MCP Server [DemoMCPServer] 启动
[2026-03-09 14:06:01,161] [agent.MCP] INFO: transport = stdio
[2026-03-09 14:06:01,161] [agent.MCP] INFO: enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor', 'static_analyzer', 'ssh_docker']
[2026-03-09 14:06:11,760] [agent.TOOL.SSHDocker] WARNING: ⚠️ paramiko 未安装,请执行: pip install paramiko>=3.0.0
[2026-03-09 14:06:11,766] [agent.SYSTEM] INFO: 🔧 开始组装 Agent 系统OpenAI Function Calling 模式)...
[2026-03-09 14:06:11,766] [agent.SYSTEM] INFO: ──────────────────────────────────────────────────────────────
📋 当前配置
──────────────────────────────────────────────────────────────
[LLM] provider = openai
[LLM] model_name = gpt-4o
[LLM] api_key = ***ACjR
[LLM] api_base_url = https://openapi.monica.im/v1
[LLM] function_calling = True
[LLM] temperature = 0.7
[MCP] enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor', 'static_analyzer', 'ssh_docker']
[TOOL] calculator.precision= 10
[TOOL] web_search.engine = mock
[TOOL] web_search.timeout = 10s
[TOOL] file_reader.root = ./workspace
[TOOL] code_executor.timeout=5s
[TOOL] static_analyzer.tool = cppcheck
[TOOL] static_analyzer.std = c++17
[TOOL] static_analyzer.timeout = 120s
[TOOL] static_analyzer.jobs = 4
[TOOL] static_analyzer.roots = (不限制)
[TOOL] ssh_docker.port = 22
[TOOL] ssh_docker.user = root
[TOOL] ssh_docker.conn_timeout = 30s
[TOOL] ssh_docker.deploy_timeout= 300s
[TOOL] ssh_docker.allowed_hosts = (不限制)
[TOOL] ssh_docker.servers = (无预设)
[MEM] max_history = 20
[AGT] fallback_rules = True
[LOG] level = DEBUG
──────────────────────────────────────────────────────────────
[2026-03-09 14:06:11,767] [agent.MCP] INFO: 🚀 MCP Server [DemoMCPServer] 启动
[2026-03-09 14:06:11,767] [agent.MCP] INFO: transport = stdio
[2026-03-09 14:06:11,768] [agent.MCP] INFO: enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor', 'static_analyzer', 'ssh_docker']
[2026-03-09 14:06:11,768] [agent.TOOL] DEBUG: ⚙️ Calculator 精度: 10
[2026-03-09 14:06:11,769] [agent.MCP] INFO: 📌 注册工具: [calculator] — 计算数学表达式,支持加减乘除、幂运算、括号等
[2026-03-09 14:06:22,925] [agent.TOOL.SSHDocker] WARNING: ⚠️ paramiko 未安装,请执行: pip install paramiko>=3.0.0
[2026-03-09 14:06:22,939] [agent.SYSTEM] INFO: 🔧 开始组装 Agent 系统OpenAI Function Calling 模式)...
[2026-03-09 14:06:22,940] [agent.SYSTEM] INFO: ──────────────────────────────────────────────────────────────
📋 当前配置
──────────────────────────────────────────────────────────────
[LLM] provider = openai
[LLM] model_name = gpt-4o
[LLM] api_key = ***ACjR
[LLM] api_base_url = https://openapi.monica.im/v1
[LLM] function_calling = True
[LLM] temperature = 0.7
[MCP] enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor', 'static_analyzer', 'ssh_docker']
[TOOL] calculator.precision= 10
[TOOL] web_search.engine = mock
[TOOL] web_search.timeout = 10s
[TOOL] file_reader.root = ./workspace
[TOOL] code_executor.timeout=5s
[TOOL] static_analyzer.tool = cppcheck
[TOOL] static_analyzer.std = c++17
[TOOL] static_analyzer.timeout = 120s
[TOOL] static_analyzer.jobs = 4
[TOOL] static_analyzer.roots = (不限制)
[TOOL] ssh_docker.port = 22
[TOOL] ssh_docker.user = root
[TOOL] ssh_docker.conn_timeout = 30s
[TOOL] ssh_docker.deploy_timeout= 300s
[TOOL] ssh_docker.allowed_hosts = (不限制)
[TOOL] ssh_docker.servers = (无预设)
[MEM] max_history = 20
[AGT] fallback_rules = True
[LOG] level = DEBUG
──────────────────────────────────────────────────────────────
[2026-03-09 14:06:22,942] [agent.MCP] INFO: 🚀 MCP Server [DemoMCPServer] 启动
[2026-03-09 14:06:22,942] [agent.MCP] INFO: transport = stdio
[2026-03-09 14:06:22,942] [agent.MCP] INFO: enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor', 'static_analyzer', 'ssh_docker']
[2026-03-09 14:06:22,943] [agent.TOOL] DEBUG: ⚙️ Calculator 精度: 10
[2026-03-09 14:06:22,943] [agent.MCP] INFO: 📌 注册工具: [calculator] — 计算数学表达式,支持加减乘除、幂运算、括号等
[2026-03-09 14:06:44,043] [agent.TOOL.SSHDocker] WARNING: ⚠️ paramiko 未安装,请执行: pip install paramiko>=3.0.0
[2026-03-09 14:06:44,046] [agent.SYSTEM] INFO: 🔧 开始组装 Agent 系统OpenAI Function Calling 模式)...
[2026-03-09 14:06:44,046] [agent.SYSTEM] INFO: ──────────────────────────────────────────────────────────────
📋 当前配置
──────────────────────────────────────────────────────────────
[LLM] provider = openai
[LLM] model_name = gpt-4o
[LLM] api_key = ***ACjR
[LLM] api_base_url = https://openapi.monica.im/v1
[LLM] function_calling = True
[LLM] temperature = 0.7
[MCP] enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor', 'static_analyzer', 'ssh_docker']
[TOOL] calculator.precision= 10
[TOOL] web_search.engine = mock
[TOOL] web_search.timeout = 10s
[TOOL] file_reader.root = ./workspace
[TOOL] code_executor.timeout=5s
[TOOL] static_analyzer.tool = cppcheck
[TOOL] static_analyzer.std = c++17
[TOOL] static_analyzer.timeout = 120s
[TOOL] static_analyzer.jobs = 4
[TOOL] static_analyzer.roots = (不限制)
[TOOL] ssh_docker.port = 22
[TOOL] ssh_docker.user = root
[TOOL] ssh_docker.conn_timeout = 30s
[TOOL] ssh_docker.deploy_timeout= 300s
[TOOL] ssh_docker.allowed_hosts = (不限制)
[TOOL] ssh_docker.servers = (无预设)
[MEM] max_history = 20
[AGT] fallback_rules = True
[LOG] level = DEBUG
──────────────────────────────────────────────────────────────
[2026-03-09 14:06:44,048] [agent.MCP] INFO: 🚀 MCP Server [DemoMCPServer] 启动
[2026-03-09 14:06:44,048] [agent.MCP] INFO: transport = stdio
[2026-03-09 14:06:44,048] [agent.MCP] INFO: enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor', 'static_analyzer', 'ssh_docker']
[2026-03-09 14:06:44,049] [agent.TOOL] DEBUG: ⚙️ Calculator 精度: 10
[2026-03-09 14:06:44,049] [agent.MCP] INFO: 📌 注册工具: [calculator] — 计算数学表达式,支持加减乘除、幂运算、括号等
[2026-03-09 14:06:44,049] [agent.TOOL] DEBUG: ⚙️ WebSearch engine=mock, max_results=5, api_key=(未设置)
[2026-03-09 14:06:44,050] [agent.MCP] INFO: 📌 注册工具: [web_search] — 在互联网上搜索信息,返回相关网页摘要
[2026-03-09 14:07:04,527] [agent.TOOL.SSHDocker] WARNING: ⚠️ paramiko 未安装,请执行: pip install paramiko>=3.0.0
[2026-03-09 14:07:04,531] [agent.SYSTEM] INFO: 🔧 开始组装 Agent 系统OpenAI Function Calling 模式)...
[2026-03-09 14:07:04,532] [agent.SYSTEM] INFO: ──────────────────────────────────────────────────────────────
📋 当前配置
──────────────────────────────────────────────────────────────
[LLM] provider = openai
[LLM] model_name = gpt-4o
[LLM] api_key = ***ACjR
[LLM] api_base_url = https://openapi.monica.im/v1
[LLM] function_calling = True
[LLM] temperature = 0.7
[MCP] enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor', 'static_analyzer', 'ssh_docker']
[TOOL] calculator.precision= 10
[TOOL] web_search.engine = mock
[TOOL] web_search.timeout = 10s
[TOOL] file_reader.root = ./workspace
[TOOL] code_executor.timeout=5s
[TOOL] static_analyzer.tool = cppcheck
[TOOL] static_analyzer.std = c++17
[TOOL] static_analyzer.timeout = 120s
[TOOL] static_analyzer.jobs = 4
[TOOL] static_analyzer.roots = (不限制)
[TOOL] ssh_docker.port = 22
[TOOL] ssh_docker.user = root
[TOOL] ssh_docker.conn_timeout = 30s
[TOOL] ssh_docker.deploy_timeout= 300s
[TOOL] ssh_docker.allowed_hosts = (不限制)
[TOOL] ssh_docker.servers = (无预设)
[MEM] max_history = 20
[AGT] fallback_rules = True
[LOG] level = DEBUG
──────────────────────────────────────────────────────────────
[2026-03-09 14:07:04,532] [agent.MCP] INFO: 🚀 MCP Server [DemoMCPServer] 启动
[2026-03-09 14:07:04,533] [agent.MCP] INFO: transport = stdio
[2026-03-09 14:07:04,533] [agent.MCP] INFO: enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor', 'static_analyzer', 'ssh_docker']
[2026-03-09 14:07:04,533] [agent.TOOL] DEBUG: ⚙️ Calculator 精度: 10
[2026-03-09 14:07:04,533] [agent.MCP] INFO: 📌 注册工具: [calculator] — 计算数学表达式,支持加减乘除、幂运算、括号等
[2026-03-09 14:07:04,533] [agent.TOOL] DEBUG: ⚙️ WebSearch engine=mock, max_results=5, api_key=(未设置)
[2026-03-09 14:07:04,533] [agent.MCP] INFO: 📌 注册工具: [web_search] — 在互联网上搜索信息,返回相关网页摘要
[2026-03-09 14:07:04,534] [agent.TOOL] DEBUG: ⚙️ FileReader root=workspace, max_size=512KB
[2026-03-09 14:07:04,534] [agent.MCP] INFO: 📌 注册工具: [file_reader] — 读取本地文件内容,仅限配置的 allowed_root 目录
[2026-03-09 14:07:40,023] [agent.TOOL.SSHDocker] WARNING: ⚠️ paramiko 未安装,请执行: pip install paramiko>=3.0.0
[2026-03-09 14:07:40,029] [agent.SYSTEM] INFO: 🔧 开始组装 Agent 系统OpenAI Function Calling 模式)...
[2026-03-09 14:07:40,029] [agent.SYSTEM] INFO: ──────────────────────────────────────────────────────────────
📋 当前配置
──────────────────────────────────────────────────────────────
[LLM] provider = openai
[LLM] model_name = gpt-4o
[LLM] api_key = ***ACjR
[LLM] api_base_url = https://openapi.monica.im/v1
[LLM] function_calling = True
[LLM] temperature = 0.7
[MCP] enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor', 'static_analyzer', 'ssh_docker']
[TOOL] calculator.precision= 10
[TOOL] web_search.engine = mock
[TOOL] web_search.timeout = 10s
[TOOL] file_reader.root = ./workspace
[TOOL] code_executor.timeout=5s
[TOOL] static_analyzer.tool = cppcheck
[TOOL] static_analyzer.std = c++17
[TOOL] static_analyzer.timeout = 120s
[TOOL] static_analyzer.jobs = 4
[TOOL] static_analyzer.roots = (不限制)
[TOOL] ssh_docker.port = 22
[TOOL] ssh_docker.user = root
[TOOL] ssh_docker.conn_timeout = 30s
[TOOL] ssh_docker.deploy_timeout= 300s
[TOOL] ssh_docker.allowed_hosts = (不限制)
[TOOL] ssh_docker.servers = (无预设)
[MEM] max_history = 20
[AGT] fallback_rules = True
[LOG] level = DEBUG
──────────────────────────────────────────────────────────────
[2026-03-09 14:07:40,032] [agent.MCP] INFO: 🚀 MCP Server [DemoMCPServer] 启动
[2026-03-09 14:07:40,032] [agent.MCP] INFO: transport = stdio
[2026-03-09 14:07:40,032] [agent.MCP] INFO: enabled_tools = ['calculator', 'web_search', 'file_reader', 'code_executor', 'static_analyzer', 'ssh_docker']
[2026-03-09 14:07:40,033] [agent.TOOL] DEBUG: ⚙️ Calculator 精度: 10
[2026-03-09 14:07:40,034] [agent.MCP] INFO: 📌 注册工具: [calculator] — 计算数学表达式,支持加减乘除、幂运算、括号等
[2026-03-09 14:07:40,034] [agent.TOOL] DEBUG: ⚙️ WebSearch engine=mock, max_results=5, api_key=(未设置)
[2026-03-09 14:07:40,035] [agent.MCP] INFO: 📌 注册工具: [web_search] — 在互联网上搜索信息,返回相关网页摘要
[2026-03-09 14:07:40,035] [agent.TOOL] DEBUG: ⚙️ FileReader root=workspace, max_size=512KB
[2026-03-09 14:07:40,035] [agent.MCP] INFO: 📌 注册工具: [file_reader] — 读取本地文件内容,仅限配置的 allowed_root 目录
[2026-03-09 14:07:40,036] [agent.TOOL] DEBUG: ⚙️ CodeExecutor timeout=5s, sandbox=True
[2026-03-09 14:07:40,036] [agent.MCP] INFO: 📌 注册工具: [code_executor] — 在沙箱环境中执行 Python 代码片段,返回标准输出
[2026-03-09 14:07:40,036] [agent.MCP] INFO: 📌 注册工具: [static_analyzer] — 对指定目录下的 C/C++ 工程调用外部静态分析工具cppcheck/clang-tidy/infer进行代码质量检查返回错误、警告及代码风格问题
[2026-03-09 14:07:40,036] [agent.MCP] INFO: 📌 注册工具: [ssh_docker] — 通过 SSH 连接到远程服务器,使用 Docker 部署和管理容器应用。支持: deploy | start | stop | restart | status | logs | remove | compose_up | compose_down | compose_ps | pull | inspect | stats
[2026-03-09 14:07:40,037] [agent.LLM] INFO: 🏭 Provider 工厂: 创建 [openai] Provider
[2026-03-09 14:07:42,643] [agent.LLM] INFO: 🔗 使用自定义 API 地址: https://openapi.monica.im/v1
[2026-03-09 14:07:42,832] [agent.LLM] INFO: ✅ OpenAI 客户端初始化完成
model = gpt-4o
base_url = https://openapi.monica.im/v1
max_retries= 3
[2026-03-09 14:07:42,833] [agent.LLM] INFO: 🧠 LLM 引擎初始化完成
[2026-03-09 14:07:42,833] [agent.LLM] INFO: provider = openai
[2026-03-09 14:07:42,833] [agent.LLM] INFO: model_name = gpt-4o
[2026-03-09 14:07:42,833] [agent.LLM] INFO: function_calling = True
[2026-03-09 14:07:42,834] [agent.LLM] INFO: temperature = 0.7
[2026-03-09 14:07:42,834] [agent.LLM] INFO: fallback_rules = True
[2026-03-09 14:07:42,834] [agent.MEMORY] INFO: 💾 Memory 初始化,最大历史: 20 条
[2026-03-09 14:07:42,834] [agent.CLIENT] INFO: 💻 Agent Client 初始化完成OpenAI Function Calling 模式)
[2026-03-09 14:07:42,835] [agent.SYSTEM] INFO: ✅ Agent 组装完成,已注册工具: ['calculator', 'web_search', 'file_reader', 'code_executor', 'static_analyzer', 'ssh_docker']

View File

@ -23,6 +23,8 @@ from tools.calculator import CalculatorTool
from tools.code_executor import CodeExecutorTool
from tools.file_reader import FileReaderTool
from tools.web_search import WebSearchTool
from tools.static_analyzer import StaticAnalyzerTool
from tools.ssh_docker import SSHDockerTool
from utils.logger import get_logger
logger = get_logger("SYSTEM")
@ -32,6 +34,8 @@ _ALL_TOOLS = {
"web_search": WebSearchTool,
"file_reader": FileReaderTool,
"code_executor": CodeExecutorTool,
"static_analyzer": StaticAnalyzerTool,
"ssh_docker": SSHDockerTool
}

View File

@ -27,7 +27,7 @@ class CalculatorTool(BaseTool):
def __init__(self):
super().__init__()
# 从配置读取精度
self._precision = settings.tools.calculator.precision
self._precision = settings.tools['calculator']['precision']
self.logger.debug(f"⚙️ Calculator 精度: {self._precision}")
def execute(self, expression: str, **_) -> ToolResult:

View File

@ -29,9 +29,9 @@ class CodeExecutorTool(BaseTool):
def __init__(self):
super().__init__()
cfg = settings.tools.code_executor
self._timeout = cfg.timeout
self._sandbox = cfg.sandbox
cfg = settings.tools['code_executor']
self._timeout = cfg['timeout']
self._sandbox = cfg['sandbox']
self.logger.debug(
f"⚙️ CodeExecutor timeout={self._timeout}s, sandbox={self._sandbox}"
)

View File

@ -18,9 +18,9 @@ class FileReaderTool(BaseTool):
def __init__(self):
super().__init__()
cfg = settings.tools.file_reader
self._allowed_root = Path(cfg.allowed_root)
self._max_size_kb = cfg.max_file_size_kb
cfg = settings.tools['file_reader']
self._allowed_root = Path(cfg['allowed_root'])
self._max_size_kb = cfg['max_file_size_kb']
self.logger.debug(
f"⚙️ FileReader root={self._allowed_root}, "
f"max_size={self._max_size_kb}KB"

732
tools/ssh_docker.py Normal file
View File

@ -0,0 +1,732 @@
"""
tools/ssh_docker.py
SSH 远程 Docker 部署工具 所有配置通过 settings.tools['ssh_docker'][key] 获取
依赖: pip install paramiko>=3.0.0
"""
import json
import re
import time
from dataclasses import dataclass, field
from config.settings import settings
from utils.logger import get_logger
logger = get_logger("TOOL.SSHDocker")
try:
import paramiko
_PARAMIKO_AVAILABLE = True
except ImportError:
_PARAMIKO_AVAILABLE = False
logger.warning("⚠️ paramiko 未安装,请执行: pip install paramiko>=3.0.0")
# ════════════════════════════════════════════════════════════════
# 配置访问快捷函数
# ════════════════════════════════════════════════════════════════
def _cfg(key: str, fallback=None):
    """Look up one key in the ``ssh_docker`` tool config; return *fallback* if absent."""
    tool_cfg = settings.tools['ssh_docker']
    return tool_cfg.get(key, fallback)
# ════════════════════════════════════════════════════════════════
# 数据结构
# ════════════════════════════════════════════════════════════════
@dataclass
class SSHConfig:
    """Connection parameters for one SSH session."""
    host: str
    port: int = 22
    username: str = "root"
    password: str = ""
    key_path: str = ""
    timeout: int = 30        # TCP/auth connect timeout (seconds)
    cmd_timeout: int = 120   # per-command execution timeout (seconds)

    @classmethod
    def from_kwargs(cls, kwargs: dict) -> "SSHConfig":
        """Build an SSHConfig from tool-call arguments.

        A ``server`` key selects a preset defined under
        config.yaml → tools.ssh_docker.servers; otherwise connection
        details come straight from *kwargs*.  Port/username fall back to
        the config.yaml defaults; both timeouts always come from config.yaml.

        Raises ValueError when the named preset does not exist.
        """
        server_name = kwargs.get("server", "")
        if server_name:
            servers = _cfg('servers', {})
            preset = servers.get(server_name)
            if not preset:
                raise ValueError(
                    f"服务器预设 '{server_name}' 未在 config.yaml "
                    f"tools.ssh_docker.servers 中定义\n"
                    f"已有预设: {list(servers.keys())}"
                )
            logger.info(f"📋 使用服务器预设: {server_name} → {preset.get('host')}")
            source = preset
        else:
            source = kwargs
        # Both branches read the same keys with the same defaults, so a
        # single constructor call covers preset and ad-hoc connections.
        return cls(
            host=source.get("host", ""),
            port=int(source.get("port", _cfg('default_ssh_port', 22))),
            username=source.get("username", _cfg('default_username', 'root')),
            password=source.get("password", ""),
            key_path=source.get("key_path", ""),
            timeout=_cfg('connect_timeout', 30),
            cmd_timeout=_cfg('cmd_timeout', 120),
        )
@dataclass
class CommandResult:
    """Captured outcome of one remote shell command."""
    command: str
    stdout: str
    stderr: str
    exit_code: int
    success: bool = True

    @property
    def output(self) -> str:
        """Trimmed stdout; falls back to trimmed stderr when stdout is blank."""
        trimmed = self.stdout.strip()
        return trimmed if trimmed else self.stderr.strip()
@dataclass
class DeployConfig:
    """Options for one container deployment / management request."""
    image: str
    container_name: str
    action: str = "deploy"
    ports: list[str] = field(default_factory=list)      # "host:container" mappings
    volumes: list[str] = field(default_factory=list)    # "host:container" mounts
    env_vars: dict[str, str] = field(default_factory=dict)
    network: str = ""
    restart_policy: str = ""
    command: str = ""
    compose_file: str = ""
    pull_latest: bool = True
    extra_args: str = ""

    def __post_init__(self):
        # An empty restart policy means "use the config.yaml default".
        self.restart_policy = self.restart_policy or _cfg('default_restart_policy', 'unless-stopped')
# ════════════════════════════════════════════════════════════════
# SSH 连接管理器
# ════════════════════════════════════════════════════════════════
class SSHManager:
    """Minimal lifecycle wrapper around a paramiko SSH connection.

    Usable as a context manager::

        with SSHManager(cfg) as ssh:
            ssh.exec("docker ps")

    connects on enter and always closes on exit.
    """

    def __init__(self, cfg: SSHConfig):
        self.cfg = cfg
        # Live paramiko client, or None while disconnected.
        self.client: "paramiko.SSHClient | None" = None

    def connect(self) -> None:
        """Open the SSH session.

        Auth preference: explicit private key > password > SSH agent /
        default keys.  Raises RuntimeError when paramiko is missing;
        paramiko exceptions propagate on connection/auth failure.
        """
        if not _PARAMIKO_AVAILABLE:
            raise RuntimeError("paramiko 未安装,请执行: pip install paramiko>=3.0.0")
        self.client = paramiko.SSHClient()
        # Trust-on-first-use: unknown host keys are accepted automatically.
        # Acceptable for an internal ops tool; not for hostile networks.
        self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        connect_kwargs: dict = {
            "hostname": self.cfg.host,
            "port": self.cfg.port,
            "username": self.cfg.username,
            "timeout": self.cfg.timeout,
        }
        if self.cfg.key_path:
            logger.info(f"🔑 使用密钥认证: {self.cfg.key_path}")
            connect_kwargs["key_filename"] = self.cfg.key_path
        elif self.cfg.password:
            logger.info("🔐 使用密码认证")
            connect_kwargs["password"] = self.cfg.password
        else:
            logger.info("🔓 尝试 SSH Agent / 默认密钥认证")
        self.client.connect(**connect_kwargs)
        logger.info(
            f"✅ SSH 连接成功: {self.cfg.username}@{self.cfg.host}:{self.cfg.port}\n"
            f" 连接超时: {self.cfg.timeout}s "
            f"[config.yaml connect_timeout={_cfg('connect_timeout')}s]\n"
            f" 命令超时: {self.cfg.cmd_timeout}s "
            f"[config.yaml cmd_timeout={_cfg('cmd_timeout')}s]"
        )

    def exec(self, command: str, timeout: int | None = None) -> CommandResult:
        """Run *command* on the remote host and capture stdout/stderr/exit code.

        A non-zero exit code does NOT raise — callers inspect
        ``result.success``.  Raises RuntimeError if called before connect().
        """
        if not self.client:
            raise RuntimeError("SSH 未连接,请先调用 connect()")
        t = timeout or self.cfg.cmd_timeout
        logger.debug(f"🖥 执行命令 (timeout={t}s): {command}")
        _, stdout, stderr = self.client.exec_command(command, timeout=t)
        # BUGFIX: drain both streams BEFORE recv_exit_status().  The old
        # order (status first) can deadlock when the remote command emits
        # more output than the SSH channel window holds, because nothing
        # consumes the buffered data while we block waiting for the status.
        out = stdout.read().decode("utf-8", errors="replace")
        err = stderr.read().decode("utf-8", errors="replace")
        exit_code = stdout.channel.recv_exit_status()
        result = CommandResult(
            command=command, stdout=out, stderr=err,
            exit_code=exit_code, success=(exit_code == 0),
        )
        logger.debug(f" exit={exit_code} out={out[:80]} err={err[:80]}")
        return result

    def close(self) -> None:
        """Close the connection; safe to call when already closed."""
        if self.client:
            self.client.close()
            self.client = None

    def __enter__(self):
        self.connect()
        return self

    def __exit__(self, *_):
        self.close()
# ════════════════════════════════════════════════════════════════
# Docker 操作执行器
# ════════════════════════════════════════════════════════════════
class DockerExecutor:
    """Runs Docker CLI commands on the remote host through an SSHManager.

    NOTE(review): image/container names and paths are interpolated into
    shell command strings without quoting — inputs are assumed to come
    from a trusted operator/config, not from hostile users.
    """
    # Complete set of actions the tool layer may dispatch to.
    ALLOWED_ACTIONS = {
        "deploy", "start", "stop", "restart",
        "status", "logs", "remove",
        "compose_up", "compose_down", "compose_ps",
        "pull", "inspect", "stats",
    }

    def __init__(self, ssh: SSHManager):
        self.ssh = ssh

    def check_docker(self) -> CommandResult:
        """Probe that the Docker CLI and daemon are reachable on the remote host."""
        return self.ssh.exec(
            "docker --version && docker info --format '{{.ServerVersion}}'"
        )

    def pull_image(self, image: str) -> CommandResult:
        """Pull *image*; uses the longer deploy_timeout since pulls can be slow."""
        logger.info(f"📥 拉取镜像: {image}")
        return self.ssh.exec(
            f"docker pull {image}",
            timeout=_cfg('deploy_timeout', 300),
        )

    def deploy(self, cfg: DeployConfig) -> list[CommandResult]:
        """Full deploy cycle: optional pull, stop+remove any same-named container, run.

        Returns every intermediate CommandResult so the caller can report
        step-by-step success/failure.
        """
        results = []
        if cfg.pull_latest:
            results.append(self.pull_image(cfg.image))
        # stop/rm are best-effort: `|| true` keeps a missing container from failing the run.
        results.append(self.ssh.exec(
            f"docker stop {cfg.container_name} 2>/dev/null || true"
        ))
        results.append(self.ssh.exec(
            f"docker rm {cfg.container_name} 2>/dev/null || true"
        ))
        cmd = self._build_run_command(cfg)
        logger.info(f"🚀 启动容器: {cmd}")
        results.append(self.ssh.exec(cmd, timeout=_cfg('deploy_timeout', 300)))
        return results

    # Thin one-line wrappers around the corresponding docker subcommands.
    def start(self, name: str) -> CommandResult: return self.ssh.exec(f"docker start {name}")
    def stop(self, name: str) -> CommandResult: return self.ssh.exec(f"docker stop {name}")
    def restart(self, name: str) -> CommandResult: return self.ssh.exec(f"docker restart {name}")

    def remove(self, name: str, force: bool = True) -> CommandResult:
        """Remove a container; force=True (-f) also removes a running one."""
        return self.ssh.exec(f"docker rm {'-f' if force else ''} {name}")

    def status(self, name: str) -> CommandResult:
        """One-line status via `docker inspect --format`; echoes a fallback when absent."""
        # {{{{...}}}} in the f-string emits literal {{.Field}} Go-template syntax.
        cmd = (
            f"docker inspect {name} "
            f"--format '{{{{.Name}}}} | {{{{.State.Status}}}} | "
            f"Started: {{{{.State.StartedAt}}}} | Image: {{{{.Config.Image}}}}'"
            f" 2>/dev/null || echo 'Container {name} not found'"
        )
        return self.ssh.exec(cmd)

    def logs(self, name: str, tail: int | None = None) -> CommandResult:
        """Fetch the last *tail* log lines (config default_tail_lines when None)."""
        n = tail if tail is not None else _cfg('default_tail_lines', 100)
        logger.info(
            f"📋 获取日志: {name} tail={n} "
            f"[config.yaml default_tail_lines={_cfg('default_tail_lines')}]"
        )
        # 2>&1: docker writes container logs to stderr; merge so callers see them in stdout.
        return self.ssh.exec(f"docker logs --tail={n} --timestamps {name} 2>&1")

    def inspect(self, name: str) -> CommandResult:
        """Raw `docker inspect` JSON for one container."""
        return self.ssh.exec(f"docker inspect {name}")

    def stats(self, name: str) -> CommandResult:
        """One-shot (--no-stream) CPU/memory/network snapshot for one container."""
        return self.ssh.exec(
            f"docker stats {name} --no-stream "
            f"--format 'table {{{{.Name}}}}\t{{{{.CPUPerc}}}}\t"
            f"{{{{.MemUsage}}}}\t{{{{.NetIO}}}}'"
        )

    def compose_up(self, compose_file: str, detach: bool = True) -> CommandResult:
        """`docker compose up` from the compose file's own directory."""
        # Run from the file's directory so relative paths inside the compose file resolve.
        work_dir = compose_file.rsplit("/", 1)[0] if "/" in compose_file else "."
        logger.info(f"🐙 Compose Up: {compose_file}")
        return self.ssh.exec(
            f"cd {work_dir} && docker compose -f {compose_file} "
            f"up {'-d' if detach else ''} --pull always",
            timeout=_cfg('deploy_timeout', 300),
        )

    def compose_down(self, compose_file: str) -> CommandResult:
        """`docker compose down` for the given compose file."""
        work_dir = compose_file.rsplit("/", 1)[0] if "/" in compose_file else "."
        return self.ssh.exec(
            f"cd {work_dir} && docker compose -f {compose_file} down"
        )

    def compose_ps(self, compose_file: str) -> CommandResult:
        """`docker compose ps` for the given compose file."""
        work_dir = compose_file.rsplit("/", 1)[0] if "/" in compose_file else "."
        return self.ssh.exec(
            f"cd {work_dir} && docker compose -f {compose_file} ps"
        )

    @staticmethod
    def _build_run_command(cfg: DeployConfig) -> str:
        """
        Build the `docker run` command line.

        Security check: when config.yaml allow_privileged=false, a
        `--privileged` flag found in extra_args is stripped out.
        """
        if "--privileged" in cfg.extra_args and not _cfg('allow_privileged', False):
            logger.warning(
                "⚠️ 已移除 --privileged 参数\n"
                " 如需启用请在 config.yaml → "
                "tools.ssh_docker.allow_privileged 设置为 true"
            )
            # NOTE(review): mutates cfg.extra_args in place — visible to the caller.
            cfg.extra_args = cfg.extra_args.replace("--privileged", "").strip()
        parts = ["docker", "run", "-d", f"--name {cfg.container_name}"]
        if cfg.restart_policy:
            parts.append(f"--restart {cfg.restart_policy}")
        for p in cfg.ports:
            parts.append(f"-p {p}")
        for v in cfg.volumes:
            parts.append(f"-v {v}")
        for k, val in cfg.env_vars.items():
            # Escape embedded double quotes so the value survives the remote shell.
            safe_val = str(val).replace('"', '\\"')
            parts.append(f'-e {k}="{safe_val}"')
        if cfg.network:
            parts.append(f"--network {cfg.network}")
        if cfg.extra_args:
            parts.append(cfg.extra_args)
        parts.append(cfg.image)
        if cfg.command:
            parts.append(cfg.command)
        return " ".join(parts)
# ════════════════════════════════════════════════════════════════
# 主工具类
# ════════════════════════════════════════════════════════════════
class SSHDockerTool:
    """
    SSH remote Docker deployment tool.

    Connects to a remote host over SSH and drives the Docker CLI there.
    All configuration defaults are read via settings.tools['ssh_docker'][key]
    (config.yaml → tools.ssh_docker).
    """
    name = "ssh_docker"
    description = (
        "通过 SSH 连接到远程服务器,使用 Docker 部署和管理容器应用。"
        "支持: deploy | start | stop | restart | status | logs | "
        "remove | compose_up | compose_down | compose_ps | pull | inspect | stats"
    )
    # JSON-schema-style parameter declaration consumed by the function-calling layer.
    parameters = {
        "host": {
            "type": "string",
            "description": "远程服务器 IP 或域名(与 server 参数二选一)",
        },
        "server": {
            "type": "string",
            "description": (
                "使用 config.yaml tools.ssh_docker.servers 中预设的服务器名称"
                "(与 host 二选一),例如 'prod'、'staging'"
            ),
        },
        "username": {
            "type": "string",
            "description": "SSH 用户名(不传则使用 config.yaml default_username)",
        },
        "action": {
            "type": "string",
            "description": (
                "Docker 操作类型: deploy(部署)| start | stop | restart | "
                "status(查看状态)| logs(查看日志)| remove(删除)| "
                "compose_up | compose_down | compose_ps | pull | inspect | stats"
            ),
            "enum": sorted(DockerExecutor.ALLOWED_ACTIONS),
        },
        "image": {
            "type": "string",
            "description": "Docker 镜像名称,例如 nginx:latest(deploy/pull 时必填)",
        },
        "container_name": {
            "type": "string",
            "description": "容器名称,例如 my-nginx",
        },
        "port": {
            "type": "integer",
            "description": "SSH 端口(不传则使用 config.yaml default_ssh_port)",
        },
        "password": {
            "type": "string",
            "description": "SSH 密码(与 key_path 二选一)",
        },
        "key_path": {
            "type": "string",
            "description": "SSH 私钥路径,例如 /home/user/.ssh/id_rsa",
        },
        "ports": {
            "type": "string",
            "description": "端口映射,逗号分隔,例如 '8080:80,443:443'",
        },
        "volumes": {
            "type": "string",
            "description": "数据卷挂载,逗号分隔,例如 '/data:/app/data,/logs:/var/log'",
        },
        "env_vars": {
            "type": "string",
            "description": "环境变量 JSON 字符串,例如 '{\"DB_HOST\":\"localhost\",\"DB_PORT\":\"5432\"}'",
        },
        "network": {
            "type": "string",
            "description": "Docker 网络名称,例如 bridge 或 my-network",
        },
        "restart_policy": {
            "type": "string",
            "description": "重启策略(不传则使用 config.yaml default_restart_policy): no | always | unless-stopped | on-failure",
        },
        "compose_file": {
            "type": "string",
            "description": "docker-compose.yml 在远程服务器上的绝对路径",
        },
        "pull_latest": {
            "type": "boolean",
            "description": "部署前是否拉取最新镜像,默认 true",
        },
        "tail_lines": {
            "type": "integer",
            "description": "查看日志时返回的行数(不传则使用 config.yaml default_tail_lines)",
        },
        "extra_args": {
            "type": "string",
            "description": "传递给 docker run 的额外参数,例如 '--memory=512m --cpus=1'",
        },
    }

    def execute(self, **kwargs) -> str:
        """Entry point: validate arguments, open SSH, run the requested action.

        Always returns a human-readable result string; SSH/Docker failures
        are caught and rendered via _format_error rather than raised.
        """
        # ── Parse arguments; every default comes from config.yaml ──────
        action = kwargs.get("action", "status").lower()
        image = kwargs.get("image", "")
        container_name = kwargs.get("container_name", "")
        ports_str = kwargs.get("ports", "")
        volumes_str = kwargs.get("volumes", "")
        env_vars_str = kwargs.get("env_vars", "{}")
        network = kwargs.get("network", "")
        restart_policy = kwargs.get("restart_policy", "")  # empty → filled by DeployConfig.__post_init__
        compose_file = kwargs.get("compose_file", "")
        pull_latest = bool(kwargs.get("pull_latest", True))
        tail_lines_raw = kwargs.get("tail_lines", None)
        # NOTE(review): int() here can raise ValueError on a non-numeric
        # tail_lines — assumed validated upstream; confirm with the caller.
        tail_lines = int(tail_lines_raw) if tail_lines_raw is not None else None
        extra_args = kwargs.get("extra_args", "")
        logger.info(
            f"🐳 SSH Docker 操作启动\n"
            f" 操作 : {action}\n"
            f" 容器 : {container_name or '(未指定)'}\n"
            f" 镜像 : {image or '(未指定)'}\n"
            f" server预设: {kwargs.get('server', '(无)')} "
            f"host: {kwargs.get('host', '(无)')}\n"
            f" deploy_timeout : {_cfg('deploy_timeout')}s "
            f"[config.yaml]\n"
            f" allow_privileged: {_cfg('allow_privileged')} "
            f"[config.yaml]"
        )
        # ── Validate arguments ────────────────────────────────
        err = self._validate(kwargs, action, image, container_name, compose_file)
        if err:
            return err
        # ── Parse compound arguments ──────────────────────────
        ports = [p.strip() for p in ports_str.split(",") if p.strip()]
        volumes = [v.strip() for v in volumes_str.split(",") if v.strip()]
        try:
            env_vars: dict = json.loads(env_vars_str) if env_vars_str.strip() else {}
        except json.JSONDecodeError:
            return f"❌ env_vars 格式错误,请使用 JSON 格式: {env_vars_str}"
        # ── Image blacklist check (config.yaml blocked_images) ─
        if image:
            blocked = _cfg('blocked_images', [])
            if any(image.startswith(b) for b in blocked):
                return (
                    f"❌ 安全限制: 镜像 '{image}' 在黑名单中\n"
                    f" 黑名单: {blocked}\n"
                    f" 请在 config.yaml → tools.ssh_docker.blocked_images 中移除"
                )
        # ── Build config objects ──────────────────────────────
        try:
            ssh_cfg = SSHConfig.from_kwargs(kwargs)
        except ValueError as e:
            return f"❌ SSH 配置错误: {e}"
        deploy_cfg = DeployConfig(
            image=image,
            container_name=container_name,
            action=action,
            ports=ports,
            volumes=volumes,
            env_vars=env_vars,
            network=network,
            restart_policy=restart_policy,
            compose_file=compose_file,
            pull_latest=pull_latest,
            extra_args=extra_args,
        )
        # ── Execute the action ────────────────────────────────
        try:
            with SSHManager(ssh_cfg) as ssh:
                executor = DockerExecutor(ssh)
                return self._dispatch(action, executor, deploy_cfg, tail_lines)
        except Exception as e:
            error_msg = str(e)
            logger.error(f"❌ SSH Docker 操作失败: {error_msg}")
            return self._format_error(action, ssh_cfg.host, error_msg)

    # ── Action dispatch ───────────────────────────────────────
    def _dispatch(
        self,
        action: str,
        executor: DockerExecutor,
        cfg: DeployConfig,
        tail_lines: int | None,
    ) -> str:
        """Route a validated action to the matching DockerExecutor call."""
        # Verify the Docker environment first
        check = executor.check_docker()
        if not check.success:
            return (
                f"❌ 远程服务器 Docker 不可用\n"
                f" 错误: {check.stderr[:200]}\n"
                f" 请确认 Docker 已安装并运行: sudo systemctl start docker"
            )
        match action:
            case "deploy":
                return self._do_deploy(executor, cfg)
            case "start":
                return self._fmt_single(executor.start(cfg.container_name), "start")
            case "stop":
                return self._fmt_single(executor.stop(cfg.container_name), "stop")
            case "restart":
                return self._fmt_single(executor.restart(cfg.container_name), "restart")
            case "status":
                return self._do_status(executor, cfg.container_name)
            case "logs":
                return self._do_logs(executor, cfg.container_name, tail_lines)
            case "remove":
                return self._fmt_single(executor.remove(cfg.container_name), "remove")
            case "pull":
                return self._fmt_single(executor.pull_image(cfg.image), "pull")
            case "inspect":
                return self._do_inspect(executor, cfg.container_name)
            case "stats":
                return self._fmt_single(executor.stats(cfg.container_name), "stats")
            case "compose_up":
                return self._fmt_single(executor.compose_up(cfg.compose_file), "compose_up")
            case "compose_down":
                return self._fmt_single(executor.compose_down(cfg.compose_file), "compose_down")
            case "compose_ps":
                return self._fmt_single(executor.compose_ps(cfg.compose_file), "compose_ps")
            case _:
                return f"❌ 不支持的操作: {action}"

    def _do_deploy(self, executor: DockerExecutor, cfg: DeployConfig) -> str:
        """Deploy via compose when compose_file is set, else a plain docker run cycle."""
        if cfg.compose_file:
            result = executor.compose_up(cfg.compose_file)
            icon = "" if result.success else ""
            return (
                f"{icon} Compose 部署{'成功' if result.success else '失败'}\n"
                f"{'' * 50}\n"
                f" Compose 文件: {cfg.compose_file}\n"
                f"{'' * 50}\n"
                f"{result.output[:1500]}"
            )
        results = executor.deploy(cfg)
        return self._fmt_deploy(results, cfg)

    def _do_status(self, executor: DockerExecutor, container_name: str) -> str:
        """Combine inspect-based status with a one-shot stats snapshot."""
        status_r = executor.status(container_name)
        stats_r = executor.stats(container_name)
        lines = [
            f"📊 容器状态: {container_name}",
            "" * 50,
            status_r.output or "容器不存在或未运行",
        ]
        if stats_r.success and stats_r.output:
            lines += ["", "📈 资源使用:", stats_r.output]
        return "\n".join(lines)

    def _do_logs(
        self, executor: DockerExecutor, container_name: str, tail: int | None
    ) -> str:
        """Render the last *tail* log lines (config default when None)."""
        result = executor.logs(container_name, tail)
        n = tail if tail is not None else _cfg('default_tail_lines', 100)
        if result.success:
            return (
                f"📋 容器日志: {container_name} (最近 {n} 行)\n"
                f"{'' * 50}\n"
                f"{result.output or '(无日志输出)'}"
            )
        return f"❌ 获取日志失败: {result.stderr[:300]}"

    def _do_inspect(self, executor: DockerExecutor, container_name: str) -> str:
        """Summarize `docker inspect` JSON down to the fields operators care about."""
        result = executor.inspect(container_name)
        if result.success:
            try:
                data = json.loads(result.stdout)
                if data:
                    c = data[0]
                    info = {
                        "Name": c.get("Name", ""),
                        "Status": c.get("State", {}).get("Status", ""),
                        "Image": c.get("Config", {}).get("Image", ""),
                        "Ports": c.get("NetworkSettings", {}).get("Ports", {}),
                        "Mounts": [m.get("Source") for m in c.get("Mounts", [])],
                        "Created": c.get("Created", ""),
                    }
                    return (
                        f"🔍 容器详情: {container_name}\n"
                        f"{'' * 50}\n"
                        f"{json.dumps(info, ensure_ascii=False, indent=2)}"
                    )
            except json.JSONDecodeError:
                # Fall through to the generic failure message below.
                pass
        return f"❌ 获取容器详情失败: {result.stderr[:300]}"

    # ── Output formatting ─────────────────────────────────────
    @staticmethod
    def _fmt_deploy(results: list[CommandResult], cfg: DeployConfig) -> str:
        """Render a step-by-step deploy report; overall verdict is AND of all steps."""
        lines = [
            "🚀 容器部署结果",
            "" * 50,
            f" 镜像 : {cfg.image}",
            f" 容器名 : {cfg.container_name}",
            f" 端口 : {', '.join(cfg.ports) or '(无)'}",
            f" 数据卷 : {', '.join(cfg.volumes) or '(无)'}",
            f" 重启策略: {cfg.restart_policy} "
            f"[config.yaml default_restart_policy="
            f"{_cfg('default_restart_policy')}]",
            "" * 50,
        ]
        all_ok = True
        for r in results:
            icon = "" if r.success else ""
            lines.append(f" {icon} $ {r.command[:70]}")
            if r.output:
                lines.append(f" └─ {r.output[:150]}")
            if not r.success:
                all_ok = False
                lines.append(f" └─ 错误: {r.stderr[:150]}")
        lines.append("" * 50)
        lines.append(
            f"✅ 部署成功!容器 [{cfg.container_name}] 已启动"
            if all_ok else
            "⚠️ 部署过程中有步骤失败,请检查上方错误信息"
        )
        return "\n".join(lines)

    @staticmethod
    def _fmt_single(result: CommandResult, action: str) -> str:
        """Render one command's outcome with a success/failure banner."""
        icon = "" if result.success else ""
        status = "成功" if result.success else "失败"
        return (
            f"{icon} {action} {status}\n"
            f"{'' * 40}\n"
            f"{result.output[:500] or '(无输出)'}"
        )

    @staticmethod
    def _format_error(action: str, host: str, error: str) -> str:
        """Render a failure report plus troubleshooting hints keyed off the error text."""
        lines = [
            f"❌ SSH Docker [{action}] 操作失败",
            "" * 50,
            f" 服务器: {host}",
            f" 错误 : {error}",
            "" * 50,
            "💡 排查建议:",
        ]
        el = error.lower()
        if "authentication" in el or "auth" in el:
            lines += [
                " • 检查用户名/密码是否正确",
                " • 检查 SSH 密钥路径和权限(chmod 600 ~/.ssh/id_rsa)",
                " • 或在 config.yaml tools.ssh_docker.servers 中配置预设",
            ]
        elif "connection" in el or "timed out" in el:
            lines += [
                " • 检查服务器 IP 和 SSH 端口是否正确",
                " • 检查防火墙是否开放 SSH 端口",
                f" • config.yaml connect_timeout={_cfg('connect_timeout')}s,可适当增大",
            ]
        elif "docker" in el:
            lines += [
                " • 确认 Docker 已安装: docker --version",
                " • 确认 Docker 服务运行: sudo systemctl start docker",
                " • 确认用户有 Docker 权限: sudo usermod -aG docker $USER",
            ]
        return "\n".join(lines)

    # ── Argument validation ───────────────────────────────────
    @staticmethod
    def _validate(
        kwargs: dict,
        action: str,
        image: str,
        container_name: str,
        compose_file: str,
    ) -> str | None:
        """Return an error string when arguments are invalid, else None."""
        # Must provide either host or a server preset
        if not kwargs.get("host") and not kwargs.get("server"):
            return "❌ 参数错误: 必须提供 host(服务器地址)或 server(预设名称)之一"
        if action not in DockerExecutor.ALLOWED_ACTIONS:
            return (
                f"❌ 不支持的操作: {action}\n"
                f" 可选值: {', '.join(sorted(DockerExecutor.ALLOWED_ACTIONS))}"
            )
        # Host whitelist check (config.yaml allowed_hosts)
        # NOTE(review): only the explicit `host` arg is checked here —
        # a preset-referenced host bypasses the whitelist; confirm intent.
        host = kwargs.get("host", "")
        allowed_hosts = _cfg('allowed_hosts', [])
        if host and allowed_hosts and host not in allowed_hosts:
            return (
                f"❌ 安全限制: 服务器 '{host}' 不在白名单中\n"
                f" 白名单: {allowed_hosts}\n"
                f" 请在 config.yaml → tools.ssh_docker.allowed_hosts 中添加"
            )
        if action == "deploy" and not image and not compose_file:
            return "❌ deploy 操作需要指定 image(镜像名)或 compose_file(Compose 文件路径)"
        needs_container = {
            "start", "stop", "restart", "logs", "remove", "inspect", "stats"
        }
        if action in needs_container and not container_name:
            return f"❌ {action} 操作需要指定 container_name(容器名称)"
        needs_compose = {"compose_up", "compose_down", "compose_ps"}
        if action in needs_compose and not compose_file:
            return f"❌ {action} 操作需要指定 compose_file(docker-compose.yml 路径)"
        return None

482
tools/static_analyzer.py Normal file
View File

@ -0,0 +1,482 @@
"""
tools/static_analyzer.py
C/C++ 静态分析工具 所有配置通过 settings.tools['static_analyzer'][key] 获取
"""
import json
import os
import re
import shutil
import subprocess
import time
from dataclasses import dataclass, field
from pathlib import Path
from config.settings import settings
from utils.logger import get_logger
logger = get_logger("TOOL.StaticAnalyzer")
# ════════════════════════════════════════════════════════════════
# 配置访问快捷函数(统一入口,便于调试)
# ════════════════════════════════════════════════════════════════
def _cfg(key: str, fallback=None):
    """Look up one key in the ``static_analyzer`` tool config; return *fallback* if absent."""
    tool_cfg = settings.tools['static_analyzer']
    return tool_cfg.get(key, fallback)
# ════════════════════════════════════════════════════════════════
# 数据结构
# ════════════════════════════════════════════════════════════════
@dataclass
class AnalysisIssue:
    """A single finding reported by one of the static-analysis backends."""
    file: str
    line: int
    column: int
    severity: str  # error | warning | style | performance | information
    rule_id: str
    message: str
    tool: str

    def to_dict(self) -> dict:
        """Serialize the issue as a plain JSON-friendly dict."""
        return {
            "file": self.file,
            "line": self.line,
            "column": self.column,
            "severity": self.severity,
            "rule_id": self.rule_id,
            "message": self.message,
            "tool": self.tool,
        }

    def __str__(self) -> str:
        location = f"{self.file}:{self.line}:{self.column}"
        return f"[{self.severity.upper():12s}] {location} ({self.rule_id}) {self.message}"
@dataclass
class AnalysisResult:
    """Aggregate outcome of one static-analysis run."""

    project_dir: str
    tool: str
    success: bool
    issues: list[AnalysisIssue] = field(default_factory=list)
    raw_output: str = ""
    error: str = ""
    elapsed_sec: float = 0.0

    @property
    def error_count(self) -> int:
        return len([i for i in self.issues if i.severity == "error"])

    @property
    def warning_count(self) -> int:
        return len([i for i in self.issues if i.severity == "warning"])

    @property
    def style_count(self) -> int:
        return len([i for i in self.issues if i.severity in ("style", "performance")])

    @property
    def total_count(self) -> int:
        return len(self.issues)

    def summary(self) -> str:
        """Render a human-readable report; detail is capped by max_issues/20."""
        max_show = min(20, _cfg('max_issues', 500))
        if not self.success:
            return f"❌ 分析失败: {self.error}"
        lines = [
            f"📊 静态分析完成 [{self.tool}] 耗时: {self.elapsed_sec:.1f}s",
            f" 工程目录 : {self.project_dir}",
            f" 问题总计 : {self.total_count}",
            f" ├─ 错误 (error) : {self.error_count}",
            f" ├─ 警告 (warning): {self.warning_count}",
            f" └─ 风格 (style) : {self.style_count}",
        ]
        if not self.issues:
            lines.append(" ✅ 未发现任何问题!")
            return "\n".join(lines)
        lines.append(f"\n📋 问题详情(最多显示 {max_show} 条):")
        lines.extend(f" {issue}" for issue in self.issues[:max_show])
        hidden = self.total_count - max_show
        if hidden > 0:
            lines.append(f" ... 还有 {hidden}")
        return "\n".join(lines)

    def to_dict(self) -> dict:
        """Serialize to a JSON-friendly dict, truncating issues at max_issues."""
        cap = _cfg('max_issues', 500)
        stats = {
            "total": self.total_count,
            "error": self.error_count,
            "warning": self.warning_count,
            "style": self.style_count,
        }
        return {
            "project_dir": self.project_dir,
            "tool": self.tool,
            "success": self.success,
            "elapsed_sec": round(self.elapsed_sec, 2),
            "stats": stats,
            "issues": [issue.to_dict() for issue in self.issues[:cap]],
            "error": self.error,
        }
# ════════════════════════════════════════════════════════════════
# 各工具解析器
# ════════════════════════════════════════════════════════════════
class CppcheckParser:
    """Builds cppcheck command lines and parses its XML (v2) / text output."""

    # cppcheck severity → our normalized severity vocabulary.
    SEVERITY_MAP = {
        "error": "error", "warning": "warning", "style": "style",
        "performance": "performance", "portability": "style",
        "information": "information",
    }

    @classmethod
    def build_command(cls, project_dir: str, standard: str, extra_args: str) -> list[str]:
        """Assemble the cppcheck argv: all checks, XML v2 output, parallel jobs."""
        worker_count = _cfg('jobs', 4)
        configured = _cfg('tool_extra_args', {}).get('cppcheck', '')
        merged = f"{configured} {extra_args}".strip()
        argv = [
            "cppcheck",
            "--enable=all",
            "--xml", "--xml-version=2",
            f"--std={standard}",
            f"-j{worker_count}",
        ]
        if merged:
            argv += merged.split()
        argv.append(project_dir)
        return argv

    @classmethod
    def parse(cls, output: str, tool: str = "cppcheck") -> list[AnalysisIssue]:
        """Parse cppcheck XML output; fall back to text parsing on any failure."""
        collected: list[AnalysisIssue] = []
        try:
            import xml.etree.ElementTree as ET
            root = ET.fromstring(output)
            for node in root.iter("error"):
                raw_sev = node.get("severity", "warning")
                loc = node.find("location")
                if loc is None:
                    file_path, line_no, col_no = "unknown", 0, 0
                else:
                    file_path = loc.get("file", "unknown")
                    line_no = int(loc.get("line", 0))
                    col_no = int(loc.get("column", 0))
                collected.append(AnalysisIssue(
                    file=file_path, line=line_no, column=col_no,
                    severity=cls.SEVERITY_MAP.get(raw_sev, "warning"),
                    rule_id=node.get("id", "unknown"),
                    message=node.get("msg", ""),
                    tool=tool,
                ))
        except Exception as e:
            logger.warning(f"⚠️ XML 解析失败,回退文本解析: {e}")
            collected = cls._parse_text(output, tool)
        return collected

    @staticmethod
    def _parse_text(output: str, tool: str) -> list[AnalysisIssue]:
        """Parse plain-text diagnostics shaped like `file:line:col: sev: msg [id]`."""
        pattern = re.compile(
            r"^(.+?):(\d+):(\d+):\s+(error|warning|style|performance|information):\s+"
            r"(.+?)(?:\s+\[(\w+)\])?$", re.MULTILINE,
        )
        return [
            AnalysisIssue(
                file=m.group(1), line=int(m.group(2)), column=int(m.group(3)),
                severity=m.group(4), rule_id=m.group(6) or "unknown",
                message=m.group(5), tool=tool,
            )
            for m in pattern.finditer(output)
        ]
class ClangTidyParser:
    """Builds clang-tidy command lines and parses its diagnostic lines."""

    @classmethod
    def build_command(cls, project_dir: str, standard: str, extra_args: str) -> list[str]:
        """Prefer run-clang-tidy with a compile DB under <project>/build;
        otherwise invoke clang-tidy directly on up to 50 discovered sources.

        NOTE(review): the *standard* argument is not forwarded to clang-tidy
        here — presumably the compile database supplies it; confirm intended.
        """
        configured = _cfg('tool_extra_args', {}).get('clang-tidy', '')
        merged = f"{configured} {extra_args}".strip()
        # Lift the --checks=<...> value out of the merged extra args.
        found = re.search(r"--checks=(\S+)", merged)
        checks = found.group(1) if found else "*"
        if shutil.which("run-clang-tidy"):
            return [
                "run-clang-tidy",
                f"-checks={checks}",
                "-p", os.path.join(project_dir, "build"),
            ]
        argv = ["clang-tidy"]
        if merged:
            argv.extend(merged.split())
        sources: list[Path] = []
        for glob_pat in ("*.cpp", "*.c", "*.cc", "*.cxx"):
            sources.extend(Path(project_dir).rglob(glob_pat))
        argv.extend(str(p) for p in sources[:50])
        return argv

    @classmethod
    def parse(cls, output: str, tool: str = "clang-tidy") -> list[AnalysisIssue]:
        """Parse `file:line:col: sev: message [check-name]` lines; notes are skipped."""
        pattern = re.compile(
            r"^(.+?):(\d+):(\d+):\s+(error|warning|note):\s+(.+?)(?:\s+\[([\w\-\.]+)\])?$",
            re.MULTILINE,
        )
        return [
            AnalysisIssue(
                file=m.group(1), line=int(m.group(2)), column=int(m.group(3)),
                severity=m.group(4), rule_id=m.group(6) or "unknown",
                message=m.group(5), tool=tool,
            )
            for m in pattern.finditer(output)
            if m.group(4) != "note"
        ]
class InferParser:
    """Builds infer command lines and parses its report.json / text output."""

    @classmethod
    def build_command(cls, project_dir: str, standard: str, extra_args: str) -> list[str]:
        """Run `infer run ... -- make -C <project>` (assumes a make-based build).

        NOTE(review): *standard* is unused by infer here — confirm intended.
        """
        configured = _cfg('tool_extra_args', {}).get('infer', '')
        merged = f"{configured} {extra_args}".strip()
        argv = [
            "infer", "run",
            "--results-dir", os.path.join(project_dir, "infer-out"),
        ]
        if merged:
            argv.extend(merged.split())
        return argv + ["--", "make", "-C", project_dir]

    @classmethod
    def parse(cls, output: str, tool: str = "infer") -> list[AnalysisIssue]:
        """Parse infer's JSON report; fall back to a rough text scan on bad JSON."""
        try:
            report = json.loads(output)
        except json.JSONDecodeError:
            fallback = re.compile(r"(.+\.(?:cpp|c|cc|h)):(\d+):\s+(?:error|warning):\s+(.+)")
            return [
                AnalysisIssue(
                    file=m.group(1), line=int(m.group(2)), column=0,
                    severity="warning", rule_id="infer",
                    message=m.group(3), tool=tool,
                )
                for m in fallback.finditer(output)
            ]
        return [
            AnalysisIssue(
                file=entry.get("file", "unknown"),
                line=entry.get("line", 0),
                column=0,
                severity="error" if entry.get("severity") == "ERROR" else "warning",
                rule_id=entry.get("bug_type", "unknown"),
                message=entry.get("qualifier", ""),
                tool=tool,
            )
            for entry in report
        ]
# Maps user-facing tool names to their parser / command-builder classes.
# _validate() and execute() both key into this dict, so adding a new
# analyzer only requires a new parser class plus one entry here.
_TOOL_REGISTRY: dict[str, type] = {
    "cppcheck": CppcheckParser,
    "clang-tidy": ClangTidyParser,
    "infer": InferParser,
}
# ════════════════════════════════════════════════════════════════
# 主工具类
# ════════════════════════════════════════════════════════════════
class StaticAnalyzerTool:
    """C/C++ static-analysis tool.

    Runs an external analyzer (cppcheck / clang-tidy / infer) against a
    project directory and reports errors, warnings and style issues.
    Every setting is read via settings.tools['static_analyzer'][key]
    (through the _cfg helper), so config.yaml is the single source of
    defaults; explicit kwargs to execute() override those defaults.
    """
    # Tool metadata consumed by the agent's tool registry (runtime strings,
    # intentionally kept in Chinese for the end user).
    name = "static_analyzer"
    description = (
        "对指定目录下的 C/C++ 工程调用外部静态分析工具cppcheck/clang-tidy/infer"
        "进行代码质量检查,返回错误、警告及代码风格问题"
    )
    # JSON-schema-like parameter spec; only project_dir is effectively required.
    parameters = {
        "project_dir": {
            "type": "string",
            "description": "C/C++ 工程根目录的绝对路径,例如 /home/user/myproject",
        },
        "tool": {
            "type": "string",
            "description": "静态分析工具: cppcheck默认| clang-tidy | infer",
            "enum": ["cppcheck", "clang-tidy", "infer"],
        },
        "standard": {
            "type": "string",
            "description": "C/C++ 语言标准: c89 | c99 | c11 | c++11 | c++14 | c++17 | c++20",
        },
        "extra_args": {
            "type": "string",
            "description": "额外命令行参数(追加到 config.yaml tool_extra_args 之后)",
        },
        "output_format": {
            "type": "string",
            "description": "输出格式: summary默认| json | full",
            "enum": ["summary", "json", "full"],
        },
        "timeout": {
            "type": "integer",
            "description": "分析超时秒数(不传则使用 config.yaml 中的 timeout",
        },
    }

    def execute(self, **kwargs) -> str:
        """Run one analysis and return a formatted report string.

        Every failure path (bad params, missing tool, timeout, crash) is
        converted into a "❌ ..." message rather than an exception.
        """
        # ── Read parameters; fall back to config.yaml defaults when absent ──
        project_dir = kwargs.get("project_dir", "")
        tool_name = kwargs.get("tool", _cfg('default_tool', 'cppcheck')).lower()
        standard = kwargs.get("standard", _cfg('default_std', 'c++17'))
        extra_args = kwargs.get("extra_args", "")
        output_format = kwargs.get("output_format", _cfg('output_format', 'summary'))
        timeout = int(kwargs.get("timeout", _cfg('timeout', 120)))
        logger.info(
            f"🔍 静态分析启动\n"
            f" 工程目录 : {project_dir}\n"
            f" 分析工具 : {tool_name} "
            f"[config default_tool={_cfg('default_tool')}]\n"
            f" 语言标准 : {standard} "
            f"[config default_std={_cfg('default_std')}]\n"
            f" 超时 : {timeout}s "
            f"[config timeout={_cfg('timeout')}s]\n"
            f" 并行数 : {_cfg('jobs')} "
            f"[config jobs={_cfg('jobs')}]\n"
            f" 最大问题数: {_cfg('max_issues')}"
        )
        # ── Validate parameters ─────────────────────────────────────
        err = self._validate(project_dir, tool_name)
        if err:
            return err
        # ── Build and run the analysis command ──────────────────────
        parser_cls = _TOOL_REGISTRY[tool_name]
        try:
            cmd = parser_cls.build_command(project_dir, standard, extra_args)
        except Exception as e:
            return f"❌ 构造分析命令失败: {e}"
        logger.info(f"🚀 执行命令: {' '.join(cmd)}")
        result = self._run_command(cmd, project_dir, timeout, tool_name)
        # Truncate any issues beyond the configured max_issues cap.
        max_issues = _cfg('max_issues', 500)
        if len(result.issues) > max_issues:
            logger.info(f"⚠️ 问题数 {len(result.issues)} 超过上限 {max_issues},已截断")
            result.issues = result.issues[:max_issues]
        return self._format_output(result, output_format)

    # ── Private helpers ──────────────────────────────────────────
    @staticmethod
    def _validate(project_dir: str, tool_name: str) -> str | None:
        """Return an error-message string when inputs are invalid, else None."""
        if not project_dir:
            return "❌ 参数错误: project_dir 不能为空"
        path = Path(project_dir)
        if not path.exists():
            return f"❌ 目录不存在: {project_dir}"
        if not path.is_dir():
            return f"❌ 路径不是目录: {project_dir}"
        # Whitelist check (config.yaml allowed_roots); empty list = unrestricted.
        # NOTE(review): startswith is a plain string-prefix test, so
        # "/home/user" also admits "/home/user2" — consider Path-based checks.
        allowed_roots = _cfg('allowed_roots', [])
        if allowed_roots and not any(
            project_dir.startswith(r) for r in allowed_roots
        ):
            return (
                f"❌ 安全限制: {project_dir} 不在白名单中\n"
                f" 白名单: {allowed_roots}\n"
                f" 请在 config.yaml → tools.static_analyzer.allowed_roots 中添加"
            )
        # Make sure the directory actually contains C/C++ sources.
        src_files = (
            list(path.rglob("*.cpp")) + list(path.rglob("*.c")) +
            list(path.rglob("*.cc")) + list(path.rglob("*.h"))
        )
        if not src_files:
            return f"❌ 目录中未找到 C/C++ 源文件: {project_dir}"
        if tool_name not in _TOOL_REGISTRY:
            return (
                f"❌ 不支持的分析工具: {tool_name}\n"
                f" 可选值: {', '.join(_TOOL_REGISTRY.keys())}"
            )
        # clang-tidy analyses may be driven by the run-clang-tidy wrapper,
        # so accept either executable being on PATH.
        exe = "run-clang-tidy" if tool_name == "clang-tidy" else tool_name
        if not shutil.which(exe) and not shutil.which(tool_name):
            return (
                f"❌ 分析工具未安装: {tool_name}\n"
                f" 安装方式:\n"
                f" cppcheck : sudo apt install cppcheck\n"
                f" clang-tidy: sudo apt install clang-tidy\n"
                f" infer : https://fbinfer.com/docs/getting-started"
            )
        return None

    @staticmethod
    def _run_command(
        cmd: list[str], project_dir: str, timeout: int, tool_name: str,
    ) -> AnalysisResult:
        """Execute *cmd* inside *project_dir* and parse output into an AnalysisResult.

        Never raises: timeouts, missing executables and any other failure
        are returned as AnalysisResult(success=False, error=...).
        """
        start = time.time()
        try:
            proc = subprocess.run(
                cmd, cwd=project_dir,
                capture_output=True, text=True,
                timeout=timeout, encoding="utf-8", errors="replace",
            )
            elapsed = time.time() - start
            # Prefer stderr when non-empty: cppcheck --xml writes its
            # diagnostics to stderr, while other tools use stdout.
            raw_output = proc.stderr if proc.stderr.strip() else proc.stdout
            logger.debug(f"📄 原始输出(前 500 字符):\n{raw_output[:500]}")
            parser_cls = _TOOL_REGISTRY[tool_name]
            issues = parser_cls.parse(raw_output, tool_name)
            if tool_name == "infer":
                # infer's authoritative results live in infer-out/report.json;
                # prefer them over whatever was parsed from console output.
                report_path = Path(project_dir) / "infer-out" / "report.json"
                if report_path.exists():
                    issues = InferParser.parse(
                        report_path.read_text(encoding="utf-8"), "infer"
                    )
            logger.info(f"✅ 分析完成: {len(issues)} 个问题,耗时 {elapsed:.1f}s")
            return AnalysisResult(
                project_dir=project_dir, tool=tool_name,
                success=True, issues=issues,
                raw_output=raw_output, elapsed_sec=elapsed,
            )
        except subprocess.TimeoutExpired:
            elapsed = time.time() - start
            msg = (
                f"分析超时(>{timeout}s\n"
                f" 请增大 config.yaml → tools.static_analyzer.timeout"
            )
            logger.error(f"❌ {msg}")
            return AnalysisResult(
                project_dir=project_dir, tool=tool_name,
                success=False, error=msg, elapsed_sec=elapsed,
            )
        except FileNotFoundError:
            # cmd[0] missing from PATH despite the earlier which() check
            # (e.g. removed between validation and execution).
            return AnalysisResult(
                project_dir=project_dir, tool=tool_name,
                success=False, error=f"命令未找到: {cmd[0]}",
            )
        except Exception as e:
            return AnalysisResult(
                project_dir=project_dir, tool=tool_name,
                success=False, error=str(e),
            )

    @staticmethod
    def _format_output(result: AnalysisResult, fmt: str) -> str:
        """Render *result* as json / full / summary (default)."""
        if fmt == "json":
            return json.dumps(result.to_dict(), ensure_ascii=False, indent=2)
        if fmt == "full":
            # NOTE(review): {'' * 60} renders as an empty string — this looks
            # like a separator such as '─' * 60 lost its character; confirm.
            return (
                f"{result.summary()}\n\n{'' * 60}\n"
                f"📄 原始输出:\n{result.raw_output[:3000]}"
            )
        return result.summary()

View File

@ -29,11 +29,11 @@ class WebSearchTool(BaseTool):
def __init__(self):
super().__init__()
cfg = settings.tools.web_search
self._default_max = cfg.max_results
self._engine = cfg.engine
self._api_key = cfg.api_key
self._timeout = cfg.timeout
cfg = settings.tools['web_search']
self._default_max = cfg['max_results']
self._engine = cfg['engine']
self._api_key = cfg['api_key']
self._timeout = cfg['timeout']
self.logger.debug(
f"⚙️ WebSearch engine={self._engine}, "
f"max_results={self._default_max}, "