first
This commit is contained in:
7
agent_runtime/__init__.py
Normal file
7
agent_runtime/__init__.py
Normal file
@@ -0,0 +1,7 @@
|
||||
from . import code_tool # noqa: F401
|
||||
from . import image_zoom_tool # noqa: F401
|
||||
from . import search_tools # noqa: F401
|
||||
from . import system_tools # noqa: F401
|
||||
from . import web_fetch_tool # noqa: F401
|
||||
from . import workflow_tools # noqa: F401
|
||||
from . import memory_tools # noqa: F401
|
||||
BIN
agent_runtime/__pycache__/__init__.cpython-310.pyc
Normal file
BIN
agent_runtime/__pycache__/__init__.cpython-310.pyc
Normal file
Binary file not shown.
BIN
agent_runtime/__pycache__/code_tool.cpython-310.pyc
Normal file
BIN
agent_runtime/__pycache__/code_tool.cpython-310.pyc
Normal file
Binary file not shown.
BIN
agent_runtime/__pycache__/image_source_map.cpython-310.pyc
Normal file
BIN
agent_runtime/__pycache__/image_source_map.cpython-310.pyc
Normal file
Binary file not shown.
BIN
agent_runtime/__pycache__/image_zoom_tool.cpython-310.pyc
Normal file
BIN
agent_runtime/__pycache__/image_zoom_tool.cpython-310.pyc
Normal file
Binary file not shown.
BIN
agent_runtime/__pycache__/memory_tools.cpython-310.pyc
Normal file
BIN
agent_runtime/__pycache__/memory_tools.cpython-310.pyc
Normal file
Binary file not shown.
BIN
agent_runtime/__pycache__/readonly_tools.cpython-310.pyc
Normal file
BIN
agent_runtime/__pycache__/readonly_tools.cpython-310.pyc
Normal file
Binary file not shown.
BIN
agent_runtime/__pycache__/search_tools.cpython-310.pyc
Normal file
BIN
agent_runtime/__pycache__/search_tools.cpython-310.pyc
Normal file
Binary file not shown.
BIN
agent_runtime/__pycache__/system_tools.cpython-310.pyc
Normal file
BIN
agent_runtime/__pycache__/system_tools.cpython-310.pyc
Normal file
Binary file not shown.
BIN
agent_runtime/__pycache__/web_fetch_tool.cpython-310.pyc
Normal file
BIN
agent_runtime/__pycache__/web_fetch_tool.cpython-310.pyc
Normal file
Binary file not shown.
BIN
agent_runtime/__pycache__/workflow_tools.cpython-310.pyc
Normal file
BIN
agent_runtime/__pycache__/workflow_tools.cpython-310.pyc
Normal file
Binary file not shown.
BIN
agent_runtime/__pycache__/write_tools.cpython-310.pyc
Normal file
BIN
agent_runtime/__pycache__/write_tools.cpython-310.pyc
Normal file
Binary file not shown.
74
agent_runtime/code_tool.py
Normal file
74
agent_runtime/code_tool.py
Normal file
@@ -0,0 +1,74 @@
|
||||
import json
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from typing import Union
|
||||
|
||||
from qwen_agent.tools.base import BaseTool, register_tool
|
||||
from qwen_agent.utils.utils import extract_code
|
||||
|
||||
# Repository root (parent directory of the agent_runtime package).
ROOT_DIR = Path(__file__).resolve().parents[1]
# Scratch directory where generated scripts are written before execution.
RUN_DIR = ROOT_DIR / '.tmp' / 'super_agent_data' / 'code_runs'
# Default per-run timeout in seconds.
DEFAULT_TIMEOUT = 60
|
||||
|
||||
|
||||
@register_tool('code_interpreter', allow_overwrite=True)
class LocalCodeInterpreterTool(BaseTool):
    """Execute Python code in a local subprocess and report the result.

    The code is written to a temporary script under RUN_DIR and run with the
    current interpreter. The result (script path, return code, stdout, stderr)
    is returned as a JSON string.
    """

    description = '本机 Python 代码执行工具,返回 stdout 和 stderr。'
    parameters = {
        'type': 'object',
        'properties': {
            'code': {
                'type': 'string',
                'description': '要执行的 Python 代码'
            },
            'timeout_sec': {
                'type': 'integer',
                'description': '超时时间,单位秒',
                'default': DEFAULT_TIMEOUT
            }
        },
        'required': ['code'],
    }

    def call(self, params: Union[str, dict], **kwargs) -> str:
        """Run the given code and return a JSON result string.

        Raises ValueError when no executable code can be extracted from params.
        """
        params_dict = self._parse_code_params(params)
        code = params_dict['code']
        timeout_sec = int(params_dict.get('timeout_sec', DEFAULT_TIMEOUT))
        RUN_DIR.mkdir(parents=True, exist_ok=True)

        # Keep the script on disk (delete=False) so its path can be reported.
        with tempfile.NamedTemporaryFile(mode='w', suffix='.py', dir=RUN_DIR, delete=False, encoding='utf-8') as fp:
            fp.write(code)
            script_path = fp.name

        try:
            completed = subprocess.run(
                [sys.executable, script_path],
                text=True,
                capture_output=True,
                timeout=timeout_sec,
                check=False,
            )
        except subprocess.TimeoutExpired as exc:
            # FIX: previously a timed-out script raised TimeoutExpired out of
            # the tool call; report the timeout as a structured result instead.
            partial = exc.stdout
            if isinstance(partial, bytes):
                partial = partial.decode('utf-8', errors='replace')
            payload = {
                'script_path': script_path,
                'returncode': None,
                'stdout': partial or '',
                'stderr': f'TimeoutExpired: 超过 {timeout_sec} 秒',
            }
            return json.dumps(payload, ensure_ascii=False, indent=2)

        payload = {
            'script_path': script_path,
            'returncode': completed.returncode,
            'stdout': completed.stdout,
            'stderr': completed.stderr,
        }
        return json.dumps(payload, ensure_ascii=False, indent=2)

    def _parse_code_params(self, params: Union[str, dict]) -> dict:
        """Normalize params into a dict that contains a 'code' key.

        Accepts a dict, a JSON string, or raw text with a fenced code block
        (extracted via extract_code). Raises ValueError if no code is found.
        """
        if isinstance(params, dict):
            if 'code' not in params:
                raise ValueError('code 字段缺失')
            return params
        try:
            parsed = json.loads(params)
            if isinstance(parsed, dict) and 'code' in parsed:
                return parsed
        except json.JSONDecodeError:
            pass
        code = extract_code(params)
        if not code.strip():
            raise ValueError('未检测到可执行代码')
        return {'code': code}
|
||||
32
agent_runtime/image_source_map.py
Normal file
32
agent_runtime/image_source_map.py
Normal file
@@ -0,0 +1,32 @@
|
||||
import threading
|
||||
from pathlib import Path
|
||||
from typing import Dict
|
||||
|
||||
# Guards all access to _SAFE_TO_ORIGINAL.
_MAP_LOCK = threading.Lock()
# Maps a normalized "safe" (processed) image path to its original source path.
_SAFE_TO_ORIGINAL: Dict[str, str] = {}
# Soft cap on stored mappings; oldest insertions are evicted beyond this.
MAX_RECORDS = 2048
|
||||
|
||||
|
||||
def _normalize_path(path_or_uri: str) -> str:
|
||||
raw = path_or_uri.strip()
|
||||
if raw.startswith('file://'):
|
||||
raw = raw[len('file://'):]
|
||||
return str(Path(raw).expanduser().resolve())
|
||||
|
||||
|
||||
def register_safe_image(safe_path: str, original_path: str) -> None:
    """Record the mapping from a processed image path back to its original."""
    safe_key = _normalize_path(safe_path)
    original_value = _normalize_path(original_path)
    with _MAP_LOCK:
        _SAFE_TO_ORIGINAL[safe_key] = original_value
        excess = len(_SAFE_TO_ORIGINAL) - MAX_RECORDS
        if excess > 0:
            # Evict the oldest insertions (dicts preserve insertion order).
            for stale_key in list(_SAFE_TO_ORIGINAL)[:excess]:
                del _SAFE_TO_ORIGINAL[stale_key]
|
||||
|
||||
|
||||
def resolve_original_image(path_or_uri: str) -> str:
    """Return the original source path for a processed image.

    Falls back to the normalized input itself when no mapping is registered.
    """
    lookup_key = _normalize_path(path_or_uri)
    with _MAP_LOCK:
        return _SAFE_TO_ORIGINAL.get(lookup_key, lookup_key)
|
||||
185
agent_runtime/image_zoom_tool.py
Normal file
185
agent_runtime/image_zoom_tool.py
Normal file
@@ -0,0 +1,185 @@
|
||||
import math
|
||||
import os
|
||||
import uuid
|
||||
import base64
|
||||
from io import BytesIO
|
||||
from pathlib import Path
|
||||
from typing import List, Tuple, Union
|
||||
|
||||
import requests
|
||||
from PIL import Image
|
||||
|
||||
from qwen_agent.llm.schema import ContentItem
|
||||
from qwen_agent.log import logger
|
||||
from qwen_agent.tools.base import BaseToolWithFileAccess, register_tool
|
||||
from qwen_agent.utils.utils import extract_images_from_messages
|
||||
|
||||
from .image_source_map import resolve_original_image
|
||||
|
||||
# Safety caps applied when re-emitting crops (all env-overridable).
MAX_IMAGE_PIXELS = int(os.getenv('SAFE_MAX_IMAGE_PIXELS', str(4 * 1024 * 1024)))  # total pixel budget
MAX_IMAGE_SIDE = int(os.getenv('SAFE_MAX_IMAGE_SIDE', '3072'))  # longest-side cap
MIN_IMAGE_SIDE = int(os.getenv('SAFE_MIN_IMAGE_SIDE', '28'))  # floor applied after scaling
MIN_BBOX_SIDE = 32  # minimum crop-box side, in original-image pixels
JPEG_QUALITY = int(os.getenv('SAFE_JPEG_QUALITY', '90'))
# Pillow >= 9 exposes Image.Resampling; fall back to the legacy module attribute.
RESAMPLE_LANCZOS = getattr(getattr(Image, 'Resampling', Image), 'LANCZOS')
# Timeout for fetching http(s) image references, in seconds.
HTTP_TIMEOUT_SEC = 30
|
||||
|
||||
|
||||
def _normalize_local_path(path_or_uri: str) -> str:
|
||||
raw = path_or_uri.strip()
|
||||
if raw.startswith('file://'):
|
||||
raw = raw[len('file://'):]
|
||||
return str(Path(raw).expanduser().resolve())
|
||||
|
||||
|
||||
def _is_image_data_uri(image_ref: str) -> bool:
|
||||
return image_ref.strip().lower().startswith('data:image')
|
||||
|
||||
|
||||
def _load_data_uri_image(image_ref: str) -> Image.Image:
    """Decode a base64 image data URI into an RGB PIL image.

    Raises ValueError for malformed or non-base64 data URIs.
    """
    try:
        header, payload = image_ref.split(',', 1)
    except ValueError as err:
        raise ValueError('data URI 格式错误') from err
    if ';base64' not in header.lower():
        raise ValueError('仅支持 base64 图片 data URI')
    raw_bytes = base64.b64decode(payload)
    return Image.open(BytesIO(raw_bytes)).convert('RGB')
|
||||
|
||||
|
||||
def _resolve_image_reference(image_ref: str) -> str:
    """Map a model-visible image reference back to its true source.

    Data URIs and remote URLs pass through unchanged; local paths are looked
    up in the safe->original map.
    """
    if _is_image_data_uri(image_ref):
        return image_ref
    if image_ref.startswith(('http://', 'https://')):
        return image_ref
    return resolve_original_image(image_ref)
|
||||
|
||||
|
||||
def _load_image(image_ref: str, work_dir: str) -> Image.Image:
    """Load an RGB image from a data URI, URL, absolute path, or work_dir-relative path."""
    if _is_image_data_uri(image_ref):
        return _load_data_uri_image(image_ref)

    if image_ref.startswith(('http://', 'https://')):
        resp = requests.get(image_ref, timeout=HTTP_TIMEOUT_SEC)
        resp.raise_for_status()
        return Image.open(BytesIO(resp.content)).convert('RGB')

    local_path = _normalize_local_path(image_ref)
    if os.path.exists(local_path):
        return Image.open(local_path).convert('RGB')

    # Last resort: interpret the reference relative to the working directory.
    fallback_path = os.path.join(work_dir, image_ref)
    return Image.open(fallback_path).convert('RGB')
|
||||
|
||||
|
||||
def _ensure_min_bbox(
    left: float,
    top: float,
    right: float,
    bottom: float,
    img_w: int,
    img_h: int,
) -> Tuple[int, int, int, int]:
    """Expand a crop box so both sides reach MIN_BBOX_SIDE, clamped to the image."""
    box_w = max(1.0, right - left)
    box_h = max(1.0, bottom - top)
    if min(box_w, box_h) >= MIN_BBOX_SIDE:
        return int(left), int(top), int(right), int(bottom)

    # Grow both sides around the box center by the same factor so the
    # smaller side reaches MIN_BBOX_SIDE.
    factor = MIN_BBOX_SIDE / min(box_w, box_h)
    half_w = box_w * factor * 0.5
    half_h = box_h * factor * 0.5
    cx = (left + right) * 0.5
    cy = (top + bottom) * 0.5

    return (
        max(0, int(math.floor(cx - half_w))),
        max(0, int(math.floor(cy - half_h))),
        min(img_w, int(math.ceil(cx + half_w))),
        min(img_h, int(math.ceil(cy + half_h))),
    )
|
||||
|
||||
|
||||
def _relative_bbox_to_absolute(bbox_2d: list, img_w: int, img_h: int) -> Tuple[int, int, int, int]:
    """Convert a 0-1000 relative bbox to absolute pixels, normalized and min-expanded."""
    def to_abs(value, limit: int) -> float:
        # Scale 0-1000 coordinates to pixels and clamp into the image.
        return max(0.0, min(limit, float(value) / 1000.0 * limit))

    x1 = to_abs(bbox_2d[0], img_w)
    y1 = to_abs(bbox_2d[1], img_h)
    x2 = to_abs(bbox_2d[2], img_w)
    y2 = to_abs(bbox_2d[3], img_h)

    # Normalize corner order before enforcing the minimum box size.
    return _ensure_min_bbox(min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2), img_w, img_h)
|
||||
|
||||
|
||||
def _scale_size(width: int, height: int) -> Tuple[int, int]:
    """Compute a downscaled size respecting the pixel-count and side-length caps.

    Never upscales; each resulting side is floored at MIN_IMAGE_SIDE.
    Raises ValueError on a non-positive input size.
    """
    pixels = width * height
    if pixels <= 0:
        raise ValueError(f'无效图片尺寸: {width}x{height}')

    scale = 1.0
    if pixels > MAX_IMAGE_PIXELS:
        scale = min(scale, math.sqrt(MAX_IMAGE_PIXELS / pixels))
    longest = max(width, height)
    if longest > MAX_IMAGE_SIDE:
        scale = min(scale, MAX_IMAGE_SIDE / longest)

    return (
        max(MIN_IMAGE_SIDE, int(width * scale)),
        max(MIN_IMAGE_SIDE, int(height * scale)),
    )
|
||||
|
||||
|
||||
def _resize_crop_if_needed(image: Image.Image) -> Image.Image:
    """Downscale the image to safe bounds; return it unchanged when already safe."""
    current = image.size
    target = _scale_size(*current)
    if target == current:
        return image
    return image.resize(target, RESAMPLE_LANCZOS)
|
||||
|
||||
|
||||
@register_tool('image_zoom_in_tool', allow_overwrite=True)
class OriginalImageZoomTool(BaseToolWithFileAccess):
    """Crop a region of the original source image and return it as a new image.

    The bbox is given in 0-1000 relative coordinates. The model-visible image
    reference is mapped back to its original source via image_source_map before
    cropping, then the crop is rescaled to safe limits and saved as a JPEG in
    the tool's work_dir.
    """

    description = '基于原图裁切指定区域,并在裁切后按安全阈值缩放输出。'
    parameters = {
        'type': 'object',
        'properties': {
            'bbox_2d': {
                'type': 'array',
                'items': {
                    'type': 'number'
                },
                'minItems': 4,
                'maxItems': 4,
                'description': '裁切框,格式 [x1,y1,x2,y2],坐标范围 0 到 1000'
            },
            'label': {
                'type': 'string',
                'description': '目标对象标签'
            },
            'img_idx': {
                'type': 'number',
                'description': '图片索引,从 0 开始'
            }
        },
        'required': ['bbox_2d', 'label', 'img_idx']
    }

    def call(self, params: Union[str, dict], **kwargs) -> List[ContentItem]:
        """Crop/zoom one of the conversation images; returns an image item or error text."""
        params = self._verify_json_format_args(params)
        # Candidate images come from the running conversation, not from params.
        images = extract_images_from_messages(kwargs.get('messages', []))
        if not images:
            return [ContentItem(text='Error: 未找到输入图片')]

        img_idx = int(params['img_idx'])
        if img_idx < 0 or img_idx >= len(images):
            return [ContentItem(text=f'Error: img_idx 越界,当前图片数量 {len(images)}')]

        os.makedirs(self.work_dir, exist_ok=True)
        try:
            image_ref = images[img_idx]
            # Map the (possibly downscaled) reference back to the original source.
            source_ref = _resolve_image_reference(image_ref)
            image = _load_image(source_ref, self.work_dir)
            bbox = _relative_bbox_to_absolute(params['bbox_2d'], *image.size)
            cropped = image.crop(bbox)
            resized = _resize_crop_if_needed(cropped)
            output_path = os.path.abspath(os.path.join(self.work_dir, f'{uuid.uuid4()}.jpg'))
            resized.save(output_path, format='JPEG', quality=JPEG_QUALITY, optimize=True)
            return [ContentItem(image=output_path)]
        except Exception as exc:
            # Tool boundary: report failures as text rather than raising.
            logger.warning(str(exc))
            return [ContentItem(text=f'Tool Execution Error {exc}')]
|
||||
74
agent_runtime/memory_tools.py
Normal file
74
agent_runtime/memory_tools.py
Normal file
@@ -0,0 +1,74 @@
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Union
|
||||
from qwen_agent.tools.base import BaseTool, register_tool
|
||||
|
||||
# Read the memory-store path from the environment; default to memory.json in
# the current directory. .resolve() turns a relative path into an absolute one.
MEMORY_FILE = Path(os.getenv('MEMORY_FILE_PATH', './memory.json')).resolve()
|
||||
|
||||
def _load_memory() -> list:
    """Safely load memories from disk, always returning a list."""
    if not MEMORY_FILE.exists():
        return []
    try:
        text = MEMORY_FILE.read_text(encoding='utf-8').strip()
        if not text:
            return []
        parsed = json.loads(text)
    except Exception:
        # Best-effort: treat unreadable or corrupt files as an empty store.
        return []
    # Coerce any non-list payload (e.g. a dict) to the expected list shape.
    return parsed if isinstance(parsed, list) else []
|
||||
|
||||
def _save_memory(memories: list):
    """Persist memories to MEMORY_FILE, creating the parent directory as needed."""
    try:
        MEMORY_FILE.parent.mkdir(parents=True, exist_ok=True)
        serialized = json.dumps(memories, ensure_ascii=False, indent=2)
        MEMORY_FILE.write_text(serialized, encoding='utf-8')
    except Exception as e:
        # Best-effort persistence: report but do not raise.
        print(f"写入记忆文件失败: {e}")
|
||||
|
||||
@register_tool('manage_memory', allow_overwrite=True)
class MemoryTool(BaseTool):
    """Long-term memory manager backed by a JSON file.

    Operations: 'add' appends a memory string, 'list' shows all entries with
    indices, 'delete' removes an entry by index.
    """

    description = '长期记忆管理工具。支持 add (添加), list (查看), delete (删除索引)。'
    parameters = {
        'type': 'object',
        'properties': {
            'operation': {'type': 'string', 'description': '操作类型: add|list|delete'},
            'content': {'type': 'string', 'description': '记忆内容(仅add模式)'},
            'index': {'type': 'integer', 'description': '索引号(仅delete模式)'}
        },
        'required': ['operation'],
    }

    def call(self, params: Union[str, dict], **kwargs) -> str:
        """Dispatch on `operation` and return a human-readable result string."""
        params = self._verify_json_format_args(params)
        op = params['operation'].lower()
        memories = _load_memory()

        if op == 'add':
            content = params.get('content', '').strip()
            if not content:
                return "错误:内容不能为空。"
            memories.append(content)
            _save_memory(memories)
            return f"✅ 成功存入:『{content}』"

        elif op == 'list':
            if not memories:
                return "目前没有任何长期记忆。"
            return "记忆列表:\n" + "\n".join([f"[{i}] {m}" for i, m in enumerate(memories)])

        elif op == 'delete':
            # FIX: the model may send the index as a string/float or omit it;
            # the old `0 <= idx < len(...)` comparison then raised TypeError
            # instead of returning a friendly error.
            raw_idx = params.get('index')
            try:
                idx = int(raw_idx)
            except (TypeError, ValueError):
                idx = None
            if idx is None or not (0 <= idx < len(memories)):
                return f"错误:索引 {raw_idx} 无效。"
            removed = memories.pop(idx)
            _save_memory(memories)
            return f"🗑️ 已删除:『{removed}』"

        return f"不支持的操作: {op}"
|
||||
107
agent_runtime/readonly_tools.py
Normal file
107
agent_runtime/readonly_tools.py
Normal file
@@ -0,0 +1,107 @@
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Iterable, Union
|
||||
|
||||
from qwen_agent.tools.base import BaseTool, register_tool
|
||||
|
||||
# Default cap on file reads (512 KiB); overridable via READONLY_FS_MAX_READ_BYTES.
DEFAULT_MAX_READ_BYTES = 512 * 1024
|
||||
|
||||
|
||||
def _project_root() -> Path:
|
||||
return Path(__file__).resolve().parents[1]
|
||||
|
||||
|
||||
def _split_root_items(raw: str) -> list[str]:
|
||||
if not raw.strip():
|
||||
return []
|
||||
return [item.strip() for item in raw.split(os.pathsep) if item.strip()]
|
||||
|
||||
|
||||
def _resolve_roots() -> tuple[Path, ...]:
    """Resolve the allowed filesystem roots from environment variables.

    Precedence: READONLY_FS_ROOTS (pathsep-separated), then the legacy
    READONLY_FS_ROOT, then the project root.
    """
    entries = _split_root_items(os.getenv('READONLY_FS_ROOTS', ''))
    if not entries:
        # Backwards compatibility with the older single-root variable.
        legacy = os.getenv('READONLY_FS_ROOT', '').strip()
        if legacy:
            entries = [legacy]
    if not entries:
        entries = [str(_project_root())]
    return tuple(Path(os.path.expanduser(entry)).resolve() for entry in entries)
|
||||
|
||||
|
||||
def _resolve_target(raw_path: str) -> Path:
|
||||
return Path(os.path.expanduser(raw_path)).resolve()
|
||||
|
||||
|
||||
def _is_within_root(target: Path, root: Path) -> bool:
|
||||
try:
|
||||
target.relative_to(root)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
def _ensure_within_roots(target: Path, roots: Iterable[Path]) -> None:
    """Raise PermissionError unless target is inside one of the allowed roots."""
    root_list = tuple(roots)
    for root in root_list:
        if _is_within_root(target, root):
            return
    allowed_text = ', '.join(str(root) for root in root_list)
    raise PermissionError(f'只允许访问这些根目录内的路径: {allowed_text};拒绝: {target}')
|
||||
|
||||
|
||||
@register_tool('filesystem', allow_overwrite=True)
class ReadOnlyFilesystemTool(BaseTool):
    """Read-only filesystem access restricted to the configured root directories.

    Only 'list' and 'read' operations are allowed; any other operation, or a
    path outside the allowed roots, raises PermissionError.
    """

    description = '只读文件系统工具,支持 list 和 read 两种操作。'
    parameters = {
        'type': 'object',
        'properties': {
            'operation': {
                'type': 'string',
                'description': '仅支持 list|read'
            },
            'path': {
                'type': 'string',
                'description': '目标路径'
            },
        },
        'required': ['operation', 'path'],
    }

    def call(self, params: Union[str, dict], **kwargs) -> str:
        """Validate operation and path, then list or read the target."""
        params = self._verify_json_format_args(params)
        operation = str(params['operation']).strip().lower()
        if operation not in {'list', 'read'}:
            raise PermissionError(f'只读策略已启用,禁止 operation={operation}')

        # Roots are re-resolved per call so env-var changes take effect immediately.
        roots = _resolve_roots()
        target = _resolve_target(str(params['path']))
        _ensure_within_roots(target, roots)
        if operation == 'list':
            return self._list_path(target)
        return self._read_file(target)

    def _list_path(self, target: Path) -> str:
        """Return a JSON description of a file or a directory listing."""
        if not target.exists():
            raise FileNotFoundError(f'路径不存在: {target}')
        if target.is_file():
            stat = target.stat()
            payload = {'type': 'file', 'path': str(target), 'size': stat.st_size}
            return json.dumps(payload, ensure_ascii=False)

        items = []
        for child in sorted(target.iterdir()):
            item_type = 'dir' if child.is_dir() else 'file'
            # Size is only meaningful for regular files.
            size = child.stat().st_size if child.is_file() else None
            items.append({'name': child.name, 'type': item_type, 'size': size})
        payload = {'type': 'dir', 'path': str(target), 'items': items}
        return json.dumps(payload, ensure_ascii=False, indent=2)

    def _read_file(self, target: Path) -> str:
        """Return the file's UTF-8 text, enforcing the configured size limit."""
        if not target.exists() or not target.is_file():
            raise FileNotFoundError(f'文件不存在: {target}')
        limit = int(os.getenv('READONLY_FS_MAX_READ_BYTES', str(DEFAULT_MAX_READ_BYTES)))
        size = target.stat().st_size
        if size > limit:
            raise ValueError(f'文件过大: {size} bytes,超过读取上限 {limit} bytes')
        return target.read_text(encoding='utf-8')
|
||||
135
agent_runtime/search_tools.py
Normal file
135
agent_runtime/search_tools.py
Normal file
@@ -0,0 +1,135 @@
|
||||
import os
|
||||
import re
|
||||
from typing import List, Union
|
||||
|
||||
from ddgs import DDGS
|
||||
|
||||
from qwen_agent.llm.schema import ContentItem
|
||||
from qwen_agent.tools.base import BaseTool, register_tool
|
||||
|
||||
# Default number of search hits returned.
DEFAULT_RESULTS = 6
# DuckDuckGo region code ('wt-wt' = worldwide); env-overridable.
DEFAULT_REGION = os.getenv('WEB_SEARCH_REGION', 'wt-wt')
DEFAULT_SAFESEARCH = os.getenv('WEB_SEARCH_SAFESEARCH', 'on')
# Trailing Chinese question suffixes stripped during query normalization.
QUERY_SUFFIX_PATTERN = re.compile(
    r'(是谁|是什么|是啥|什么意思|介绍一下|请介绍|是谁啊|是谁呀|是啥啊|是啥呀|吗|嘛|呢|么)$'
)
|
||||
|
||||
|
||||
def _normalize_query(query: str) -> str:
    """Strip punctuation and common Chinese question suffixes from a query.

    Falls back to the stripped original query if normalization empties it.
    """
    punctuation = ' ?!.,;:,。?!;:'
    trimmed = query.strip()
    # Normalize full-width punctuation before stripping.
    trimmed = trimmed.replace('?', '?').replace('!', '!').replace('。', '.')
    trimmed = trimmed.strip(punctuation)
    trimmed = trimmed.removeprefix('请问').strip()
    trimmed = QUERY_SUFFIX_PATTERN.sub('', trimmed).strip()
    trimmed = trimmed.strip(punctuation)
    return trimmed or query.strip()
|
||||
|
||||
|
||||
def _clamp_results(value: int) -> int:
|
||||
if value < 1:
|
||||
return 1
|
||||
if value > 12:
|
||||
return 12
|
||||
return value
|
||||
|
||||
|
||||
@register_tool('web_search', allow_overwrite=True)
class LocalWebSearchTool(BaseTool):
    """DuckDuckGo text search returning titles, links, and snippets as text."""

    description = '搜索互联网并返回标题、链接和摘要。'
    parameters = {
        'type': 'object',
        'properties': {
            'query': {
                'type': 'string',
                'description': '搜索关键词'
            },
            'max_results': {
                'type': 'integer',
                'description': '返回条数,建议 1 到 12',
                'default': DEFAULT_RESULTS
            }
        },
        'required': ['query'],
    }

    def call(self, params: Union[str, dict], **kwargs) -> str:
        """Run the search and return a formatted text block of results.

        Raises ValueError when the (normalized) query is empty.
        """
        params = self._verify_json_format_args(params)
        query = _normalize_query(params['query'])
        if not query:
            raise ValueError('query 不能为空')
        max_results = _clamp_results(int(params.get('max_results', DEFAULT_RESULTS)))

        with DDGS() as ddgs:
            results = list(
                ddgs.text(
                    query=query,
                    max_results=max_results,
                    region=DEFAULT_REGION,
                    safesearch=DEFAULT_SAFESEARCH,
                )
            )

        if not results:
            return f'未检索到结果,query={query}'

        lines = []
        for idx, item in enumerate(results, start=1):
            title = item.get('title', '').strip()
            href = item.get('href', '').strip()
            body = item.get('body', '').strip()
            lines.append(f'[{idx}] {title}\nURL: {href}\n摘要: {body}')
        return '\n\n'.join(lines)
|
||||
|
||||
|
||||
@register_tool('image_search', allow_overwrite=True)
class LocalImageSearchTool(BaseTool):
    """DuckDuckGo image search returning interleaved text and image items."""

    description = '按关键词搜索图片并返回图文结果。'
    parameters = {
        'type': 'object',
        'properties': {
            'query': {
                'type': 'string',
                'description': '图片搜索关键词'
            },
            'max_results': {
                'type': 'integer',
                'description': '返回条数,建议 1 到 12',
                'default': DEFAULT_RESULTS
            }
        },
        'required': ['query'],
    }

    def call(self, params: Union[str, dict], **kwargs) -> List[ContentItem]:
        """Search images; failures are reported as a text item, not raised.

        Raises ValueError only when the (normalized) query is empty.
        """
        params = self._verify_json_format_args(params)
        query = _normalize_query(params['query'])
        if not query:
            raise ValueError('query 不能为空')
        max_results = _clamp_results(int(params.get('max_results', DEFAULT_RESULTS)))

        try:
            with DDGS() as ddgs:
                results = list(
                    ddgs.images(
                        query=query,
                        max_results=max_results,
                        region=DEFAULT_REGION,
                        safesearch=DEFAULT_SAFESEARCH,
                    )
                )
        except Exception as exc:
            # Network/backend errors surface as a text result.
            return [ContentItem(text=f'图片检索失败: {exc}')]

        if not results:
            return [ContentItem(text=f'未检索到图片,query={query}')]

        content: List[ContentItem] = []
        for idx, item in enumerate(results, start=1):
            title = item.get('title', '').strip()
            image_url = item.get('image', '').strip()
            page_url = item.get('url', '').strip()
            text = f'[{idx}] {title}\n图片: {image_url}\n来源: {page_url}'
            content.append(ContentItem(text=text))
            # Attach the image itself right after its description.
            if image_url:
                content.append(ContentItem(image=image_url))
        return content
|
||||
159
agent_runtime/system_tools.py
Normal file
159
agent_runtime/system_tools.py
Normal file
@@ -0,0 +1,159 @@
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from typing import Union
|
||||
|
||||
from qwen_agent.tools.base import BaseTool, register_tool
|
||||
|
||||
# Default command timeout in seconds.
DEFAULT_TIMEOUT = 60
|
||||
|
||||
|
||||
def _ensure_parent(path: Path) -> None:
|
||||
parent = path.parent
|
||||
parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
|
||||
def _build_shell_command(command: str) -> list[str]:
|
||||
if os.name == 'nt':
|
||||
return ['powershell.exe', '-NoProfile', '-Command', command]
|
||||
return ['bash', '-lc', command]
|
||||
|
||||
|
||||
@register_tool('filesystem', allow_overwrite=True)
class FilesystemTool(BaseTool):
    """Read-write filesystem tool: list, read, write, append, mkdir, remove.

    NOTE(review): this registers under the same name 'filesystem' as
    ReadOnlyFilesystemTool (both with allow_overwrite=True); whichever module
    is imported last wins the registration — confirm this is intended.
    """

    description = '文件系统工具,支持目录列举、读写文件、创建目录和删除。'
    parameters = {
        'type': 'object',
        'properties': {
            'operation': {
                'type': 'string',
                'description': 'list|read|write|append|mkdir|remove'
            },
            'path': {
                'type': 'string',
                'description': '目标路径'
            },
            'content': {
                'type': 'string',
                'description': '写入内容,仅 write 或 append 需要'
            }
        },
        'required': ['operation', 'path'],
    }

    def call(self, params: Union[str, dict], **kwargs) -> str:
        """Dispatch to the handler for `operation`; raises ValueError if unknown."""
        params = self._verify_json_format_args(params)
        operation = params['operation'].strip().lower()
        # Expand ~ and resolve to an absolute path before dispatch.
        target = Path(os.path.expanduser(params['path'])).resolve()
        handlers = {
            'list': self._list_path,
            'read': self._read_file,
            'write': self._write_file,
            'append': self._append_file,
            'mkdir': self._mkdir_path,
            'remove': self._remove_path,
        }
        if operation not in handlers:
            raise ValueError(f'不支持的 operation: {operation}')
        return handlers[operation](target, params)

    def _list_path(self, target: Path, params: dict) -> str:
        """Return JSON describing a file or a directory listing."""
        if not target.exists():
            raise FileNotFoundError(f'路径不存在: {target}')
        if target.is_file():
            stat = target.stat()
            return json.dumps({'type': 'file', 'path': str(target), 'size': stat.st_size}, ensure_ascii=False)

        items = []
        for child in sorted(target.iterdir()):
            item_type = 'dir' if child.is_dir() else 'file'
            # Size is only meaningful for regular files.
            size = child.stat().st_size if child.is_file() else None
            items.append({'name': child.name, 'type': item_type, 'size': size})
        return json.dumps({'type': 'dir', 'path': str(target), 'items': items}, ensure_ascii=False, indent=2)

    def _read_file(self, target: Path, params: dict) -> str:
        """Return the file's UTF-8 text; raises FileNotFoundError if missing."""
        if not target.exists() or not target.is_file():
            raise FileNotFoundError(f'文件不存在: {target}')
        return target.read_text(encoding='utf-8')

    def _write_file(self, target: Path, params: dict) -> str:
        """Overwrite target with `content`, creating parent directories."""
        content = params.get('content')
        if content is None:
            raise ValueError('write 操作必须提供 content')
        _ensure_parent(target)
        target.write_text(content, encoding='utf-8')
        return f'写入成功: {target}'

    def _append_file(self, target: Path, params: dict) -> str:
        """Append `content` to target, creating parent directories."""
        content = params.get('content')
        if content is None:
            raise ValueError('append 操作必须提供 content')
        _ensure_parent(target)
        with target.open('a', encoding='utf-8') as fp:
            fp.write(content)
        return f'追加成功: {target}'

    def _mkdir_path(self, target: Path, params: dict) -> str:
        """Create the directory (and ancestors); idempotent."""
        target.mkdir(parents=True, exist_ok=True)
        return f'目录已创建: {target}'

    def _remove_path(self, target: Path, params: dict) -> str:
        """Delete a file, or recursively delete a directory tree."""
        if not target.exists():
            raise FileNotFoundError(f'路径不存在: {target}')
        if target.is_dir():
            # Recursive delete — destructive by design.
            shutil.rmtree(target)
        else:
            target.unlink()
        return f'删除成功: {target}'
|
||||
|
||||
|
||||
@register_tool('run_command', allow_overwrite=True)
class RunCommandTool(BaseTool):
    """Execute a local shell command and report exit code, stdout and stderr.

    The command runs through the platform shell wrapper (_build_shell_command)
    and the result is returned as a JSON string.
    """

    description = '执行本机命令并返回退出码、标准输出和标准错误。'
    parameters = {
        'type': 'object',
        'properties': {
            'command': {
                'type': 'string',
                'description': '待执行命令'
            },
            'cwd': {
                'type': 'string',
                'description': '执行目录'
            },
            'timeout_sec': {
                'type': 'integer',
                'description': '超时时间秒数',
                'default': DEFAULT_TIMEOUT
            }
        },
        'required': ['command'],
    }

    def call(self, params: Union[str, dict], **kwargs) -> str:
        """Run the command and return a JSON result string.

        Raises ValueError when `command` is empty.
        """
        params = self._verify_json_format_args(params)
        command = params['command'].strip()
        if not command:
            raise ValueError('command 不能为空')
        timeout_sec = int(params.get('timeout_sec', DEFAULT_TIMEOUT))
        cwd_raw = params.get('cwd') or os.getcwd()
        cwd = str(Path(os.path.expanduser(cwd_raw)).resolve())

        try:
            completed = subprocess.run(
                _build_shell_command(command),
                cwd=cwd,
                text=True,
                capture_output=True,
                timeout=timeout_sec,
                check=False,
            )
        except subprocess.TimeoutExpired as exc:
            # FIX: previously a timed-out command raised TimeoutExpired out of
            # the tool call; report the timeout as a structured result instead.
            partial = exc.stdout
            if isinstance(partial, bytes):
                partial = partial.decode('utf-8', errors='replace')
            payload = {
                'command': command,
                'cwd': cwd,
                'returncode': None,
                'stdout': partial or '',
                'stderr': f'TimeoutExpired: 超过 {timeout_sec} 秒',
            }
            return json.dumps(payload, ensure_ascii=False, indent=2)

        payload = {
            'command': command,
            'cwd': cwd,
            'returncode': completed.returncode,
            'stdout': completed.stdout,
            'stderr': completed.stderr,
        }
        return json.dumps(payload, ensure_ascii=False, indent=2)
|
||||
104
agent_runtime/web_fetch_tool.py
Normal file
104
agent_runtime/web_fetch_tool.py
Normal file
@@ -0,0 +1,104 @@
|
||||
import time
|
||||
import random
|
||||
from typing import Tuple, Union
|
||||
import requests
|
||||
from requests import Response
|
||||
from requests.exceptions import SSLError, RequestException
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
from qwen_agent.tools.base import BaseTool, register_tool
|
||||
|
||||
# Maximum characters of extracted body text returned by default.
DEFAULT_MAX_CHARS = 10000


# Browser-like request headers; some sites (e.g. GitHub) answer bare clients with 429.
COMMON_HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Connection': 'keep-alive'
}
|
||||
|
||||
def _normalize_text(text: str) -> str:
|
||||
lines = [line.strip() for line in text.splitlines()]
|
||||
lines = [line for line in lines if line]
|
||||
return '\n'.join(lines)
|
||||
|
||||
def _fetch_page(url: str, timeout: int = 30, retries: int = 2) -> Tuple[Union[Response, str], bool]:
    """Fetch a URL with retries and browser-like headers.

    Returns (Response, insecure_flag) on success, or (error-message string,
    False) on failure. insecure_flag is True when the page was fetched with
    TLS certificate verification disabled after an SSLError.
    """
    for i in range(retries + 1):
        try:
            if i > 0:
                # Back off before each retry with a little jitter.
                time.sleep(2 + random.uniform(1, 2) * i)

            response = requests.get(url, headers=COMMON_HEADERS, timeout=timeout, verify=True)

            # 429 (rate limited): retry if attempts remain, otherwise give up
            # with a message instead of raising.
            if response.status_code == 429:
                if i < retries: continue
                return f"错误:目标网站限制了请求频率 (429)。请稍后再试,禁止读取本地无关文件。", False

            response.raise_for_status()
            return response, False

        except SSLError:
            # Certificate problem: retry once without verification and flag
            # the result as insecure.
            try:
                response = requests.get(url, headers=COMMON_HEADERS, timeout=timeout, verify=False)
                response.raise_for_status()
                return response, True
            except Exception as e:
                return f"SSL 错误且备选方案失败: {str(e)}", False
        except RequestException as e:
            if i < retries: continue
            return f"网络抓取失败: {str(e)}", False

    return "未知抓取错误", False
|
||||
|
||||
def _extract_page_text(html: str, max_chars: int) -> Tuple[str, str]:
    """Parse HTML, drop boilerplate tags, return (title, truncated body text)."""
    soup = BeautifulSoup(html, 'html.parser')
    for noise in soup(['script', 'style', 'noscript', 'header', 'footer', 'nav']):
        noise.decompose()

    if soup.title and soup.title.string:
        title = soup.title.string.strip()
    else:
        title = '无标题'

    body = _normalize_text(soup.get_text(separator='\n'))
    return title, body[:max_chars]
|
||||
|
||||
@register_tool('web_fetch', allow_overwrite=True)
class WebFetchTool(BaseTool):
    """Fetch a web page and return its readable text content."""

    description = '抓取网页正文并返回可读文本。'
    parameters = {
        'type': 'object',
        'properties': {
            'url': {'type': 'string', 'description': '网页链接'},
            'max_chars': {'type': 'integer', 'description': '返回最大字符数', 'default': DEFAULT_MAX_CHARS},
        },
        'required': ['url'],
    }

    def call(self, params: Union[str, dict], **kwargs) -> str:
        params = self._verify_json_format_args(params)
        url = params['url'].strip()
        limit = int(params.get('max_chars', DEFAULT_MAX_CHARS))

        fetched, insecure = _fetch_page(url)
        # _fetch_page returns a plain string only in the failure case.
        if isinstance(fetched, str):
            return fetched

        title, body = _extract_page_text(fetched.text, limit)
        warning = '(注意:使用了非安全连接)\n' if insecure else ''
        return f'标题: {title}\n链接: {url}\n{warning}\n{body}'
|
||||
|
||||
@register_tool('web_extractor', allow_overwrite=True)
class WebExtractorTool(BaseTool):
    """Extract the main text of a single web page (delegates to WebFetchTool)."""

    description = '提取单个网页正文。'
    parameters = {
        'type': 'object',
        'properties': {
            'url': {'type': 'string', 'description': '网页链接'},
            'max_chars': {'type': 'integer', 'description': '返回最大字符数', 'default': DEFAULT_MAX_CHARS},
        },
        'required': ['url'],
    }

    def call(self, params: Union[str, dict], **kwargs) -> str:
        # This class exists only to expose the same behaviour under a
        # separately registered tool name; all work happens in WebFetchTool.
        return WebFetchTool(self.cfg).call(params, **kwargs)
|
||||
170
agent_runtime/workflow_tools.py
Normal file
170
agent_runtime/workflow_tools.py
Normal file
@@ -0,0 +1,170 @@
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Union
|
||||
|
||||
from qwen_agent.tools.base import BaseTool, register_tool
|
||||
|
||||
# Repository root: one directory above this package.
ROOT_DIR = Path(__file__).resolve().parents[1]
# All runtime artifacts live under a hidden .tmp directory at the repo root.
DATA_DIR = ROOT_DIR / '.tmp' / 'super_agent_data'
MEMORY_FILE = DATA_DIR / 'memory.json'  # key/value long-term memory store (JSON object)
TODO_DIR = DATA_DIR / 'todos'  # one markdown checklist file per todo_write call
TASK_FILE = DATA_DIR / 'tasks.jsonl'  # append-only task event log, one JSON object per line
|
||||
|
||||
|
||||
def _build_shell_command(command: str) -> list[str]:
|
||||
if os.name == 'nt':
|
||||
return ['powershell.exe', '-NoProfile', '-Command', command]
|
||||
return ['bash', '-lc', command]
|
||||
|
||||
|
||||
def _ensure_data_dirs() -> None:
    """Create the data directories and seed an empty memory file if absent."""
    for directory in (DATA_DIR, TODO_DIR):
        directory.mkdir(parents=True, exist_ok=True)
    if not MEMORY_FILE.exists():
        # Seed with an empty JSON object so readers never see invalid JSON.
        MEMORY_FILE.write_text('{}', encoding='utf-8')
|
||||
|
||||
|
||||
def _load_memory() -> Dict[str, Any]:
    """Return the whole memory store as a dict, creating storage on first use."""
    _ensure_data_dirs()
    raw = MEMORY_FILE.read_text(encoding='utf-8')
    return json.loads(raw)
|
||||
|
||||
|
||||
def _save_memory(data: Dict[str, Any]) -> None:
    """Overwrite the memory store with *data*, serialized as pretty-printed JSON."""
    _ensure_data_dirs()
    serialized = json.dumps(data, ensure_ascii=False, indent=2)
    MEMORY_FILE.write_text(serialized, encoding='utf-8')
|
||||
|
||||
|
||||
@register_tool('save_memory', allow_overwrite=True)
class SaveMemoryTool(BaseTool):
    """Persist one long-term memory entry, overwriting any existing entry with the same key."""

    description = '保存一条长期记忆,按 key 覆盖写入。'
    parameters = {
        'type': 'object',
        'properties': {
            'key': {
                'type': 'string',
                'description': '记忆键名',
            },
            'value': {
                'type': 'string',
                'description': '记忆内容',
            },
        },
        'required': ['key', 'value'],
    }

    def call(self, params: Union[str, dict], **kwargs) -> str:
        params = self._verify_json_format_args(params)
        memory_key = params['key'].strip()
        if not memory_key:
            raise ValueError('key 不能为空')
        store = _load_memory()
        store[memory_key] = params['value']
        _save_memory(store)
        return f'已保存记忆: {memory_key}'
|
||||
|
||||
|
||||
@register_tool('read_memory', allow_overwrite=True)
class ReadMemoryTool(BaseTool):
    """Read long-term memory: a single key, or the whole store when no key is given."""

    description = '读取长期记忆,支持读取单个 key 或全部。'
    parameters = {
        'type': 'object',
        'properties': {
            'key': {
                'type': 'string',
                'description': '可选,不传则返回全部记忆',
            },
        },
        'required': [],
    }

    def call(self, params: Union[str, dict], **kwargs) -> str:
        params = self._verify_json_format_args(params)
        store = _load_memory()
        wanted = params.get('key')
        if wanted:
            # Unknown keys are reported as null rather than raising.
            return json.dumps({wanted: store.get(wanted)}, ensure_ascii=False, indent=2)
        return json.dumps(store, ensure_ascii=False, indent=2)
|
||||
|
||||
|
||||
@register_tool('todo_write', allow_overwrite=True)
class TodoWriteTool(BaseTool):
    """Write a timestamped markdown checklist file under the todo directory."""

    description = '写入任务清单文件。'
    parameters = {
        'type': 'object',
        'properties': {
            'title': {
                'type': 'string',
                'description': '清单标题',
            },
            'items': {
                'type': 'array',
                'items': {
                    'type': 'string',
                },
                'description': '任务项数组',
            },
        },
        'required': ['title', 'items'],
    }

    def call(self, params: Union[str, dict], **kwargs) -> str:
        params = self._verify_json_format_args(params)
        _ensure_data_dirs()
        stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        # Keep only alphanumerics in the filename; every other char becomes '_'.
        slug = ''.join(ch if ch.isalnum() else '_' for ch in params['title'])[:40]
        destination = TODO_DIR / f'{stamp}_{slug}.md'
        content = [f'# {params["title"]}', '']
        content.extend(f'- [ ] {entry}' for entry in params['items'])
        destination.write_text('\n'.join(content), encoding='utf-8')
        return f'任务清单已写入: {destination}'
|
||||
|
||||
|
||||
@register_tool('task', allow_overwrite=True)
class TaskTool(BaseTool):
    """Record a task event (and optionally run a shell command) into the task log.

    SECURITY NOTE: ``command`` is executed through the system shell with no
    sandboxing — only expose this tool to trusted callers.
    """

    description = '登记任务并可选执行命令,返回执行结果。'
    parameters = {
        'type': 'object',
        'properties': {
            'task_name': {
                'type': 'string',
                'description': '任务名称',
            },
            'notes': {
                'type': 'string',
                'description': '任务说明',
            },
            'command': {
                'type': 'string',
                'description': '可选,执行命令',
            },
            'timeout': {
                'type': 'integer',
                'description': '可选,命令执行超时秒数,不传则不限时',
            },
        },
        'required': ['task_name'],
    }

    def call(self, params: Union[str, dict], **kwargs) -> str:
        params = self._verify_json_format_args(params)
        _ensure_data_dirs()
        event = {
            'time': datetime.now().isoformat(timespec='seconds'),
            'task_name': params['task_name'],
            'notes': params.get('notes', ''),
            'command': params.get('command', ''),
        }
        result = None
        command = params.get('command')
        if command:
            # Optional timeout; default None preserves the legacy unbounded
            # behavior, but callers can now prevent a hung command from
            # blocking the agent forever.
            timeout = params.get('timeout')
            try:
                run = subprocess.run(
                    _build_shell_command(command),
                    text=True,
                    capture_output=True,
                    check=False,
                    timeout=timeout,
                )
                result = {
                    'returncode': run.returncode,
                    'stdout': run.stdout,
                    'stderr': run.stderr,
                }
            except subprocess.TimeoutExpired as exc:
                # Surface the timeout in the result instead of crashing the tool.
                result = {
                    'returncode': None,
                    'stdout': exc.stdout or '',
                    'stderr': f'command timed out after {timeout}s',
                }
            event['result'] = result
        # Append-only JSONL log: one event object per line.
        with TASK_FILE.open('a', encoding='utf-8') as fp:
            fp.write(json.dumps(event, ensure_ascii=False) + '\n')
        payload = {'saved_to': str(TASK_FILE), 'task': event, 'command_result': result}
        return json.dumps(payload, ensure_ascii=False, indent=2)
|
||||
43
agent_runtime/write_tools.py
Normal file
43
agent_runtime/write_tools.py
Normal file
@@ -0,0 +1,43 @@
|
||||
import os
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import Union
|
||||
from qwen_agent.tools.base import BaseTool, register_tool
|
||||
|
||||
def _split_items(raw: str) -> list[str]:
|
||||
return [item.strip() for item in raw.split(';') if item.strip()]
|
||||
|
||||
def _resolve_write_roots() -> tuple[Path, ...]:
    """Read WRITEABLE_FS_ROOTS (';'-separated paths) into resolved Path objects."""
    configured = os.getenv('WRITEABLE_FS_ROOTS', '')
    roots: list[Path] = []
    # Split, strip, drop empties, then expand '~' and resolve each entry.
    for entry in configured.split(';'):
        entry = entry.strip()
        if entry:
            roots.append(Path(os.path.expanduser(entry)).resolve())
    return tuple(roots)
|
||||
|
||||
@register_tool('write_file', allow_overwrite=True)
class WriteFileTool(BaseTool):
    """File-writing tool restricted to a whitelist of root directories.

    The whitelist comes from the ``WRITEABLE_FS_ROOTS`` environment variable
    (';'-separated paths, resolved at call time). Any target outside every
    whitelisted root is rejected; an empty whitelist means deny everything.
    """

    description = '文件写入工具。只要路径在白名单内,即可直接创建或覆盖文件。'
    parameters = {
        'type': 'object',
        'properties': {
            'path': {'type': 'string', 'description': '目标绝对路径'},
            'content': {'type': 'string', 'description': '要写入的完整内容'}
        },
        'required': ['path', 'content'],
    }

    def call(self, params: Union[str, dict], **kwargs) -> str:
        params = self._verify_json_format_args(params)
        # Resolve '~', '..' and symlinks so the whitelist check below
        # cannot be bypassed with a crafted path.
        target = Path(os.path.expanduser(str(params['path']))).resolve()
        content = str(params.get('content', ''))

        # Core defense: only write inside an explicitly whitelisted root.
        roots = _resolve_write_roots()
        if not roots:
            # Default-deny when no whitelist is configured, with an explicit
            # reason instead of an empty "allowed range" list.
            return "拒绝写入:路径不在白名单内。允许范围:(未配置 WRITEABLE_FS_ROOTS)"
        if not any(target.is_relative_to(root) for root in roots):
            allowed = ", ".join(str(r) for r in roots)
            return f"拒绝写入:路径不在白名单内。允许范围:{allowed}"

        try:
            target.parent.mkdir(parents=True, exist_ok=True)
            with open(target, 'w', encoding='utf-8') as f:
                f.write(content)
            return f"✅ 成功:内容已保存至 {target}"
        except Exception as e:
            return f"写入失败:{str(e)}"
|
||||
Reference in New Issue
Block a user