# ai_tools.py
# Lightweight tool library (Baidu search / files / PDF / shell commands / HTTP) | 2025-09

import os
import subprocess
import sys
import trafilatura
import requests
from pydantic import BaseModel, Field, model_validator
from datetime import datetime
from urllib.parse import quote
from typing import Dict, List, Optional
import re
from io import BytesIO, StringIO

from bs4 import BeautifulSoup
from langchain_core.tools import tool

# ================
# 🖼️ PDF Chinese-font support
# ================
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.platypus import Paragraph, SimpleDocTemplate, Spacer

# Register the SimHei TrueType font so PDFs can render Chinese text.
# Best-effort: if 'simhei.ttf' is not found, registration is silently skipped,
# and create_pdf() will later fail when the 'SimHei' font name is referenced
# -- NOTE(review): confirm whether a fallback font is desired here.
try:
    pdfmetrics.registerFont(TTFont('SimHei', 'simhei.ttf'))
except Exception:
    pass

# ================
# 🔐 Security configuration
# ================
# Whitelist of base commands that run_shell_command may execute.
ALLOWED_COMMANDS = ["ls", "cat", "echo", "pwd", "python", "pip", "curl", "wget", "date", "uname"]
# Regexes searched against the (lower-cased) full command string; any hit
# marks the command as dangerous and blocks execution.
DANGEROUS_PATTERNS = [
    r"rm\s+-rf\s+/",                                # recursive delete from root
    r"\bdd\b",                                      # raw device writes
    r"\bshutdown\b",
    r"\breboot\b",
    # fork bomb `:(){ :|:& };:` -- BUG FIX: the original pattern had `$$`
    # (two end-of-string anchors) where `\(\)` belongs, so it never matched.
    r":\(\)\s*\{\s*:\s*\|\s*:\s*&\s*\}\s*;\s*:",
    r"wget.*-O\s+/etc/",                            # overwrite system config via download
    r"curl.*-o\s+/etc/",
]


# ================
# 🛠️ Tool argument schemas
# ================

class IsSafeCommandInput(BaseModel):
    """Argument schema for the `is_safe_command` tool."""
    # The shell command string to vet against the whitelist/blacklist.
    command: str = Field(..., description="要检查的 Shell 命令，例如 'rm -rf /'")


class RunShellCommandInput(BaseModel):
    """Argument schema for the `run_shell_command` tool."""
    command: str = Field(..., description="要执行的 Shell 命令")
    # Seconds before subprocess.run() aborts the command.
    timeout: int = Field(30, description="命令执行超时时间（秒），默认 30")


class ExecutePythonCodeInput(BaseModel):
    """Argument schema for the `execute_python_code` tool."""
    # Python source passed verbatim to exec() -- not sandboxed.
    code: str = Field(..., description="要执行的 Python 代码片段")


class WriteToFileInput(BaseModel):
    """Argument schema for `write_to_file`, tolerant of malformed LLM payloads."""

    filename: str = Field(..., description="要写入的文件路径，例如 'docs/report.txt'")
    content: Optional[str] = Field(None, description="要写入的文本内容，缺省则写入空文件")

    @model_validator(mode="before")
    @classmethod
    def normalize_payload(cls, value):
        """Normalize several malformed input shapes before validation.

        Handles:
        1) ``filename`` holding a stringified JSON object:
           '{"filename": "a.txt", "content": "..."}'
        2) alias fields ``file`` / ``text`` instead of ``filename`` / ``content``
        3) the whole payload passed as a single JSON string

        Returns the input unchanged when nothing applies or parsing fails.
        """
        import json as _json

        def _parse_json_object(s: str):
            # Decode *s* only when it looks like a JSON object; None otherwise.
            s = s.strip()
            if s.startswith("{") and s.endswith("}"):
                try:
                    return _json.loads(s)
                except Exception:
                    return None
            return None

        try:
            if isinstance(value, dict):
                # Work on a shallow copy -- the original mutated the caller's
                # dict in place when filling in alias fields.
                value = dict(value)
                fn = value.get("filename")
                if isinstance(fn, str):
                    inner = _parse_json_object(fn)
                    if isinstance(inner, dict):
                        # Fields from the embedded JSON win over the outer dict.
                        value = {**value, **inner}
                # Alias compatibility: file -> filename, text -> content.
                if "file" in value and "filename" not in value:
                    value["filename"] = value["file"]
                if "text" in value and "content" not in value:
                    value["content"] = value["text"]
                return value
            if isinstance(value, str):
                parsed = _parse_json_object(value)
                if parsed is not None:
                    return parsed
            return value
        except Exception:
            # Defensive: normalization must never block validation itself.
            return value


class ReadFromFileInput(BaseModel):
    """Argument schema for the `read_from_file` tool."""
    # Path of the UTF-8 text file to read.
    filename: str = Field(..., description="要读取的文件路径")


class CreatePdfInput(BaseModel):
    """Argument schema for the `create_pdf` tool."""
    content: str = Field(..., description="PDF 正文内容，支持换行")
    title: str = Field("文档", description="PDF 文档标题")
    filename: str = Field("output.pdf", description="生成的 PDF 文件名，例如 'report.pdf'")


class SearchWebInput(BaseModel):
    """Argument schema for the `search_web` (Baidu) tool."""
    query: str = Field(..., description="要在百度上搜索的关键词")
    # Upper bound on returned results; parsing stops once reached.
    num_results: int = Field(5, description="返回结果数量，默认 5 条")


class CallApiInput(BaseModel):
    """Argument schema for the generic `call_api` HTTP tool."""
    url: str = Field(..., description="要调用的 API 地址")
    method: str = Field("GET", description="HTTP 方法，如 GET、POST、PUT、DELETE")
    # Sent as the JSON body via requests' `json=` parameter.
    data: Optional[Dict] = Field(None, description="POST/PUT 请求的 JSON 数据")
    headers: Optional[Dict] = Field(None, description="自定义请求头")


class CrawlUrlInput(BaseModel):
    """Argument schema for the `crawl_url` content-extraction tool."""
    url: str = Field(..., description="要抓取的网页 URL")
    # Truncation limit applied to the fetched HTML before extraction.
    max_bytes: int = Field(200_000, description="最大抓取内容大小（字节），默认 200KB")


# ================
# 🛠️ Common tools (with args_schema)
# ================

@tool(args_schema=IsSafeCommandInput)
def is_safe_command(command: str) -> bool:
    """Check whether a shell command is safe to execute (检查命令是否安全).

    Safe means: no dangerous pattern matches, and the base command is on
    the ALLOWED_COMMANDS whitelist. Arguments are not individually vetted.
    """
    # Dangerous-pattern regexes are written for lower-case input.
    cmd = command.strip().lower()
    if any(re.search(pattern, cmd) for pattern in DANGEROUS_PATTERNS):
        return False
    # BUG FIX: an empty/whitespace-only command used to crash with
    # IndexError on `cmd.split()[0]`; treat it as unsafe instead.
    parts = cmd.split()
    if not parts:
        return False
    # Whitelist check on the base executable only.
    return parts[0] in ALLOWED_COMMANDS


@tool
def get_current_time() -> str:
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    now = datetime.now()
    return now.strftime("%Y-%m-%d %H:%M:%S")


@tool
def get_weather(city: str = "北京") -> str:
    """Mock weather lookup for *city* (replace with a real API if needed)."""
    report = f"模拟天气：{city} 当前晴，25°C，湿度 60%"
    return report


@tool(args_schema=RunShellCommandInput)
def run_shell_command(command: str, timeout: int = 30) -> Dict:
    """Execute a shell command after a safety check (执行 Shell 命令，带安全检查).

    Returns {"returncode", "stdout", "stderr", "command"} on completion,
    or {"error", "command"} when blocked, timed out, or failed to launch.
    """
    # BUG FIX: `is_safe_command` is a LangChain tool object, not a plain
    # function; calling it directly raises on recent langchain_core.
    # Prefer the wrapped callable (`.func`) when available.
    checker = getattr(is_safe_command, "func", is_safe_command)
    if not checker(command):
        return {"error": "命令不安全或不在白名单中", "command": command}
    try:
        # SECURITY: shell=True executes through the shell; the whitelist and
        # pattern scan above are the only mitigations against injection.
        result = subprocess.run(command, shell=True, capture_output=True, text=True, timeout=timeout)
        return {
            "returncode": result.returncode,
            "stdout": result.stdout,
            "stderr": result.stderr,
            "command": command
        }
    except subprocess.TimeoutExpired:
        return {"error": "命令执行超时", "command": command}
    except Exception as e:
        return {"error": str(e), "command": command}


@tool(args_schema=ExecutePythonCodeInput)
def execute_python_code(code: str) -> Dict:
    """Execute a Python snippet and capture its stdout (执行 Python 代码).

    SECURITY: this runs exec() with empty globals -- builtins are still
    reachable, so it is NOT a real sandbox; only feed it trusted code.
    Returns {"success": True, "output": ...} or {"success": False, "error": ...}.
    """
    old_stdout = sys.stdout
    sys.stdout = captured_output = StringIO()
    try:
        exec(code, {})
        return {"success": True, "output": captured_output.getvalue()}
    except Exception as e:
        return {"success": False, "error": str(e)}
    finally:
        # BUG FIX: the original restored sys.stdout only on success, leaving
        # it hijacked for the rest of the process after any exception.
        sys.stdout = old_stdout


@tool(args_schema=WriteToFileInput)
def write_to_file(filename: str, content: Optional[str] = None) -> Dict:
    """Write UTF-8 text to *filename*, creating parent directories as needed.

    Returns {"success": True, "path": <abs path>} or {"error": ...}.
    """
    try:
        # Tolerate the whole JSON payload being stuffed into `filename`.
        if isinstance(filename, str):
            s = filename.strip()
            if s.startswith('{') and s.endswith('}'):
                import json as _json
                try:
                    obj = _json.loads(s)
                    filename = obj.get('filename', 'output.txt')
                    content = obj.get('content', content)
                except Exception:
                    pass

        filename = filename or 'output.txt'
        # BUG FIX: the original sanitizer also replaced '/' and '\\', which
        # destroyed path separators, flattened 'docs/report.txt' into
        # 'docs_report.txt', and made the makedirs() call below dead code.
        # Sanitize only the basename; keep the directory part intact.
        # NOTE(review): directory components are passed through unvetted --
        # confirm path traversal is acceptable for this agent's environment.
        base_dir = os.path.dirname(filename)
        name_only = re.sub(r'[<>:"/\\|?*\x00-\x1f]', '_', os.path.basename(filename)) or 'output.txt'
        # Truncate overly long basenames to avoid ENAMETOOLONG.
        if len(name_only) > 120:
            root, ext = os.path.splitext(name_only)
            name_only = (root[:100] + '_') + ext[:10]
        filename = os.path.join(base_dir, name_only) if base_dir else name_only

        os.makedirs(os.path.dirname(filename) or '.', exist_ok=True)
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(content or "")
        return {"success": True, "path": os.path.abspath(filename)}
    except Exception as e:
        return {"error": str(e)}


@tool(args_schema=ReadFromFileInput)
def read_from_file(filename: str) -> Dict:
    """Read a UTF-8 text file and return {"content": ...} or {"error": ...}."""
    try:
        with open(filename, 'r', encoding='utf-8') as fh:
            return {"content": fh.read()}
    except Exception as exc:
        return {"error": str(exc)}


@tool(args_schema=CreatePdfInput)
def create_pdf(content: str, title: str = "文档", filename: str = "output.pdf") -> Dict:
    """Create a Chinese-capable PDF from plain text (创建支持中文的 PDF).

    Relies on the 'SimHei' font registered at import time; if registration
    failed, doc.build() raises and the error is returned.
    Returns {"success", "path", "size"} or {"error": ...}.
    """
    try:
        # BUG FIX: sanitize only the basename -- the original regex also
        # replaced '/' and '\\', flattening paths and making the
        # dirname()/makedirs() branch below dead code.
        base_dir = os.path.dirname(filename)
        name_only = re.sub(r'[<>:"/\\|?*\x00-\x1f]', '_', os.path.basename(filename)) or 'output.pdf'
        filename = os.path.join(base_dir, name_only) if base_dir else name_only
        os.makedirs(os.path.dirname(filename) or '.', exist_ok=True)

        buffer = BytesIO()
        doc = SimpleDocTemplate(buffer, pagesize=A4)
        styles = getSampleStyleSheet()
        chinese_style = ParagraphStyle(
            'chinese', parent=styles['Normal'],
            fontName='SimHei', fontSize=12, leading=16, spaceAfter=12
        )
        # NOTE(review): title/paragraph text is injected into Paragraph's
        # XML-like markup unescaped; '&' or '<' in input will raise -- confirm
        # whether inputs should be escaped.
        story = [Paragraph(f"<b>{title}</b>", chinese_style), Spacer(1, 12)]
        for para in content.split('\n'):
            if para.strip():
                story.append(Paragraph(para.strip(), chinese_style))
            story.append(Spacer(1, 6))
        doc.build(story)

        # Render into memory first, then persist to disk.
        with open(filename, 'wb') as f:
            f.write(buffer.getvalue())
        return {
            "success": True,
            "path": os.path.abspath(filename),
            "size": os.path.getsize(filename)
        }
    except Exception as e:
        return {"error": f"PDF 生成失败: {str(e)}"}


@tool(args_schema=SearchWebInput)
def search_web(query: str, num_results: int = 5) -> List[Dict]:
    """Search Baidu and return up to *num_results* title/url/snippet dicts.

    Falls back to [{"url": <search url>}] when nothing could be parsed,
    and [{"error": ...}] on network/parse failure.
    """
    try:
        search_url = f"https://www.baidu.com/s?wd={quote(query)}"
        resp = requests.get(search_url, headers={"User-Agent": "Mozilla/5.0"}, timeout=10)
        resp.raise_for_status()

        page = BeautifulSoup(resp.text, 'lxml')
        results: List[Dict] = []
        # Result headings appear under two heading classes depending on layout.
        for heading in page.select('h3.t, h3.c-title'):
            link = heading.find('a')
            if link is None:
                continue
            sibling = heading.find_next_sibling('div')
            results.append({
                "title": link.get_text(strip=True),
                "url": link.get('href'),
                "snippet": sibling.get_text(strip=True) if sibling else '',
            })
            if len(results) >= num_results:
                break
        return results if results else [{"url": search_url}]
    except Exception as exc:
        return [{"error": str(exc)}]


@tool(args_schema=CallApiInput)
def call_api(url: str, method: str = "GET", data: Dict = None, headers: Dict = None) -> Dict:
    """Call an arbitrary HTTP API (调用任意 HTTP API).

    Returns {"status_code", "json", "text"}; "json" is None unless the
    response declares a JSON content-type AND parses cleanly.
    On transport failure returns {"error": ...}.
    """
    try:
        response = requests.request(
            method=method.upper(),
            url=url,
            json=data,           # serialized as the JSON request body
            headers=headers or {},
            timeout=15
        )
        # BUG FIX: a malformed body served with a JSON content-type made
        # response.json() raise, discarding status_code and text entirely.
        parsed = None
        if 'application/json' in response.headers.get('content-type', ''):
            try:
                parsed = response.json()
            except ValueError:
                parsed = None
        return {
            "status_code": response.status_code,
            "json": parsed,
            "text": response.text
        }
    except Exception as e:
        return {"error": str(e)}


@tool(args_schema=CrawlUrlInput)
def crawl_url(url: str, max_bytes: int = 200_000) -> dict:
    """Fetch a web page and extract its main text via trafilatura
    (drops ads/navigation/comments, keeps tables and the core article body).

    Returns {"success": True, "url", "title", "text"} on success,
    {"success": False, "error": ...} otherwise.
    """
    try:
        browser_headers = {
            "User-Agent": (
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                "(KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
            )
        }
        resp = requests.get(url, headers=browser_headers, timeout=15)
        resp.raise_for_status()

        # Cap page size before extraction.
        # NOTE(review): this counts characters of the decoded text, not
        # bytes, despite the parameter name -- confirm intent.
        html_content = resp.text[:max_bytes]

        extracted = trafilatura.extract(
            html_content,
            no_fallback=False,
            include_comments=False,
            include_tables=True,
            include_formatting=False,
            output_format='txt',
        )
        if not extracted:
            return {"success": False, "error": "无法提取核心内容，可能是动态页面或结构复杂"}

        # Prefer the page's declared title; fall back to the last URL segment.
        meta = trafilatura.metadata.extract_metadata(html_content)
        if meta and meta.title:
            page_title = meta.title
        else:
            page_title = resp.url.split("/")[-1].split("?")[0] or "未命名页面"

        return {
            "success": True,
            "url": url,
            "title": page_title,
            "text": extracted.strip()
        }
    except Exception as exc:
        return {"success": False, "error": str(exc)}