from fastmcp import FastMCP
import asyncio
from fastmcp import Client

from tavily import AsyncTavilyClient
from typing import Dict, Any, List
import json
import os

from openai import OpenAI

# Async Tavily client; reads the API key from the TAVILY_API_KEY env var.
tavily_client = AsyncTavilyClient(api_key=os.getenv("TAVILY_API_KEY"))

# JSONL file that accumulates one training sample (full chat history) per conversation.
TRAINING_DATA_FILE = 'company_research_agent.jsonl'

# OpenAI-compatible client pointed at the Gitee AI gateway (not api.openai.com).
client = OpenAI(
    base_url="https://ai.gitee.com/v1",
    api_key=os.getenv("OPENAI_API_KEY"), 
    default_headers={"X-Failover-Enabled":"true"},
)

# --- Tool definitions: in-process FastMCP server exposing the agent's tools ---
mcp = FastMCP('demo.mcp')

@mcp.tool()
async def crawl_company_website(url: str, max_chars: int = 1200) -> Dict[str, Any]:
    """Crawl a company website with Tavily and return per-page summaries.

    Args:
        url: Root URL of the company website, e.g. "https://example.com".
            Must be a valid HTTP(S) link.
        max_chars: Maximum number of characters of raw page content kept
            before truncation (default 1200, the original hard-coded limit).

    Returns:
        Dict[str, Any]: Mapping from page URL to an object with keys
        'summary' (whitespace-collapsed, possibly truncated text) and
        'source' (always 'company_website'). Example:
        {
            "https://example.com/about": {
                "summary": "关于我们...公司成立于2020年...",
                "source": "company_website"
            }
        }
        On failure returns {"error": <message>, "message": ...} instead.
        NOTE: the returned key is 'summary', not 'raw_content'; the old
        docstring advertised a key the code never produced.
    """
    try:
        site_extraction = await tavily_client.crawl(
            url=url,
            instructions="Find any pages that will help us understand the company's business, products, services, and any other relevant information.",
            max_depth=1,
            max_breadth=10,
            extract_depth="basic"
        )

        site_scrape: Dict[str, Any] = {}
        for item in site_extraction.get("results", []):
            raw = item.get("raw_content")
            if not raw:
                # Skip pages Tavily could not extract content for.
                continue
            page_url = item.get("url", url)

            # Collapse whitespace and truncate so the LLM is not fed
            # arbitrarily long page dumps.
            cleaned = " ".join(raw[:max_chars].split())
            if len(raw) > max_chars:
                cleaned += " [...]（内容已截断）"
            site_scrape[page_url] = {
                'summary': cleaned,
                'source': 'company_website'
            }
        return site_scrape

    except Exception as e:
        # Best-effort tool: report the failure as data instead of raising,
        # so the agent loop can surface it to the model.
        return {
            "error": str(e),
            "message": "Failed to crawl the website."
        }

def clean_title(title: str) -> str:
    """Normalize a web page title: trim surrounding whitespace and delete
    any embedded newline / carriage-return characters."""
    stripped = title.strip()
    return stripped.translate(str.maketrans("", "", "\n\r"))

@mcp.tool()
async def search_documents(
    queries: List[str],
    topic: str = "general",
    max_results: int = 5,
    search_depth: str = "basic",
    include_raw_content: bool = False
    ) -> Dict[str, Any]:
    """Run several Tavily searches in parallel and merge the cleaned results.

    Args:
        queries: List of query strings, e.g. ["OpenAI 公司简介", "OpenAI 财务状况"].
        topic: Search category; one of "general", "news" or "finance".
        max_results: Maximum results returned per query (default 5).
        search_depth: "basic" or "advanced" (default "basic").
        include_raw_content: Whether to request raw page content.

    Returns:
        Dict keyed by result URL; each value carries title, content, query,
        url, source and score fields. If no query succeeds, returns
        {"error": ...} instead. Later queries overwrite earlier ones on
        duplicate URLs.
    """
    if not queries:
        return {"error": "No queries provided."}

    search_params = {
        "search_depth": search_depth,
        "include_raw_content": include_raw_content,
        "max_results": max_results,
        "topic": topic
    }

    search_tasks = [
        tavily_client.search(query, **search_params)
        for query in queries
    ]

    # BUG FIX: return_exceptions=True makes the search best-effort — one
    # failed query no longer discards the results of every other query.
    results = await asyncio.gather(*search_tasks, return_exceptions=True)

    merged_docs: Dict[str, Any] = {}
    errors: List[str] = []
    for query, result in zip(queries, results):
        if isinstance(result, BaseException):
            errors.append(f"'{query}': {result}")
            continue
        for item in result.get("results", []):
            # Drop entries without usable content or a URL.
            if not item.get("content") or not item.get("url"):
                continue

            url = item.get("url")
            title = item.get("title", "")

            if title:
                title = clean_title(title)
                # A title that merely repeats the URL carries no information.
                if title.lower() == url.lower() or not title.strip():
                    title = ""

            merged_docs[url] = {
                "title": title,
                "content": item.get("content", ""),
                "query": query,
                "url": url,
                "source": "web_search",
                "score": item.get("score", 0.0)
            }

    # Preserve the old contract when everything failed: surface an error dict.
    if not merged_docs and errors:
        return {"error": f"Error during parallel search: {'; '.join(errors)}"}

    return merged_docs

# --- Client side: in-process FastMCP client wired to the server defined above ---
mcp_client = Client(mcp)
async def execute_tool(tool_name: str, tool_args: dict) -> str:
    """Invoke a FastMCP tool asynchronously and serialize its outcome.

    Returns a JSON string: {"result": <data>} on success,
    {"error": <message>} when the call raises.
    """
    print(f"\n[执行工具: {tool_name}] 参数: {tool_args}")
    async with mcp_client:
        try:
            call_result = await mcp_client.call_tool(tool_name, tool_args)
            # .data holds the tool's structured return value.
            payload = call_result.data
            print(f"✅ 工具执行完成，原始数据: {payload}")
            return json.dumps({"result": payload})
        except Exception as exc:
            print(f"❌ 工具执行错误: {exc}")
            return json.dumps({"error": str(exc)})

# --- JSON Schemas handed to the LLM (OpenAI function-calling format) ---
# Schema for the crawl_company_website tool; mirrors its FastMCP signature.
CRAWL_WEBSITE_SCHEMA = {
    "type": "function",
    "function": {
        "name": "crawl_company_website",
        "description": "使用 Tavily 爬取公司官网，提取相关页面内容，包括关于、产品、服务等信息。",
        "parameters": {
            "type": "object",
            "properties": {
                "url": {
                    "type": "string",
                    "description": "公司官网的根 URL，例如 \"https://example.com\"。必须是有效的 HTTP(S) 链接。",
                }
            },
            "required": ["url"]
        }
    }
}

# Schema for the search_documents tool; "queries" is the only required field.
SEARCH_DOCUMENTS_SCHEMA = {
    "type": "function",
    "function": {
        "name": "search_documents",
        "description": "使用 Tavily API 并行搜索多个查询，返回清洗后的网页搜索结果。",
        "parameters": {
            "type": "object",
            "properties": {
                "queries": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "查询字符串列表，例如 [\"OpenAI 公司简介\", \"OpenAI 财务状况\"]。"
                },
                "topic": {
                    "type": "string",
                    "enum": ["general", "news", "finance"],
                    "description": "搜索主题分类，只能是 \"general\"、\"news\" 或 \"finance\"。默认为 \"general\"。"
                },
                "max_results": {
                    "type": "integer",
                    "minimum": 1,
                    "maximum": 20,
                    "description": "每个查询返回的最大结果数。默认为 5。"
                },
                "search_depth": {
                    "type": "string",
                    "enum": ["basic", "advanced"],
                    "description": "搜索深度，选项包括 \"basic\" 或 \"advanced\"。默认为 \"basic\"。"
                },
                "include_raw_content": {
                    "type": "boolean",
                    "description": "是否包含原始网页内容。默认为 false。"
                }
            },
            "required": ["queries"]
        }
    }
}

# Tool list advertised to the model on every first LLM call.
AVAILABLE_TOOLS = [CRAWL_WEBSITE_SCHEMA, SEARCH_DOCUMENTS_SCHEMA]

def save_training_data(messages: List[Dict[str, Any]], filename: str):
    """Append one complete conversation history to a JSONL training file.

    Args:
        messages: Full chat history (system/user/assistant/tool dicts).
        filename: Path of the JSONL file to append to.

    Each call writes exactly one line of the form {"messages": [...]}.
    Failures are reported to stdout but never raised, so a failed save
    does not abort the batch run.
    """
    data = {"messages": messages}
    try:
        with open(filename, 'a', encoding='utf-8') as f:
            json_line = json.dumps(data, ensure_ascii=False)
            f.write(json_line + '\n')
        # BUG FIX: the success message printed the literal string
        # "(unknown)" instead of the destination file path.
        print(f"\n✅ 对话数据已成功保存到 {filename}\n{'='*50}")
    except Exception as e:
        print(f"❌ 保存训练数据失败: {e}")


async def handle_single_conversation(user_query: str):
    """Run the full agent flow for a single user query.

    Flow: (1) first LLM call decides whether a tool is needed,
    (2) every requested tool call is executed via FastMCP,
    (3) a second LLM call produces the final answer,
    (4) the whole conversation is appended to the training-data file.
    """
    print(f"\n--- 🚀 开始处理新的查询：'{user_query[:30]}...' ---")

    # Fresh conversation history for every query.
    messages = [
        {
            "role": "system",
            "content": f"你是一个善于使用工具的助手。如果用户的请求可以用提供的工具解决，请调用工具。可用的工具列表：\n{json.dumps(AVAILABLE_TOOLS, ensure_ascii=False)}"
        },
        {
            "role": "user",
            "content": user_query
        }
    ]

    print("--- 1. 第一次 LLM 调用：判断意图并选择工具 ---")

    response = client.chat.completions.create(
        model="DeepSeek-R1",
        messages=messages,
        tools=AVAILABLE_TOOLS, 
        tool_choice="auto",
        max_tokens=8192,
    )

    response_message = response.choices[0].message

    # Convert the pydantic message to a plain dict before storing it so the
    # history stays JSON-serializable for save_training_data().
    messages.append(response_message.model_dump())
    print(f"模型响应: {'（Tool Call）' if response_message.tool_calls else response_message.content}")

    if response_message.tool_calls:
        # BUG FIX: the OpenAI chat API requires one role="tool" message per
        # tool_call_id. Previously only tool_calls[0] was executed, which
        # makes the follow-up request invalid whenever the model emits
        # several tool calls. Execute and answer every requested call.
        for tool_call in response_message.tool_calls:
            tool_name = tool_call.function.name
            tool_args = json.loads(tool_call.function.arguments)

            print(f"🤖 模型选择调用工具：**{tool_name}**")

            tool_result_content = await execute_tool(tool_name, tool_args)

            # Feed the tool result back into the conversation history.
            messages.append(
                {
                    "tool_call_id": tool_call.id,
                    "role": "tool",
                    "name": tool_name,
                    "content": tool_result_content,
                }
            )

        # --- 2. Second LLM call: generate the final user-facing reply ---
        print("\n--- 2. 第二次 LLM 调用：生成最终回复 ---")

        final_response = client.chat.completions.create(
            model="DeepSeek-R1",
            messages=messages,
            max_tokens=8192,
        )

        final_response_message = final_response.choices[0].message
        final_content = final_response_message.content

        # Same serialization concern as above: store a plain dict.
        messages.append(final_response_message.model_dump())

        print(f"🎉 最终回复:\n{final_content}")

    else:
        # No tool requested; the model's direct reply is already in the history.
        print(f"模型未调用工具，直接回复:\n{response_message.content}")

    # Persist the conversation as one JSONL training sample.
    save_training_data(messages, TRAINING_DATA_FILE)

async def run_batch_conversations():
    """Run the three demo queries back to back, recording each conversation
    as one line of JSONL training data."""
    # One query per behavior we want the training data to capture.
    demo_queries = [
        # Sample 1: should trigger the crawl_company_website tool.
        "请爬取 https://www.metax-tech.com/ 的官网内容，帮我了解这家公司的基本信息",
        # Sample 2: should trigger the search_documents tool.
        "请搜索一下 沐曦公司简介和最近的新闻报道。",
        # Sample 3: should trigger no tool at all.
        "中国的首都在哪里",
    ]

    # Truncate the training file so every run starts from a clean slate.
    with open(TRAINING_DATA_FILE, 'w'):
        pass

    for user_query in demo_queries:
        await handle_single_conversation(user_query)
        # Brief pause between queries to stay under API rate limits.
        await asyncio.sleep(1)

# Recommended (and only) program entry point.
if __name__ == '__main__':
    # Run the batch-processing main coroutine.
    asyncio.run(run_batch_conversations())

# async def main():
#     client = Client(mcp)
#     async with client:
#         # 查看可用工具
#         tools = await client.list_tools()
#         print('可用工具:', tools)

#         # Tavily提取页面测试
#         crawl_result = await client.call_tool('crawl_company_website', {'url': 'https://www.metax-tech.com/'})
#         print("爬取结果：", crawl_result.structured_content)

#         # Tavily搜索测试
#         result = await client.call_tool('search_documents', {
#             'queries': ['沐曦公司简介', '沐曦财务状况'],
#         })
#         print("搜索结果：", json.dumps(result.structured_content, indent=2, ensure_ascii=False))

# if __name__ == '__main__':
#     asyncio.run(main())
