import asyncio
import json
import argparse
import os

from typing import List, Dict, Any
from test1 import MCPClient
import logging

# Default chat model; override with the OPENAI_MODEL environment variable.
DEFAULT_MODEL = os.environ.get("OPENAI_MODEL", "gpt-4")


class QuestionManager:
    """Plan answers to complex (travel) questions in three phases.

    1. Ask the LLM to decompose the user's question into simple sub-tasks.
    2. Run each sub-task through the MCP client's tool pipeline.
    3. Ask the LLM to merge the per-task summaries into a final plan and
       render it as an HTML page (``final_plan.html``).
    """

    # Prompt templates. These are runtime strings sent to the model and are
    # kept verbatim (Chinese).
    DECOMP_PROMPT_SYSTEM = (
        "将用户的复杂问题分解为多个简单步骤。每个步骤应该是一个明确的子问题或指令，可以单独回答或执行。以JSON数组格式返回这些步骤。"
        "只允许从以下类型中选择：当前时间、天气、ip地址、驾车路线、车票信息、美食信息、景点信息、酒店信息。"
        "不允许返回其他类型的延申子任务，也不允许生成总结类型的子任务。"
        "如果用户输入比较简短：明天去上海一日游怎么安排？，需要分解成：查询当地的ip信息、查询明天上海的天气、查询东莞到上海的列车信息、查询上海的酒店信息、使用关键词搜索查询上海的热门景点、使用关键词搜索查询上海的美食"
    )
    DECOMP_INSTRUCTION_TEMPLATE = "请将以下问题分解为多个步骤：\n\n{}"
    SUMMARY_PROMPT_SYSTEM = "你是一个帮助整合多步骤问题解答的助手。请基于提供的各步骤处理结果，生成一个全面、连贯且有逻辑的最终回答。确保包含所有重要信息，并以用户友好的方式展示。包含图片url等"

    def __init__(self, model: str = None):
        """Create a manager backed by a fresh MCPClient.

        Args:
            model: Chat model name; falls back to ``DEFAULT_MODEL``.
        """
        self.client = MCPClient()
        self.model = model or DEFAULT_MODEL
        self.results = []  # reserved for accumulated results

    async def initialize(self) -> bool:
        """Connect to all configured MCP servers.

        Returns:
            True on success. On failure, logs the error, releases any
            partially opened connections, and returns False.
        """
        try:
            await self.client.connect_all()
            return True
        except Exception:
            # Previously swallowed silently; log so connection failures
            # are diagnosable, then clean up half-open resources.
            logging.exception("Failed to connect to MCP servers")
            await self.client.cleanup()
            return False

    async def decompose_question(self, question: str) -> List[dict]:
        """Break a complex question into sub-questions and process each one.

        Returns:
            A list of dicts shaped ``{"sub_question", "tool_results",
            "summary"}``. If decomposition fails for any reason, the whole
            question is processed as a single step.
        """
        try:
            # Few-shot example appended to the system prompt (verbatim).
            example = (
                "【范例】\n"
                "用户问题：'明天我想去上海一日游，安排来回列车，价格，包括酒店信息，旅游的景点，到上海后之间的旅程依靠打车出行，安排个计划'\n"
                "分解结果：\n"
                "[查询当地的ip信息\n  '查询明天上海的天气',\n  '查询当前的时间',\n  '查询南京到上海的列车信息',\n  '查询上海的酒店信息',\n  '使用关键词搜索查询上海的热门景点',\n  '使用关键词搜索查询上海的美食',\n ]"
            )
            messages = [
                {"role": "system", "content": self.DECOMP_PROMPT_SYSTEM + '\n' + example},
                {"role": "user", "content": self.DECOMP_INSTRUCTION_TEMPLATE.format(question)}
            ]
            response = await self.client.openai.chat.completions.create(
                model=self.model,
                messages=messages,
                response_format={"type": "json_object"}
            )
            steps_data = json.loads(response.choices[0].message.content)
            # The model may answer either {"steps": [...]} or a bare JSON
            # array. Check the dict shape explicitly: the old bare
            # `"steps" in steps_data` would do a substring test if the
            # model ever returned a JSON string.
            if isinstance(steps_data, dict) and "steps" in steps_data:
                sub_questions = steps_data["steps"]
            elif isinstance(steps_data, list):
                sub_questions = steps_data
            else:
                sub_questions = [question]

            print(f"\n分解得到 {len(sub_questions)} 个子任务:")
            for i, sub in enumerate(sub_questions):
                print(f"  [{i + 1}] 子任务: {sub}")

        except Exception:
            # Decomposition is best-effort: log and fall back to treating
            # the original question as a single step.
            logging.exception("Question decomposition failed")
            sub_questions = [question]

        # Feed every sub-question through the MCP tool pipeline, collecting
        # streamed text as the summary and tool payloads separately.
        results = []
        for subq in sub_questions:
            sub_result = {"sub_question": subq, "tool_results": [], "summary": ""}
            content = ""
            try:
                async for chunk in self.client.process_query(subq):
                    if chunk["type"] == "content":
                        content += chunk["content"]
                    elif chunk["type"] == "tool_result":
                        sub_result["tool_results"].append(chunk["content"])
                sub_result["summary"] = content
            except Exception as e:
                # Record the failure in-band so one bad sub-task does not
                # abort the rest.
                sub_result["summary"] = f"处理失败: {str(e)}"
            results.append(sub_result)
        return results

    async def process_steps(self, steps: List[str]) -> Dict[str, Any]:
        """Process each step sequentially, echoing progress to stdout.

        Args:
            steps: Plain-text sub-questions/instructions to run.

        Returns:
            ``{"steps": [...]}`` where each entry records the step text,
            streamed contents, tool calls, tool results, and any errors.
        """
        results = []

        for i, step in enumerate(steps):
            print(f"\n[步骤 {i + 1}/{len(steps)}] {step}")
            step_result = {
                "step": step,
                "contents": [],
                "tool_calls": [],
                "tool_results": []
            }

            try:
                async for chunk in self.client.process_query(step):
                    if chunk["type"] == "content":
                        print(chunk["content"], end="", flush=True)
                        step_result["contents"].append(chunk["content"])
                    elif chunk["type"] == "tool_call":
                        tool_name = chunk["name"]
                        print(f"\n[调用工具: {tool_name}]", end="", flush=True)
                        step_result["tool_calls"].append({"name": tool_name})
                    elif chunk["type"] == "tool_result":
                        content = chunk["content"]
                        name = chunk["name"]
                        # Truncate long payloads for display only; the full
                        # content is still stored in step_result.
                        cont = str(content)
                        if len(cont) > 200:
                            display_content = cont[:200] + "...内容过长，已省略"
                        else:
                            display_content = cont
                        print(f"\n[工具返回: {name}]\n{display_content}", end="", flush=True)
                        step_result["tool_results"].append({"name": name, "content": content})
                    elif chunk["type"] in ("tool_error", "error"):
                        error_msg = chunk.get("error", chunk.get("content", "未知错误"))
                        print(f"\n[错误]: {error_msg}", end="", flush=True)
                        step_result["errors"] = step_result.get("errors", []) + [error_msg]

                print("\n")
                results.append(step_result)
            except Exception as e:
                # A failed step is recorded (not raised) so later steps run.
                print(f"处理步骤失败: {str(e)}")
                step_result["errors"] = step_result.get("errors", []) + [str(e)]
                results.append(step_result)

        return {"steps": results}

    async def generate_summary(self, processed_results: Dict[str, Any]) -> str:
        """Generate a final summary from all step results.

        Args:
            processed_results: The ``{"steps": [...]}`` dict produced by
                :meth:`process_steps`.

        Returns:
            The model's consolidated answer, or an error message string if
            the summarization call fails.
        """
        try:
            # Build one prompt containing every step's answer, tool output,
            # and errors.
            prompt = "基于以下各个步骤的处理结果，生成一个完整且连贯的最终回答：\n\n"

            for i, step_result in enumerate(processed_results["steps"]):
                prompt += f"步骤 {i + 1}: {step_result['step']}\n"

                content = "".join(step_result["contents"])
                prompt += f"回答: {content}\n"

                for tool_result in step_result["tool_results"]:
                    prompt += f"工具 '{tool_result['name']}' 返回: {tool_result['content']}\n"

                if "errors" in step_result and step_result["errors"]:
                    prompt += f"错误: {', '.join(step_result['errors'])}\n"

                prompt += "\n"

            messages = [
                {"role": "system", "content": self.SUMMARY_PROMPT_SYSTEM},
                {"role": "user", "content": prompt}
            ]

            response = await self.client.openai.chat.completions.create(
                model=self.model,
                messages=messages
            )

            return response.choices[0].message.content

        except Exception as e:
            # If summarization fails, return a simple error message instead
            # of propagating.
            return f"无法生成总结: {str(e)}"

    async def process_question(self, question: str, tool_decompose: int = 0) -> str:
        """Answer a question end-to-end: decompose, run tools, plan, render.

        Args:
            question: The user's original question.
            tool_decompose: Currently unused; kept for interface
                compatibility with existing callers.

        Returns:
            The final textual plan. Also writes ``final_plan.html``.
        """
        decomposed = await self.decompose_question(question)
        print(f"\n分解得到 {len(decomposed)} 个子任务:")
        for i, sub in enumerate(decomposed):
            print(f"  [{i + 1}] 子任务: {sub['sub_question']}")
            tool_results_str = str(sub['tool_results'])
            if len(tool_results_str) > 200:
                tool_results_str = tool_results_str[:200] + '...内容过长，已省略'
            print(f"      工具结果: {tool_results_str}")
            print(f"      总结: {sub['summary']}")
        print("======================================\n")

        # Drop sub-tasks that produced no tool output — they carry no
        # verifiable information for the final plan.
        decomposed = [sub for sub in decomposed if sub.get('tool_results')]

        # Aggregate every sub-task summary into one planning prompt.
        prompt = f"用户原始问题：{question}\n请根据以下子任务的工具结果和总结，根据时间为轴，生成最终的规划安排，必须包含图片的url，不得删去，方便可视化观看：\n"
        for i, sub in enumerate(decomposed):
            prompt += f"子任务{i + 1}: {sub['sub_question']}\n"
            # Raw tool results are deliberately omitted here: they are too
            # long and the per-task summary already covers them.
            prompt += f"总结: {sub['summary']}\n\n"

        # A separate synchronous OpenAI client is used for the final
        # summarization and HTML rendering calls.
        from openai import OpenAI
        openai_client = OpenAI(
            base_url=os.environ.get('OPENAI_API_BASE', 'https://api-inference.modelscope.cn/v1/'),
            # SECURITY: the key must come from the environment. A real API
            # key used to be hard-coded here as the fallback value — it has
            # been removed and should be revoked/rotated.
            api_key=os.environ.get('OPENAI_API_KEY')
        )
        messages = [
            {"role": "system",
             "content": "你是一个帮助用户整合多步骤任务结果并给出最终规划的智能助手，主要通过时间流程去安排日程。"},
            {"role": "user", "content": prompt}
        ]
        response = openai_client.chat.completions.create(
            model=self.model,
            messages=messages
        )
        final_plan = response.choices[0].message.content
        print("最终规划安排：\n", final_plan)

        # Ask the model to render the plan as a standalone HTML page with
        # tables and images. (Fixed: item 6 previously ran into item 7 on
        # the same line because the implicit string concatenation was
        # missing a newline.)
        html_prompt = (
            "请将以下行程规划内容用html格式输出，要求：\n"
            "1. 行程规划部分用表格展示，表头包含：时间、行程、地点信息、距离、出行方式、耗时、费用。\n"
            "2. 相关图片请以<img src=...>形式插入在合适位置。\n"
            "3. 费用预算也用表格形式给出，表头包含：项目、金额。\n"
            "4. 不允许返回其他内容，只允许返回html内容。不得出现其他无关的文字\n"
            "5. 对于每个推荐的酒店，配置酒店的图片url，图片url需要以<img src=...>形式插入在合适位置。\n"
            "6. 先放置行程规划表，在放置酒店信息及图片，其次是景点照片和图片，然后是美食信息和图片，最后是预算信息，每一张图片下面需要配有文字信息\n每种图片放三张\n"
            "7. 其余内容可适当美化排版。图片格式要适中\n"
            "内容如下：\n" + final_plan
        )
        html_messages = [
            {"role": "system", "content": "你是一个专业的旅行行程HTML页面生成助手。"},
            {"role": "user", "content": html_prompt}
        ]
        html_response = openai_client.chat.completions.create(
            model=self.model,
            messages=html_messages
        )
        html_content = html_response.choices[0].message.content
        with open("final_plan.html", "w", encoding="utf-8") as f:
            f.write(html_content)
        print("HTML文件已生成: final_plan.html")
        return final_plan

    async def cleanup(self):
        """Release all MCP client resources."""
        await self.client.cleanup()


async def main():
    """CLI entry point: parse args, connect, then answer one question or
    run an interactive loop.
    """
    # Silence all library logging so only our prints reach the console.
    # (Uses the module-level `logging` import; the redundant local
    # `import logging` that shadowed it has been removed.)
    logging.disable(logging.CRITICAL)

    parser = argparse.ArgumentParser(description="问题管理器")
    parser.add_argument("--question", "-q", type=str, help="要处理的问题")
    parser.add_argument("--model", "-m", type=str, default=DEFAULT_MODEL, help="OpenAI 模型名称")
    args = parser.parse_args()

    manager = QuestionManager(model=args.model)
    if not await manager.initialize():
        # Connection failed; initialize() already cleaned up.
        return

    # Show every available tool so the user can see what is connected.
    tools = await manager.client.list_tools()
    if not tools:
        print("未检测到任何可用工具，请检查MCP服务连接和配置。")
    else:
        print("可用工具列表：")
        for tool in tools:
            print(f"- {tool.name}: {tool.description}")

    try:
        if args.question:
            # One-shot mode: answer the question from the command line.
            await manager.process_question(args.question)
        else:
            # Interactive mode: loop until the user quits.
            print("已进入交互模式。输入问题或输入 'exit' 退出。")
            while True:
                try:
                    question = input("\n请输入问题: ").strip()
                except (EOFError, KeyboardInterrupt):
                    # Ctrl-D / Ctrl-C exits cleanly instead of a traceback.
                    print("\n再见！")
                    break
                if question.lower() in ("exit", "quit"):
                    print("再见！")
                    break
                if not question:
                    continue

                await manager.process_question(question)
    finally:
        # Always release MCP connections, even after an error.
        await manager.cleanup()


if __name__ == "__main__":
    # Script entry point: run the async CLI to completion.
    asyncio.run(main()) 