"""
注册多 Server 的 MCP Client 实现

todo
改为关系型数据库
* 总结历史消息：参考NextChat：{"role": "system", "content": "简要总结一下对话内容，用作后续的上下文提示 prompt，控制在 200 字以内"}
* 历史消息携带阈值：即当历史消息过多的时候，只携带总结消息+最后的若干条消息
* 会话隔离 & 数据持久化
"""
import asyncio
import logging
import json
import os

from dotenv import load_dotenv
from fastmcp import Client
from openai import AsyncOpenAI
from openai.types.chat import ChatCompletionMessage

load_dotenv()
logger = logging.getLogger(__name__)

# fastmcp multi-server configuration: each entry under "mcpServers" describes
# one MCP server and how to launch/connect to it.
mcp_config = {
    "mcpServers": {
        "local_server": {
            # Local stdio server
            "transport": "stdio",
            "command": "python",
            "args": ["./server.py", "--verbose"],
            "env": {"DEBUG": "true"},
            # "cwd": "/path/to/server",  # working directory
        },
        # "sequential-thinking": {
        #     "command": "npx",
        #     "args": [
        #         "-y",
        #         "@modelcontextprotocol/server-sequential-thinking"
        #     ]
        # },
        "mongodb": {
            # https://github.com/kiliczsh/mcp-mongo-server
            # npm install -g mcp-mongo-server
            # Wrapped in `cmd /c` — presumably so npx resolves on Windows.
            "command": "cmd",
            "args": [
                "/c",
                "npx",
                "-y",
                "mcp-mongo-server",
                "mongodb://localhost:27017/studentManagement"
            ]
        }
    }
}
# SYSTEM = """
# 你是一个助手，请在需要的时候借助工具，可以使用MongoDB查询数据，使用中文回答用户问题
# 收到用户请求后：
# 1. 生成一个任务列表
# 2. 按顺序调用本地或远程工具（函数、脚本、API）
# 3. 等待返回上一步执行结果，再决定是否继续或修正。
# 4. 遇到错误即给出修复方案。
# """
# SYSTEM = """
# 你是一个助手，请在需要的时候借助工具，可以使用MongoDB查询数据，如果不清楚数据库的字段则优先查询数据库字段，使用中文回答用户问题
# 收到用户请求后：
# 1. 在生成任务列表之前，可以选择调用mcp工具的list_prompts()来检索所有可用的提示模板，使用list_tools()检索可用的工具
# 2. 生成一个任务列表
# 3. 按顺序调用本地或远程工具（函数、脚本、API），调用前询问用户是继续或者拒绝或者修正
# 4. 如果用户的回答是"继续"，则继续执行，如果用户的回答是"拒绝"，则终止执行，如果用户的回答是"修正"，则给出修复方案
# 5. 对查询结果进行反思，如果不符则给出修复方案
# 5. 遇到错误即给出修复方案。
# """
SYSTEM = """
你是一个助手，请在需要的时候借助工具，使用中文回答用户问题，回答问题时不要携带"数据库"字眼
如果用户问题是关于学校、班级、老师、同学、成绩，请查询MongoDB数据后回答
请遵循思考过程：
1. 生成一个任务列表
2. 当查询需要条件过滤的时候，如果不清楚过滤范围，需要些查询字段的值范围，再进行判断
3. 将问题拆分成生成多个子问题，如果是查询的请求，请预设好问题的答案
4. 查询后的结果如果不符合预设的答案，则检查是查询请求的问题还是预设答案的问题
5. 直至返回最终答案
数据库表结构说明如下
### 班级表classes
{
  "$jsonSchema": {
    "bsonType": "object",
    "required": [
      "_id",
      "homeroom_teacher_id",
      "name",
      "year"
    ],
    "properties": {
      "_id": {
        "bsonType": "objectId"
      },
      "homeroom_teacher_id": {
        "bsonType": "objectId"
      },
      "name": {
        "bsonType": "string"
      },
      "year": {
        "bsonType": "int"
      }
    }
  }
}
### 课程表courses
{
  "$jsonSchema": {
    "bsonType": "object",
    "required": [
      "_id",
      "credit",
      "name",
      "teacher_id"
    ],
    "properties": {
      "_id": {
        "bsonType": "objectId"
      },
      "credit": {
        "bsonType": "int"
      },
      "name": {
        "bsonType": "string"
      },
      "teacher_id": {
        "bsonType": "objectId"
      }
    }
  }
}
### 分数表scores
{
  "$jsonSchema": {
    "bsonType": "object",
    "required": [
      "_id",
      "course_id",
      "score",
      "student_id"
    ],
    "properties": {
      "_id": {
        "bsonType": "objectId"
      },
      "course_id": {
        "bsonType": "objectId"
      },
      "score": {
        "bsonType": "int"
      },
      "student_id": {
        "bsonType": "objectId"
      }
    }
  }
}
### 学生表students
{
  "$jsonSchema": {
    "bsonType": "object",
    "required": [
      "_id",
      "birth",
      "class_id",
      "email",
      "gender",
      "name"
    ],
    "properties": {
      "_id": {
        "bsonType": "objectId"
      },
      "birth": {
        "bsonType": "string"
      },
      "class_id": {
        "bsonType": "objectId"
      },
      "email": {
        "bsonType": "string"
      },
      "gender": {
        "bsonType": "string"
      },
      "name": {
        "bsonType": "string"
      }
    }
  }
}
### 老师表teachers
{
  "$jsonSchema": {
    "bsonType": "object",
    "required": [
      "_id",
      "email",
      "name",
      "subjects"
    ],
    "properties": {
      "_id": {
        "bsonType": "objectId"
      },
      "email": {
        "bsonType": "string"
      },
      "name": {
        "bsonType": "string"
      },
      "subjects": {
        "bsonType": "array",
        "items": {
          "bsonType": "string"
        }
      }
    }
  }
}
"""


class MultiServerClient:
    """Chat client bridging an OpenAI-compatible LLM with multiple MCP servers.

    The servers are described by ``config`` (fastmcp multi-server format).
    Conversation state lives in ``self.message`` as a list of chat-completion
    message dicts, seeded with the SYSTEM prompt. NOTE(review): the history is
    shared and unbounded — see the module TODOs about summarization/isolation.
    """

    def __init__(self, config):
        # One fastmcp client multiplexing every server declared in ``config``.
        self.mcp_client = Client(config)
        # LLM endpoint, credentials and model name come from the environment.
        self.openai_client = AsyncOpenAI(base_url=os.getenv('BASE_URL'), api_key=os.getenv('API_KEY'))
        self.model = os.getenv('MODEL')
        self.tools = []  # OpenAI tool specs, filled by prepare_tools()
        self.message = [
            {"role": "system", "content": SYSTEM}
        ]

    async def prepare_tools(self):
        """Collect tools from all MCP servers in OpenAI chat-completions format.

        Returns:
            list[dict]: entries shaped like
            ``{"type": "function", "function": {"name": ..., "description": ...,
            "parameters": <JSON schema>}}``.
        """
        _tools = await self.mcp_client.list_tools()
        tools = [
            {
                "type": "function",
                "function": {
                    "name": tool.name,
                    "description": tool.description,
                    # OpenAI expects the argument JSON schema under
                    # "parameters"; "input_schema" is Anthropic's field name
                    # and would be silently ignored, leaving the model with
                    # no argument schemas for its tool calls.
                    "parameters": tool.inputSchema,
                },
            }
            for tool in _tools
        ]
        logger.info(f"Tools init: {tools}")
        return tools

    async def chat(self, message: list[dict]) -> ChatCompletionMessage:
        """Send the conversation to the LLM and resolve tool calls.

        When the model finishes with ``tool_calls``, every requested tool is
        executed via the MCP client, its result is appended to the history,
        and ``chat`` recurses until the model returns a final answer.

        Args:
            message: full conversation history (chat-completion dicts).

        Returns:
            The final assistant ``ChatCompletionMessage``.
        """
        response = await self.openai_client.chat.completions.create(
            model=self.model,
            messages=message,
            # stream = True,
            tools=self.tools,
            timeout=20,
        )
        logger.info(f"思考过程: {response}")
        content = response.choices[0].message.content
        if content:
            self.message.append({"role": "assistant", "content": content})

        logger.info(f"response.choices[0].finish_reason: {response.choices[0].finish_reason}")
        final_text = []
        if response.choices[0].finish_reason == "tool_calls":
            # Execute every requested tool in order, feed each result back
            # into the history, then let the model continue (recursion).
            for tool_call in response.choices[0].message.tool_calls:
                func_name = tool_call.function.name
                func_args = json.loads(tool_call.function.arguments)
                logger.info(f"{func_name} {func_args}")
                _resp = await self.mcp_client.call_tool(func_name, func_args, timeout=10)
                logger.info(_resp)
                _assistant_text = f"[Calling tool {func_name} with args {func_args}]"
                final_text.append(_assistant_text)
                _text = _resp.content[0].text if not _resp.is_error else "error"
                self.message.append({"role": "assistant", "content": _assistant_text})
                # Tool output is relayed as a "user" turn (not the tool-message
                # protocol) — keeps the transcript model-agnostic.
                self.message.append(
                    {
                        "role": "user",
                        "content": f"调用{tool_call.function.name}工具，"
                                   f"参数为{tool_call.function.arguments}，"
                                   f"结果为: {_text}"
                    }
                )
            print("\n".join(final_text))
            return await self.chat(self.message)

        return response.choices[0].message

    async def loop(self):
        """Interactive REPL: read user input, chat, print the answer."""
        async with self.mcp_client:
            await self.mcp_client.ping()  # Ping the server
            self.tools = await self.prepare_tools()
            # TODO: also surface list_prompts() and list_resources()
            await asyncio.sleep(0.5)
            while True:
                # input() is blocking; run it in a thread so the event loop
                # (and the MCP client's background tasks) stay responsive.
                question = (await asyncio.to_thread(input, "User: ")).strip()
                if not question:
                    # Ignore blank lines instead of sending empty turns.
                    continue
                self.message.append({"role": "user", "content": question})
                response_message = await self.chat(self.message)
                logger.warning(f"当前上下文: {self.message}")
                print(f"Bot: {response_message.content}")
                await asyncio.sleep(0.5)


async def main(log=False):
    """Configure logging and run the interactive client loop.

    Args:
        log: when True, log at INFO level; otherwise only ERROR and above.
    """
    logging.basicConfig(level=logging.INFO if log else logging.ERROR)
    client = MultiServerClient(mcp_config)
    await client.loop()


if __name__ == '__main__':
    # asyncio.run(main())
    # Entry point: run the client with verbose (INFO-level) logging enabled.
    asyncio.run(main(log=True))
