"""
FastMCP服务器实现
使用FastAPI框架实现的Model-Client Protocol (MCP)服务器
支持与Ollama模型的交互，提供工具调用功能
"""

# 导入必要的库
import json  # 用于JSON数据处理
import logging  # 用于日志记录
from typing import Dict, Optional, Set  # 类型提示

import aiohttp  # 用于异步HTTP请求
import uvicorn  # ASGI服务器
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, Body  # FastAPI框架组件
from fastapi.middleware.cors import CORSMiddleware  # CORS中间件
from pydantic import BaseModel  # 数据验证

# 导入自定义组件
try:
    from mcp.llm.ollama_qwen import OllamaQwen
    from mcp.components.retrieval import query_retrieval
    from mcp.components.tools import call_tool
    from mcp.components.memory import read_memory, write_memory
except ImportError:
    from llm.ollama_qwen import OllamaQwen
    from components.retrieval import query_retrieval
    from components.tools import call_tool
    from components.memory import read_memory, write_memory

llm = OllamaQwen()  # shared LLM client; used by the /mcp_fastapi endpoint

# Configure the logging system
logging.basicConfig(
    level=logging.INFO,  # log INFO and above
    format='%(asctime)s - %(levelname)s - %(message)s'  # format: time - level - message
)
logger = logging.getLogger(__name__)  # module-level logger


# MCP message model
class MCPMessage(BaseModel):
    """MCP message model.

    Defines the message format exchanged between client and server.
    """
    type: str  # message type (e.g. chat, tool_call, response)
    content: Dict  # message payload
    id: Optional[str] = None  # optional message ID


class MCPTool(BaseModel):
    """MCP tool model.

    Defines an available tool and its parameter schema.
    """
    name: str  # tool name
    description: str  # tool description
    parameters: Dict  # JSON-schema-style parameter definition


# Ollama API configuration
OLLAMA_API_URL = "http://localhost:11434/api"  # Ollama service base URL
MODEL_NAME = "qwen3:8b"  # model name to use

# Example tool definitions
TOOLS = [
    # Calculator tool
    MCPTool(
        name="calculator",
        description="一个简单的计算器，可以执行基本算术运算",
        parameters={
            "type": "object",
            "properties": {
                "operation": {
                    "type": "string",
                    "enum": ["add", "subtract", "multiply", "divide"]  # supported operations
                },
                "a": {"type": "number"},  # first operand
                "b": {"type": "number"}  # second operand
            },
            "required": ["operation", "a", "b"]  # required parameters
        }
    ),
    # Weather lookup tool
    MCPTool(
        name="weather",
        description="获取指定位置的当前天气信息",
        parameters={
            "type": "object",
            "properties": {
                "location": {"type": "string"},  # location name
                "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}  # temperature unit
            },
            "required": ["location"]  # required parameters
        }
    )
]


class ConnectionManager:
    """Connection manager.

    Tracks active WebSocket connections and the Ollama service
    connection status, and wraps calls to the Ollama chat API and
    the built-in example tools.
    """

    def __init__(self):
        self.active_connections: Set[WebSocket] = set()  # currently open WebSocket connections
        self.ollama_connected = False  # whether the Ollama service is reachable

    async def connect(self, websocket: WebSocket):
        """Accept a new WebSocket connection and start tracking it.

        Args:
            websocket: the incoming WebSocket connection
        """
        await websocket.accept()  # accept the connection
        self.active_connections.add(websocket)  # track as active
        logger.info(f"新客户端连接。当前连接数：{len(self.active_connections)}")

    def disconnect(self, websocket: WebSocket):
        """Stop tracking a WebSocket connection.

        Uses set.discard() instead of set.remove() so a double
        disconnect (e.g. both except branches in the WebSocket
        endpoint reaching this) does not raise KeyError.

        Args:
            websocket: the WebSocket connection to drop
        """
        self.active_connections.discard(websocket)  # safe even if already removed
        logger.info(f"客户端断开连接。剩余连接数：{len(self.active_connections)}")

    async def send_message(self, websocket: WebSocket, message: Dict):
        """Send a JSON message to one WebSocket client.

        Args:
            websocket: target WebSocket connection
            message: message payload to send
        Raises:
            Exception: re-raised when sending fails
        """
        try:
            await websocket.send_json(message)  # send as JSON
        except Exception as e:
            logger.error(f"发送消息失败: {e}")
            raise

    async def check_ollama_connection(self):
        """Check whether the Ollama service and model are available.

        Sends a probe to /tags and a one-message test chat request,
        updating self.ollama_connected accordingly.

        Returns:
            bool: True when both the service and the model respond.
        """
        try:
            async with aiohttp.ClientSession() as session:
                # Is the service responding at all?
                async with session.get(f"{OLLAMA_API_URL}/tags") as response:
                    if response.status != 200:
                        logger.error(f"Ollama服务返回状态码 {response.status}")
                        self.ollama_connected = False
                        return False
                    logger.info("Ollama服务响应正常")

                # Is the configured model usable?
                test_message = {
                    "model": MODEL_NAME,
                    "messages": [{"role": "user", "content": "test"}],
                    "stream": False
                }
                logger.info(f"测试模型连接: {json.dumps(test_message)}")
                async with session.post(f"{OLLAMA_API_URL}/chat", json=test_message) as response:
                    if response.status != 200:
                        logger.error(f"模型测试返回状态码 {response.status}")
                        self.ollama_connected = False
                        return False

                    response_data = await response.json()
                    if "error" in response_data:
                        logger.error(f"模型错误: {response_data['error']}")
                        self.ollama_connected = False
                        return False

                    self.ollama_connected = True
                    logger.info(f"成功连接到Ollama模型: {MODEL_NAME}")
                    return True

        except aiohttp.ClientError as e:
            logger.error(f"连接Ollama服务失败: {str(e)}")
            self.ollama_connected = False
            return False
        except Exception as e:
            logger.error(f"检查Ollama连接时发生意外错误: {str(e)}")
            self.ollama_connected = False
            return False

    async def call_ollama(self, message: str) -> str:
        """Send one user message to the Ollama chat API.

        Args:
            message: the user's input text
        Returns:
            str: the model's response content
        Raises:
            ConnectionError: when the service is unreachable or errors
            ValueError: when the response payload is malformed
        """
        if not self.ollama_connected:
            # Try to (re)establish the connection first.
            if not await self.check_ollama_connection():
                raise ConnectionError("Ollama服务不可用")

        try:
            async with aiohttp.ClientSession() as session:
                request = {
                    "model": MODEL_NAME,
                    "messages": [{"role": "user", "content": message}],
                    "stream": False
                }
                logger.debug(f"发送请求到Ollama: {json.dumps(request)}")

                async with session.post(f"{OLLAMA_API_URL}/chat", json=request) as response:
                    if response.status != 200:
                        error_msg = f"Ollama API返回状态码 {response.status}"
                        logger.error(error_msg)
                        self.ollama_connected = False
                        raise ConnectionError(error_msg)

                    response_data = await response.json()
                    logger.debug(f"收到Ollama响应: {response_data}")

                    if "error" in response_data:
                        error_msg = response_data["error"]
                        logger.error(f"Ollama API错误: {error_msg}")
                        self.ollama_connected = False
                        raise ConnectionError(f"Ollama API错误: {error_msg}")

                    if "message" not in response_data:
                        logger.error("Ollama响应格式无效: 缺少message字段")
                        raise ValueError("Ollama响应格式无效")

                    return response_data["message"]["content"]

        except aiohttp.ClientError as e:
            logger.error(f"调用Ollama时发生错误: {str(e)}")
            self.ollama_connected = False
            raise ConnectionError(f"连接Ollama服务失败: {str(e)}")
        except Exception as e:
            logger.error(f"调用Ollama时发生意外错误: {str(e)}")
            self.ollama_connected = False
            raise

    async def handle_tool_call(self, tool_data: Dict) -> Dict:
        """Dispatch a tool-call request to the matching handler.

        Args:
            tool_data: tool-call payload with "name" and "parameters"
        Returns:
            Dict: the tool's result, or {"error": ...} for unknown tools
        Raises:
            Exception: re-raised when the tool handler fails
        """
        tool_name = tool_data["name"]
        params = tool_data["parameters"]
        logger.debug(f"处理工具调用: {tool_name}, 参数: {params}")

        try:
            if tool_name == "calculator":
                return await self.handle_calculator(params)
            elif tool_name == "weather":
                return await self.handle_weather(params)
            else:
                logger.warning(f"未知工具调用: {tool_name}")
                return {"error": "未知工具"}
        except Exception as e:
            logger.error(f"处理工具调用 {tool_name} 时发生错误: {e}")
            raise

    async def handle_calculator(self, params: Dict) -> Dict:
        """Handle a calculator tool call.

        Args:
            params: contains "operation" and operands "a" and "b"
        Returns:
            Dict: the result (division by zero yields an error string,
                  preserved for client compatibility), the operation,
                  and the original parameters
        Raises:
            ValueError: on an unknown operation
        """
        operation = params["operation"]
        a = params["a"]
        b = params["b"]
        logger.debug(f"计算器运算: {operation}, 操作数: {a} 和 {b}")

        try:
            result = None
            if operation == "add":
                result = a + b
            elif operation == "subtract":
                result = a - b
            elif operation == "multiply":
                result = a * b
            elif operation == "divide":
                # NOTE: returns an error string (not an exception) for b == 0
                result = a / b if b != 0 else "错误：除数不能为零"
            else:
                raise ValueError(f"未知的运算类型: {operation}")

            return {
                "result": result,
                "operation": operation,
                "parameters": params
            }
        except Exception as e:
            logger.error(f"计算器运算错误: {e}")
            raise

    async def handle_weather(self, params: Dict) -> Dict:
        """Handle a weather tool call (returns mocked data).

        Args:
            params: contains "location" and optional "unit"
        Returns:
            Dict: simulated weather data for the location
        """
        location = params["location"]
        unit = params.get("unit", "celsius")  # default to Celsius
        logger.debug(f"天气查询: 位置={location}, 单位={unit}")

        try:
            # Return simulated weather data.
            return {
                "location": location,
                "temperature": 25 if unit == "celsius" else 77,  # mocked temperature
                "unit": unit,
                "condition": "晴朗",
                "humidity": 65  # mocked humidity
            }
        except Exception as e:
            logger.error(f"天气查询错误: {e}")
            raise


# Create the FastAPI application
app = FastAPI(
    title="FastMCP Server",
    description="使用FastAPI实现的MCP服务器"
)

# Add CORS middleware to allow cross-origin requests
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # allow all origins
    allow_credentials=True,
    allow_methods=["*"],  # allow all HTTP methods
    allow_headers=["*"],  # allow all HTTP headers
)

# Create the shared connection-manager instance
manager = ConnectionManager()


@app.on_event("startup")
async def startup_event():
    """On server startup, verify that the Ollama service is reachable."""
    ollama_ok = await manager.check_ollama_connection()
    if not ollama_ok:
        logger.error("无法连接到Ollama服务。服务器可能无法正常工作。")


@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    """WebSocket endpoint.

    Sends the tool definitions on connect, then loops handling
    "chat" and "tool_call" messages from the client.

    Fix: WebSocketDisconnect raised while receiving was previously
    caught by the loop's generic `except Exception`, logged as a
    server error, and followed by a send on the closed socket.
    It is now re-raised so the outer handler performs a clean
    disconnect.

    Args:
        websocket: the client's WebSocket connection
    """
    await manager.connect(websocket)
    try:
        # Send the tool definitions to the client.
        await manager.send_message(websocket, {
            "type": "tools",
            "content": {"tools": [tool.dict() for tool in TOOLS]}
        })

        while True:
            try:
                # Receive the next client message.
                data = await websocket.receive_json()
                logger.debug(f"收到消息: {data}")

                if data["type"] == "chat":
                    # Handle a chat message.
                    try:
                        response = await manager.call_ollama(data["content"]["message"])
                        await manager.send_message(websocket, {
                            "type": "response",
                            "content": {"message": response}
                        })
                    except Exception as e:
                        logger.error(f"处理聊天消息时发生错误: {e}")
                        await manager.send_message(websocket, {
                            "type": "error",
                            "content": {"message": f"错误: {str(e)}"}
                        })

                elif data["type"] == "tool_call":
                    # Handle a tool call.
                    try:
                        tool_response = await manager.handle_tool_call(data["content"])
                        await manager.send_message(websocket, {
                            "type": "tool_response",
                            "content": tool_response
                        })
                    except Exception as e:
                        logger.error(f"处理工具调用时发生错误: {e}")
                        await manager.send_message(websocket, {
                            "type": "error",
                            "content": {"message": f"错误: {str(e)}"}
                        })

            except json.JSONDecodeError as e:
                logger.error(f"无效的JSON消息: {e}")
                await manager.send_message(websocket, {
                    "type": "error",
                    "content": {"message": "无效的JSON消息"}
                })
            except WebSocketDisconnect:
                # A client disconnect is not a server error; let the
                # outer handler clean up instead of sending on a
                # closed socket.
                raise
            except Exception as e:
                logger.error(f"处理消息时发生错误: {e}")
                await manager.send_message(websocket, {
                    "type": "error",
                    "content": {"message": f"服务器错误: {str(e)}"}
                })

    except WebSocketDisconnect:
        manager.disconnect(websocket)
    except Exception as e:
        logger.error(f"WebSocket错误: {e}")
        manager.disconnect(websocket)


@app.get("/health")
async def health_check():
    """Health-check endpoint.

    Returns:
        Dict: server status, Ollama connectivity, and the number of
              active WebSocket connections.
    """
    payload = {
        "status": "healthy",
        "ollama_connected": manager.ollama_connected,
        "active_connections": len(manager.active_connections),
    }
    return payload


@app.get("/tools")
async def get_tools():
    """List the available tools.

    Returns:
        Dict: the serialized tool definitions under the "tools" key.
    """
    serialized_tools = [t.dict() for t in TOOLS]
    return {"tools": serialized_tools}


@app.post("/mcp_fastapi")
async def mcp_fastapi(user_input: str = Body(..., embed=True)):
    """Run the single-shot MCP pipeline for one user input.

    Combines retrieval, an example tool call, and memory read/write
    into a prompt, then asks the LLM for the final answer.
    """
    retrieved = query_retrieval(user_input)
    tool_output = call_tool("example_tool", {"input": user_input})
    write_memory("last_input", user_input)
    remembered = read_memory("last_input")
    prompt = (
        f"用户输入: {user_input}\n"
        f"检索: {retrieved}\n"
        f"工具: {tool_output}\n"
        f"记忆: {remembered}"
    )
    answer = await llm.generate(prompt)
    return {"output": answer}


if __name__ == "__main__":
    # Start the ASGI server (development mode: hot reload enabled).
    server_options = {
        "host": "localhost",  # bind address
        "port": 8000,  # server port
        "reload": True,  # hot reload for development
        "log_level": "info",  # log level
    }
    uvicorn.run("fast_mcp_server:app", **server_options)
