import asyncio
from typing import Dict, List, Optional, Any, Union
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.schema import ChatResult
from langchain.schema.messages import BaseMessage, AIMessage, HumanMessage
from langchain.chat_models.base import BaseChatModel
from langchain_core.outputs import ChatGeneration
from mcp.server.fastmcp import FastMCP

class QwenLLM(BaseChatModel):
    """Chat model that routes Qwen3-32B generation through an MCP client.

    The actual model call is delegated to ``self.mcp_client.generate``; this
    class only flattens a LangChain message list into a ``Human:``/``AI:``
    transcript and wraps the server reply in a ``ChatResult``.
    """

    # Endpoint configuration; api_key and mcp_client must be assigned by the
    # caller before the first generation call.
    model_name: str = "qwen3-32b"
    base_url: str = "https://dashscope.aliyuncs.com/compatible-mode/v1"
    api_key: str = ""
    mcp_client: Optional[FastMCP] = None

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses for logging/serialization."""
        return "qwen3-32b"

    async def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Generate one AI reply for *messages* via the MCP client.

        NOTE(review): ``BaseChatModel`` expects a *sync* ``_generate`` plus an
        async ``_agenerate``; declaring ``_generate`` as a coroutine breaks
        LangChain's synchronous ``invoke`` path. It is kept async here because
        this file's callers ``await`` it directly, and ``_agenerate`` is added
        below so the standard async entry points (``ainvoke``) also work.

        Raises:
            ValueError: if ``mcp_client`` has not been assigned.
        """
        if not self.mcp_client:
            raise ValueError("MCP client is not initialized")

        prompt = self._build_prompt(messages)
        response = await self._async_generate(prompt)

        generation = ChatGeneration(
            message=AIMessage(content=response),
            generation_info={"model": self.model_name},
        )
        return ChatResult(generations=[generation])

    async def _agenerate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Standard LangChain async hook; delegates to ``_generate``."""
        return await self._generate(
            messages, stop=stop, run_manager=run_manager, **kwargs
        )

    @staticmethod
    def _build_prompt(messages: List[BaseMessage]) -> str:
        """Flatten *messages* into a transcript ending with an open "AI: " turn.

        NOTE(review): messages that are neither Human nor AI (e.g.
        SystemMessage) are silently dropped, matching the original behavior —
        worth revisiting if system prompts are ever needed.
        """
        parts: List[str] = []
        for message in messages:
            if isinstance(message, HumanMessage):
                parts.append(f"Human: {message.content}\n")
            elif isinstance(message, AIMessage):
                parts.append(f"AI: {message.content}\n")
        # join() instead of repeated += keeps this linear for long histories.
        return "".join(parts) + "AI: "

    async def _async_generate(self, prompt: str) -> str:
        """Invoke the ``generate`` method exposed by the MCP server."""
        response = await self.mcp_client.generate(prompt=prompt)
        return response

# Script entry point: wire up the MCP client, run one smoke-test turn, then
# drop into an interactive chat loop.
async def main():
    """Connect to the MCP server and run an interactive console chat."""
    # Create the MCP client (SSE transport).
    mcp_client = FastMCP(url="http://10.249.11.250:8080/sse", transport="sse")

    # Initialize the Qwen LLM and attach the client.
    llm = QwenLLM()
    llm.mcp_client = mcp_client

    # One-shot sanity check before entering the loop.
    messages = [HumanMessage(content="你好，请介绍一下自己")]
    response = await llm._generate(messages)

    print("\n模型回复:")
    print(response.generations[0].message.content)

    # Interactive conversation (type 'exit' to quit).
    print("\n开始交互对话 (输入'exit'退出):")
    while True:
        # NOTE(review): input() blocks the event loop; acceptable for a
        # single-task CLI demo, but use asyncio.to_thread for anything more.
        user_input = input("\n你: ")
        if user_input.lower() == 'exit':
            break

        messages.append(HumanMessage(content=user_input))
        # BUG FIX: the original omitted `await` here, so `response` was a
        # coroutine object and `.generations` below raised AttributeError on
        # the first interactive turn.
        response = await llm._generate(messages)
        ai_message = response.generations[0].message
        messages.append(ai_message)

        print(f"\n助手: {ai_message.content}")

if __name__ == "__main__":
    asyncio.run(main())

from mcp.server.fastmcp import FastMCP

# Local FastMCP stand-in that exposes a generate() coroutine.
# NOTE(review): this redefinition shadows the FastMCP imported at the top of
# the file (and is defined *after* the __main__ guard, so main() never sees
# it) — the two should be reconciled; confirm intent.
class FastMCP:
    async def generate(self, prompt: str) -> str:
        """POST *prompt* to the server and return the raw response body.

        BUG FIX: the original body used ``aiohttp``, which is imported nowhere
        in this file and raised NameError at call time. This version uses only
        the standard library, running the blocking HTTP request in a worker
        thread so the event loop is not stalled.

        Assumes ``self.url`` and ``self.api_key`` are set by a constructor not
        visible here — TODO confirm against the real client.
        """
        import json
        import urllib.request

        def _post() -> str:
            # Same request shape as before: JSON body + bearer token.
            request = urllib.request.Request(
                self.url,
                data=json.dumps({"prompt": prompt}).encode("utf-8"),
                headers={
                    "Authorization": f"Bearer {self.api_key}",
                    "Content-Type": "application/json",
                },
                method="POST",
            )
            with urllib.request.urlopen(request) as response:
                return response.read().decode("utf-8")

        return await asyncio.to_thread(_post)
