import json
from typing import Any, Dict, Generator, Union, List
from .config_loader import config
from .logger import get_logger
import openai
from .channel import Channel
from .message import Message, MessageChunk, ToolCallMessage, ToolCallResponseMessage

from fastmcp import Client as MCPClient
from tool import mcp

# Module-level logger for this file
logger = get_logger(__name__)

# OpenAI-compatible client; endpoint and credentials come from config_loader
client = openai.Client(
    base_url=config["base_url"],
    api_key=config["api_key"]
)

# Shared FastMCP client wrapping the in-process `mcp` tool server
mcp_client = MCPClient(mcp)

def create_chat_completion(messages, stream=False, **kwargs):
    """Call the chat-completions endpoint using the configured model.

    :param messages: list of OpenAI-format message dicts
    :param stream: whether to request a streaming response
    :param kwargs: extra parameters forwarded to the API (e.g. ``tools``)
    :return: a ChatCompletion object, or a chunk iterator when ``stream`` is True
    """
    return client.chat.completions.create(
        model=config["model"], messages=messages, stream=stream, **kwargs
    )

class Agent:
    """Multi-channel conversational agent backed by an LLM.

    Each agent owns a default channel named after itself; additional
    channels can be attached via subscribe()/unsubscribe().
    """

    def __init__(self, name, system_message, max_context_length=30):
        """Initialize the agent.

        :param name: agent name (also the name of its default channel)
        :param system_message: persona / system prompt for this agent
        :param max_context_length: max messages kept per channel, default 30
        """
        self.name = name
        self.system_message = system_message
        # The default channel shares the agent's name and holds its private history.
        self.channels: Dict[str, Channel] = {self.name: Channel(self.name, max_context_length)}
        logger.info(f"Agent '{name}' 初始化完成，最大上下文长度: {max_context_length}")

    @property
    def context(self):
        """Message history of the agent's default (same-named) channel."""
        return self.channels[self.name].context

    async def generate_response(self, message: Message=None, stream: bool=False, channel: Union[str, Channel]=None, **kwargs) -> Union[Message, Generator[MessageChunk, None, None]]:
        """Generate a response, with optional streaming.

        :param message: new input message, appended to the channel before generating
        :param stream: if True, return a generator of MessageChunk
        :param channel: target channel (name or instance); defaults to the
            agent's own channel. A name must already be subscribed.
        :param kwargs: extra parameters forwarded to the LLM
        :return: non-stream: list containing the reply Message;
            stream: generator yielding MessageChunk objects
        :raises ValueError: when a channel name is not subscribed
        """
        if channel is None:
            channel = self.channels[self.name]
        elif isinstance(channel, str):
            if channel not in self.channels:
                raise ValueError(f"Channel {channel} does not exist.")
            channel = self.channels[channel]
        if message:
            channel.context.append(message)

        llm_messages = self._construct_llm_messages(channel=channel)
        response = create_chat_completion(llm_messages, stream=stream, **kwargs)

        # BUG FIX: previously called as _process_response(response, stream,
        # channel=channel), which bound `stream` to the `last_response`
        # parameter and omitted the required `stream` argument entirely,
        # raising TypeError at runtime. The input message was already
        # appended above, so no deferred input_user_message is needed.
        return self._process_response(responses=[], last_response=response,
                                      stream=stream, channel=channel)

    async def reset(self):
        """Reset the agent's default channel."""
        self.channels[self.name].reset()

    def _process_response(self, responses: List[Message], last_response, stream: bool, channel: Channel, input_user_message: Message=None) -> Union[Message, Generator[MessageChunk, None, None]]:
        """Turn a raw LLM response into Message objects and record history.

        :param responses: messages produced earlier this turn (e.g. tool calls)
        :param last_response: final ChatCompletion, or a chunk iterator when streaming
        :param stream: whether ``last_response`` is a streaming iterator
        :param channel: channel whose context receives the new messages
        :param input_user_message: deferred user input appended just before the
            reply (used by ToolAgent, which works on a scratch channel)
        :return: non-stream: ``responses`` with the reply appended;
            stream: a generator yielding earlier messages then MessageChunks
        """
        if not stream:
            # Non-streaming: extract the single completed message.
            content = last_response.choices[0].message.content
            logger.debug(f"output_content: {content}")
            message = Message(source=self.name, content=content)
            if input_user_message:
                channel.context.append(input_user_message)
            channel.context.append(message)
            responses.append(message)

            return responses
        else:
            # Streaming: wrap everything in a generator that first replays
            # earlier messages, then yields chunks as they arrive.
            def chunk_generator():
                for former_response in responses:
                    yield former_response
                result = ""  # accumulated inside the generator so it survives lazy execution
                for chunk in last_response:
                    if chunk.choices[0].delta.content is not None:
                        content_chunk = chunk.choices[0].delta.content
                        result += content_chunk
                        yield MessageChunk(
                            source=self.name,  # use self.name for source consistency
                            content=content_chunk
                        )
                if input_user_message:
                    channel.context.append(input_user_message)
                # After streaming completes, store the full reply in context.
                channel.context.append(Message(source=self.name, content=result))

            return chunk_generator()

    def _construct_llm_messages(self, channel: Channel) -> list[dict]:
        """Build the OpenAI-format message list for an LLM call.

        Messages authored by this agent become assistant/tool entries;
        consecutive messages from other participants are concatenated into
        a single user message.
        """
        sources = set()
        sources.add(self.name)
        llm_messages = []
        a_context = ""
        for message in channel.context:
            sources.add(message.source)
            if message.source == self.name:
                if a_context != "":
                    # Flush accumulated foreign messages as one user turn.
                    llm_messages.append({"role": "user", "content": a_context})
                    a_context = ""
                if isinstance(message, ToolCallMessage):
                    llm_messages.append({
                        "role": "assistant",
                        "content": message.content,
                        "tool_calls": [call.model_dump() for call in message.tool_calls]
                    })
                elif isinstance(message, ToolCallResponseMessage):
                    llm_messages.append({
                        "role": "tool",
                        "tool_call_id": message.tool_call_id,
                        "content": json.dumps(message.content, ensure_ascii=False)
                    })
                elif isinstance(message, Message):
                    llm_messages.append({"role": "assistant", "content": message.content})
            else:
                a_context += str(message) + "\n"
        if a_context != "":
            # Trailing messages after the agent's last reply form the closing user turn.
            llm_messages.append({"role": "user", "content": a_context})

        system_message = {"role": "system", "content": f"""
你正在参与多Agent对话。参与者：{sources}。你的身份是{self.name}。
{self.system_message}
你根据所给历史对话，以你的身份设定继续参与对话。
"""}
        input_messages = [system_message] + llm_messages
        logger.debug(f"input_messages: {input_messages}")
        return input_messages

    def subscribe(self, channel: Channel):
        """Subscribe to a channel; channel names must be unique per agent.
        Every agent is already subscribed to its own same-named channel.

        :raises Exception: if a channel with the same name is already subscribed
        """
        if channel.name not in self.channels:
            self.channels[channel.name] = channel
        else:
            raise Exception(f"Agent {self.name} already subscribed to channel {channel.name}")

    def unsubscribe(self, channel: Channel):
        """Unsubscribe from a channel.

        :raises Exception: if the channel is not currently subscribed
        """
        if channel.name in self.channels:
            del self.channels[channel.name]
        else:
            raise Exception(f"Agent {self.name} not subscribed to channel {channel.name}")
        
class ToolAgent(Agent):
    """Agent that can discover and call MCP tools while responding."""

    def __init__(self, name: str, system_message: str, tools: List[str] = None, max_context_length: int = 30):
        """Initialize the tool-using agent.

        :param name: agent name
        :param system_message: system prompt
        :param tools: tool names to load; empty/None means every tool exposed
            by FastMCP is loaded lazily on first use
        :param max_context_length: max context length, default 30
        """
        super().__init__(name, system_message, max_context_length)
        self.mcp_host = config["mcp_host"]
        # BUG FIX: the former `tools=[]` default was a shared mutable list
        # that load_tools() mutated in place (tools_init.remove), leaking
        # state across instances. Copy the caller's list (or start empty).
        self.tools = list(tools) if tools else []

    async def load_tools(self, tools_init: List[str] = None) -> Dict[str, Dict[str, Any]]:
        """Fetch tools from FastMCP and convert them to OpenAI tool format.

        :param tools_init: names of tools to load; falsy loads all tools
        :return: mapping of tool name -> OpenAI-compatible tool spec
        """
        tools = {}
        async with mcp_client:
            data = await mcp_client.list_tools()
            if not tools_init:
                for tool in data:
                    tools[tool.name] = self.convert_fastmcp_to_openai_tool(tool)
            else:
                # BUG FIX: operate on a copy so the caller's list (possibly
                # self.tools) is not mutated while loading.
                remaining = list(tools_init)
                for tool in data:
                    if tool.name in remaining:
                        tools[tool.name] = self.convert_fastmcp_to_openai_tool(tool)
                        remaining.remove(tool.name)
                if remaining:
                    logger.info(f"未找到工具: {remaining}")
            return tools

    def convert_fastmcp_to_openai_tool(self, fastmcp_tool):
        """Convert a fastMCP Tool object into the OpenAI-compatible tool schema.

        :param fastmcp_tool: object exposing .name, .description and .inputSchema
        :return: dict in OpenAI "function" tool format
        """
        return {
            "type": "function",
            "function": {
                "name": fastmcp_tool.name,
                "description": fastmcp_tool.description or "",
                "parameters": {
                    "type": "object",
                    "properties": fastmcp_tool.inputSchema["properties"],
                    "required": fastmcp_tool.inputSchema.get("required", [])
                }
            }
        }

    async def call_tool(self, tool_name: str, parameters: dict) -> dict:
        """Invoke a FastMCP tool and return its structured result.

        :raises RuntimeError: when no tools are loaded/configured
        """
        # BUG FIX: was `assert self.tools != {}`, which is stripped under -O
        # and also let an empty *list* pass; raise explicitly instead.
        if not self.tools:
            raise RuntimeError("无工具可用,请检查mcp tools配置")

        async with mcp_client:
            return (await mcp_client.call_tool(tool_name, parameters)).structured_content

    async def act(self, channel: Channel, stream: bool = False) -> str:
        """Run the tool loop: let the LLM call tools until it yields a final answer.

        :param channel: scratch channel holding the working context
        :param stream: accepted for interface symmetry; the tool loop itself
            always uses non-streaming calls
        :return: tuple of (final ChatCompletion, list of tool-call/response Messages)
        """
        # Lazily resolve tool specs on first use: a list of names (or an
        # empty list) is replaced by the loaded name -> spec mapping.
        if self.tools == []:
            self.tools = await self.load_tools()
        elif isinstance(self.tools, list):
            self.tools = await self.load_tools(self.tools)

        responses = []
        while True:
            response = create_chat_completion(
                self._construct_llm_messages(channel=channel),
                stream=False,
                tools=list(self.tools.values()),
                tool_choice="auto",  # let the LLM decide whether to call tools
                )

            choice = response.choices[0]
            logger.debug(f"vllm_response: {choice}")

            if choice.finish_reason == "tool_calls":
                # The LLM requested one or more tool invocations.
                tool_calls = choice.message.tool_calls
                tool_call_message = ToolCallMessage(source=self.name, content=choice.message.content, tool_calls=choice.message.tool_calls)
                logger.debug(f"tool_call_message: {tool_call_message}")
                channel.context.append(tool_call_message)
                responses.append(tool_call_message)

                for tool_call in tool_calls:
                    tool_name = tool_call.function.name
                    tool_args = json.loads(tool_call.function.arguments)

                    # Execute the tool.
                    tool_result = await self.call_tool(tool_name, tool_args)
                    logger.debug(f"tool_result: {tool_result}")

                    # Feed the tool result back so the LLM can continue.
                    tool_call_response_message = ToolCallResponseMessage(source=self.name, tool_name=tool_name, tool_description=self.tools[tool_name]["function"]["description"], parameters=tool_args, tool_call_id=tool_call.id, content=tool_result)
                    logger.debug(f"tool_call_response_message: {tool_call_response_message}")
                    channel.context.append(tool_call_response_message)
                    responses.append(tool_call_response_message)
            else:
                logger.debug(f"choice.finish_reason: {choice.finish_reason}")

                return response, responses

    async def generate_response(self, message: Message=None, stream: bool = False, channel: Union[str, Channel]=None, **kwargs) -> Union[Message, Generator[MessageChunk, None, None]]:
        """Generate a response, letting the LLM call tools as needed.

        :param message: new input message
        :param stream: if True, return a generator of MessageChunk
        :param channel: target channel (name or instance); defaults to the
            agent's own channel
        :param kwargs: extra parameters forwarded to the LLM
        :return: non-stream: list of Messages (tool calls + final reply);
            stream: generator yielding MessageChunk objects
        :raises ValueError: when a channel name is not subscribed
        """
        if channel is None:
            channel = self.channels[self.name]
        elif isinstance(channel, str):
            if channel not in self.channels:
                raise ValueError(f"Channel {channel} is not subscribed.")
            channel = self.channels[channel]

        # Work on a private scratch channel so intermediate tool-call traffic
        # only reaches the shared channel once the turn completes.
        act_channel = Channel(name=f"{self.name}_act", max_context_length=channel.context.maxlen)
        act_channel.context = channel.context.copy()

        if message:
            act_channel.context.append(message)

        # response: the final ChatCompletion of this turn
        # responses: tool-call/response Messages produced before it
        #            (excludes the user input): List[Message] or []
        response, responses = await self.act(channel=act_channel, stream=stream)

        return self._process_response(responses=responses, last_response=response, stream=stream, channel=channel, input_user_message=message)

    async def reset(self):
        """Reset the default channel and reload the full tool set."""
        # BUG FIX: Agent.reset is a coroutine; the previous code called
        # super().reset() without awaiting it, so the reset never ran.
        await super().reset()
        self.tools = await self.load_tools()
    