import json
from typing import Any, Dict, Generator, Union, List
from .config_loader import config
from .logger import get_logger
import openai
from .channel import Channel
from .message import Message, MessageChunk, ToolCallMessage, ToolCallResponseMessage
from transformers import AutoTokenizer

from fastmcp import Client as MCPClient
from tool import mcp

# Module-level logger for this file.
logger = get_logger(__name__)

# OpenAI-compatible client pointed at the configured endpoint.
client = openai.Client(
    base_url=config["base_url"],
    api_key=config["api_key"]
)

# FastMCP client wrapping the in-process MCP tool server imported from `tool`.
mcp_client = MCPClient(mcp)

# Tokenizer used to measure prompt length when truncating chat history.
tokenizer = AutoTokenizer.from_pretrained(config["tokenizer"])

def create_chat_completion(messages, stream=False, **kwargs):
    """Send a chat-completion request using the globally configured model.

    :param messages: OpenAI-format message dicts
    :param stream: whether to request a streaming response
    :param kwargs: extra parameters forwarded verbatim to the API
    :return: a ChatCompletion object, or a chunk iterator when streaming
    """
    completions = client.chat.completions
    return completions.create(
        model=config["model"],
        messages=messages,
        stream=stream,
        **kwargs,
    )

class Agent:
    """A conversational agent backed by an LLM.

    Each agent owns a default channel named after itself; the message queue of
    that channel is the agent's conversation context. Agents may additionally
    subscribe to shared channels to talk to other agents.
    """

    def __init__(self, name, system_message, max_context_length=30):
        """Initialize the agent.

        :param name: the agent's name (also the name of its default channel)
        :param system_message: the agent's system prompt / persona description
        :param max_context_length: maximum context length, defaults to 30
        """
        self.name = name
        self.system_message = system_message
        self.channels: Dict[str, Channel] = {self.name: Channel(self.name, max_context_length)}
        logger.info(f"Agent '{name}' 初始化完成，最大上下文长度: {max_context_length}")

    @property
    def context(self):
        """The agent's context: the message queue of its own default channel."""
        return self.channels[self.name].context

    async def generate_response(self, message: Message=None, stream: bool=False, channel: Union[str, Channel]=None, **kwargs) -> Union[Message, Generator[MessageChunk, None, None]]:
        """Generate a response, with streaming or non-streaming output.

        :param message: new input message; appended to the channel before the LLM call
        :param stream: whether to stream the output
        :param channel: target channel (name or object); defaults to the agent's own channel
        :param kwargs: extra parameters forwarded to the LLM
        :return: non-streaming: the list of produced Message objects;
                 streaming: a generator yielding MessageChunk objects
        :raises ValueError: if a channel name is given that was never subscribed
        """
        if channel is None:
            channel = self.channels[self.name]
        elif isinstance(channel, str):
            if channel not in self.channels:
                raise ValueError(f"Channel {channel} does not exist.")
            channel = self.channels[channel]
        if message:
            channel.context.append(message)

        llm_messages = self._construct_llm_messages(channel=channel)
        response = create_chat_completion(llm_messages, stream=stream, **kwargs)

        # BUG FIX: the old call `self._process_response(response, stream, channel=channel)`
        # bound `response` to the `responses` parameter and `stream` to `last_response`,
        # then failed with a TypeError for the missing `stream` argument. The input
        # message was already appended above, so `input_user_message` stays None here.
        return self._process_response(responses=[], last_response=response, stream=stream, channel=channel)

    async def reset(self):
        """Reset the agent's default channel."""
        self.channels[self.name].reset()

    def _process_response(self, responses: List[Message], last_response, stream: bool, channel: Channel, input_user_message: Message=None) -> Union[Message, Generator[MessageChunk, None, None]]:
        """Turn an LLM response into channel messages (streaming or non-streaming).

        :param responses: messages produced earlier in this turn (tool calls etc.)
        :param last_response: final ChatCompletion, or a chunk iterator when streaming
        :param stream: whether `last_response` is a streaming iterator
        :param channel: channel whose context receives the final message
        :param input_user_message: optional user message appended before the answer
        :return: the updated `responses` list, or a chunk generator when streaming
        """
        if not stream:
            # Non-streaming: take the complete answer from the first choice.
            content = last_response.choices[0].message.content
            logger.debug(f"output_content: {content}")
            message = Message(source=self.name, content=content)
            responses.append(message)   # full turn output shown to the user (includes tool calls)

            # Concatenate the content of each ToolCallMessage so the channel stores one
            # complete answer for this turn, with the tool-call mechanics elided.
            complete_chat_content = ""
            for response in responses:
                if isinstance(response, ToolCallMessage):
                    if response.content:
                        complete_chat_content += response.content.strip() + "\n\n" + '<工具调用完毕>' + '\n\n'
                    else:
                        complete_chat_content += '<工具调用完毕>' + '\n\n'
            complete_chat_content += content

            message_inject_channel = Message(source=self.name, content=complete_chat_content)

            if input_user_message:
                channel.context.append(input_user_message)
            channel.context.append(message_inject_channel)

            return responses
        else:
            # Streaming: wrap everything in a generator so chunks flow to the caller.
            def chunk_generator():
                for former_response in responses:
                    yield former_response
                result = ""  # accumulated inside the generator
                for chunk in last_response:
                    if chunk.choices[0].delta.content is not None:
                        content_chunk = chunk.choices[0].delta.content
                        result += content_chunk
                        yield MessageChunk(
                            source=self.name,
                            content=content_chunk
                        )
                if input_user_message:
                    channel.context.append(input_user_message)
                # Once the stream ends, store the complete message in the context.
                channel.context.append(Message(source=self.name, content=result))

            return chunk_generator()

    def _construct_llm_messages(self, channel: Channel) -> list[dict]:
        """Build the message list sent to the LLM endpoint.

        This agent's own messages become `assistant`/`tool` entries; everything
        else is concatenated into `user` entries. The history is then truncated
        from the front until it fits `config["max_history_length"]` tokens.
        """
        sources = set()
        sources.add(self.name)
        llm_messages = []
        a_context = ""
        for message in channel.context:
            sources.add(message.source)
            if message.source == self.name:
                if a_context != "":
                    llm_messages.append({"role": "user", "content": a_context})
                    a_context = ""
                if isinstance(message, ToolCallMessage):
                    llm_messages.append({
                        "role": "assistant",
                        "content": message.content,
                        "tool_calls": [call.model_dump() for call in message.tool_calls]
                    })
                elif isinstance(message, ToolCallResponseMessage):
                    llm_messages.append({
                        "role": "tool",
                        "tool_call_id": message.tool_call_id,
                        "content": json.dumps(message.content, ensure_ascii=False)
                    })
                elif isinstance(message, Message):
                    llm_messages.append({"role": "assistant", "content": message.content})
            else:
                a_context += str(message) + "\n"
        if a_context != "":
            # Messages between our last reply and the end of the dialog form the final user turn.
            llm_messages.append({"role": "user", "content": a_context})

        # Tokenize and drop history from the front while the prompt is too long.
        # BUG FIX: initialize `i` so an empty history no longer raises NameError
        # at the `llm_messages[i:]` slice below.
        i = 0
        for i in range(len(llm_messages)):
            tokenized_messages = tokenizer.apply_chat_template(llm_messages[i:], add_generation_prompt=True, tokenize=True)
            if len(tokenized_messages) <= config["max_history_length"]:
                break
            if i == len(llm_messages) - 1:  # even the last message alone is too long: truncate it
                llm_messages[i]["content"] = llm_messages[i]["content"][:config["max_history_length"]]
        llm_messages = llm_messages[i:]

        system_message = {"role": "system", "content": f"""你正在参与多Agent对话。当前参与者：{sources}。你的身份是{self.name}。
{self.system_message}
你根据所给历史对话，以你的身份设定继续参与对话。
"""}
        input_messages = [system_message] + llm_messages
        logger.info(f"input_messages: {input_messages}")
        return input_messages

    def summarize_chat(self, chat_list: List[str]) -> str:
        """Summarize several of this agent's past answers into a single text.

        :param chat_list: the answers to merge, oldest first
        :return: the LLM-produced summary
        """
        total_chat = "\n\n".join(chat_list)
        instruction = {"role": "user", "content": f"""你正在参与多Agent对话，你们是一个团队。
{self.system_message}

现在，团队对话历史中有两段你的历史回答。它们可能是补充关系，也可能是后者对前者的修正关系。请你对这两段历史回答进行总结。要求总结内容尽可能包含两段历史回答中所有的有用信息，以向团队其他成员提供尽可能充分的有用信息。

以下是历史对话：
{total_chat}
"""}
        # BUG FIX: create_chat_completion already supplies model=config["model"];
        # passing it again raised "got multiple values for keyword argument 'model'".
        summary = create_chat_completion(messages=[instruction])
        return summary.choices[0].message.content

    def subscribe(self, channel: Channel):
        """Subscribe to a channel.

        A channel name may only be subscribed once; every agent is already
        subscribed to its own same-named default channel.
        """
        if not channel.name in self.channels:
            self.channels[channel.name] = channel
        else:
            raise Exception(f"Agent {self.name} already subscribed to channel {channel.name}")

    def unsubscribe(self, channel: Channel):
        """Unsubscribe from a channel.

        :raises Exception: if the channel was never subscribed
        """
        if channel.name in self.channels:
            del self.channels[channel.name]
        else:
            raise Exception(f"Agent {self.name} not subscribed to channel {channel.name}")
        
class ToolAgent(Agent):
    """Agent that can call MCP tools while generating a response."""

    def __init__(self, name: str, system_message: str, tools: List[str]=None, max_context_length: int=30):
        """Initialize the tool agent.

        :param name: agent name
        :param system_message: the agent's system prompt
        :param tools: tool names to expose; empty/None loads the full FastMCP list lazily
        :param max_context_length: context length, defaults to 30
        """
        super().__init__(name, system_message, max_context_length)
        self.mcp_host = config["mcp_host"]
        # BUG FIX: the previous default `tools=[]` was a shared mutable default;
        # copy into a fresh list per instance. `act` later replaces this list
        # with a dict of resolved OpenAI tool definitions.
        self.tools = list(tools) if tools else []
        # Set by generate_response; read by _act_stream for quantitative_evaluation.
        self.current_message = None

    async def load_tools(self, tools_init: List[str] = None) -> Dict[str, Dict[str, Any]]:
        """Fetch the tool list from FastMCP and convert it to OpenAI tool format.

        :param tools_init: tool names to keep; None/empty loads every tool
        :return: mapping of tool name -> OpenAI-compatible tool definition
        """
        tools = {}
        # BUG FIX: work on a copy so the caller's list is not mutated by .remove().
        remaining = list(tools_init) if tools_init else []
        async with mcp_client:
            data = await mcp_client.list_tools()
            if not remaining:
                for tool in data:
                    tools[tool.name] = self.convert_fastmcp_to_openai_tool(tool)
            else:
                for tool in data:
                    if tool.name in remaining:
                        tools[tool.name] = self.convert_fastmcp_to_openai_tool(tool)
                        remaining.remove(tool.name)
                if remaining:
                    logger.info(f"未找到工具: {remaining}")
            return tools

    def convert_fastmcp_to_openai_tool(self, fastmcp_tool):
        """Convert a fastMCP Tool object into the OpenAI-compatible tool format."""
        return {
            "type": "function",
            "function": {
                "name": fastmcp_tool.name,
                "description": fastmcp_tool.description or "",
                "parameters": {
                    "type": "object",
                    "properties": fastmcp_tool.inputSchema["properties"],
                    "required": fastmcp_tool.inputSchema.get("required", [])
                }
            }
        }

    async def call_tool(self, tool_name: str, parameters: dict) -> dict:
        """Invoke a FastMCP tool and return its structured result.

        :raises RuntimeError: when no tools are configured
        """
        # BUG FIX: `assert` is stripped under `python -O`; validate explicitly.
        if not self.tools:
            raise RuntimeError("无工具可用,请检查mcp tools配置")

        async with mcp_client:
            return (await mcp_client.call_tool(tool_name, parameters)).structured_content

    async def act(self, channel: Channel, stream: bool = False):
        """Process the channel automatically: decide whether tools are needed and answer.

        :param channel: channel providing the conversation history
        :param stream: whether to stream the output
        :return: non-streaming: (final ChatCompletion, list of tool messages);
                 streaming: an async generator (see _act_stream)
        """
        # Lazily resolve the tool list: an empty list means "all tools",
        # a list of names means "just those"; a dict is already resolved.
        if self.tools == []:
            self.tools = await self.load_tools()
        elif isinstance(self.tools, list):
            self.tools = await self.load_tools(self.tools)

        if not stream:
            # Non-streaming: original synchronous-style loop.
            return await self._act_non_stream(channel)
        else:
            # Streaming: return the async generator (not awaited here).
            return self._act_stream(channel)

    async def _act_non_stream(self, channel: Channel):
        """Non-streaming act loop: call the LLM, run requested tools, repeat."""
        responses = []
        while True:
            response = create_chat_completion(
                self._construct_llm_messages(channel=channel),
                stream=False,
                tools=list(self.tools.values()),
                tool_choice="auto",
                )

            choice = response.choices[0]
            logger.info(f"vllm_response: {choice}")

            if choice.finish_reason == "tool_calls":
                # The LLM asked for one or more tool invocations.
                tool_calls = choice.message.tool_calls
                tool_call_message = ToolCallMessage(source=self.name, content=choice.message.content, tool_calls=choice.message.tool_calls)
                logger.info(f"tool_call: {tool_call_message}")
                channel.context.append(tool_call_message)
                responses.append(tool_call_message)

                for tool_call in tool_calls:
                    tool_name = tool_call.function.name
                    tool_args = json.loads(tool_call.function.arguments)

                    # Run the tool.
                    tool_result = await self.call_tool(tool_name, tool_args)
                    logger.debug(f"tool_result: {tool_result}")

                    # Feed the tool result back so the LLM can continue.
                    tool_call_response_message = ToolCallResponseMessage(source=self.name, tool_name=tool_name, tool_description=self.tools[tool_name]["function"]["description"], parameters=tool_args, tool_call_id=tool_call.id, content=tool_result)
                    logger.info(f"tool_response: {tool_call_response_message}")
                    channel.context.append(tool_call_response_message)
                    responses.append(tool_call_response_message)
            else:
                logger.debug(f"choice.finish_reason: {choice.finish_reason}")
                return response, responses

    async def _act_stream(self, channel: Channel):
        """Streaming act loop: stream LLM output, aggregate tool-call deltas, run tools."""
        responses = []
        while True:
            response = create_chat_completion(
                self._construct_llm_messages(channel=channel),
                stream=True,
                tools=list(self.tools.values()),
                tool_choice="auto",
                )

            # Accumulators for the streamed response.
            collected_content = ""
            collected_tool_calls = []
            finish_reason = None

            # Stream the LLM output chunk by chunk.
            for chunk in response:
                if chunk.choices[0].delta.content is not None:
                    content_chunk = chunk.choices[0].delta.content
                    collected_content += content_chunk
                    yield MessageChunk(source=self.name, content=content_chunk)

                if chunk.choices[0].delta.tool_calls:
                    for tool_call_delta in chunk.choices[0].delta.tool_calls:
                        # Grow the list so every delta index has a slot.
                        while len(collected_tool_calls) <= tool_call_delta.index:
                            collected_tool_calls.append({
                                "id": "",
                                "function": {"name": "", "arguments": ""},
                                "type": "function"
                            })

                        if tool_call_delta.id:
                            collected_tool_calls[tool_call_delta.index]["id"] = tool_call_delta.id
                        if tool_call_delta.function:
                            if tool_call_delta.function.name:
                                collected_tool_calls[tool_call_delta.index]["function"]["name"] += tool_call_delta.function.name
                            if tool_call_delta.function.arguments:
                                collected_tool_calls[tool_call_delta.index]["function"]["arguments"] += tool_call_delta.function.arguments

                if chunk.choices[0].finish_reason:
                    finish_reason = chunk.choices[0].finish_reason

            logger.info(f"流式响应完成，finish_reason: {finish_reason}")

            if finish_reason == "tool_calls":
                # Tools requested. Validate/normalize the aggregated arguments first,
                # so the next request never carries invalid JSON (which causes a 400).
                from openai.types.chat import ChatCompletionMessageToolCall

                sanitized_tool_calls = []
                parsed_args_list = []
                for tc in collected_tool_calls:
                    raw_args = tc.get("function", {}).get("arguments", "") or ""
                    tool_name_cur = tc.get("function", {}).get("name", "") or ""
                    try:
                        parsed_args = json.loads(raw_args)
                        sanitized_args_str = json.dumps(parsed_args, ensure_ascii=False)
                    except Exception as e:
                        logger.error(f"流式聚合得到的工具参数无法解析，尝试修复vLLM流式工具调用bug。tool={tool_name_cur}, raw_args={raw_args}, error={e}")
                        # Try to repair vLLM's flattened List[Dict] streaming output.
                        try:
                            parsed_args = self._fix_vllm_streaming_tool_args(raw_args, tool_name_cur)
                            sanitized_args_str = json.dumps(parsed_args, ensure_ascii=False)
                            logger.info(f"成功修复工具参数: {sanitized_args_str}")
                        except Exception as fix_error:
                            logger.error(f"修复工具参数失败，将回退为空对象。fix_error={fix_error}")
                            parsed_args = {}
                            sanitized_args_str = "{}"
                    parsed_args_list.append((tool_name_cur, tc.get("id", ""), parsed_args, sanitized_args_str))
                    sanitized_tool_calls.append({
                        "id": tc.get("id", ""),
                        "function": {"name": tool_name_cur, "arguments": sanitized_args_str},
                        "type": "function"
                    })

                # Write the normalized tool calls into the context.
                tool_calls_obj = [
                    ChatCompletionMessageToolCall(
                        id=tc["id"],
                        function=tc["function"],
                        type="function"
                    ) for tc in sanitized_tool_calls
                ]

                tool_call_message = ToolCallMessage(
                    source=self.name,
                    content=collected_content,
                    tool_calls=tool_calls_obj
                )
                logger.info(f"tool_call: {tool_call_message}")
                channel.context.append(tool_call_message)
                responses.append(tool_call_message)

                # Emit a short progress marker only; results are not streamed.
                yield MessageChunk(source=self.name, content="\n\n[正在调用工具...]")

                # Run each tool and record its result.
                for tool_name, call_id, tool_args, _sanitized in parsed_args_list:
                    try:
                        # Show which tool is running (without its result).
                        yield MessageChunk(source=self.name, content=f"\n• {tool_name}")
                        if tool_name == "quantitative_evaluation":
                            # Inject the current user message into this specific tool's args.
                            tool_args["xiangding"] = self.current_message.content
                        tool_result = await self.call_tool(tool_name, tool_args)
                    except Exception as e:
                        logger.error(f"工具调用失败: {tool_name}, 参数: {tool_args}, 错误: {e}")
                        yield MessageChunk(source=self.name, content=" ❌")
                        tool_call_response_message = ToolCallResponseMessage(
                            source=self.name,
                            tool_name=tool_name,
                            tool_description=self.tools.get(tool_name, {}).get("function", {}).get("description", ""),
                            parameters=tool_args if isinstance(tool_args, dict) else {},
                            tool_call_id=call_id,
                            content={"error": f"工具调用失败: {str(e)}"}
                        )
                        logger.info(f"tool_response_error: {tool_call_response_message}")
                        channel.context.append(tool_call_response_message)
                        responses.append(tool_call_response_message)
                        continue

                    logger.debug(f"tool_result: {tool_result}")
                    yield MessageChunk(source=self.name, content=" ✓")

                    tool_call_response_message = ToolCallResponseMessage(
                        source=self.name,
                        tool_name=tool_name,
                        tool_description=self.tools[tool_name]["function"]["description"],
                        parameters=tool_args,
                        tool_call_id=call_id,
                        content=tool_result
                    )
                    logger.info(f"tool_response: {tool_call_response_message}")
                    channel.context.append(tool_call_response_message)
                    responses.append(tool_call_response_message)

                # Tool round finished; loop back for another LLM turn.
                yield MessageChunk(source=self.name, content="\n[工具调用完成]\n\n")

            else:
                # No tools requested: this turn is complete.
                logger.debug(f"choice.finish_reason: {finish_reason}")
                # Build a minimal response-like object for _process_response-style consumers.
                class MockResponse:
                    def __init__(self, content, finish_reason):
                        self.choices = [type('obj', (object,), {
                            'message': type('obj', (object,), {'content': content})(),
                            'finish_reason': finish_reason
                        })()]

                final_response = MockResponse(collected_content, finish_reason)
                # The final result is delivered through the generator itself.
                yield (final_response, responses)
                return

    async def generate_response(self, message: Message=None, stream: bool = False, channel: Union[str, Channel]=None, use_new_channel: bool=False, **kwargs) -> Union[Message, Generator[MessageChunk, None, None]]:
        """Generate a response, with streaming and non-streaming output.

        :param message: input message
        :param stream: whether to stream the output
        :param channel: target channel (name or object); defaults to the agent's own channel
        :param use_new_channel: when True, answer without the channel's existing history
        :param kwargs: extra parameters forwarded to the LLM
        :return: non-streaming: list of Message objects;
                 streaming: async generator yielding MessageChunk objects and finally
                 a ("tool_calls_info", responses) tuple for the API layer
        :raises ValueError: if a channel name is given that was never subscribed
        """

        self.current_message = message

        if channel is None:
            channel = self.channels[self.name]
        elif isinstance(channel, str):
            if channel not in self.channels:
                raise ValueError(f"Channel {channel} is not subscribed.")
            channel = self.channels[channel]

        # A scratch channel used only for acting; it holds the tool-call traffic.
        act_channel = Channel(name=f"{self.name}_act", max_context_length=channel.context.maxlen)

        # Only copy the original channel's history when this turn should use it.
        if not use_new_channel:
            for msg in channel.context:
                act_channel.context.append(msg)

        if message:
            act_channel.context.append(message)

        # response: the final ChatCompletion of this turn
        # responses: all prior messages of this turn (tool calls, no user input)

        if not stream:
            # Non-streaming path.
            response, responses = await self.act(channel=act_channel, stream=stream)
            return self._process_response(responses=responses, last_response=response, stream=stream, channel=channel, input_user_message=message)
        else:
            # Streaming path.
            stream_generator = await self.act(channel=act_channel, stream=stream)

            async def tool_stream_wrapper():
                collected_content = ""
                responses = []
                final_response = None

                async for item in stream_generator:
                    if isinstance(item, MessageChunk):
                        # A streamed content chunk.
                        collected_content += item.content
                        yield item
                    elif isinstance(item, tuple) and len(item) == 2:
                        # The final (response, responses) tuple.
                        final_response, responses = item
                        break

                # Stream finished: update the real channel's context.
                if message:
                    channel.context.append(message)

                # Fold tool-call messages into one complete answer for the channel.
                complete_chat_content = ""
                for response in responses:
                    if isinstance(response, ToolCallMessage):
                        if response.content:
                            complete_chat_content += response.content.strip() + "\n\n" + '<工具调用完毕>' + '\n\n'
                        else:
                            complete_chat_content += '<工具调用完毕>' + '\n\n'

                complete_chat_content += collected_content
                message_inject_channel = Message(source=self.name, content=complete_chat_content)
                channel.context.append(message_inject_channel)

                # Hand the tool-call info to the API layer.
                yield ("tool_calls_info", responses)

            return tool_stream_wrapper()

    def _fix_vllm_streaming_tool_args(self, raw_args: str, tool_name: str) -> dict:
        """Repair tool arguments that were mangled by vLLM's streaming output.

        :param raw_args: the aggregated (possibly invalid) JSON argument string
        :param tool_name: name of the tool the arguments belong to
        :return: a parsed argument dict; empty dict when repair fails
        """
        # quantitative_evaluation_forces needs a dedicated repair routine.
        if tool_name == "quantitative_evaluation_forces":
            return self._fix_forces_evaluation_args(raw_args)

        # Generic repair for everything else.
        import json_repair
        try:
            return json_repair.loads(raw_args)
        except Exception:
            # BUG FIX: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit pass through.
            return {}

    def _fix_forces_evaluation_args(self, raw_args: str) -> dict:
        """Repair quantitative_evaluation_forces args whose List[Dict] was flattened.

        Example — turn:
        {"units_blue": "fighter", "count": 3, "stealth": false, "location_tier": "forward", "units_red": [...]}

        into:
        {"units_blue": [{"type": "fighter", "count": 3, "stealth": false, "location_tier": "forward"}], "units_red": [...]}
        """

        try:
            # If it already parses with the expected list fields, return it untouched.
            parsed = json.loads(raw_args)
            if isinstance(parsed.get("units_blue"), list) and isinstance(parsed.get("units_red"), list):
                return parsed
        except Exception:
            # BUG FIX: narrowed from a bare `except:`.
            pass

        # Repair the flattened structure.
        fixed_args = raw_args

        import re

        # Match the flattened `"units_blue": "<type>"` pattern.
        blue_pattern = r'"units_blue":\s*"([^"]+)"'
        blue_match = re.search(blue_pattern, fixed_args)

        if blue_match:
            unit_type = blue_match.group(1)
            # Re-open units_blue as a List[Dict].
            fixed_args = re.sub(blue_pattern, f'"units_blue": [{{"type": "{unit_type}"', fixed_args)

            # Close the list just before units_red.
            red_pattern = r'"],\s*"units_red"'
            if re.search(red_pattern, fixed_args):
                fixed_args = re.sub(red_pattern, '"}], "units_red"', fixed_args)

        # Close the list just before blue_capabilities, if present.
        capabilities_pattern = r'"],\s*"blue_capabilities"'
        if re.search(capabilities_pattern, fixed_args):
            fixed_args = re.sub(capabilities_pattern, '"}], "blue_capabilities"', fixed_args)

        # Balance the trailing brace.
        if fixed_args[-2:] != '}}':
            fixed_args += '}'

        # Try to parse the repaired string.
        try:
            parsed = json.loads(fixed_args)
            logger.info(f"修复后的参数解析成功")
            return parsed
        except Exception as e:
            logger.error(f"修复后的参数仍无法解析: {fixed_args}, 错误: {e}")
            # Fall back to an empty-but-valid structure.
            return {
                "units_blue": [],
                "units_red": [],
                "blue_capabilities": {},
                "red_capabilities": {}
            }

    async def reset(self):
        """Reset the default channel and reload the full tool list."""
        # BUG FIX: Agent.reset is a coroutine; without `await` it never executed.
        await super().reset()
        self.tools = await self.load_tools()

    