import asyncio
import json
import sys
import os
import traceback
from typing import AsyncGenerator
from datetime import datetime, timedelta
from decimal import Decimal

# Add the project root to the import path
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

from Agent.JudgeAgent import JudgeAgentInputSchema, JudgeAgentOutputSchema, FinalResponseSchema
from Agent.AgentTools import execute_tool, get_tools
from atomic_agents.agents.base_agent import BaseAgent, BaseAgentConfig
from atomic_agents.lib.components.system_prompt_generator import SystemPromptGenerator
from atomic_agents.lib.components.agent_memory import AgentMemory
import openai
import instructor
from dotenv import load_dotenv

# Load environment variables from .env
load_dotenv()

class CustomJSONEncoder(json.JSONEncoder):
    """JSON encoder for types the stock encoder rejects.

    Conversions, in order of precedence:
      * ``timedelta`` -> total seconds (float)
      * ``datetime``  -> ISO-8601 string
      * ``Decimal``   -> float
      * any object exposing ``__dict__`` -> that attribute dict
    Anything else falls through to ``json.JSONEncoder.default`` (raises
    ``TypeError``).
    """

    def default(self, obj):
        # timedelta -> number of seconds as a float
        if isinstance(obj, timedelta):
            return obj.total_seconds()
        # datetime -> ISO-8601 formatted string
        if isinstance(obj, datetime):
            return obj.isoformat()
        # Decimal -> float (precision loss acceptable for log/stream output)
        if isinstance(obj, Decimal):
            return float(obj)
        # Generic objects: serialize their attribute dict when available
        if hasattr(obj, '__dict__'):
            return obj.__dict__
        return super().default(obj)

def safe_json_dumps(obj, **kwargs):
    """Serialize *obj* to a JSON string without raising on project types.

    Delegates to ``json.dumps`` with ``CustomJSONEncoder`` (handles
    timedelta/datetime/Decimal and plain objects) and ``ensure_ascii=False``
    so non-ASCII text (e.g. Chinese log messages) stays readable on the wire.
    Extra keyword arguments (e.g. ``indent``) are forwarded unchanged.
    """
    encoded = json.dumps(obj, ensure_ascii=False, cls=CustomJSONEncoder, **kwargs)
    return encoded

def format_sse(data: str, event: "str | None" = None) -> str:
    """Format a payload as a Server-Sent Events (SSE) message.

    Per the SSE specification, every line of the payload must carry its
    own ``data:`` prefix, so multi-line payloads are split into one
    ``data:`` field per line (the original single-line behavior is
    unchanged). The message is terminated by a blank line, which marks
    the end of the event on the wire.

    Args:
        data: Payload text (typically a JSON string).
        event: Optional SSE event name; emitted as an ``event:`` field
            before the data lines when given.

    Returns:
        A wire-ready SSE message string.
    """
    # One "data:" field per payload line keeps multi-line payloads valid SSE.
    body = "".join(f'data: {line}\n' for line in data.split("\n"))
    msg = f'{body}\n'
    if event is not None:
        msg = f'event: {event}\n{msg}'
    return msg


def create_judge_agent():
    """Build a configured Judge Agent for CTF performance evaluation.

    Reads the Gemini API key from the environment, wires the agent to
    Gemini's OpenAI-compatible endpoint via ``instructor`` for
    schema-validated outputs, and builds a lookup table from each tool's
    input-schema class to the tool class itself so the agent's chosen
    action can later be resolved to an executable tool.

    Raises:
        ValueError: if ``GEMINI_API_KEY`` is not set.

    Returns:
        tuple: ``(agent, tool_schema_to_class_map)``.
    """
    api_key = os.getenv("GEMINI_API_KEY")
    if not api_key:
        raise ValueError("GEMINI_API_KEY not found in environment variables")

    # Map each tool's input schema class back to its tool class.
    tool_schema_to_class_map = {}
    for tool_cls in get_tools():
        if hasattr(tool_cls, "input_schema"):
            tool_schema_to_class_map[tool_cls.input_schema] = tool_cls

    # Gemini is reached through its OpenAI-compatible endpoint, wrapped
    # with instructor so responses are parsed into pydantic schemas.
    gemini_client = instructor.from_openai(
        openai.OpenAI(
            base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
            api_key=api_key,
        )
    )

    prompt_generator = SystemPromptGenerator(
        background=[
            "You are a CTF Performance Evaluation Agent specializing in comprehensive assessment of user performance.",
            "You have access to multiple analysis tools and should use them strategically to provide thorough evaluations.",
            "Your goal is to provide fair, objective, and actionable feedback based on data-driven analysis.",
            "You can execute multiple tools in sequence to gather comprehensive data before providing final responses."
        ],
        steps=[
            "1. Parse the user's evaluation request and identify required analysis dimensions.",
            "2. Determine the optimal sequence of tools to gather necessary data.",
            "3. Execute tools in logical order, using results from previous tools to inform subsequent calls.",
            "4. Once sufficient data is collected, use FinalResponseSchema to provide comprehensive analysis.",
            "5. Always validate tool parameters before execution and handle errors gracefully."
        ],
        output_instructions=[
            "1. Always explain your reasoning clearly before taking action.",
            "2. Use multiple tools when necessary to provide comprehensive analysis.",
            "3. Return FinalResponseSchema only when you have sufficient data to answer the user's query.",
            "4. Ensure all tool parameters are valid and complete.",
            "5. Provide specific, actionable feedback in final responses.",
            "6. Maintain professional and constructive tone throughout."
        ]
    )

    agent = BaseAgent(
        BaseAgentConfig(
            client=gemini_client,
            memory=AgentMemory(),
            model="gemini-2.5-flash",
            system_prompt_generator=prompt_generator,
            input_schema=JudgeAgentInputSchema,
            output_schema=JudgeAgentOutputSchema,
        )
    )

    return agent, tool_schema_to_class_map


async def run_judge_flow(query: str, max_iterations: int = 5) -> AsyncGenerator[str, None]:
    """Run the Judge Agent reasoning/tool loop and stream progress as SSE.

    Async generator: each yielded item is a fully formatted SSE message
    (see ``format_sse``) carrying logs, agent reasoning, tool execution
    details, tool results, errors, or the final response.

    Args:
        query: The user's evaluation request.
        max_iterations: Upper bound on agent/tool round-trips before the
            flow terminates with a "timeout" event.

    Yields:
        str: SSE-formatted messages for the client stream.
    """
    try:
        # Build the agent plus the input-schema -> tool-class lookup table
        judge_agent, tool_schema_to_class_map = create_judge_agent()
        
        start_message = {"log": f"开始分析查询: {query}"}
        yield format_sse(safe_json_dumps(start_message), "log")
        # Brief sleep lets the event loop flush each SSE message promptly
        await asyncio.sleep(0.1)

        current_query = JudgeAgentInputSchema(query=query)
        
        for iteration in range(max_iterations):
            # NOTE(review): logs the ORIGINAL query each round rather than
            # current_query — presumably intentional for readability; confirm.
            iteration_log = {
                "log": f"--- [迭代 {iteration + 1}/{max_iterations}] ---",
                "details": f"当前查询: {query[:100]}..."
            }
            yield format_sse(safe_json_dumps(iteration_log), "log")
            await asyncio.sleep(0.1)

            try:
                # Run the agent. NOTE(review): this call is synchronous and
                # blocks the event loop for the duration of the LLM request —
                # consider asyncio.to_thread if concurrency matters.
                agent_output: JudgeAgentOutputSchema = judge_agent.run(current_query)
                
                # Stream the agent's reasoning step to the client
                reasoning_data = {
                    "agent": "Judge Agent",
                    "type": "reasoning",
                    "content": agent_output.reasoning
                }
                yield format_sse(safe_json_dumps(reasoning_data), "reasoning")
                await asyncio.sleep(0.1)

                action_instance = agent_output.action

                # A FinalResponseSchema action ends the loop successfully
                if isinstance(action_instance, FinalResponseSchema):
                    final_response_data = {
                        "agent": "Judge Agent",
                        "type": "final_response",
                        "content": action_instance.response
                    }
                    yield format_sse(safe_json_dumps(final_response_data), "final_response")
                    
                    completion_log = {"log": "分析完成"}
                    yield format_sse(safe_json_dumps(completion_log), "log")
                    return

                # Otherwise the action is a tool input schema: resolve it to
                # the tool class registered for that schema type
                schema_type = type(action_instance)
                ToolClass = tool_schema_to_class_map.get(schema_type)
                
                if not ToolClass:
                    error_message = {
                        "error": f"Unknown schema type: {schema_type.__name__}",
                        "details": "工具类型未找到"
                    }
                    yield format_sse(safe_json_dumps(error_message), "error")
                    break

                tool_name = ToolClass.__name__
                tool_log = {
                    "log": f"执行工具: {tool_name}",
                    "parameters": action_instance.model_dump()
                }
                yield format_sse(safe_json_dumps(tool_log), "tool_execution")
                await asyncio.sleep(0.1)

                # Execute the tool with the schema's fields as kwargs
                params = action_instance.model_dump()
                tool_output = execute_tool(tool_name, **params)

                # Normalize the tool output: pydantic models are dumped to a
                # dict; anything else is stringified
                if hasattr(tool_output, 'model_dump'):
                    tool_result_dict = tool_output.model_dump()
                    tool_result_str = safe_json_dumps(tool_result_dict, indent=2)
                else:
                    tool_result_dict = {"result": str(tool_output)}
                    tool_result_str = str(tool_output)

                tool_result_data = {
                    "agent": tool_name,
                    "type": "tool_result",
                    "content": tool_result_dict,
                    "formatted": tool_result_str
                }
                yield format_sse(safe_json_dumps(tool_result_data), "tool_result")
                await asyncio.sleep(0.1)

                # Record the tool result in agent memory.
                # NOTE(review): the result is stored as an "assistant" message
                # wrapped in the agent's *input* schema — verify this is what
                # AgentMemory expects.
                tool_result_message = f"Tool {tool_name} executed with result: {tool_result_str}"
                judge_agent.memory.add_message("assistant", JudgeAgentInputSchema(query=tool_result_message))

                # Build the next-round query: feed the tool result back and
                # ask the agent to either call another tool or finalize
                current_query = JudgeAgentInputSchema(
                    query=f"Based on the tool execution result from {tool_name}: {tool_result_str}. "
                          f"Please either use another tool if more data is needed, or provide a comprehensive final response to the original query: {query}"
                )

            except Exception as e:
                error_message = {
                    "error": f"迭代 {iteration + 1} 中发生错误",
                    "details": str(e),
                    "traceback": traceback.format_exc()
                }
                yield format_sse(safe_json_dumps(error_message), "error")
                
                # On the last iteration there is nothing left to retry
                if iteration == max_iterations - 1:
                    break
                continue

        # Reached only when the loop ends without a final response
        # (max iterations exhausted, or a break above)
        timeout_message = {
            "log": f"达到最大迭代次数 ({max_iterations})，分析终止",
            "type": "timeout"
        }
        yield format_sse(safe_json_dumps(timeout_message), "timeout")

    except Exception as e:
        # Top-level guard: surface any unexpected failure to the SSE client
        error_message = {
            "error": "Judge Agent 运行中发生严重错误",
            "details": str(e),
            "traceback": traceback.format_exc()
        }
        yield format_sse(safe_json_dumps(error_message), "error")
        traceback.print_exc()