import os
from datetime import datetime

from src.common import commonUtils
from pydantic import BaseModel, Field
from langgraph.graph import StateGraph
from src.common.logger import getLogger
from typing import TypedDict, Dict, List
from langgraph.constants import START, END
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

logger = getLogger()

class Plan(BaseModel):
    """Structured tool-invocation plan produced by the LLM.

    Emitted via ``with_structured_output(Plan)`` in the response/revise
    nodes; ``execute_node`` dispatches on ``funcName``/``funcParam``.
    NOTE: the Field descriptions are runtime strings (they shape the LLM's
    structured-output schema) and are intentionally left in Chinese.
    """
    # 1-based ordinal of this step within the overall plan.
    index: int = Field(description = "计划序号")
    # Human-readable description of what this step should accomplish.
    detail: str = Field(description = "计划详情描述")
    # Name of the tool to invoke; matched against ``tool.name`` in execute_node.
    funcName: str = Field(description = "可用的工具名")
    # Keyword arguments for the tool, as a flat string-to-string mapping.
    funcParam: Dict[str, str] = Field(description = "json格式的工具的输入参数")

class ReflexionState(TypedDict):
    """Shared LangGraph state passed between the agent's nodes."""
    # Original user question (set once at invoke time).
    query: str
    # Current tool-invocation plan (written by response_node).
    plan: Plan
    # Accumulated revision history; its length drives the stop condition
    # in route_node. NOTE(review): declared List[str] but revise_node
    # appends Plan objects — verify the intended element type.
    messages: List[str]
    # Latest answer text produced by execute_node.
    answer: str

class ReflexionAgent:
    """Reflexion-style agent built on a LangGraph state machine.

    Workflow: ``response`` (draft a tool-invocation plan) -> ``execute``
    (run the tool and produce an answer) -> ``revise`` (critique and emit a
    refined plan), looping execute/revise until ``max_iter`` revisions have
    accumulated.
    """

    def __init__(self, llm_model, agent_tools, max_iter,
                 diagram_dir="D:/Downloads/taixu/images/agentics"):
        """
        :param llm_model: chat model; must support ``with_structured_output``.
        :param agent_tools: iterable of tools exposing ``name`` and ``func``.
        :param max_iter: number of revise iterations before the loop stops.
        :param diagram_dir: where the workflow diagram PNG is saved. Kept as
            a parameter (with the original hard-coded path as default) so the
            machine-specific location is no longer baked into build_graph.
        """
        self.llm_model = llm_model
        self.agent_tools = agent_tools
        self.max_iter = max_iter
        self.diagram_dir = diagram_dir

    def response_node(self, state: ReflexionState):
        """Ask the LLM to turn the user question into a structured Plan."""
        logger.info("ReflexionAgent response_node start")
        template = """
            当前时间：{current_date}
        
            你是一个问题分析专家。请根据用户问题，参考提供的可用工具生成一个清晰的指令。
            
            用户问题：{question}
            可用的工具：{tool_descs}
        """
        current_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        tool_descs = commonUtils.build_tools_description(self.agent_tools)
        prompt = ChatPromptTemplate.from_template(template)
        # Structured output guarantees the response parses into a Plan.
        chain = prompt | self.llm_model.with_structured_output(Plan)
        response = chain.invoke({ "current_date": current_date, "question": state["query"], "tool_descs": tool_descs })
        logger.info(f"ReflexionAgent response_node response: {response}")
        return { "plan": response }

    def execute_node(self, state: ReflexionState):
        """Run the planned tool (if any) and synthesize an answer from its output."""
        logger.info("ReflexionAgent execute_node start")
        plan = state["plan"]
        content = None
        if plan.funcName:
            for tool in self.agent_tools:
                if tool.name == plan.funcName:
                    # BUGFIX: funcParam is a dict of keyword arguments; the
                    # original ``*plan.funcParam`` unpacked only the KEYS as
                    # positional arguments. ``**`` passes name=value pairs.
                    content = tool.func(**plan.funcParam)
                    break  # first matching tool wins; stop scanning

        template = """
            当前时间：{current_date}
        
            你是一个问题分析回答专家。请严格按照提供的上下文和用户问题解答用户问题。

            上下文：{context}
            用户问题：{question}
            
            请用中文给出详尽的、准确的最终答案。
        """
        current_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        prompt = ChatPromptTemplate.from_template(template)
        chain = prompt | self.llm_model | StrOutputParser()
        # ``content`` stays None when no tool matched; the prompt still runs.
        response = chain.invoke({ "current_date": current_date, "context": content, "question": state["query"] })
        logger.info(f"ReflexionAgent execute_node response len: {len(response)}")
        return { "answer": response }

    def revise_node(self, state: ReflexionState):
        """Critique the current plan/answer and produce a refined Plan."""
        logger.info("ReflexionAgent revise_node start")

        template = """
            当前时间：{current_date}
        
            你是一个问题分析优化专家。请根据用户问题、提供的执行指令及生成的初步答案，用可用的工具对生成指令进行细致的优化，并给出新的优化后的指令。
            
            用户问题：{question}
            执行指令：{order_plan}
            可用的工具：{tool_descs}
            初步答案：{answer}
        """
        current_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        tool_descs = commonUtils.build_tools_description(self.agent_tools)
        prompt = ChatPromptTemplate.from_template(template)
        chain = prompt | self.llm_model.with_structured_output(Plan)
        response = chain.invoke({ "current_date": current_date, "question": state["query"], "order_plan": state["plan"], "tool_descs": tool_descs, "answer": state["answer"] })
        logger.info(f"ReflexionAgent revise_node response: {response}")
        # Build a NEW list instead of mutating the state's list in place, and
        # BUGFIX: also publish the revised plan as the active ``plan`` — the
        # original only recorded it in ``messages``, so the next execute_node
        # run kept acting on the stale, un-revised plan.
        messages = state.get("messages", []) + [response]
        return { "plan": response, "messages": messages }

    def route_node(self, state: ReflexionState):
        """Stop once ``max_iter`` revisions have been made, else loop to execute."""
        logger.info("ReflexionAgent route_node start")
        if len(state["messages"]) >= self.max_iter:
            return "__end__"
        return "execute"

    def build_graph(self):
        """Assemble and compile the response -> execute -> revise loop graph."""
        logger.info("ReflexionAgent build_graph start")
        graph = StateGraph(ReflexionState)
        graph.add_node("response", self.response_node)
        graph.add_node("execute", self.execute_node)
        graph.add_node("revise", self.revise_node)

        graph.add_edge(START, "response")
        graph.add_edge("response", "execute")
        graph.add_edge("execute", "revise")
        graph.add_conditional_edges("revise", self.route_node, { "__end__": END, "execute": "execute" })

        workflow = graph.compile()
        self._save_workflow_diagram(workflow)
        return workflow

    def _save_workflow_diagram(self, workflow):
        """Best-effort: render the compiled graph to a PNG for documentation.

        Mermaid PNG rendering may need optional dependencies or network
        access, so failures are logged instead of breaking graph construction
        (the original code would raise and abort the whole agent).
        """
        try:
            os.makedirs(self.diagram_dir, exist_ok=True)  # exist_ok avoids the check-then-create race
            image_path = os.path.join(self.diagram_dir, "Agent_Reflexion_Workflow.png")
            with open(image_path, 'wb') as file:
                file.write(workflow.get_graph().draw_mermaid_png())
        except Exception as e:
            logger.warning(f"ReflexionAgent failed to save workflow diagram: {e}")

    def invoke(self, query):
        """Run the full workflow for ``query``; returns {"chain_result": answer}."""
        logger.info(f"ReflexionAgent invoke query: {query}")
        workflow = self.build_graph()
        response = workflow.invoke({ "query": query })
        answer = response.get("answer")
        logger.info(f"ReflexionAgent invoke answer len: {len(str(answer))}")
        return { "chain_result": answer }
