import os
import re
import ast
import json
from datetime import datetime
from src.common import commonUtils
from pydantic import BaseModel, Field
from langgraph.graph import StateGraph
from src.common.logger import getLogger
from typing import TypedDict, List, Dict, Literal
from langgraph.constants import END, START
from langchain_core.prompts import ChatPromptTemplate

logger = getLogger()

class Plan(BaseModel):
    """One planned reasoning step: ordered action text plus an optional tool call."""
    index: int = Field(description = "每一步推理的序号")  # position of this step within the plan
    detail: str = Field(description = "每一步推理的详细内容")  # reasoning text for this step
    function: str = Field(description = "可用的工具名")  # tool name; empty/falsy when no tool applies (see execute_node)
    param: Dict[str, str] = Field(description = "json格式的工具的输入参数")  # kwargs unpacked into the tool call

class PlanList(BaseModel):
    """Structured output of the reasoning node: the full step-by-step plan."""
    plans: List[Plan] = Field(description = "逐步推理的步骤列表")  # ordered list of Plan steps

class ObserveAct(BaseModel):
    """Structured output of the observe node: answerability flag plus the answer text."""
    flag: Literal["yes", "no"] = Field(description = "是否已获取或可以回答问题，若是，返回 'yes'，若不是，返回 'no'")  # "yes" once the context can answer the question
    result: str = Field(description = "根据上下文回答的答案内容")  # answer text; per the prompt, may be empty when unknown

class ReActState(TypedDict):
    """Shared graph state flowing between the reason/action/observe nodes."""
    query: str  # the user's original question
    steps: List[Plan]  # plan produced by reason_node
    results: List[str]  # accumulated tool outputs / step details from execute_node
    count: int  # number of completed observe iterations (incremented in observe_node)
    flag: str  # "yes"/"no" from observe_node — whether the question is answered
    answer: str  # final answer text from observe_node

class ReActAgent:
    """ReAct-style agent: reason (plan) -> act (run tools) -> observe (answer or loop).

    Built on a langgraph ``StateGraph``; the observe node loops back to reasoning
    until it reports an answer or ``max_step`` iterations are exhausted.
    """

    def __init__(self, llm_model, agent_tools, max_step: int = 3):
        """
        :param llm_model: chat model supporting ``with_structured_output``.
        :param agent_tools: iterable of tools exposing ``name`` and ``func`` attributes.
        :param max_step: maximum reason/act/observe iterations before giving up.
        """
        self.llm_model = llm_model
        self.agent_tools = agent_tools
        self.max_step = max_step
        self._workflow = None  # lazily compiled graph, reused across invoke() calls

    def reason_node(self, state):
        """Reason: ask the LLM to break the query into a structured step-by-step plan."""
        logger.info("ReActAgent reason_node start")
        query = state["query"]
        template = """
            当前时间是：{current_date}，请基于当前时间回答问题。
        
            你是一个问题分析推理及规划任务大师，使用ReAct框架逐步解决问题，必须使用中文回答。
            问题：{input}
            
            确保工具调用准确高效，必须严格按照描述使用工具，若工具不适合处理当前问题或没有可用的工具，则不返回工具
            可用的工具：{tool_descs}

            严格根据上下文严谨的回答，如果没有答案或不知道，返回为空
        """
        tool_descs = commonUtils.build_tools_description(self.agent_tools)
        current_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        prompt = template.format(current_date = current_date, tool_descs = tool_descs, input = query)
        # Force the model to emit a PlanList so downstream nodes get typed steps.
        structure_llm = self.llm_model.with_structured_output(PlanList)
        response = structure_llm.invoke(prompt)
        logger.info(f"ReActAgent reason_node response: {response}")
        return { "steps": response.plans }

    def execute_node(self, state):
        """Act: run each planned step, calling its tool or keeping the step's own text."""
        logger.info("ReActAgent execute_node start")
        # Copy so we never mutate the list object held by the previous state snapshot.
        results = list(state.get("results", []))
        for step in state.get("steps", []):
            logger.info(f"ReActAgent execute_node step: {step}")
            if step.function:
                for tool in self.agent_tools:
                    if step.function == tool.name:
                        results.append(tool.func(**step.param))
                        break  # first matching tool wins; no need to keep scanning
                else:
                    # Planner named a tool we do not have — record nothing but log it
                    # so the miss is visible instead of silently dropped.
                    logger.info(f"ReActAgent execute_node unknown tool: {step.function}")
            else:
                # No tool required: the step's reasoning text itself is the result.
                results.append(step.detail)
        return { "results": results }

    def observe_node(self, state):
        """Observe: ask the LLM whether the accumulated results answer the query."""
        logger.info("ReActAgent observe_node start")
        query = state["query"]
        results = state.get("results", [])
        template = """
            当前时间是：{current_date}，请基于当前时间回答问题。
        
            你是一个智能AI助手，请根据提供的上下文回答问题，必须使用中文回答。
            
            上下文：{results}
            
            问题：{query}
        """
        prompt = ChatPromptTemplate.from_template(template)
        structure_llm = self.llm_model.with_structured_output(ObserveAct)
        current_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        response = (prompt | structure_llm).invoke({ "current_date": current_date, "results": results, "query": query })
        logger.info(f"ReActAgent observe_node response: {response}")
        # count tracks completed iterations and drives the max_step cutoff in route().
        return { "flag": response.flag, "answer": response.result, "count": state.get("count", 0) + 1 }

    def route(self, state):
        """Decide whether to finish or loop back to the reasoning node."""
        logger.info("ReActAgent route start")
        # %-style args avoid double quotes nested inside a double-quoted f-string,
        # which is a SyntaxError on Python < 3.12.
        logger.info("ReActAgent route flag: %s, count: %s", state["flag"], state["count"])
        # '>=' (not '==') so an overshooting counter can never loop forever.
        if state["flag"] == "yes" or state["count"] >= self.max_step:
            return "__end__"
        return "reason"

    def build_graph(self):
        """Compile the reason -> action -> observe graph with a conditional loop edge."""
        graph = StateGraph(ReActState)
        graph.add_node("reason", self.reason_node)
        graph.add_node("action", self.execute_node)
        graph.add_node("observe", self.observe_node)

        graph.add_edge(START, "reason")
        graph.add_edge("reason", "action")
        graph.add_edge("action", "observe")
        # Only the conditional edge may leave "observe": a parallel fixed edge to END
        # would terminate the run even when route() asks to loop back to "reason".
        graph.add_conditional_edges("observe", self.route, { "__end__": END, "reason": "reason" })

        workflow = graph.compile()
        self._export_graph_image(workflow)
        return workflow

    def _export_graph_image(self, workflow):
        """Best-effort export of the workflow diagram; never fails the build."""
        try:
            save_path = "D:/Downloads/taixu/images/agentics"
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            image_path = os.path.join(save_path, "Agent_ReAct_Workflow.png")
            with open(image_path, 'wb') as file:
                file.write(workflow.get_graph().draw_mermaid_png())
        except Exception as e:
            # Mermaid PNG rendering needs external tooling and a writable path;
            # a missing diagram must not break the agent itself.
            logger.info(f"ReActAgent graph image export skipped: {e}")

    def invoke(self, query):
        """Run the full ReAct loop for ``query``; returns ``{"chain_result": answer}``."""
        logger.info(f"ReActAgent invoke query: {query}")
        if self._workflow is None:
            # Compile once and reuse — rebuilding (and re-rendering the diagram)
            # on every call is pure overhead.
            self._workflow = self.build_graph()
        response = self._workflow.invoke({"query": query})
        answer = response.get("answer", None)
        logger.info(f"ReActAgent invoke answer len: {len(str(answer))}")
        return { "chain_result": answer }
