import os
from langgraph.types import Command
from typing import TypedDict, Literal
from pydantic import BaseModel, Field
from langgraph.graph import StateGraph
from src.common.logger import getLogger
from langgraph.constants import START, END
from langchain_core.prompts import ChatPromptTemplate
from src.agentic.agent.multi.StockAgent import StockAgent
from src.agentic.agent.multi.SearchAgent import SearchAgent
from src.agentic.agent.multi.WeatherAgent import WeatherAgent
from src.agentic.agent.multi.CalculateAgent import CalculateAgent

logger = getLogger()

class HierarchicalState(TypedDict):
    """Graph state shared across the top-level hierarchical supervisor workflow."""
    # User question; set once at graph entry and never modified.
    query: str
    # Supporting material copied from the last sub-supervisor response;
    # absent until a sub-agent has run.
    document: str
    # Candidate answer copied from the last sub-supervisor response; absent
    # until a sub-agent has run — read via state.get(...) in supervisor_node.
    answer: str

class HierarchicalAct(BaseModel):
    """Structured router output for HierarchicalAgentic: exactly one target node.

    NOTE: the Field description below is part of the LLM-facing schema at
    runtime and is intentionally left in Chinese.
    """
    agent: Literal["calculate_supervisor", "weather_supervisor", "stock_supervisor", "search_supervisor", "finish"] = Field(description = "根据用户输入问题，判断需要调用的agent，只许返回一个节点")

class HierarchicalAgentic:
    """Top-level supervisor agent.

    Routes a user query to one of four domain sub-supervisors
    (calculate / weather / stock / search) in a loop, until the LLM router
    judges the accumulated answer sufficient and the graph terminates.
    """

    def __init__(self, llm_model, agent_tools):
        # llm_model: chat model supporting with_structured_output.
        # agent_tools: tool list forwarded unchanged to each sub-supervisor.
        self.llm_model = llm_model
        self.agent_tools = agent_tools

    def supervisor_node(self, state: HierarchicalState) -> Command[Literal["calculate_supervisor", "weather_supervisor", "stock_supervisor", "search_supervisor", "__end__"]]:
        """Decide the next node from the query and the current answer.

        Returns a Command routing to one sub-supervisor node, or END when the
        router replies 'finish'.
        """
        logger.info("HierarchicalAgentic supervisor_node start")
        query = state["query"]
        answer = state.get("answer", None)
        logger.info(f"HierarchicalAgentic supervisor_node answer: {answer}")
        # NOTE(review): the prompt names plain nodes ("calculate") while
        # HierarchicalAct constrains output to "*_supervisor" values — the
        # structured-output schema is what actually constrains the result;
        # consider aligning the prompt text with the Literal values.
        route_template = """
            你是一位精准的问题-答案分析专家，请严格按照要求分析并解答，判断当前应归属的节点类型
            
            根据用户问题和答案，返回下列节点中的一个：
            calculate: 用于执行数学计算，只能用于数值计算
            weather: 用于查询城市当天及未来的实时天气情况，禁止用来查询过去时间的天气情况
            stock: 用于查询沪深300指数指定股票的开盘/收盘价
            search: 用于搜索真实世界的信息，如事实、人物、地点等，且不属于上述三类时使用
            finish: 答案不能为空，且满足以下任一条件时必须返回'finish'：
                1、答案包含与用户问题相关的关键词
                2、答案与用户问题语义含义一致
            请直接输出节点名称（如：search），不要附加任何解释或标点，禁止推测或扩展。

            用户问题： {question}
            答案：{answer}
        """
        route_prompt = ChatPromptTemplate.from_template(route_template)
        # Structured output forces the model to pick exactly one declared agent.
        route_chain = route_prompt | self.llm_model.with_structured_output(HierarchicalAct)
        route_result = route_chain.invoke({ "question": query, "answer": answer })
        logger.info(f"HierarchicalAgentic supervisor_node route_result: {route_result}")
        goto = route_result.agent
        if goto == "finish":
            goto = END
        # Fix: route on the resolved target (END when 'finish'). The original
        # routed on route_result.agent, sending 'finish' to a non-existent
        # node, and wrote an 'agent' key that HierarchicalState does not
        # declare (LangGraph rejects updates to undeclared state keys).
        return Command(goto = goto)

    def calculate_supervisor_node(self, state: HierarchicalState) -> Command[Literal["supervisor"]]:
        """Delegate to the calculate sub-supervisor, then return to supervisor."""
        logger.info("HierarchicalAgentic calculate_node start")
        agent = CalculateAgentic(self.llm_model, self.agent_tools)
        response = agent.invoke(state["query"])
        # .get(): the sub-graph may finish without setting document/answer.
        return Command(goto = "supervisor", update = { "document": response.get("document", ""), "answer": response.get("answer", "") })

    def weather_supervisor_node(self, state: HierarchicalState) -> Command[Literal["supervisor"]]:
        """Delegate to the weather sub-supervisor, then return to supervisor."""
        logger.info("HierarchicalAgentic weather_node start")
        agent = WeatherAgentic(self.llm_model, self.agent_tools)
        response = agent.invoke(state["query"])
        return Command(goto = "supervisor", update = { "document": response.get("document", ""), "answer": response.get("answer", "") })

    def stock_supervisor_node(self, state: HierarchicalState) -> Command[Literal["supervisor"]]:
        """Delegate to the stock sub-supervisor, then return to supervisor."""
        logger.info("HierarchicalAgentic stock_node start")
        agent = StockAgentic(self.llm_model, self.agent_tools)
        response = agent.invoke(state["query"])
        return Command(goto = "supervisor", update = { "document": response.get("document", ""), "answer": response.get("answer", "") })

    def search_supervisor_node(self, state: HierarchicalState) -> Command[Literal["supervisor"]]:
        """Delegate to the search sub-supervisor, then return to supervisor."""
        logger.info("HierarchicalAgentic search_node start")
        agent = SearchAgentic(self.llm_model, self.agent_tools)
        response = agent.invoke(state["query"])
        return Command(goto = "supervisor", update = { "document": response.get("document", ""), "answer": response.get("answer", "") })

    def build_graph(self):
        """Compile the routing graph; worker nodes loop back via Command(goto)."""
        logger.info("HierarchicalAgentic build_graph start")
        graph = StateGraph(HierarchicalState)
        graph.add_node("supervisor", self.supervisor_node)
        graph.add_node("calculate_supervisor", self.calculate_supervisor_node)
        graph.add_node("weather_supervisor", self.weather_supervisor_node)
        graph.add_node("stock_supervisor", self.stock_supervisor_node)
        graph.add_node("search_supervisor", self.search_supervisor_node)

        graph.add_edge(START, "supervisor")

        workflow = graph.compile()

        # Best-effort diagram export to a hard-coded debug path; never let a
        # drawing/IO failure (e.g. non-Windows host) break the workflow.
        save_path = "D:/Downloads/taixu/images/agentics"
        image_path = os.path.join(save_path, "Agentic_Hierarchical_Workflow.png")
        try:
            os.makedirs(save_path, exist_ok = True)
            with open(image_path, 'wb') as file:
                file.write(workflow.get_graph().draw_mermaid_png())
        except Exception as error:
            logger.warning(f"HierarchicalAgentic build_graph diagram export failed: {error}")

        return workflow

    def invoke(self, query):
        """Run the full hierarchical workflow for one query.

        Returns a dict with 'retrieve_docs' (supporting material) and
        'chain_result' (the final answer, possibly None).
        """
        logger.info(f"HierarchicalAgentic invoke query: {query}")
        workflow = self.build_graph()
        response = workflow.invoke({ "query": query })
        answer = response.get("answer", None)
        # Fix: answer may be None when the router finished without any
        # sub-agent running — len(None) raised TypeError in the original.
        logger.info(f"HierarchicalAgentic invoke answer len: {len(answer) if answer is not None else 0}")
        return { "retrieve_docs": response.get("document", None), "chain_result": answer }


class CalculateState(TypedDict):
    """Graph state for the calculate sub-supervisor workflow."""
    # User question; set once at graph entry.
    query: str
    # Supporting material copied from the last worker-agent response.
    document: str
    # Candidate answer copied from the last worker-agent response; absent
    # until a worker has run — read via state.get(...) in supervisor_node.
    answer: str

class CalculateAct(BaseModel):
    """Structured router output for CalculateAgentic: exactly one target node.

    NOTE: the Field description below is part of the LLM-facing schema at
    runtime and is intentionally left in Chinese.
    """
    agent: Literal["calculate", "search", "finish"] = Field(description = "根据用户输入问题，判断需要调用的agent，只许返回一个节点")

class CalculateAgentic:
    """Mid-level supervisor for calculation questions.

    Loops between an LLM router and two worker nodes (calculate / search)
    until the router judges the answer sufficient.
    """

    def __init__(self, llm_model, agent_tools):
        self.llm_model = llm_model
        # Index tools by name so worker nodes can look up the one they need.
        self.agent_tool = { tool.name: tool for tool in agent_tools }

    def supervisor_node(self, state: CalculateState) -> Command[Literal["calculate", "search", "__end__"]]:
        """Decide the next node from the query and the current answer."""
        logger.info("CalculateAgentic supervisor_node start")
        query = state["query"]
        answer = state.get("answer", None)
        logger.info(f"CalculateAgentic supervisor_node answer: {answer}")
        route_template = """
            你是一位问题分析高手。
            
            根据用户问题，返回下列节点中的一个：
            finish: 答案包含与用户问题相关的关键词或答案与用户问题语义含义一致
            calculate: 用于执行数学计算，只能用于数值计算
            search: 用于搜索真实世界的信息，如事实、人物、地点等

            用户问题： {question}
            答案：{answer}
        """
        route_prompt = ChatPromptTemplate.from_template(route_template)
        # Structured output forces the model to pick exactly one declared agent.
        route_chain = route_prompt | self.llm_model.with_structured_output(CalculateAct)
        route_result = route_chain.invoke({ "question": query, "answer": answer })
        logger.info(f"CalculateAgentic supervisor_node route_result: {route_result}")
        goto = route_result.agent
        if goto == "finish":
            goto = END
        # Fix: route on the resolved target (END when 'finish'). The original
        # routed on route_result.agent, sending 'finish' to a non-existent
        # node, and wrote an 'agent' key that CalculateState does not declare.
        return Command(goto = goto)

    def calculate_node(self, state: CalculateState) -> Command[Literal["supervisor"]]:
        """Run the numeric-calculation worker, then return to the supervisor."""
        logger.info("CalculateAgentic calculate_node start")
        agent = CalculateAgent(self.llm_model, self.agent_tool["calculate_numerical"])
        response = agent.invoke(state["query"])
        return Command(goto = "supervisor", update = { "document": response["document"], "answer": response["answer"] })

    def search_node(self, state: CalculateState) -> Command[Literal["supervisor"]]:
        """Run the web-search worker, then return to the supervisor."""
        logger.info("CalculateAgentic search_node start")
        agent = SearchAgent(self.llm_model, self.agent_tool["search_web"])
        response = agent.invoke(state["query"])
        return Command(goto = "supervisor", update = { "document": response["document"], "answer": response["answer"] })

    def build_graph(self):
        """Compile the supervisor/worker routing graph."""
        logger.info("CalculateAgentic build_graph start")
        graph = StateGraph(CalculateState)
        graph.add_node("supervisor", self.supervisor_node)
        graph.add_node("calculate", self.calculate_node)
        graph.add_node("search", self.search_node)

        # Workers loop back to the supervisor via Command(goto); only the
        # entry edge is declared statically.
        graph.add_edge(START, "supervisor")

        return graph.compile()

    def invoke(self, query):
        """Run the calculate workflow; returns the final graph state dict."""
        logger.info(f"CalculateAgentic invoke query: {query}")
        workflow = self.build_graph()
        return workflow.invoke({ "query": query })


class WeatherState(TypedDict):
    """Graph state for the weather sub-supervisor workflow."""
    # User question; set once at graph entry.
    query: str
    # Supporting material copied from the last worker-agent response.
    document: str
    # Candidate answer copied from the last worker-agent response; absent
    # until a worker has run — read via state.get(...) in supervisor_node.
    answer: str

class WeatherAct(BaseModel):
    """Structured router output for WeatherAgentic: exactly one target node.

    NOTE: the Field description below is part of the LLM-facing schema at
    runtime and is intentionally left in Chinese.
    """
    agent: Literal["weather", "search", "finish"] = Field(description = "根据用户输入问题，判断需要调用的agent，只许返回一个节点")

class WeatherAgentic:
    """Mid-level supervisor for weather questions.

    Loops between an LLM router and two worker nodes (weather / search)
    until the router judges the answer sufficient.
    """

    def __init__(self, llm_model, agent_tools):
        self.llm_model = llm_model
        # Index tools by name so worker nodes can look up the one they need.
        self.agent_tool = { tool.name: tool for tool in agent_tools }

    def supervisor_node(self, state: WeatherState) -> Command[Literal["weather", "search", "__end__"]]:
        """Decide the next node from the query and the current answer."""
        logger.info("WeatherAgentic supervisor_node start")
        query = state["query"]
        answer = state.get("answer", None)
        logger.info(f"WeatherAgentic supervisor_node answer: {answer}")
        route_template = """
            你是一位问题分析高手。
            
            根据用户问题，返回下列节点中的一个：
            finish: 答案包含与用户问题相关的关键词或答案与用户问题语义含义一致
            weather: 用于查询城市当天及未来的实时天气情况，禁止用来查询过去时间的天气情况
            search: 用于搜索真实世界的信息，如事实、人物、地点等

            用户问题： {question}
            答案：{answer}
        """
        route_prompt = ChatPromptTemplate.from_template(route_template)
        # Structured output forces the model to pick exactly one declared agent.
        route_chain = route_prompt | self.llm_model.with_structured_output(WeatherAct)
        route_result = route_chain.invoke({ "question": query, "answer": answer })
        logger.info(f"WeatherAgentic supervisor_node route_result: {route_result}")
        goto = route_result.agent
        if goto == "finish":
            goto = END
        # Fix: route on the resolved target (END when 'finish'). The original
        # routed on route_result.agent, sending 'finish' to a non-existent
        # node, and wrote an 'agent' key that WeatherState does not declare.
        return Command(goto = goto)

    def weather_node(self, state: WeatherState) -> Command[Literal["supervisor"]]:
        """Run the weather-lookup worker, then return to the supervisor."""
        logger.info("WeatherAgentic weather_node start")
        agent = WeatherAgent(self.llm_model, self.agent_tool["search_weather"])
        response = agent.invoke(state["query"])
        return Command(goto = "supervisor", update = { "document": response["document"], "answer": response["answer"] })

    def search_node(self, state: WeatherState) -> Command[Literal["supervisor"]]:
        """Run the web-search worker, then return to the supervisor."""
        logger.info("WeatherAgentic search_node start")
        agent = SearchAgent(self.llm_model, self.agent_tool["search_web"])
        response = agent.invoke(state["query"])
        return Command(goto = "supervisor", update = { "document": response["document"], "answer": response["answer"] })

    def build_graph(self):
        """Compile the supervisor/worker routing graph."""
        logger.info("WeatherAgentic build_graph start")
        graph = StateGraph(WeatherState)
        graph.add_node("supervisor", self.supervisor_node)
        graph.add_node("weather", self.weather_node)
        graph.add_node("search", self.search_node)

        # Workers loop back to the supervisor via Command(goto); only the
        # entry edge is declared statically.
        graph.add_edge(START, "supervisor")

        return graph.compile()

    def invoke(self, query):
        """Run the weather workflow; returns the final graph state dict."""
        logger.info(f"WeatherAgentic invoke query: {query}")
        workflow = self.build_graph()
        return workflow.invoke({ "query": query })


class StockState(TypedDict):
    """Graph state for the stock sub-supervisor workflow."""
    # User question; set once at graph entry.
    query: str
    # Supporting material copied from the last worker-agent response.
    document: str
    # Candidate answer copied from the last worker-agent response; absent
    # until a worker has run — read via state.get(...) in supervisor_node.
    answer: str

class StockAct(BaseModel):
    """Structured router output for StockAgentic: exactly one target node.

    NOTE: the Field description below is part of the LLM-facing schema at
    runtime and is intentionally left in Chinese.
    """
    agent: Literal["stock", "search", "finish"] = Field(description = "根据用户输入问题，判断需要调用的agent，只许返回一个节点")

class StockAgentic:
    """Mid-level supervisor for stock-price questions.

    Loops between an LLM router and two worker nodes (stock / search)
    until the router judges the answer sufficient.
    """

    def __init__(self, llm_model, agent_tools):
        self.llm_model = llm_model
        # Index tools by name so worker nodes can look up the one they need.
        self.agent_tool = { tool.name: tool for tool in agent_tools }

    def supervisor_node(self, state: StockState) -> Command[Literal["stock", "search", "__end__"]]:
        """Decide the next node from the query and the current answer."""
        logger.info("StockAgentic supervisor_node start")
        query = state["query"]
        answer = state.get("answer", None)
        logger.info(f"StockAgentic supervisor_node answer: {answer}")
        route_template = """
            你是一位问题分析高手。
            
            根据用户问题，返回下列节点中的一个：
            finish: 答案包含与用户问题相关的关键词或答案与用户问题语义含义一致
            stock: 用于查询沪深300指数指定股票的开盘/收盘价
            search: 用于搜索真实世界的信息，如事实、人物、地点等

            用户问题： {question}
            答案：{answer}
        """
        route_prompt = ChatPromptTemplate.from_template(route_template)
        # Structured output forces the model to pick exactly one declared agent.
        route_chain = route_prompt | self.llm_model.with_structured_output(StockAct)
        route_result = route_chain.invoke({ "question": query, "answer": answer })
        logger.info(f"StockAgentic supervisor_node route_result: {route_result}")
        goto = route_result.agent
        if goto == "finish":
            goto = END
        # Fix: route on the resolved target (END when 'finish'). The original
        # routed on route_result.agent, sending 'finish' to a non-existent
        # node, and wrote an 'agent' key that StockState does not declare.
        return Command(goto = goto)

    def stock_node(self, state: StockState) -> Command[Literal["supervisor"]]:
        """Run the stock-price worker, then return to the supervisor."""
        logger.info("StockAgentic stock_node start")
        agent = StockAgent(self.llm_model, self.agent_tool["snatch_stock_price"])
        response = agent.invoke(state["query"])
        return Command(goto = "supervisor", update = { "document": response["document"], "answer": response["answer"] })

    def search_node(self, state: StockState) -> Command[Literal["supervisor"]]:
        """Run the web-search worker, then return to the supervisor."""
        logger.info("StockAgentic search_node start")
        agent = SearchAgent(self.llm_model, self.agent_tool["search_web"])
        response = agent.invoke(state["query"])
        return Command(goto = "supervisor", update = { "document": response["document"], "answer": response["answer"] })

    def build_graph(self):
        """Compile the supervisor/worker routing graph."""
        logger.info("StockAgentic build_graph start")
        graph = StateGraph(StockState)
        graph.add_node("supervisor", self.supervisor_node)
        graph.add_node("stock", self.stock_node)
        graph.add_node("search", self.search_node)

        # Workers loop back to the supervisor via Command(goto); only the
        # entry edge is declared statically.
        graph.add_edge(START, "supervisor")

        return graph.compile()

    def invoke(self, query):
        """Run the stock workflow; returns the final graph state dict."""
        logger.info(f"StockAgentic invoke query: {query}")
        workflow = self.build_graph()
        return workflow.invoke({ "query": query })


class SearchState(TypedDict):
    """Graph state for the search sub-supervisor workflow."""
    # User question; set once at graph entry.
    query: str
    # Supporting material copied from the last worker-agent response.
    document: str
    # Candidate answer copied from the last worker-agent response; absent
    # until a worker has run — read via state.get(...) in supervisor_node.
    answer: str

class SearchAct(BaseModel):
    """Structured router output for SearchAgentic: exactly one target node.

    Fix: the original Literal also allowed "stock" (copy-paste from StockAct),
    but SearchAgentic's graph declares no "stock" node, so that choice would
    route to a non-existent node. Only "search" and "finish" are valid here.

    NOTE: the Field description below is part of the LLM-facing schema at
    runtime and is intentionally left in Chinese.
    """
    agent: Literal["search", "finish"] = Field(
        description="根据用户输入问题，判断需要调用的agent，只许返回一个节点")

class SearchAgentic:
    """Mid-level supervisor for open-domain search questions.

    Loops between an LLM router and a single search worker node until the
    router judges the answer sufficient.
    """

    def __init__(self, llm_model, agent_tools):
        self.llm_model = llm_model
        # Index tools by name so the worker node can look up the one it needs.
        self.agent_tool = { tool.name: tool for tool in agent_tools }

    def supervisor_node(self, state: SearchState) -> Command[Literal["search", "__end__"]]:
        """Decide the next node from the query and the current answer."""
        logger.info("SearchAgentic supervisor_node start")
        query = state["query"]
        answer = state.get("answer", None)
        logger.info(f"SearchAgentic supervisor_node answer: {answer}")
        route_template = """
            你是一位问题分析高手。
            
            根据用户问题，返回下列节点中的一个：
            finish: 答案包含与用户问题相关的关键词或答案与用户问题语义含义一致
            search: 用于搜索真实世界的信息，如事实、人物、地点等

            用户问题： {question}
            答案：{answer}
        """
        route_prompt = ChatPromptTemplate.from_template(route_template)
        # Structured output forces the model to pick exactly one declared agent.
        route_chain = route_prompt | self.llm_model.with_structured_output(SearchAct)
        route_result = route_chain.invoke({ "question": query, "answer": answer })
        logger.info(f"SearchAgentic supervisor_node route_result: {route_result}")
        goto = route_result.agent
        if goto == "finish":
            goto = END
        # Fix: route on the resolved target (END when 'finish'). The original
        # routed on route_result.agent, sending 'finish' to a non-existent
        # node, and wrote an 'agent' key that SearchState does not declare.
        return Command(goto = goto)

    def search_node(self, state: SearchState) -> Command[Literal["supervisor"]]:
        """Run the web-search worker, then return to the supervisor."""
        logger.info("SearchAgentic search_node start")
        agent = SearchAgent(self.llm_model, self.agent_tool["search_web"])
        response = agent.invoke(state["query"])
        return Command(goto = "supervisor", update = { "document": response["document"], "answer": response["answer"] })

    def build_graph(self):
        """Compile the supervisor/worker routing graph."""
        logger.info("SearchAgentic build_graph start")
        graph = StateGraph(SearchState)
        graph.add_node("supervisor", self.supervisor_node)
        graph.add_node("search", self.search_node)

        # The worker loops back to the supervisor via Command(goto); only the
        # entry edge is declared statically.
        graph.add_edge(START, "supervisor")

        return graph.compile()

    def invoke(self, query):
        """Run the search workflow; returns the final graph state dict."""
        logger.info(f"SearchAgentic invoke query: {query}")
        workflow = self.build_graph()
        return workflow.invoke({ "query": query })
