import functools
import operator
import os
from typing import TypedDict, Annotated, Sequence

import jsonlines
from langchain_core.messages import (
    HumanMessage,
    AIMessage,
    BaseMessage,
)
from langgraph.graph import StateGraph, END
from tqdm import tqdm

from agents.create_agent import create_agent, company_prompt, law_prompt
from llm.glm_llm import glm
from llm.judge import judge_chain
from llm.router_chain import router_chain
from tools import com_info_tools, com_register_tools, sub_com_info_tools, law_tools
from utils import read_jsonl

# Tool assignment: the company agent sees only company-related tools, the law
# agent only legal tools, and the fallback agent gets the full toolbox.
company_toolkit = [*com_info_tools, *com_register_tools, *sub_com_info_tools]
law_toolkit = list(law_tools)
all_tools = [*company_toolkit, *law_toolkit]

# Three specialised executors sharing the same underlying GLM model.
company_agent = create_agent(glm, company_toolkit, company_prompt)
law_agent = create_agent(glm, law_toolkit, law_prompt)
default_agent = create_agent(glm, all_tools)


# The agent state is the input to each node in the graph
# The agent state is the input to each node in the graph
class AgentState(TypedDict):
    """Shared state passed between graph nodes.

    Each node returns a partial state update; `messages` accumulates via
    `operator.add`, while `topic` is overwritten on each update.
    """

    # The annotation tells the graph that new messages will always
    # be added to the current states
    messages: Annotated[Sequence[BaseMessage], operator.add]
    # The topic of the current conversation [company, law]
    topic: str


def route_node(state: AgentState):
    """Classify the user's question and pick the downstream agent.

    The router chain labels the first message; any label mentioning
    "company" routes to the company agent, "law" to the law agent,
    and everything else falls through to the default agent.

    Returns a partial state update: {"topic": "company" | "law" | "default"}.
    """
    result = router_chain.invoke({"question": state["messages"][0].content})
    # The substring tests subsume exact equality, so separate
    # `result == "company"` / `result == "law"` branches are unnecessary.
    if "company" in result:
        topic = "company"
    elif "law" in result:
        topic = "law"
    else:
        topic = "default"

    return {"topic": topic}


def judge_node(state: AgentState):
    """Have the judge chain vet the latest answer against the question.

    Returns no new messages when the answer is accepted (or the conversation
    has already grown too long); otherwise appends the judge's comment as a
    fresh HumanMessage so the agent gets another attempt.
    """
    messages = state["messages"]
    # Hard cap on conversation length to prevent endless judge/agent loops.
    if len(messages) >= 6:
        return {"messages": []}

    conversation = f"Human: {messages[0].content}\nAI: {messages[-1].content}"
    verdict = judge_chain.invoke({"conversation": conversation})
    if verdict.get("valid", "false") != "false":
        return {"messages": []}
    # Rejected: feed the judge's critique back as a new human turn.
    return {"messages": [HumanMessage(content=verdict["comment"])]}


def should_end(state: AgentState):
    """Decide whether the judged conversation is finished.

    If judge_node accepted the answer it appended nothing, so the last
    message is still an AIMessage and the run ends; otherwise the judge
    added a HumanMessage and we loop back to the topic's agent.
    """
    last_message = state["messages"][-1]
    return END if isinstance(last_message, AIMessage) else state["topic"]


def agent_node(state, agent):
    """Run *agent* on the current state and wrap its output as an AIMessage."""
    output = agent.invoke(state)["output"]
    return {"messages": [AIMessage(content=output)]}


# Graph wiring: route -> (company | law | default) -> judge -> (END or retry).
graph = StateGraph(AgentState)
graph.add_node("law", functools.partial(agent_node, agent=law_agent))
graph.add_node("company", functools.partial(agent_node, agent=company_agent))
graph.add_node("default", functools.partial(agent_node, agent=default_agent))
graph.add_node("route", route_node)
graph.add_node("judge", judge_node)

# Dispatch on the topic chosen by route_node; .lower() defensively
# normalises the label, and a missing topic falls back to "default".
graph.add_conditional_edges(
    "route",
    lambda state: state.get("topic", "default").lower(),
    {
        "company": "company",
        "law": "law",
        "default": "default",
    },
)
# should_end returns END when the judge accepted the answer, otherwise the
# topic name — sending the conversation back to the same agent for a retry.
graph.add_conditional_edges("judge", should_end)
# Every agent's answer is reviewed by the judge before the run can end.
graph.add_edge("company", "judge")
graph.add_edge("law", "judge")
graph.add_edge("default", "judge")
graph.set_entry_point("route")

app = graph.compile()

# app.get_graph().print_ascii()

if __name__ == "__main__":

    question_file = "./data/question.jsonl"
    # 修改输出文件
    result_file = f"data/answer.jsonl"
    queries = read_jsonl(question_file)

    # 生成答案
    print("Start generating answers...")

    for query in tqdm(queries):
        # 如果中断，可以从这里开始
        if query["id"] < 114:
            continue
        response = app.invoke({"messages": [HumanMessage(content=query["question"])]})
        content = {
            "id": query["id"],
            "question": query["question"],
            "answer": response["messages"][-1].content,
        }
        with jsonlines.open(result_file, "a") as json_file:
            json_file.write(content)
