import os
import re
from typing import Literal

from langchain_community.chat_models import ChatZhipuAI
from langchain_core.messages import HumanMessage, RemoveMessage, AnyMessage
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph, START, END, MessagesState
from langgraph.prebuilt import ToolNode
from typing_extensions import TypedDict

from my_intelligent_customer.utils.chains import StageAnalyzerChain
from my_intelligent_customer.utils.prompts import CUSTOMER_AGENT_INCEPTION_PROMPT, STAGE_DONE_INCEPTION_PROMPT
from my_intelligent_customer.utils.robot_config import CONVERSATION_STAGES_CORRESPONDING_TOOLS, CONVERSATION_STAGES, \
    ROLE_INFO, CONVERSATION_CORE_OBJECTIVE, CONVERSATION_STAGES_CORRESPONDING_PROMPT, tool_node

# SECURITY: a real API key was hard-coded (and is now leaked in version
# control — rotate it). Prefer supplying ZHIPUAI_API_KEY via the environment;
# setdefault keeps an externally-provided value instead of clobbering it.
os.environ.setdefault("ZHIPUAI_API_KEY", "97738d4998b8732d707daf91a2b1c56d.2y6VKEuOlidwHDpI")

# Shared in-memory checkpointer used by both the sub-graph and the main graph.
memory = MemorySaver()


class State(MessagesState):
    """Graph state shared by the main flow and the stage sub-graph.

    Extends ``MessagesState`` (which supplies the ``messages`` list) with
    conversation-tracking fields.
    """
    # ID of the user currently in the conversation.
    current_user_id: str
    # Rolling summary of earlier, pruned messages.
    summary: str
    # Latest recognized intent label (e.g. "退保").
    intention: str
    # Current SOP stage id, used as a string key into CONVERSATION_STAGES.
    current_conversation_stage_id: str


# Chat model used by every node in this module; temperature 0 for
# deterministic, reproducible replies.
model = ChatZhipuAI(
    model='glm-4',
    temperature=0
)


# Alternative OpenAI-compatible backend (key redacted — never commit real keys):
# model = ChatOpenAI(base_url="https://api.openai-hk.com/v1",
#                    api_key=os.environ["OPENAI_HK_API_KEY"],
#                    model="gpt-4o-mini",
#                    temperature=0)

def get_role_name(role_type: str) -> str:
    """Map a message ``type`` string to a display name for the transcript.

    Unknown types are returned unchanged.
    """
    if role_type == 'ai':
        # The AI speaks under the configured persona's name.
        return ROLE_INFO.get("name")
    fixed_names = {'human': "用户", 'tool': "工具"}
    return fixed_names.get(role_type, role_type)


class StageSopDone(TypedDict):
    """Structured-output schema: whether the current stage's SOP is finished."""
    # Literal[True, False] is exactly ``bool``; use the plain type.
    done: bool


def stage_analyze(state: State):
    """Decide which conversation stage the dialogue is currently in.

    Returns a partial state update with ``current_conversation_stage_id``.
    On the very first turn (no stage id in the state yet) the conversation
    always starts at stage "1"; afterwards an LLM chain picks the stage
    from the summary and transcript.
    """
    print(f"Conversation Stage ID before analysis: {state.get('current_conversation_stage_id', '1')}")
    print("Conversation history:")
    print(state["messages"])

    # No stage id yet -> the conversation just started; begin at stage 1.
    if "current_conversation_stage_id" not in state:
        return {"current_conversation_stage_id": "1"}

    stage_analyzer_chain = StageAnalyzerChain.from_llm(model)
    stage_analyzer_output = stage_analyzer_chain.invoke(
        input={
            "summary": state.get("summary", "暂无"),
            "conversation_history": history_context(state['messages']),
            "conversation_stage_id": state.get('current_conversation_stage_id', ''),
            # NOTE(review): assumes every stage key has an entry in
            # CONVERSATION_STAGES_CORRESPONDING_PROMPT; a missing key would
            # make .get() return None and break the concatenation — verify
            # against robot_config.
            "conversation_stages": "\n".join(
                [
                    "阶段" + str(key) + ": " + str(value) + "\n" +
                    "阶段" + str(key) + "的SOP：\n" + CONVERSATION_STAGES_CORRESPONDING_PROMPT.get(str(key)) + "\n"
                    for key, value in CONVERSATION_STAGES.items()
                ]
            ),
        },
        return_only_outputs=False,
    )
    print("Stage analyzer output")
    print(stage_analyzer_output.get("text"))

    # Pull the first number out of the LLM's free-text answer. If the model
    # produced no digits at all, stay in the current stage instead of
    # raising IndexError on an empty findall() result.
    stage_numbers = re.findall(r"\d+", stage_analyzer_output.get("text"))
    if not stage_numbers:
        return {"current_conversation_stage_id": state["current_conversation_stage_id"]}
    return {"current_conversation_stage_id": stage_numbers[0]}


def format_stage_done_prompt(state: State, stage_done_inception_prompt: str):
    """Fill the stage-completion prompt template from the current state."""
    stage_id = state["current_conversation_stage_id"]
    return stage_done_inception_prompt.format(
        summary=state.get("summary", "暂无"),
        conversation_history=history_context(state['messages']),
        current_stage=CONVERSATION_STAGES.get(stage_id),
        stage_sop=CONVERSATION_STAGES_CORRESPONDING_PROMPT.get(stage_id),
    )


# Stage execution node.
def stage_execute(state: State):
    """Run the LLM for the current stage, binding that stage's tools if any.

    Returns a partial state update appending the model's response message.
    """
    stage_tools = CONVERSATION_STAGES_CORRESPONDING_TOOLS.get(state.get('current_conversation_stage_id', '1'))
    # A missing or empty tool list means this stage is plain chat.
    state_model = model if not stage_tools else model.bind_tools(stage_tools)
    prompt = format_stage_execute_prompt(state, CUSTOMER_AGENT_INCEPTION_PROMPT)
    response = state_model.invoke(prompt)
    return {"messages": response}


def format_stage_execute_prompt(state: State, sales_agent_inception_prompt: str):
    """Fill the agent inception prompt with role info and the current state."""
    stage_id = state.get("current_conversation_stage_id", "1")
    template_args = {
        "name": ROLE_INFO["name"],
        "role": ROLE_INFO["role"],
        "company_name": ROLE_INFO["company_name"],
        "company_business": ROLE_INFO["company_business"],
        "company_values": ROLE_INFO["company_values"],
        "core_objective": CONVERSATION_CORE_OBJECTIVE,
        "user_id": state.get("current_user_id", ""),
        "current_stage": CONVERSATION_STAGES[stage_id],
        "stage_sop": CONVERSATION_STAGES_CORRESPONDING_PROMPT[stage_id],
        "summary": state.get("summary", "暂无"),
        "conversation_history": history_context(state['messages']),
    }
    return sales_agent_inception_prompt.format(**template_args)


def should_continue(state: State):
    """Route after stage_execute: to the current stage's tool node, or END.

    Returns "tool_node<stage id>" when the newest AI message requested tool
    calls, otherwise "end".
    """
    newest = state["messages"][-1]
    if newest.tool_calls:
        return "tool_node" + state.get('current_conversation_stage_id', '1')
    return "end"


# ---- Stage sub-graph: analyze stage -> execute -> (tools -> execute)* ----
sub_work_flow = StateGraph(State)
sub_work_flow.add_node("stage_analyze", stage_analyze)
sub_work_flow.add_node("stage_execute", stage_execute)
# One ToolNode per stage; should_continue routes to "tool_node<stage id>".
path_map = {"end": END}
for stage, tools in CONVERSATION_STAGES_CORRESPONDING_TOOLS.items():
    sub_work_flow.add_node("tool_node" + stage, ToolNode(tools))
    path_map["tool_node" + stage] = "tool_node" + stage

sub_work_flow.add_edge(START, "stage_analyze")
sub_work_flow.add_edge("stage_analyze", "stage_execute")
sub_work_flow.add_conditional_edges("stage_execute",
                                    should_continue,
                                    path_map)
# After any tool runs, control returns to stage_execute for another LLM turn.
for stage, tools in CONVERSATION_STAGES_CORRESPONDING_TOOLS.items():
    sub_work_flow.add_edge("tool_node" + stage, "stage_execute")

sub_graph = sub_work_flow.compile(checkpointer=memory)


class Intention(TypedDict):
    """Structured-output schema for the intent-recognition call."""
    # One of the supported intent labels: surrender / other (non-surrender)
    # insurance business / end session / greeting.
    intention: Literal["退保", "非退保其他保险业务", "结束会话", "打招呼"]


# Prompt template for intent recognition; {summary}, {context} and
# {user_message} are filled by format_prompt(). The template text is model
# input at runtime and therefore stays in Chinese, byte-for-byte.
intention_recognition_prompt = """你是一个保险客户专员，能够结合用户的历史消息，识别出最新一条用户消息的核心意图。
历史消息总结:
{summary}

消息上下文
{context}

用户最新的消息
{user_message}
"""


def intention_recognition(state: State):
    """Classify the newest user message into one of the intent labels.

    Falls back to "其他" when the structured-output call yields nothing.
    """
    structured_model = model.with_structured_output(Intention)
    result = structured_model.invoke(format_prompt(state, intention_recognition_prompt))
    intent = result["intention"] if result is not None else "其他"
    return {"intention": intent}


def history_context(messages: list[AnyMessage]) -> str:
    """Render the message list as a plain-text transcript.

    An AI message carrying tool calls is merged with the message that
    follows it (the tool's result) into a single "工具：…" line; every other
    message becomes "<role name>:<content>". One line per entry.
    """
    lines = []
    pending_tool_line = ''
    awaiting_tool_result = False
    for message in messages:
        if message.type == 'ai' and message.tool_calls:
            # Remember the call; the next message holds its result.
            awaiting_tool_result = True
            tool_name = message.tool_calls[0].get("name")
            description = tool_node.tools_by_name[tool_name].description
            pending_tool_line = "工具：调用了工具，工具名称是" + tool_name + "，工具的描述是" + description
            continue
        if awaiting_tool_result:
            awaiting_tool_result = False
            lines.append(pending_tool_line + "，工具返回的结果是" + message.content + "\n")
            pending_tool_line = ''
        else:
            lines.append(get_role_name(message.type) + ":" + message.content + "\n")
    return ''.join(lines)


def format_prompt(state: State, prompt_template: str) -> str:
    """Fill a prompt template with the summary, transcript and newest message.

    Uses negative indexing instead of ``messages[len(messages) - 1]``.
    """
    newest = state["messages"][-1]
    return prompt_template.format(
        summary=state.get("summary", "暂无"),
        context=history_context(state['messages']),
        # The newest message is rendered separately so the model focuses on it.
        user_message=get_role_name(newest.type) + ":" + newest.content,
    )


def intention_and_summary_condition(state: State):
    """Fan-out router after intent recognition.

    Returns the node name for the recognized intent, plus
    "summarize_conversation" when the history has grown past six messages.
    """
    intent_routes = {
        '退保': "surrender",
        '非退保其他保险业务': "non_surrender",
        '其他': "salutation",
        '打招呼': "salutation",
    }
    destinations = [intent_routes.get(state["intention"], "end_session")]
    # Long histories additionally trigger summarization/pruning.
    if len(state["messages"]) > 6:
        destinations.append("summarize_conversation")
    return destinations


def summarize_conversation(state: State):
    """Summarize the conversation and prune all but the two newest messages.

    Merges any existing summary with the new transcript, asks the model for
    a fresh summary, and emits RemoveMessage entries so only the two most
    recent messages remain in the state.
    """
    summary = state.get("summary", "")
    context = history_context(state['messages'])
    if summary:
        summary_message = (
            f"""下面是之前历史会话总结的信息: 
{summary}
            
下面是后续新增的会话消息：
{context}
            
请根据上述两部分内容，一起进行总结，总结过程中只能参考上述两部分的信息，并且将关键信息进行总结，也要避免丢失信息。
**重要：务必将调用的工具、工具描述、工具结果在总结中有所展示。**"""
        )
    else:
        # Bug fix: this branch has only ONE section, so the prompt must not
        # say "两部分"; also close the dangling "**" markdown emphasis.
        summary_message = f"""下面是需要总结的会话消息：
{context}
总结过程中只能参考上述的信息，并且将关键信息进行总结，也要避免丢失信息。
**重要：务必将调用的工具、工具描述、工具结果在总结中有所展示。**"""
    messages = [HumanMessage(content=summary_message)]
    response = model.invoke(messages)
    # Keep only the two newest messages; RemoveMessage deletes the rest.
    return {"summary": response.content, "messages": [RemoveMessage(id=m.id) for m in state["messages"][:-2]]}


def non_surrender(state: State):
    """Canned reply: hand non-surrender insurance business to a human agent."""
    reply = ("ai", "好的，我帮你转人工客服进行处理")
    return {"messages": [reply]}


def salutation(state: State):
    """Canned reply: greet the user and introduce the assistant."""
    reply = ("ai", "你好，我是水滴保险客服小依，有什么可以帮助到您的")
    return {"messages": [reply]}


def end_session(state: State):
    """Canned reply: close out the session politely."""
    reply = ("ai", "好的，那我就先完结此次会话了，祝您生活愉快")
    return {"messages": [reply]}


def surrender(state: State):
    """Entry node for the surrender branch; forwards the user id downstream."""
    user_id = state["current_user_id"]
    return {"current_user_id": user_id}


# ---- Main graph: recognize intent, fan out to a branch (and optionally
# summarization); the surrender branch delegates to the stage sub-graph. ----
work_flow = StateGraph(State)
work_flow.add_node("intention_recognition", intention_recognition)
work_flow.add_node("non_surrender", non_surrender)
work_flow.add_node("end_session", end_session)
work_flow.add_node("salutation", salutation)
work_flow.add_node("surrender", surrender)
work_flow.add_node("summarize_conversation", summarize_conversation)
# The compiled stage sub-graph is mounted as a single node.
work_flow.add_node("agent_executor", sub_graph)

work_flow.add_edge(START, "intention_recognition")
# intention_and_summary_condition may return multiple destinations
# (intent branch plus "summarize_conversation"), which run in parallel.
work_flow.add_conditional_edges("intention_recognition",
                                intention_and_summary_condition,
                                {"surrender": "surrender",
                                 "non_surrender": "non_surrender",
                                 "salutation": "salutation",
                                 "end_session": "end_session",
                                 "summarize_conversation": "summarize_conversation"})
work_flow.add_edge("surrender", "agent_executor")
work_flow.add_edge("summarize_conversation", END)
work_flow.add_edge("non_surrender", END)
work_flow.add_edge("end_session", END)
work_flow.add_edge("salutation", END)
work_flow.add_edge("agent_executor", END)

graph = work_flow.compile(checkpointer=memory)


# def print_stream(stream):
#     for s in stream:
#         message = s["messages"][-1]
#         if message.type == 'ai':
#             print("AI：" + message.content)
#         # if isinstance(message, tuple):
#         #     print(message)
#         # else:
#         #     message.pretty_print()
#
#
# config = {"configurable": {"thread_id": "1", "recursion_limit": 500}}
# while True:
#     context = input("等待用户输入：")
#     print_stream(
#         graph.stream({"messages": [("user", context)], "current_user_id": "1234567890"}, config, stream_mode="values"))
