from langchain.chains.llm import LLMChain
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate, ChatPromptTemplate, SystemMessagePromptTemplate, MessagesPlaceholder, \
    HumanMessagePromptTemplate
from langchain_core.runnables import RunnableWithMessageHistory
from langchain_neo4j import GraphCypherQAChain
from langgraph.graph.state import CompiledStateGraph
from pydantic import BaseModel

from server.openai_chain_server import ai_server_instance
from server.openai_server import create_server
from tools.BaiduSearch import BaiduSearch
from typing import Annotated, Literal, TypedDict
from IPython.display import Image, display
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage, ToolMessage
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph, add_messages
from langgraph.prebuilt import ToolNode, tools_condition
from langchain_core.tools import create_retriever_tool, Tool

# Module-level LLM server handle; `chatbot` binds tools onto `server_instance.llm`.
server_instance = create_server()

from langchain_core.tools import tool

# system_prompt = """
# You are an intelligent assistant named 'LittleBoss', with the goal of providing accurate and useful information while maintaining friendly and humorous conversations.
# The following is the input provided by the user '{user_id}':
# "{input}"
# You need to provide the following functions:
# If the user's intention is to query information related to herself/himself, please use the 'Neo4j_Personal_Knowledge_Graph' tool to query the knowledge base,
# Else if is searching for basic knowledge or authoritative sources, please use the 'baidu_search' tool to query Baidu,
# Otherwise, call the LLM model to meet the user's needs
# #Constraint
# -Communicate only in the language used by the user.
# -Respect user privacy and do not ask or comment on sensitive personal information unless the user voluntarily shares it.
# -Maintain the coherence of the conversation, refer to the information in the previous text, in order to provide users with a consistent experience.
# #Attention
# -Remember, your goal is to become a friendly friend and knowledge partner to users, helping them solve problems while also enjoying the fun of conversation,Do not deviate from the user's intention
# """

# System prompt (Chinese) for the "小老板" ("Little Boss") assistant.
# Routing rules baked into the prompt text:
#   1. questions about the user themself -> "Neo4j_Personal_Knowledge_Graph" tool
#   2. general/authoritative knowledge    -> "baidu_search" tool
#   3. anything else                      -> answer directly, no tool
# Template variables: {user_id}, {input}. The string is runtime behavior
# (it is sent to the model verbatim) — do not translate or reword it here.
system_prompt = """
你是一个名叫“小老板”的智能助手，目标是在保持友好和幽默对话的同时提供准确和有用的信息。
# 你需要提供以下功能：
1.如果用户的意图是查询与她/他相关的信息，必须使用“Neo4j_Personal_Knowledge_Graph”工具查询知识库；
2.如果用户的意图是搜索常识或权威来源，请使用“baidu_search”工具查询百度；
3.否则不使用工具，而是使用你作为AI智能助手的能力满足用户需求。
# 以下是用户'{user_id}'的意图：
'{input}'
# 约束条件
-仅使用用户使用的语言进行交流。
尊重用户隐私，除非用户自愿分享，否则不要询问或评论敏感的个人信息。
-保持对话的一致性，参考前文中的信息，以便为用户提供一致的体验。
# 注意
-记住，你的目标是成为用户友好的朋友和知识伙伴，帮助他们解决问题，同时享受谈话的乐趣，不要偏离用户的意图。
"""

# Prompt (Chinese) instructing the LLM to emit ONLY a read-only Cypher query,
# restricted to the provided {schema}; fed to GraphCypherQAChain as
# `cypher_prompt`. Template variables: {schema}, {question}. Runtime string —
# keep the text itself unchanged.
cypher_generation_template = """  
任务：  
为Neo4j图数据库生成Cypher查询。  
说明：  
仅使用提供的模式中的关系类型和属性。  
不要使用任何未提供的其他关系类型或属性。  
模式：  
{schema}
注意：
在回答中不要包含任何解释或道歉。  
不要回答任何可能要求你构建除Cypher语句之外的任何文本的问题。  
确保查询中的关系方向是正确的。  
确保正确地为实体和关系设置别名。  
不要运行会向数据库添加或删除内容的任何查询。  
确保将后续所有语句都设置别名为with语句。  
如果需要除法运算，请确保将分母过滤为非零值。  
问题是：
{question}
"""

# Prompt (Chinese) instructing the LLM to phrase the Cypher query result as a
# natural-language answer, trusting non-empty results over internal knowledge;
# fed to GraphCypherQAChain as `qa_prompt`. Template variables: {context},
# {question}. Runtime string — keep the text itself unchanged.
qa_generation_template = """
您是一个助手，根据Neo4j Cypher查询的结果生成人类可读的响应。  
查询结果部分包含基于用户自然语言问题生成的Cypher查询的结果。  
使答案听起来像对问题的回答。  
查询结果：  
{context}
问题：  
{question} 
如果查询结果为空，请说您不知道答案。  
查询结果为空的样子是这样的：[]  
如果查询结果不为空，那么查询结果关于问题提供的答案是相关的、准确的、权威的，您必须使用查询结果提供答案，您绝不能怀疑它或尝试使用内部知识来纠正它。  
如果问题涉及时间持续时间，请假设查询结果以天为单位，除非另有说明。  
如果查询结果中有数据，永远不要说您没有正确的信息。始终使用查询结果中的数据。  
"""

# Prompt objects handed to GraphCypherQAChain in `cypher_tool_handler`:
# one to generate the Cypher query, one to phrase the final answer.
cypher_generation_prompt = PromptTemplate(
    template=cypher_generation_template,
    input_variables=["schema", "question"],
)
qa_generation_prompt = PromptTemplate(
    template=qa_generation_template,
    input_variables=["context", "question"],
)


class State(TypedDict):
    """Shared LangGraph state passed between nodes."""
    # `add_messages` tells LangGraph to append new messages rather than
    # overwrite the list when a node returns a partial state update.
    messages: Annotated[list, add_messages]
    # Set True by `chatbot` when the model calls RequestAssistance;
    # routes the graph to the "human" node (see `select_next_node`).
    ask_human: bool


# Pydantic schema exposed to the LLM via `bind_tools` in `chatbot`.
# NOTE(review): the docstring below is sent to the model as the tool
# description — its wording affects routing behavior, so it is left verbatim.
class RequestAssistance(BaseModel):
    """Escalate the conversation to an expert. Use this if you are unable to assist directly or if the user requires support beyond your permissions.
    To use this function, relay the user's 'request' so the expert can provide the right guidance.
    """
    # The user's request, relayed verbatim so a human expert can act on it.
    request: str



def cypher_tool_handler(state: State):
    """Answer a personal-knowledge question by letting GraphCypherQAChain
    generate and run a Cypher query against Neo4j, then phrase the result.

    NOTE(review): this function is registered as `cypher_tool.func`, so when
    ToolNode invokes the tool, the argument is the tool-call input — the
    `State` annotation may not match what actually arrives; verify against
    how ToolNode calls Tool.func.
    """
    print("Cypher tool started.", state)
    # A fresh chain is built on every call; the same LLM is used both to write
    # the Cypher (cypher_llm) and to phrase the answer (qa_llm).
    response = GraphCypherQAChain.from_llm(
        cypher_llm=ai_server_instance.llm,
        qa_llm=ai_server_instance.llm,
        graph=ai_server_instance.graph,
        verbose=True,
        qa_prompt=qa_generation_prompt,
        cypher_prompt=cypher_generation_prompt,
        validate_cypher=True,  # sanity-check relationship directions in generated Cypher
        top_k=100,  # cap on rows returned from the graph
        allow_dangerous_requests=True  # explicit opt-in required by langchain_neo4j
    ).invoke(state)
    print("response", response)

    # NOTE(review): despite the original comment claiming the result is wrapped
    # in an AIMessage, `response` is the chain's raw output (a dict) appended
    # directly to `messages` — confirm downstream consumers accept this shape.
    return {"messages": [response]}

# Tool wrapper exposing the Neo4j QA chain to the LLM. The Chinese description
# ("if the user wants to query information about themself, be sure to use this
# tool") is runtime text shown to the model — keep it as-is.
cypher_tool = Tool(
    name="Neo4j_Personal_Knowledge_Graph",
    description="如果用户想查询与自己相关的信息，请务必使用此工具",
    func=cypher_tool_handler
)


# Tools the chatbot may call: Baidu web search plus the Neo4j personal-graph QA tool.
baidusearch = BaiduSearch()
tools = [baidusearch, cypher_tool]


# Step 1: Generate an AIMessage that may include a tool-call to be sent.
def chatbot(state: State):
    """LLM node: prompt the model with the latest human message (tools bound),
    and flag human escalation when it calls the RequestAssistance pseudo-tool.

    Returns a partial state update: the model's reply (appended by
    `add_messages`) and the refreshed `ask_human` flag.
    """
    # The most recent HumanMessage carries both the text and the user_id
    # metadata — scan the history once from the end instead of twice.
    # (`user_input` also avoids shadowing the `input` builtin.)
    last_human = next(
        (msg for msg in reversed(state["messages"]) if isinstance(msg, HumanMessage)),
        None,
    )
    user_input = last_human.content if last_human is not None else None
    user_id = (
        last_human.additional_kwargs.get("user_id") if last_human is not None else None
    )
    print(user_input, "------------------", state["messages"], "--------------------", user_id)

    prompt = ChatPromptTemplate.from_messages(
        [
            SystemMessagePromptTemplate(
                prompt=PromptTemplate(
                    input_variables=['user_id', 'input'], template=system_prompt
                )
            ),
            MessagesPlaceholder(variable_name='chat_history', optional=True),
            HumanMessagePromptTemplate(
                prompt=PromptTemplate(
                    input_variables=['input'],
                    template="""{input}"""
                )
            ),
        ]
    )
    # Bind the real tools plus the RequestAssistance schema so the model can
    # choose to hand the conversation to a human instead of answering.
    chain = prompt | server_instance.llm.bind_tools(tools + [RequestAssistance])
    response = chain.invoke({"user_id": user_id, "input": user_input})
    print("response", response)

    # Escalate only when the first tool call targets RequestAssistance.
    ask_human = bool(
        response.tool_calls
        and response.tool_calls[0]["name"] == RequestAssistance.__name__
    )
    return {"messages": [response], "ask_human": ask_human}



def create_response(response: str, ai_message: AIMessage):
    """Wrap `response` in a ToolMessage answering the first tool call
    recorded on `ai_message`."""
    pending_call_id = ai_message.tool_calls[0]["id"]
    return ToolMessage(content=response, tool_call_id=pending_call_id)


def human_node(state: State):
    """Node executed after the human interrupt.

    If the human did not leave a ToolMessage during the interrupt, append a
    placeholder one so the pending tool call is answered and the LLM can
    continue. Always clears the `ask_human` flag.
    """
    latest = state["messages"][-1]
    appended = []
    if not isinstance(latest, ToolMessage):
        # No human input arrived — satisfy the tool call with a stand-in reply.
        appended = [create_response("No response from human.", latest)]
    return {"messages": appended, "ask_human": False}


class GraphServer:
    """Process-wide singleton bundling the compiled LangGraph with the Cypher
    QA chains.

    Fix: the `__initialized` flag was set in `__new__` but never checked, so
    `__init__` re-ran on every construction and silently overwrote the stored
    attributes. It is now consulted so initialization happens exactly once and
    later constructions return the existing instance unchanged.
    """

    _instance = None  # class-level holder for the single instance

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            # Mark as not yet initialized; __init__ flips this exactly once.
            cls._instance.__initialized = False
        return cls._instance

    def __init__(self, graph: CompiledStateGraph, cypher_chain: GraphCypherQAChain, cypher_chain_with_history: RunnableWithMessageHistory):
        # Initialize only on first construction; subsequent calls are no-ops
        # so the singleton's state cannot be silently replaced.
        if self.__initialized:
            return
        self.graph = graph
        self.cypher_chain = cypher_chain
        self.cypher_chain_with_history = cypher_chain_with_history
        self.__initialized = True


def select_next_node(state: State):
    """Routing function for the conditional edge after "chatbot".

    Priority: human escalation -> "human"; a tool result already at the tail
    of the history -> "__end__"; otherwise defer to `tools_condition`.
    """
    if state["ask_human"]:
        return "human"

    latest = state["messages"][-1]
    print("last_message", latest)
    # A trailing ToolMessage means a tool has already produced the answer.
    if isinstance(latest, ToolMessage):
        print("__end__")
        return "__end__"

    # Default LangGraph routing: "tools" if the AI requested a tool call,
    # otherwise end the turn.
    return tools_condition(state)


def create_graph_server():
    """Assemble and compile the chatbot state machine, render its diagram,
    and return it wrapped in the GraphServer singleton."""
    builder = StateGraph(State)

    # Nodes: the LLM driver, the tool executor, and the human-in-the-loop stop.
    builder.add_node("chatbot", chatbot)
    builder.add_node("tools", ToolNode(tools))
    builder.add_node("human", human_node)

    builder.set_entry_point("chatbot")

    # After "chatbot" the route is chosen dynamically; both "tools" and
    # "human" loop back into "chatbot".
    builder.add_conditional_edges(
        "chatbot",
        select_next_node,
        {"human": "human", "tools": "tools", "__end__": "__end__"},
    )
    builder.add_edge("tools", "chatbot")
    builder.add_edge("human", "chatbot")

    # Compile with an in-memory checkpointer and pause before the "human"
    # node so a real person can inject a ToolMessage during the interrupt.
    compiled = builder.compile(
        checkpointer=MemorySaver(),
        interrupt_before=["human"],
    )
    compiled.get_graph().draw_mermaid_png(output_file_path="../graph.png")

    return GraphServer(compiled, None, None)
