from pprint import pprint
from typing import Literal, TypedDict

from langchain_community.vectorstores import Chroma
from langchain_openai import ChatOpenAI
from langchain_core.tools import tool, create_retriever_tool
from langgraph.graph import StateGraph, START, END,MessagesState
from langgraph.graph.message import add_messages
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from langgraph.prebuilt import ToolNode
from langgraph.checkpoint.memory import MemorySaver

from config.embedding_config import get_openai_embeddings_xin
from config.model_config import get_chat_openai_zhipu, get_chat_openai_zhipu_flash_250414

# 1. Environment configuration
PERSIST_DIR = "./chroma_db"  # Chroma persistence directory

# 3. Initialize the embedding model
embeddings = get_openai_embeddings_xin()

# Load the existing vector store from disk
vectorstore = Chroma(
    persist_directory=PERSIST_DIR,
    embedding_function=embeddings
)

# Convert the vector store into a retriever
retriever = vectorstore.as_retriever(
    # Use "similarity_score_threshold" retrieval: only documents whose
    # similarity score passes the threshold below are returned
    search_type="similarity_score_threshold",
    # k=3 caps results at the top 3; score_threshold=0.1 drops documents
    # scoring below 0.1. (The original comment claimed k=10 / threshold
    # 0.15, which did not match these actual values.)
    search_kwargs={
        "k": 3,
        "score_threshold": 0.1,
    }
)
# print(retriever.invoke("什么是四大名著"))

@tool
def search(query: str) -> list[str]:
    """当需要获取有关四大名著的相关信息时，使用此工具搜索知识库并返回相关信息"""
    # NOTE: the docstring above doubles as the tool description sent to the
    # LLM by @tool, so it is kept verbatim. It reads: "Use this tool to
    # search the knowledge base when information about the Four Great
    # Classical Novels is needed."
    # Return only the text content of each retrieved document.
    return [doc.page_content for doc in retriever.invoke(query)]
# Tools available to the agent
tools = [search]


# Create the graph node that executes tool calls
tool_nodes = ToolNode(tools)


# Chat model with the tools bound so it can emit tool calls
# (consult the official docs for exact bind_tools behavior)
model = get_chat_openai_zhipu_flash_250414().bind_tools(tools)

def should_continue(state: MessagesState) -> Literal["tools", END]:
    """Route after the agent node.

    Returns "tools" when the last message carries tool calls (so the tool
    node runs next); otherwise returns END to finish this graph run.
    """
    last_message = state["messages"][-1]
    # Only AI messages carry tool_calls; getattr guards against message
    # types that lack the attribute entirely.
    if getattr(last_message, "tool_calls", None):
        return "tools"
    return END

# 1. Define the function that calls the model
def call_model(state: MessagesState) -> MessagesState:
    """Invoke the chat model on the accumulated conversation.

    Returns a partial state update; MessagesState's add_messages reducer
    appends the response to the existing message list, so only the new
    message is returned here.
    """
    response = model.invoke(state["messages"])
    return {"messages": [response]}

# 2. Define a new state graph initialized with the message state
workflow = StateGraph(MessagesState)

# 3. Define the two graph nodes we will loop between
workflow.add_node("agent", call_model)
workflow.add_node("tools", tool_nodes)

# 4. Define the entry point and the graph edges
workflow.set_entry_point("agent")

# Conditional edge: after "agent", should_continue routes to "tools" or END
workflow.add_conditional_edges(
    "agent",
    should_continue
)

# After the tools run, control returns to the agent
workflow.add_edge("tools", "agent")

# In-memory checkpointer persists state between graph runs
checkpointer = MemorySaver() # could be backed by Redis instead

# 5. Compile
app = workflow.compile(checkpointer=checkpointer)


# Simple REPL; every turn reuses thread_id "42", so the checkpointer keeps
# the conversation history across invocations.
while True:
    try:
        user_input = input("User: ")
    except (EOFError, KeyboardInterrupt):
        # Ctrl-D / Ctrl-C ends the session cleanly instead of a traceback.
        print("Goodbye!")
        break
    if user_input.lower() in ["quit", "exit", "q"]:
        print("Goodbye!")
        break
    if not user_input.strip():
        # Skip blank lines instead of sending an empty message to the model.
        continue
    final_response = app.invoke({"messages": [HumanMessage(content=user_input)]},
                                config={"configurable": {"thread_id": "42"}})
    pprint(final_response)  # debug: dump the full final state
    result = final_response['messages'][-1].content
    print(result)

