# pip install mem0ai -i https://pypi.tuna.tsinghua.edu.cn/simple/

import os
from typing import TypedDict, Annotated
from langgraph.graph import StateGraph, START, END
from langgraph.prebuilt import ToolNode
from mem0 import Memory
from langchain_core.messages import HumanMessage, AIMessage
from langchain.chat_models import init_chat_model

# API-key setup: read DEEPSEEK_API_KEY from the environment, falling back to a
# placeholder; the value is written back so downstream libraries that read the
# environment variable see the same key.
api_key = os.getenv("DEEPSEEK_API_KEY", "sk-……") # replace with your DeepSeek API key
os.environ["DEEPSEEK_API_KEY"] = api_key

# Mem0 configuration.
# NOTE(review): mem0 expects per-component settings nested under a "config"
# key ({"provider": ..., "config": {...}}) — the original had llm/embedder
# flat, inconsistent with the vector_store entry below.
config = {
    "llm": {
        "provider": "deepseek",
        "config": {
            "model": "deepseek-chat",
            "temperature": 0.2,
            "max_tokens": 2000,
            "top_p": 1.0,
        },
    },
    "embedder": {
        "provider": "ollama",
        "config": {
            "model": "mxbai-embed-large",
        },
    },
    "vector_store": {
        "provider": "qdrant",
        "config": {
            "collection_name": "mem0_new",
            # mxbai-embed-large produces 1024-dimensional embeddings (was 768,
            # which would make Qdrant reject the vectors) — confirm against the
            # Ollama model in use.
            "embedding_model_dims": 1024,
            "host": "localhost",
            "port": 6333,
        },
    },
    # Recreate the vector collection on startup (drops previously stored memories).
    "reset_vector_store": True,
}

# Initialize the Mem0 memory store from the configuration above.
m = Memory.from_config(config)

# Chat model used by the graph's LLM node; init_chat_model infers the
# "deepseek" provider from the model-name prefix.
llm = init_chat_model(
    "deepseek-chat",  # DeepSeek chat model
    api_key=api_key
)

# Shared state passed between the graph's nodes.
class State(TypedDict):
    # Conversation history as {"role": ..., "content": ...} dicts.
    messages: list
    # Identifier used to scope memories in Mem0.
    user_id: str
    # Memories retrieved for the latest user message (Mem0 search "results").
    memories: list

# Memory-retrieval node.
def retrieve_memories(state: State):
    """Look up Mem0 memories relevant to the latest user message.

    Uses the last message's content as the search query, scoped to the
    state's user_id (default "alice"), and returns the hits under the
    "memories" state key.
    """
    query = state["messages"][-1]["content"]
    user_id = state.get("user_id", "alice")
    memories = m.search(query=query, user_id=user_id)
    # .get guards against a search response without a "results" key,
    # which would previously have raised KeyError.
    return {"memories": memories.get("results", [])}

# LLM node.
def llm_node(state: State):
    """Answer the latest user message with retrieved memories as context.

    Builds a system prompt embedding the user's memories, invokes the chat
    model, and appends the reply to the message history as a plain dict.
    """
    # Bullet list of memory texts. NOTE: the loop variable is `mem`, not `m`,
    # to avoid shadowing the module-level Mem0 instance `m`.
    memories_str = "\n".join(f"- {mem['memory']}" for mem in state["memories"])
    system_prompt = f"你是一个有记忆能力的助手。以下是用户的相关记忆：\n{memories_str}\n请基于这些记忆回答问题。"

    # Prepend the system prompt to the running conversation.
    messages = [{"role": "system", "content": system_prompt}]
    messages.extend(state["messages"])

    # Call the chat model.
    response = llm.invoke(messages)

    # Store the reply as a dict so every entry in messages has a uniform shape.
    new_message = {"role": "assistant", "content": response.content}
    return {"messages": state["messages"] + [new_message]}

# Memory-persistence node.
def store_memories(state: State):
    """Persist the full conversation to Mem0 for the current user.

    Returns an empty dict: this node only has the side effect of writing
    to the memory store and updates no state keys.
    """
    uid = state.get("user_id", "alice")
    # Every entry in state["messages"] is a plain dict at this point.
    m.add(state["messages"], user_id=uid)
    return {}

# Assemble the linear workflow: retrieve memories -> answer -> persist memories.
builder = StateGraph(State)

for node_name, node_fn in [
    ("retrieve_memories", retrieve_memories),
    ("llm", llm_node),
    ("store_memories", store_memories),
]:
    builder.add_node(node_name, node_fn)

for src, dst in [
    (START, "retrieve_memories"),
    ("retrieve_memories", "llm"),
    ("llm", "store_memories"),
    ("store_memories", END),
]:
    builder.add_edge(src, dst)

# Compile into a runnable graph.
graph = builder.compile()

# Run the graph once for user "alice" with a sample query.
initial_state = {
    "messages": [{"role": "user", "content": "我最喜欢什么电影？"}],
    "user_id": "alice",
    "memories": [],
}
result = graph.invoke(initial_state)
print(result)
