from langgraph.graph import StateGraph, END
from langgraph.checkpoint.sqlite import SqliteSaver
from langgraph.types import State
from typing import TypedDict, List

from sentence_transformers import SentenceTransformer
import faiss

# 1. Define the conversation state
class ChatState(TypedDict):
    """State dict shared by all graph nodes."""

    messages: List[str]      # conversation history, newest message last
    retrievals: List[str]    # memory texts retrieved for the latest query

# 2. Initialize the embedding model + vector database
# NOTE(review): loading the model downloads weights on first run — network required.
embedding_model = SentenceTransformer("BAAI/bge-small-en")
dimension = embedding_model.get_sentence_embedding_dimension()
index = faiss.IndexFlatL2(dimension)  # exact (brute-force) L2 search
memory_store = []  # original texts, aligned with the FAISS index by insertion order

def add_to_memory(text: str):
    """Embed *text* and store it in both the FAISS index and the raw-text list.

    The list and the index stay aligned: position i in ``memory_store``
    corresponds to vector id i in ``index``.
    """
    embedding = embedding_model.encode([text])
    memory_store.append(text)
    index.add(embedding)

def search_memory(query: str, top_k: int = 3) -> List[str]:
    """Return up to *top_k* stored texts most similar to *query*.

    Bug fix: FAISS pads its result ids with ``-1`` when the index holds
    fewer than *top_k* vectors.  The previous check ``i < len(memory_store)``
    let ``-1`` through, so ``memory_store[-1]`` was returned spuriously —
    and on an empty store it raised ``IndexError``.  Filtering with
    ``0 <= i`` excludes the padding ids.
    """
    vec = embedding_model.encode([query])
    _, ids = index.search(vec, top_k)
    # -1 marks "no result" padding from FAISS — skip it.
    return [memory_store[i] for i in ids[0] if 0 <= i < len(memory_store)]

# 3. Node definitions
def retrieve_node(state: ChatState) -> ChatState:
    """Look up long-term memory entries related to the latest user message."""
    latest_message = state["messages"][-1]
    return {"retrievals": search_memory(latest_message)}

def llm_node(state: ChatState) -> ChatState:
    """Answer the latest user message using retrieved memory, then persist both.

    The model call is mocked; ``prompt`` shows what a real LLM API
    (OpenAI or a local model) would receive.
    """
    query = state["messages"][-1]
    context = "\n".join(state["retrievals"])

    # Prompt for a real model backend — unused by the mock below.
    prompt = f"用户问题：{query}\n相关记忆：{context}\n请给出回答："

    # Mocked model call.
    answer = f"[回答] {query} (结合记忆: {context})"

    # Persist both sides of the exchange to long-term memory.
    for text in (query, answer):
        add_to_memory(text)

    return {"messages": [*state["messages"], answer]}

# 4. Build the workflow
graph = StateGraph(ChatState)

graph.add_node("retriever", retrieve_node)
graph.add_node("llm", llm_node)

# Linear pipeline: retriever -> llm -> END
graph.set_entry_point("retriever")
graph.add_edge("retriever", "llm")
graph.add_edge("llm", END)

# 5. Attach a checkpointer (short-term, per-thread memory)
# NOTE(review): in recent langgraph-checkpoint-sqlite releases,
# SqliteSaver.from_conn_string() returns a context manager and must be used as
# `with SqliteSaver.from_conn_string(...) as saver:` — confirm the pinned
# version still supports direct assignment like this.
checkpointer = SqliteSaver.from_conn_string(":memory:")
app = graph.compile(checkpointer=checkpointer)

# 6. Smoke-test run
config = {"configurable": {"thread_id": "1"}}
initial_state = {"messages": ["你好，帮我找一下学习Python的经验"]}
final_state = app.invoke(initial_state, config=config)

print("对话历史：", final_state["messages"])