import os
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain.chains import ConversationalRetrievalChain
from langchain_community.llms import Tongyi
from langchain_community.embeddings.dashscope import DashScopeEmbeddings
from langchain.prompts import PromptTemplate

# --- Step 1: read the FAQ knowledge base from disk ---
FAQ_PATH = "data/faq.txt"
with open(FAQ_PATH, "r", encoding="utf-8") as faq_file:
    faq_text = faq_file.read()

# --- Step 2: chunk the text with RecursiveCharacterTextSplitter ---
# FAQ entries are separated by blank lines, so "\n\n" is the only
# separator; chunk_size caps a chunk at ~300 characters, no overlap.
splitter = RecursiveCharacterTextSplitter(
    separators=["\n\n"],
    chunk_size=300,
    chunk_overlap=0,
)
faq_docs = splitter.create_documents([faq_text])

# --- Step 3: embed with Tongyi text-embedding-v1, index in FAISS ---
embeddings = DashScopeEmbeddings(model="text-embedding-v1")
vectorstore = FAISS.from_documents(faq_docs, embeddings)

# --- Step 4: assemble the conversational RAG chain ---
# Prompt for the answer-synthesis step: ground the reply in the retrieved
# FAQ chunks, with a fixed fallback message when no answer is found.
# (input variables `context` and `question` are inferred from the template)
answer_prompt = PromptTemplate.from_template(
    """
根据faq.txt的内容回答问题。如果找不到答案，回复 '我不知道，请联系人工客服'。

FAQ内容:
{context}

问题: {question}
答案:
"""
)

# Retrieve the 3 most similar chunks per question.
retriever = vectorstore.as_retriever(search_kwargs={"k": 3})

# temperature=0 for deterministic, faithful answers.
llm = Tongyi(model="qwen-plus", temperature=0)
qa_chain = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=retriever,
    return_source_documents=False,
    combine_docs_chain_kwargs={"prompt": answer_prompt},
)

print("欢迎使用RAG问答系统，输入问题开始，多轮对话，输入 quit 退出。")

# (question, answer) pairs fed back into the chain so that follow-up
# questions can be condensed into standalone queries.
chat_history = []
while True:
    # Fix: Ctrl-D (EOF) / Ctrl-C previously raised and dumped a traceback;
    # exit the loop cleanly instead.
    try:
        query = input("\n你：")
    except (EOFError, KeyboardInterrupt):
        print("\n再见！")
        break

    stripped = query.strip()
    if stripped.lower() == "quit":
        print("再见！")
        break
    if not stripped:
        # Fix: skip blank input instead of spending an embedding + LLM call.
        continue

    # Debug aid: show the chunks retrieved for the *raw* query.
    # NOTE(review): the chain retrieves with a history-condensed question,
    # so these documents may differ from what the chain actually uses.
    # Renamed from `docs` to stop shadowing the module-level chunk list.
    retrieved_docs = retriever.invoke(query)
    print("\n[调试] 检索到的文档：")
    for i, doc in enumerate(retrieved_docs, 1):
        print(f"--- 文档 {i} ---")
        print(doc.page_content)
        print("--------------")

    print("\n[调试] 结束：")
    print("--------------------------------")

    # Run the full RAG chain and record the turn for multi-turn context.
    result = qa_chain.invoke({"question": query, "chat_history": chat_history})
    print("AI：", result["answer"])
    chat_history.append((query, result["answer"]))