import re
import os
import numpy as np
import hashlib
from langchain_core.documents import Document
from langchain_community.vectorstores import FAISS
from langchain_ollama import OllamaEmbeddings, ChatOllama
from langchain_community.docstore.in_memory import InMemoryDocstore
from langchain.chains import RetrievalQA
from langchain_core.prompts import ChatPromptTemplate
import faiss

# Name of the local Ollama model, used both for embeddings and for chat
# generation below (must already be pulled into the local Ollama instance).
MODEL_NAME = "llama3:8b"

# ===============================
# 1️⃣ Document loading
# ===============================
def load_documents(data_dir="../docs"):
    """Load every ``.txt`` file under *data_dir* as a LangChain Document.

    Args:
        data_dir: Directory scanned (non-recursively) for ``.txt`` files.

    Returns:
        list[Document]: One Document per file; ``metadata["source"]`` is the
        file name with its trailing ``.txt`` extension removed.
    """
    documents = []
    # sorted() makes load order deterministic — os.listdir order is
    # arbitrary, which would make chunk indices / FAISS row ids differ
    # between runs on the same corpus.
    txt_files = sorted(f for f in os.listdir(data_dir) if f.endswith(".txt"))
    for file_name in txt_files:
        file_path = os.path.join(data_dir, file_name)
        with open(file_path, "r", encoding="utf-8") as f:
            content = f.read()
        documents.append(
            Document(
                page_content=content,
                # removesuffix (not replace) so an interior ".txt" in the
                # name, e.g. "notes.txt.backup.txt", is left intact.
                metadata={"source": file_name.removesuffix(".txt")}
            )
        )
    return documents

# ===============================
# 2️⃣ Chinese paragraph chunking
# ===============================
def chinese_paragraph_chunker(text, chunk_size=250, chunk_overlap=100):
    """Split Chinese text into sentence-aligned chunks with tail overlap.

    Paragraphs (newline-separated) are split on Chinese sentence-ending
    punctuation (。！？); sentences are packed greedily into chunks of at
    most ``chunk_size`` characters. When a chunk is emitted, its last
    ``chunk_overlap`` characters are carried into the next chunk as context.

    Args:
        text: Raw text to split.
        chunk_size: Maximum chunk length in characters.
        chunk_overlap: Trailing characters carried between adjacent chunks.

    Returns:
        list[str]: Chunks, each at most ``chunk_size`` characters long.
    """
    # Clamp overlap below chunk_size so the hard-split loop always makes
    # progress (overlap >= chunk_size would loop forever).
    overlap = max(0, min(chunk_overlap, chunk_size - 1))

    def _tail(s):
        # BUG FIX: s[-0:] returns the WHOLE string, so overlap == 0 must be
        # special-cased to carry no context (the old code duplicated chunks).
        return s[-overlap:] if overlap > 0 else ""

    paragraphs = [p.strip() for p in text.split('\n') if p.strip()]
    chunks = []
    buffer = ""

    for para in paragraphs:
        # The capture group keeps each punctuation mark as its own token,
        # so it is re-appended to the buffer right after its sentence.
        sentences = re.split(r'(。|！|？|\n)', para)
        sentences = [s for s in sentences if s.strip()]
        for s in sentences:
            if len(buffer) + len(s) <= chunk_size:
                buffer += s
                continue
            if buffer:
                chunks.append(buffer)
            buffer = _tail(buffer) + s
            # BUG FIX: a single sentence longer than chunk_size previously
            # produced an oversized chunk — hard-split it here instead.
            while len(buffer) > chunk_size:
                chunks.append(buffer[:chunk_size])
                buffer = _tail(buffer[:chunk_size]) + buffer[chunk_size:]

    if buffer:
        chunks.append(buffer)

    return chunks

# ===============================
# 3️⃣ Main pipeline
# ===============================
if __name__ == "__main__":
    # ---- Load the source documents ----
    documents = load_documents()
    print(f"加载文档数量：{len(documents)}")

    # ---- Chunk each document, dropping duplicate chunks via SHA-256 ----
    chunked_documents = []
    digests_seen = set()
    for source_doc in documents:
        pieces = chinese_paragraph_chunker(
            source_doc.page_content, chunk_size=250, chunk_overlap=100
        )
        for idx, piece in enumerate(pieces):
            digest = hashlib.sha256(piece.encode("utf-8")).hexdigest()
            if digest not in digests_seen:
                digests_seen.add(digest)
                chunked_documents.append(
                    Document(
                        page_content=piece,
                        metadata={**source_doc.metadata, "chunk_index": idx},
                    )
                )
    print(f"切分后有效 chunk 数量（去重后）：{len(chunked_documents)}")

    # ===============================
    # 4️⃣ Ollama Embeddings + FAISS
    # ===============================
    embedding_model = OllamaEmbeddings(model=MODEL_NAME)
    # BUG FIX: embed_documents() is the document-side API and embeds the whole
    # batch in one call; the old per-chunk embed_query() used the query-side
    # embedding path and issued one request per chunk.
    vectors = embedding_model.embed_documents(
        [doc.page_content for doc in chunked_documents]
    )
    vectors_np = np.array(vectors, dtype="float32")  # FAISS requires float32
    dim = vectors_np.shape[1]

    # Exact (brute-force) L2 index — fine for a corpus of this size.
    index = faiss.IndexFlatL2(dim)
    index.add(vectors_np)

    # Map FAISS row i -> docstore id str(i) -> the chunk Document.
    docstore = InMemoryDocstore({str(i): doc for i, doc in enumerate(chunked_documents)})

    vector_store = FAISS(
        index=index,
        embedding_function=embedding_model,  # pass the Embeddings object itself (avoids the deprecation warning)
        docstore=docstore,
        index_to_docstore_id={i: str(i) for i in range(len(chunked_documents))}
    )
    print("FAISS 向量库创建完成。")

    # ===============================
    # 5️⃣ Prompt & LLM
    # ===============================
    llm = ChatOllama(model=MODEL_NAME)

    prompt_template = """
    请先根据你已有的知识理解回答用户的问题，仅在必要时参考以下检索到的文档内容。  
    回答要求：
    - 用简体中文
    - 尽量准确完整
    - 如果文档信息不足，可以用你自己的理解补充

    文档内容：
    {context}

    用户问题：
    {question}
    """
    prompt = ChatPromptTemplate.from_template(prompt_template)

    # ===============================
    # 6️⃣ RAG question-answering chain
    # ===============================
    # Top-5 similarity search over the FAISS store built above.
    retriever = vector_store.as_retriever(
        search_type="similarity",
        search_kwargs={"k": 5},
    )

    # "stuff" concatenates all retrieved chunks into {context}; the custom
    # prompt above is injected through chain_type_kwargs.
    qa_chain = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=retriever,
        return_source_documents=True,
        chain_type_kwargs={"prompt": prompt},
    )

    # ===============================
    # 7️⃣ Smoke-test the RAG chain
    # ===============================
    question = "单车乱停乱放造成的城市交通问题有哪些？"
    result = qa_chain.invoke({"query": question})

    print("\n=== 模型回答 ===")
    print(result["result"])

    print("\n=== 检索到的源文档 ===")
    for src in result["source_documents"]:
        source_name = src.metadata.get("source", "未知")
        chunk_idx = src.metadata.get("chunk_index", "N/A")
        print(f"来源: {source_name} | chunk_index: {chunk_idx}")
        print(src.page_content[:300], "...\n")