import re
import os
import numpy as np
import hashlib
from langchain_core.documents import Document
from langchain_community.vectorstores import FAISS
from langchain_ollama import OllamaEmbeddings
from langchain_community.docstore.in_memory import InMemoryDocstore
import faiss

# Module-level corpus: populated in place by load_documents() and consumed
# by the indexing script under __main__.
documents = []

# Load documents
def load_documents(data_dir="../docs"):
    """Load every ``.txt`` file in *data_dir* into the module-level ``documents`` list.

    Each file becomes one langchain ``Document`` whose ``source`` metadata is
    the file name without its ``.txt`` extension.

    Args:
        data_dir: Directory scanned for ``.txt`` files. Defaults to the
            original hard-coded path so existing callers are unaffected.
    """
    global documents

    # Sort for a deterministic load order — os.listdir() order is arbitrary
    # and would otherwise make chunk indexing/dedup non-reproducible.
    txt_files = sorted(f for f in os.listdir(data_dir) if f.endswith(".txt"))
    documents = []

    for file_name in txt_files:
        file_path = os.path.join(data_dir, file_name)
        with open(file_path, "r", encoding="utf-8") as f:
            content = f.read()

        # splitext strips only the trailing extension; the previous
        # file_name.replace(".txt", "") removed EVERY ".txt" occurrence,
        # mangling names such as "notes.txt.old.txt".
        doc = Document(
            page_content=content,
            metadata={"source": os.path.splitext(file_name)[0]}
        )
        documents.append(doc)


# Split text into chunks along natural paragraph / sentence boundaries
def chinese_paragraph_chunker(text, chunk_size=250, chunk_overlap=100):
    """Chunk *text* on Chinese sentence boundaries with a sliding overlap.

    The text is split into paragraphs on newlines, each paragraph into
    tokens on 。/！/？ (delimiters are kept as their own tokens), and the
    tokens are packed greedily into chunks of at most ``chunk_size``
    characters. When a chunk overflows, the tail ``chunk_overlap``
    characters of the previous chunk seed the next one for context.
    """
    delimiter = re.compile(r'(。|！|？|\n)')
    pieces = []
    current = ""

    for raw_para in text.split('\n'):
        para = raw_para.strip()
        if not para:
            continue

        # filter(str.strip, ...) drops empty / whitespace-only tokens
        # produced by re.split at delimiter positions.
        for token in filter(str.strip, delimiter.split(para)):
            if len(current) + len(token) <= chunk_size:
                current = current + token
                continue

            # Overflow: flush the current chunk, then start the next one
            # from the overlap tail (or the whole buffer when it is shorter
            # than the requested overlap — mirrors the greedy packing rule).
            if current:
                pieces.append(current)
            if chunk_overlap < len(current):
                current = current[-chunk_overlap:] + token
            else:
                current = current + token

    if current:
        pieces.append(current)

    return pieces


if __name__ == '__main__':
    # Load the raw .txt corpus into the module-level `documents` list.
    load_documents()

    # Chunk every document and drop exact-duplicate chunks.
    chunked_documents = []
    seen_hashes = set()  # SHA-256 digests of chunks already kept

    for doc in documents:
        chunks = chinese_paragraph_chunker(doc.page_content, chunk_size=250, chunk_overlap=100)
        for i, chunk in enumerate(chunks):
            # Hash the chunk text so identical chunks are indexed only once.
            chunk_hash = hashlib.sha256(chunk.encode("utf-8")).hexdigest()
            if chunk_hash in seen_hashes:
                continue  # duplicate chunk — skip
            seen_hashes.add(chunk_hash)

            chunked_documents.append(
                Document(
                    page_content=chunk,
                    metadata={**doc.metadata, "chunk_index": i}
                )
            )

    print(f"切分后有效 chunk 数量（去重后）：{len(chunked_documents)}")

    # Initialize the Ollama embedding backend.
    # NOTE(review): "llama3:8b" is a chat model; a dedicated embedding model
    # (e.g. "nomic-embed-text") usually retrieves better — confirm choice.
    embedding_model = OllamaEmbeddings(model="llama3:8b")

    # Embed all chunks in one batched call. embed_documents() is the
    # corpus-side API (embed_query() is meant for queries and may apply a
    # different prompt prefix on some models), and batching avoids one
    # Ollama round-trip per chunk.
    vectors = embedding_model.embed_documents(
        [doc.page_content for doc in chunked_documents]
    )

    # FAISS expects a contiguous float32 matrix.
    vectors_np = np.array(vectors).astype("float32")
    dim = vectors_np.shape[1]

    # Build a flat (exact, brute-force) L2 index and load all vectors.
    index = faiss.IndexFlatL2(dim)
    index.add(vectors_np)

    # Map stringified vector positions back to their chunk Documents.
    docstore = InMemoryDocstore({str(i): doc for i, doc in enumerate(chunked_documents)})

    # Assemble the LangChain FAISS wrapper around the raw index.
    vector_store = FAISS(
        index=index,
        embedding_function=embedding_model,  # pass the Embeddings object directly to avoid the deprecation warning
        docstore=docstore,
        index_to_docstore_id={i: str(i) for i in range(len(chunked_documents))}
    )

    print(f"已加载 {len(chunked_documents)} 个去重后的中文 chunk 到 FAISS 向量库")

    # Query text and number of neighbours to retrieve.
    query_text = "单车乱停乱放造成的城市交通问题有哪些？"
    top_k = 5

    # Similarity search with scores (lower L2 distance = more similar).
    results = vector_store.similarity_search_with_score(query_text, k=top_k)

    # Print each hit with its provenance metadata.
    for i, (doc, score) in enumerate(results, 1):
        print(f"--- Result {i} ---")
        print("来源:", doc.metadata.get("source", "未知"))
        print("chunk_index:", doc.metadata.get("chunk_index", "N/A"))
        print("相似度分数:", score)
        print("内容预览:", doc.page_content[:300], "...\n")
