from langchain.text_splitter import RecursiveCharacterTextSplitter
import faiss
import numpy as np
import pickle
import os

from langchain_community.embeddings import DashScopeEmbeddings
from langchain_huggingface import HuggingFaceEmbeddings


# --------------------------
# 1. 读取本地文本文件
# --------------------------
def load_paper_text(file_path):
    """Read a UTF-8 text file and return its entire content.

    On a missing file or any other read error, print a diagnostic
    message and return None instead of raising.
    """
    try:
        with open(file_path, "r", encoding="utf-8") as fh:
            content = fh.read()
    except FileNotFoundError:
        print(f"错误：未找到文件 {file_path}")
        return None
    except Exception as exc:
        print(f"读取文件失败：{exc}")
        return None
    return content


# --------------------------
# 2. 文本语义分割
# --------------------------
def split_text(content):
    """Split raw text into overlapping chunks for embedding.

    Uses LangChain's recursive character splitter with a 2000-char
    chunk size and a 200-char overlap so adjacent chunks share context.

    Returns:
        list[str]: the resulting text chunks.
    """
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=2000,
        chunk_overlap=200,
        length_function=len,
    )
    pieces = splitter.split_text(content)
    print(f"文本分割完成，共生成 {len(pieces)} 个文本块")
    return pieces


# --------------------------
# 3. 初始化嵌入模型（生成向量）
# --------------------------
def init_embedding_model():
    """Create the DashScope embedding client.

    Reads the API key from the DASHSCOPE_API_KEY environment variable.

    Returns:
        DashScopeEmbeddings: client configured for the text-embedding-v1 model.

    Raises:
        ValueError: if DASHSCOPE_API_KEY is unset/empty, so the problem is
            reported immediately instead of surfacing later as an opaque
            API authentication error.
    """
    api_key = os.getenv("DASHSCOPE_API_KEY")
    if not api_key:
        raise ValueError("环境变量 DASHSCOPE_API_KEY 未设置，无法初始化嵌入模型")
    return DashScopeEmbeddings(
        dashscope_api_key=api_key,
        model="text-embedding-v1",
    )


# --------------------------
# 4. 使用FAISS存储向量
# --------------------------
def store_in_faiss(chunks, embeddings,
                   index_path="faiss_index.index", chunks_path="chunks.pkl"):
    """Embed text chunks and persist them in a flat L2 FAISS index.

    Args:
        chunks: non-empty list of text chunks to embed.
        embeddings: object exposing embed_documents(list[str]) -> list[list[float]].
        index_path: file the FAISS index is written to (default unchanged).
        chunks_path: file the pickled chunk list is written to (default unchanged).

    Returns:
        tuple: (index, chunks) — the in-memory FAISS index and the input chunks.

    Raises:
        ValueError: if `chunks` is empty (the original code crashed with a
            bare IndexError at vectors[0] in that case).
    """
    if not chunks:
        raise ValueError("chunks 为空，无法构建向量索引")

    print("正在生成文本向量...")
    vectors = embeddings.embed_documents(chunks)
    # FAISS requires a contiguous float32 matrix; dimension comes from the data.
    matrix = np.asarray(vectors, dtype=np.float32)
    vector_dim = matrix.shape[1]

    index = faiss.IndexFlatL2(vector_dim)
    index.add(matrix)
    print(f"FAISS索引创建完成，共存储 {index.ntotal} 个向量（维度：{vector_dim}）")

    faiss.write_index(index, index_path)
    with open(chunks_path, "wb") as f:
        pickle.dump(chunks, f)
    print(f"向量索引和文本块已保存到本地（{index_path} 和 {chunks_path}）")

    return index, chunks


# --------------------------
# 5. 从FAISS检索示例
# --------------------------
def search_faiss(query, embeddings, top_k=2,
                 index_path="faiss_index.index", chunks_path="chunks.pkl"):
    """Retrieve the `top_k` chunks most similar to `query` from the saved index.

    Args:
        query: search text, embedded with embeddings.embed_query().
        embeddings: object exposing embed_query(str) -> list[float].
        top_k: maximum number of results to return.
        index_path: FAISS index file written by store_in_faiss (default unchanged).
        chunks_path: pickled chunk list written by store_in_faiss (default unchanged).

    Returns:
        list[dict]: each with "text" (the chunk) and "distance" (L2 distance,
        smaller is more similar). Empty list if the index files are missing.
    """
    if not (os.path.exists(index_path) and os.path.exists(chunks_path)):
        print("未找到FAISS索引文件，请先执行存储步骤")
        return []

    index = faiss.read_index(index_path)
    with open(chunks_path, "rb") as f:
        chunks = pickle.load(f)

    # Embed the query and search; FAISS expects a 2-D float32 batch.
    query_vector = np.array([embeddings.embed_query(query)], dtype=np.float32)
    distances, indices = index.search(query_vector, top_k)

    results = []
    for dist, idx in zip(distances[0], indices[0]):
        # FAISS pads with -1 when the index holds fewer than top_k vectors;
        # the original code let chunks[-1] silently return the last chunk.
        if idx < 0:
            continue
        results.append({
            "text": chunks[idx],
            "distance": float(dist),  # plain float instead of numpy scalar
        })
    return results


# --------------------------
# 主函数
# --------------------------
if __name__ == "__main__":
    # Step 1: read the source document; abort if missing or empty.
    content = load_paper_text("体育运动与身体健康论文.txt")
    if not content:
        exit(1)

    # Step 2: break the document into overlapping chunks.
    doc_chunks = split_text(content)

    # Step 3: set up the embedding client.
    embedder = init_embedding_model()

    # Step 4: vectorize the chunks and persist them in FAISS.
    index, stored_chunks = store_in_faiss(doc_chunks, embedder)

    # Step 5: demo retrieval with a sample query and show a preview
    # (first 300 chars) of each hit.
    query = "体育运动对免疫力的影响"
    hits = search_faiss(query, embedder, top_k=2)
    print("\nFAISS检索结果：")
    for rank, hit in enumerate(hits, 1):
        print(f"\n结果{rank}（距离：{hit['distance']:.4f}）：")
        print(hit['text'][:300] + "...")