# Step 4: Build the vector database (adapted for the new Chroma API; persist() call removed)
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
import chromadb  # 直接使用chromadb底层API
from chromadb.config import Settings
import os


def initialize_tongyi_embeddings(api_key):
    """Create a DashScope (Tongyi Qianwen) embeddings client.

    Args:
        api_key: DashScope API key used to authenticate requests.

    Returns:
        A ``DashScopeEmbeddings`` instance bound to model ``text-embedding-v4``.

    Raises:
        Exception: re-raised after logging if client construction fails.
    """
    try:
        return DashScopeEmbeddings(
            model="text-embedding-v4",
            dashscope_api_key=api_key,
        )
    except Exception as e:
        print(f"❌ 通义千问初始化失败：{str(e)}")
        raise


def load_split_vectorize_pdf(pdf_path, api_key, batch_size=10):
    """Load a PDF, split it into overlapping chunks, and embed every chunk.

    Args:
        pdf_path: Path to the PDF file to ingest.
        api_key: DashScope API key for the embedding service.
        batch_size: Maximum texts per embedding request. Defaults to 10,
            the per-request cap of the DashScope embedding API.

    Returns:
        Tuple ``(split_docs, all_vectors)``: the chunk Documents and one
        embedding vector per chunk, in the same order.

    Raises:
        FileNotFoundError: if ``pdf_path`` does not exist.
        RuntimeError: if any embedding batch fails (original error chained).
    """
    # 1. Load the PDF into one Document per page.
    try:
        loader = PyPDFLoader(pdf_path)
        pages = loader.load()
        print(f"📄 成功加载PDF：{pdf_path}（{len(pages)}页）")
    except FileNotFoundError:
        raise FileNotFoundError(f"❌ 找不到PDF文件：{pdf_path}")

    # 2. Split pages into overlapping chunks so retrieval hits stay focused.
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=500,
        chunk_overlap=100,
        length_function=len
    )
    split_docs = splitter.split_documents(pages)
    print(f"✂️  文档分割完成：{len(split_docs)}个片段")

    # 3. Embed chunks in batches (the API rejects more than 10 texts per call).
    embeddings = initialize_tongyi_embeddings(api_key)
    texts = [doc.page_content for doc in split_docs]
    total = len(texts)
    all_vectors = []

    print(f"📡 开始调用通义千问API（分批次处理，共{total}个片段）...")
    for i in range(0, total, batch_size):
        batch_texts = texts[i:i + batch_size]
        batch_num = i // batch_size + 1
        print(f"   处理批次 {batch_num}：{len(batch_texts)}个片段（{i + 1}-{min(i + batch_size, total)}/{total}）")

        try:
            all_vectors.extend(embeddings.embed_documents(batch_texts))
        except Exception as e:
            # Chain the cause so the original API error is not lost.
            raise RuntimeError(f"❌ 批次{batch_num}处理失败：{str(e)}") from e

    # Bug fix: report the actual vector dimension instead of a hard-coded 1536
    # (text-embedding-v4 does not necessarily return 1536-dim vectors).
    dim = len(all_vectors[0]) if all_vectors else 0
    print(f"✅ 向量化完成：{len(all_vectors)}个{dim}维向量")
    return split_docs, all_vectors


# --------------------------
# Core change: removed the client.persist() call — the new Chroma persists automatically
# --------------------------
def build_chroma_vector_db(split_docs, vectors, db_path="./tongyi_chroma_db"):
    """Build (or rebuild) a persistent Chroma collection from pre-embedded docs.

    Any existing database at ``db_path`` is deleted and recreated from scratch.

    Args:
        split_docs: Chunk Documents (each with ``page_content`` and ``metadata``).
        vectors: Embedding vectors, one per document, in the same order.
        db_path: Directory where Chroma stores its data.

    Returns:
        The populated ``insurance_manual`` chromadb Collection.

    Raises:
        ValueError: if doc/vector counts differ or both inputs are empty.
        RuntimeError: if Chroma construction fails (original error chained).
    """
    if len(split_docs) != len(vectors):
        raise ValueError(f"❌ 数据不匹配：文档{len(split_docs)}个，向量{len(vectors)}个")

    # Robustness: an empty corpus would otherwise crash later on vectors[0].
    if not vectors:
        raise ValueError("❌ 没有可入库的数据：文档/向量列表为空")

    # Remove any stale database so the collection is rebuilt from scratch.
    if os.path.exists(db_path):
        print(f"⚠️  发现旧数据库，将删除重建：{db_path}")
        import shutil
        shutil.rmtree(db_path)

    try:
        # 1. PersistentClient is the current Chroma API for on-disk databases;
        # it replaces Client(Settings(persist_directory=..., is_persistent=True))
        # and persists automatically — no persist() call needed.
        client = chromadb.PersistentClient(path=db_path)

        # 2. Create the collection that will hold the manual's chunks.
        collection = client.create_collection(
            name="insurance_manual",
            metadata={"description": "平安保险用户手册向量集合"}
        )

        # 3. Prepare parallel lists: texts, per-chunk metadata, stable string ids.
        documents = [doc.page_content for doc in split_docs]
        metadatas = [doc.metadata for doc in split_docs]
        ids = [f"doc_{i}" for i in range(len(split_docs))]

        # 4. Insert everything with precomputed embeddings (no re-embedding).
        collection.add(
            embeddings=vectors,
            documents=documents,
            metadatas=metadatas,
            ids=ids
        )

        print(f"✅ Chroma数据库构建完成：{os.path.abspath(db_path)}")
        print(f"   - 存储片段数：{collection.count()} 个")
        print(f"   - 向量维度：{len(vectors[0])} 维")
        return collection

    except Exception as e:
        # Chain the cause so the underlying Chroma error survives.
        raise RuntimeError(f"❌ 数据库构建失败：{str(e)}") from e


def load_existing_chroma_db(db_path="./tongyi_chroma_db"):
    """Open an existing persistent Chroma database and return its collection.

    Args:
        db_path: Directory where the Chroma database was persisted.

    Returns:
        The ``insurance_manual`` chromadb Collection.

    Raises:
        FileNotFoundError: if ``db_path`` does not exist.
        RuntimeError: if the client or collection cannot be opened
            (original error chained).
    """
    if not os.path.exists(db_path):
        raise FileNotFoundError(f"❌ 未找到数据库：{db_path}")

    try:
        # PersistentClient is the current Chroma API for on-disk databases;
        # it replaces Client(Settings(persist_directory=..., is_persistent=True)).
        client = chromadb.PersistentClient(path=db_path)
        collection = client.get_collection(name="insurance_manual")
        print(f"✅ 加载现有数据库：{os.path.abspath(db_path)}")
        print(f"   - 存储片段数：{collection.count()} 个")
        return collection
    except Exception as e:
        # Chain the cause so the underlying Chroma error survives.
        raise RuntimeError(f"❌ 数据库加载失败：{str(e)}") from e


if __name__ == "__main__":
    PDF_FILE_PATH = "平安保险用户手册.pdf"
    # Security fix: never hard-code API keys in source — the previous key was
    # committed in plain text and should be revoked. Read it from the environment.
    TONGYI_API_KEY = os.environ.get("DASHSCOPE_API_KEY", "")
    DB_PATH = "./tongyi_chroma_db"

    try:
        if not TONGYI_API_KEY:
            raise ValueError("❌ 请先设置环境变量 DASHSCOPE_API_KEY")

        print("=" * 50)
        print("1. 开始：PDF加载→分割→向量化")
        print("=" * 50)
        split_docs, doc_vectors = load_split_vectorize_pdf(
            pdf_path=PDF_FILE_PATH,
            api_key=TONGYI_API_KEY
        )

        print("\n" + "=" * 50)
        print("2. 开始：构建Chroma向量数据库")
        print("=" * 50)
        chroma_collection = build_chroma_vector_db(
            split_docs=split_docs,
            vectors=doc_vectors,
            db_path=DB_PATH
        )

        print("\n" + "=" * 50)
        print("3. 测试：检索与「平安福投保条件」相关的片段")
        print("=" * 50)
        # Reload from disk on purpose: this exercises the persistence path.
        loaded_collection = load_existing_chroma_db(db_path=DB_PATH)

        query = "平安福2024版的投保条件是什么？"
        query_embedding = initialize_tongyi_embeddings(TONGYI_API_KEY).embed_query(query)

        results = loaded_collection.query(
            query_embeddings=[query_embedding],
            n_results=2
        )

        print(f"🔍 检索关键词：{query}")
        for i in range(len(results['ids'][0])):
            print(f"\n【相关片段 {i + 1}】")
            # PyPDF page metadata is 0-based; display as 1-based for humans.
            print(f"📄 来源页码：第 {results['metadatas'][0][i].get('page', 0) + 1} 页")
            print(f"📝 片段内容：{results['documents'][0][i]}")
            # Bug fix: Chroma's query() returns distances, where SMALLER means
            # more similar — the old label "相似度分数" implied the opposite.
            print(f"📊 距离分数（越小越相似）：{results['distances'][0][i]:.4f}")
    except Exception as e:
        print(f"\n❌ 程序运行失败：{str(e)}")