# RAG全流程整合代码（环节1-4）
# 功能：从PDF加载到Milvus存储的完整向量数据库构建流程
# 环境：Python 3.12, pymilvus==2.6.0, langchain==0.3.27, dashscope==1.24.1

import os
import uuid
import time
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from pymilvus import (
    connections,
    FieldSchema, CollectionSchema, DataType,
    Collection,
    utility,
    MilvusException
)

# --------------------------
# 配置参数（请根据实际环境修改）
# --------------------------
CONFIG = {
    "pdf_path": "平安保险用户手册.pdf",  # path of the PDF to ingest
    # SECURITY: never commit a real API key. Prefer the DASHSCOPE_API_KEY
    # environment variable; the literal fallback only preserves old behavior,
    # and the previously committed key should be rotated.
    "tongyi_api_key": os.getenv("DASHSCOPE_API_KEY", "sk-6434b554122c4b049ceb805d703f695b"),
    "milvus_host": "localhost",
    "milvus_port": "19530",
    "collection_name": "insurance_manual",
    "batch_size": 10,  # DashScope embedding API batch upper limit
    "chunk_size": 500,  # characters per split chunk
    "chunk_overlap": 100,  # characters shared by adjacent chunks
    "retry_count": 3  # Milvus connection retry attempts
}


# --------------------------
# 环节1：PDF文档加载
# --------------------------
def load_pdf_document(pdf_path):
    """Load a PDF file and return its pages as LangChain documents.

    Raises:
        FileNotFoundError: when *pdf_path* does not exist on disk.
    """
    # Fail fast with a clear message before handing the path to the loader.
    if not os.path.exists(pdf_path):
        raise FileNotFoundError(f"PDF文件不存在: {pdf_path}")

    pages = PyPDFLoader(pdf_path).load()

    print(f"📄 环节1：PDF加载完成")
    print(f"   - 文件名: {os.path.basename(pdf_path)}")
    print(f"   - 总页数: {len(pages)}\n")
    return pages


# --------------------------
# 环节2：文档分割
# --------------------------
def split_documents(documents, chunk_size, chunk_overlap):
    """Split page-level documents into overlapping chunks for embedding.

    Args:
        documents: list of LangChain Documents (one per PDF page).
        chunk_size: maximum characters per chunk.
        chunk_overlap: characters shared by adjacent chunks.

    Returns:
        List of chunk Documents (possibly empty for empty input).
    """
    # Separators ordered for Chinese text: paragraph, newline, then punctuation.
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
        length_function=len,
        separators=["\n\n", "\n", "。", "！", "？", "，", "、"]
    )

    split_docs = text_splitter.split_documents(documents)

    print(f"✂️  环节2：文档分割完成")
    print(f"   - 原始页数: {len(documents)}")
    print(f"   - 分割后片段数: {len(split_docs)}")
    # Guard: the original code divided by len(split_docs) unconditionally and
    # crashed with ZeroDivisionError when no chunks were produced.
    if split_docs:
        print(f"   - 平均片段长度: {sum(len(d.page_content) for d in split_docs) / len(split_docs):.0f}字符\n")
    return split_docs


# --------------------------
# 环节3：文本向量化
# --------------------------
def initialize_embedding_model(api_key, model_name="text-embedding-v4"):
    """Initialize the DashScope (Tongyi) embedding model and probe its dimension.

    Args:
        api_key: DashScope API key.
        model_name: embedding model identifier.

    Returns:
        Tuple of (embeddings instance, vector dimension).

    Raises:
        RuntimeError: when the model cannot be constructed or queried
            (RuntimeError is an Exception subclass, so existing callers
            catching Exception keep working).
    """
    try:
        embeddings = DashScopeEmbeddings(
            model=model_name,
            dashscope_api_key=api_key
        )

        # Embed a probe string once to discover the output dimensionality.
        test_vector = embeddings.embed_query("测试向量")
        vector_dim = len(test_vector)

        print(f"🔄 环节3：嵌入模型初始化完成")
        print(f"   - 模型名称: {model_name}")
        print(f"   - 向量维度: {vector_dim}\n")
        return embeddings, vector_dim
    except Exception as e:
        # Chain the root cause instead of discarding it in a bare Exception.
        raise RuntimeError(f"嵌入模型初始化失败: {str(e)}\n请检查API Key是否有效") from e


def vectorize_documents(split_docs, embeddings, batch_size):
    """Embed document chunks in batches via the embedding model.

    Args:
        split_docs: chunk Documents exposing ``.page_content``.
        embeddings: model exposing ``embed_documents(list[str])``.
        batch_size: maximum texts per API call.

    Returns:
        List of embedding vectors, index-aligned with split_docs.

    Raises:
        RuntimeError: when a batch fails to embed (original cause chained).
    """
    texts = [doc.page_content for doc in split_docs]
    total = len(texts)
    vectors = []

    print(f"📊 开始向量化处理（共{total}个片段）")

    # Process in API-sized batches to respect the provider's request limit.
    for i in range(0, total, batch_size):
        batch_texts = texts[i:i + batch_size]
        batch_num = i // batch_size + 1
        start_idx = i + 1
        end_idx = min(i + batch_size, total)

        try:
            batch_vectors = embeddings.embed_documents(batch_texts)
            vectors.extend(batch_vectors)
            print(f"   批次{batch_num}: 处理完成 ({start_idx}-{end_idx}/{total})")
        except Exception as e:
            # Report which batch failed and chain the root cause.
            raise RuntimeError(f"批次{batch_num}处理失败: {str(e)}") from e

    print(f"✅ 向量化完成，生成{len(vectors)}个向量\n")
    return vectors


# --------------------------
# 环节4：Milvus向量数据库操作
# --------------------------
def connect_to_milvus(host, port, retry_count):
    """Connect to a Milvus server with bounded retries.

    Args:
        host: Milvus host name.
        port: Milvus port (string or int).
        retry_count: total number of connection attempts (must be >= 1;
            a value of 0 falls through and returns None).

    Returns:
        True on successful connection.

    Raises:
        RuntimeError: when every attempt fails (last cause chained).
    """
    for retry in range(retry_count):
        try:
            # Drop any stale default connection before reconnecting.
            if connections.has_connection("default"):
                connections.disconnect("default")

            connections.connect(
                alias="default",
                host=host,
                port=port,
                timeout=30
            )

            # Round-trip to the server to confirm the link is actually usable.
            server_version = utility.get_server_version()
            print(f"📌 环节4：Milvus连接成功")
            print(f"   - 服务版本: {server_version}\n")
            return True
        except Exception as e:
            if retry < retry_count - 1:
                print(f"   连接尝试{retry + 1}失败，重试中...")
                time.sleep(3)
            else:
                # Chain the last failure instead of discarding it.
                raise RuntimeError(f"Milvus连接失败: {str(e)}") from e


def create_milvus_collection(collection_name, vector_dim):
    """Create (or recreate) the Milvus collection with its vector index.

    Destructive: any existing collection with the same name is dropped first.

    Args:
        collection_name: name of the collection to create.
        vector_dim: dimensionality of the embedding vectors.

    Returns:
        The newly created Collection.
    """
    # Schema: string primary key, embedding vector, raw chunk text, page number.
    schema = CollectionSchema(
        fields=[
            FieldSchema(name="id", dtype=DataType.VARCHAR, max_length=64, is_primary=True),
            FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=vector_dim),
            FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=2000),
            FieldSchema(name="page", dtype=DataType.INT64),
        ],
        description="保险手册向量集合",
    )

    # Start from a clean slate if a previous run left a collection behind.
    if utility.has_collection(collection_name):
        utility.drop_collection(collection_name)
        print(f"   已删除现有集合: {collection_name}")

    collection = Collection(name=collection_name, schema=schema)

    # IVF_FLAT + L2 matches the search parameters used by search_similar below.
    collection.create_index(
        field_name="vector",
        index_params={
            "index_type": "IVF_FLAT",
            "metric_type": "L2",
            "params": {"nlist": 128},
        },
    )

    print(f"   集合创建完成: {collection_name} (维度: {vector_dim})")
    return collection


def insert_data_to_milvus(collection, split_docs, vectors):
    """Insert chunk texts and their embeddings into a Milvus collection.

    Args:
        collection: target Milvus Collection.
        split_docs: chunk Documents, index-aligned with *vectors*.
        vectors: embedding vectors.

    Returns:
        The same collection, flushed and loaded ready for search.
    """
    # zip keeps chunks and vectors aligned without manual index bookkeeping.
    data = [
        {
            "id": str(uuid.uuid4()),
            "vector": vector,
            "text": doc.page_content[:1999],  # truncate to respect VARCHAR max_length=2000
            "page": doc.metadata.get("page", 0) + 1,  # PyPDF pages are 0-based; store 1-based
        }
        for doc, vector in zip(split_docs, vectors)
    ]

    collection.insert(data)

    # Flush persists the inserted segment; load makes it searchable.
    collection.flush()
    collection.load()

    count = collection.num_entities
    print(f"   数据插入完成: {count}条记录")
    return collection


def search_similar(collection, query, embeddings, top_k=2):
    """Run a vector similarity search in Milvus and print the top matches.

    Args:
        collection: loaded Milvus Collection to search.
        query: natural-language query string.
        embeddings: model exposing ``embed_query(str)``.
        top_k: number of hits to return.

    Returns:
        The raw Milvus search results.
    """
    # Embed the query with the same model used for the stored documents.
    query_vector = embeddings.embed_query(query)

    results = collection.search(
        data=[query_vector],
        anns_field="vector",
        param={"metric_type": "L2", "params": {"nprobe": 10}},
        limit=top_k,
        output_fields=["text", "page"]
    )

    print(f"\n🔍 检索结果: {query}")
    for rank, hit in enumerate(results[0], 1):
        print(f"\n结果{rank}:")
        print(f"   页码: 第{hit.entity.page}页")
        print(f"   内容: {hit.entity.text[:200]}...")
        print(f"   相似度: {hit.distance:.4f}")
    return results


# --------------------------
# 主执行流程
# --------------------------
def main():
    """Run the full pipeline: load → split → embed → store in Milvus → search."""
    try:
        # Stage 1: load the PDF into page-level documents.
        documents = load_pdf_document(CONFIG["pdf_path"])

        # Stage 2: split pages into embedding-sized chunks.
        split_docs = split_documents(
            documents, CONFIG["chunk_size"], CONFIG["chunk_overlap"]
        )

        # Stage 3: initialize the embedding model, then vectorize every chunk.
        embeddings, vector_dim = initialize_embedding_model(CONFIG["tongyi_api_key"])
        vectors = vectorize_documents(split_docs, embeddings, CONFIG["batch_size"])

        # Stage 4: build the Milvus collection and insert the data.
        connect_to_milvus(CONFIG["milvus_host"], CONFIG["milvus_port"], CONFIG["retry_count"])
        collection = create_milvus_collection(CONFIG["collection_name"], vector_dim)
        collection = insert_data_to_milvus(collection, split_docs, vectors)

        # Smoke-test retrieval against the freshly built collection.
        search_similar(collection, "平安福2024版的投保条件是什么？", embeddings)

        print("\n🎉 全流程执行完成")

    except Exception as e:
        print(f"\n❌ 执行失败: {str(e)}")


# Script entry point: run the full pipeline only when executed directly.
if __name__ == "__main__":
    main()
