import os
import pickle
import logging
import re
from typing import Tuple, List, Dict
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import DashScopeEmbeddings
from pymilvus import (
    connections,
    FieldSchema,
    CollectionSchema,
    DataType,
    Collection,
    utility,
    MilvusException
)

# Logging configuration: timestamped, INFO level and above.
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)

# Load environment variables from a local .env file (if present).
load_dotenv()
MILVUS_HOST = os.getenv("MILVUS_HOST", "localhost")  # Milvus server host
MILVUS_PORT = os.getenv("MILVUS_PORT", "19530")  # Milvus server port (gRPC, default 19530)
DASHSCOPE_API_KEY = os.getenv("DASHSCOPE_API_KEY")  # required for embeddings; validated in __main__
COLLECTION_NAME = "spdb_xian_personal_manager"  # Milvus collection used by this script
EMBEDDING_MODEL = "text-embedding-v2"  # Alibaba Bailian (DashScope) embedding model version


def get_embedding_dimension(embedding_model: "DashScopeEmbeddings") -> int:
    """Return the actual output dimension of the embedding model.

    The dimension is probed at runtime with a short query so the Milvus
    vector field is always created with the model's real size instead of a
    hard-coded value (avoids the dimension-mismatch failure mode).

    Args:
        embedding_model: Any object exposing ``embed_query(text) -> list``.

    Returns:
        The length of the embedding vector.

    Raises:
        ValueError: If the model returns an empty vector — a dim-0 vector
            field would make the Milvus collection unusable later.
    """
    test_text = "测试文本"
    test_vector = embedding_model.embed_query(test_text)
    dim = len(test_vector)
    if dim == 0:
        # Fail fast here instead of letting collection creation break later.
        raise ValueError(f"嵌入模型{EMBEDDING_MODEL}返回空向量，无法确定维度")
    # getLogger(__name__) returns the same logger as the module-level one.
    logging.getLogger(__name__).info(f"嵌入模型{EMBEDDING_MODEL}实际输出维度：{dim}")
    return dim


def extract_text_with_page_numbers(pdf_reader: "PdfReader") -> Tuple[List[str], List[int]]:
    """Extract per-page text from a PDF, strip the Baidu Wenku watermark,
    and return parallel lists of (cleaned page texts, 1-based page numbers).

    Pages with no extractable text, or no text left after cleaning, are
    skipped (their page numbers do not appear in the result).

    Args:
        pdf_reader: An opened PyPDF2 ``PdfReader``.

    Returns:
        ``(page_chunks, page_numbers)`` — cleaned text per kept page and the
        page number each text came from.
    """
    log = logging.getLogger(__name__)  # same logger as the module-level one
    page_chunks: List[str] = []
    page_numbers: List[int] = []

    # Watermark pattern covering variants: extra spaces, dash types (-, －)
    # and comma types (,/，). BUGFIX: the hyphen inside the character class
    # must be escaped — the previous "[ -－]" was parsed as a character RANGE
    # from U+0020 to U+FF0D (covering ASCII and most CJK), so the pattern
    # could swallow legitimate text between the two halves of the watermark.
    watermark_pattern = re.compile(r'\s*百度文库[ \-－]+\s*好好学习[，,]\s*天天向上\s*')

    for page_idx, page in enumerate(pdf_reader.pages, start=1):
        page_text = page.extract_text()
        if not page_text:
            log.warning(f"PDF第{page_idx}页无文本内容，已跳过")
            continue

        # 1. Remove every watermark occurrence.
        clean_text = watermark_pattern.sub('', page_text).strip()

        # 2. Collapse runs of whitespace (spaces, newlines, ...) to one space.
        clean_text = re.sub(r'\s+', ' ', clean_text).strip()

        if not clean_text:
            log.warning(f"PDF第{page_idx}页清理后无有效文本，已跳过")
            continue

        page_chunks.append(clean_text)
        page_numbers.append(page_idx)
        log.debug(f"PDF第{page_idx}页提取有效文本长度：{len(clean_text)}字符")

    log.info(f"PDF文本提取完成，共{len(page_chunks)}页有效文本，对应{len(page_numbers)}个页码")
    return page_chunks, page_numbers


def init_milvus_collection(vector_dim: int) -> Collection:
    """Connect to Milvus and return a loaded collection whose "vector" field
    has dimension ``vector_dim``.

    An existing collection is reused when its vector dimension matches;
    otherwise (mismatched dim, or no "vector" field at all) it is dropped
    and rebuilt — existing data in it is discarded.

    Args:
        vector_dim: Actual output dimension of the embedding model.

    Returns:
        A loaded pymilvus ``Collection`` ready for insert/search.

    Raises:
        MilvusException: If connecting, creating, indexing, or loading fails.
    """
    # Connect to the Milvus server under the default alias.
    try:
        connections.connect(
            alias="default",
            host=MILVUS_HOST,
            port=MILVUS_PORT
        )
        logger.info(f"成功连接Milvus服务：{MILVUS_HOST}:{MILVUS_PORT}")
    except MilvusException as e:
        logger.error(f"Milvus服务连接失败：{str(e)}", exc_info=True)
        raise

    # Schema: auto-increment PK, chunk text, its embedding, and the 1-based
    # PDF page number used for citations in search results.
    fields = [
        FieldSchema(
            name="id",
            dtype=DataType.INT64,
            is_primary=True,
            auto_id=True
        ),
        FieldSchema(
            name="text_chunk",
            dtype=DataType.VARCHAR,
            max_length=2048
        ),
        FieldSchema(
            name="vector",
            dtype=DataType.FLOAT_VECTOR,
            dim=vector_dim  # use the model's actual embedding dimension
        ),
        FieldSchema(
            name="page_number",
            dtype=DataType.INT32
        )
    ]
    schema = CollectionSchema(
        fields=fields,
        description="浦发银行西安分行个金客户经理考核办法PDF的文本向量集合"
    )

    def _vector_dim_of(field) -> int:
        """Read the dim of a vector FieldSchema; -1 when it cannot be found.

        BUGFIX: pymilvus FieldSchema keeps type params such as "dim" in
        ``field.params`` — the previous ``field.dim`` access is not a stable
        attribute across client versions; fall back to it only if present.
        """
        params = getattr(field, "params", None) or {}
        return int(params.get("dim", getattr(field, "dim", -1)))

    # Reuse a compatible existing collection, or drop an incompatible one.
    if utility.has_collection(COLLECTION_NAME, using="default"):
        existing_collection = Collection(COLLECTION_NAME, using="default")
        vector_fields = [f for f in existing_collection.schema.fields if f.name == "vector"]
        # A missing "vector" field (previously an unhandled IndexError) is
        # treated like a dimension mismatch and forces a rebuild.
        existing_vector_dim = _vector_dim_of(vector_fields[0]) if vector_fields else -1

        if existing_vector_dim != vector_dim:
            logger.error(
                f"现有集合向量维度{existing_vector_dim}与模型维度{vector_dim}不匹配，将删除重建")
            utility.drop_collection(COLLECTION_NAME, using="default")
        else:
            existing_collection.load()
            logger.info(f"已加载现有集合{COLLECTION_NAME}（维度{vector_dim}）")
            return existing_collection

    # Create the new collection.
    collection = Collection(COLLECTION_NAME, schema=schema, using="default")
    logger.info(f"创建新集合{COLLECTION_NAME}（维度{vector_dim}）成功")

    # Build the ANN index on the vector field.
    index_params = {
        "index_type": "IVF_FLAT",
        "metric_type": "L2",
        "params": {"nlist": 100}
    }
    collection.create_index(
        field_name="vector",
        index_params=index_params,
        index_name="vector_index"
    )
    logger.info("向量索引创建成功")

    # Load into memory so the collection is immediately searchable.
    collection.load()
    logger.info(f"集合{COLLECTION_NAME}已加载到内存，当前数据量：{collection.num_entities}条")
    return collection


def split_text_by_page(page_chunks: List[str], page_numbers: List[int], chunk_size: int = 512,
                       chunk_overlap: int = 128) -> Tuple[List[str], List[int]]:
    """Split each page's text into overlapping chunks, keeping every chunk
    bound to the page it originated from.

    Args:
        page_chunks: Cleaned text of each page.
        page_numbers: 1-based page numbers parallel to ``page_chunks``.
        chunk_size: Target maximum characters per chunk.
        chunk_overlap: Characters shared between consecutive chunks.

    Returns:
        ``(chunks, pages)`` — the flattened chunk list and a parallel list
        with the source page number of every chunk.
    """
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
        length_function=len,
        separators=["\n\n", "\n", ". ", " ", ""]
    )

    all_chunks: List[str] = []
    all_pages: List[int] = []

    for source_text, source_page in zip(page_chunks, page_numbers):
        # Split, strip, and drop chunks that end up empty.
        stripped = (piece.strip() for piece in splitter.split_text(source_text))
        kept = [piece for piece in stripped if piece]

        if not kept:
            logger.warning(f"PDF第{source_page}页分割后无有效块，已跳过")
            continue

        all_chunks += kept
        all_pages += [source_page] * len(kept)
        logger.debug(f"PDF第{source_page}页分割为{len(kept)}个有效块")

    logger.info(f"文本分割完成，共生成{len(all_chunks)}个有效文本块")
    return all_chunks, all_pages


def process_text_to_milvus(
        pdf_reader: PdfReader,
        collection: Collection,
        embedding_model: DashScopeEmbeddings
) -> None:
    """Run the full ingestion pipeline: extract → split → embed → insert.

    Args:
        pdf_reader: Opened PyPDF2 reader for the source document.
        collection: Loaded Milvus collection matching the embedding dimension.
        embedding_model: Model used to embed every text chunk.

    Raises:
        ValueError: If no usable text survives extraction/splitting, or the
            vector count does not match the chunk count.
        MilvusException: If the insert into Milvus fails.
    """
    # Step 1: per-page text extraction with page numbers.
    page_chunks, page_numbers = extract_text_with_page_numbers(pdf_reader)
    if not page_chunks:
        raise ValueError("PDF提取后无有效文本，无法继续处理")

    # Step 2: split each page into overlapping chunks bound to their page.
    final_chunks, final_page_numbers = split_text_by_page(page_chunks, page_numbers)
    if not final_chunks:
        raise ValueError("文本分割后无有效块，无法继续处理")

    # Step 3: embed every chunk in one batch call.
    logger.info("开始生成文本块嵌入向量...")
    vectors = embedding_model.embed_documents(final_chunks)
    if len(vectors) != len(final_chunks):
        raise ValueError(f"向量数量（{len(vectors)}）与文本块数量（{len(final_chunks)}）不匹配")
    logger.info(f"向量生成完成，共{len(vectors)}个向量，维度：{len(vectors[0])}")

    # Step 4: build the insert payload. Note the vector was computed on the
    # full chunk even when the stored text gets truncated below.
    def _fit_varchar(text: str) -> str:
        # Keep the stored text within the collection's VARCHAR max_length.
        return text if len(text) <= 2047 else text[:2044] + "..."

    insert_data = [
        {"text_chunk": _fit_varchar(chunk), "vector": vec, "page_number": page}
        for chunk, vec, page in zip(final_chunks, vectors, final_page_numbers)
    ]
    logger.info(f"准备插入Milvus的数据：{len(insert_data)}条")

    # Step 5: insert, then flush so the data is persisted before searching.
    try:
        insert_result = collection.insert(insert_data)
        collection.flush()
        logger.info(
            f"数据插入成功！插入行数：{insert_result.insert_count}，主键ID：{insert_result.primary_keys[:5]}...")
    except MilvusException as e:
        logger.error(f"数据插入失败：{str(e)}", exc_info=True)
        raise


def search_milvus(
        collection: Collection,
        query_text: str,
        embedding_model: DashScopeEmbeddings,
        top_k: int = 3
) -> List[Dict]:
    """Search the collection for the chunks most similar to ``query_text``.

    Args:
        collection: Loaded Milvus collection with a "vector" field.
        query_text: Natural-language query.
        embedding_model: Model used to embed the query (must be the same
            model used at ingestion time).
        top_k: Number of hits to return.

    Returns:
        A list of dicts with keys "text_chunk", "page_number" and
        "similarity_distance" (L2 distance — smaller means more similar).

    Raises:
        MilvusException: If the search request fails.
    """
    # Embed the query with the same model that produced the stored vectors.
    query_vector = embedding_model.embed_query(query_text)

    try:
        hits = collection.search(
            data=[query_vector],
            anns_field="vector",
            param={"metric_type": "L2", "params": {"nprobe": 10}},
            limit=top_k,
            output_fields=["text_chunk", "page_number"],
            consistency_level="Strong"
        )
    except MilvusException as e:
        logger.error(f"Milvus搜索失败：{str(e)}", exc_info=True)
        raise

    # Exactly one query vector was sent, so hits[0] holds all its matches.
    formatted_results = [
        {
            "text_chunk": hit.entity.get("text_chunk"),
            "page_number": hit.entity.get("page_number"),
            "similarity_distance": hit.distance,
        }
        for hit in hits[0]
    ]
    logger.info(f"搜索完成，返回{len(formatted_results)}条结果（关键词：{query_text}）")
    return formatted_results


def main() -> None:
    """End-to-end demo: read the PDF, index it in Milvus, run sample searches.

    Raises:
        FileNotFoundError: If the source PDF is missing.
        ValueError: If DASHSCOPE_API_KEY is not configured.
    """
    # 1. Read the source PDF.
    pdf_path = "./浦发上海浦东发展银行西安分行个金客户经理考核办法.pdf"
    if not os.path.exists(pdf_path):
        raise FileNotFoundError(f"目标PDF文件不存在：{pdf_path}")
    pdf_reader = PdfReader(pdf_path)
    logger.info(f"成功读取PDF文件：{pdf_path}，总页数：{len(pdf_reader.pages)}")

    # 2. Initialise the embedding model and probe its output dimension.
    if not DASHSCOPE_API_KEY:
        raise ValueError("未配置DASHSCOPE_API_KEY，请在.env文件中添加")
    embedding_model = DashScopeEmbeddings(model=EMBEDDING_MODEL, dashscope_api_key=DASHSCOPE_API_KEY)
    vector_dim = get_embedding_dimension(embedding_model)

    # 3. Initialise (create or reuse) the Milvus collection.
    milvus_collection = init_milvus_collection(vector_dim)

    try:
        # 4. Extract → split → embed → insert.
        logger.info("开始执行PDF文本处理→向量插入流程...")
        process_text_to_milvus(pdf_reader, milvus_collection, embedding_model)
        logger.info("PDF处理→插入Milvus流程完成！")

        # 5. Demo searches against the freshly built index.
        logger.info("\n=== 演示Milvus搜索（基于浦发个金客户经理考核办法）===")
        query_examples = [
            "个金客户经理的业绩考核指标有哪些？",
            "个金客户经理的职位分为哪几个等级？",
            "工作质量考核的扣分标准是什么？"
        ]
        for query in query_examples:
            logger.info(f"\n查询关键词：{query}")
            results = search_milvus(milvus_collection, query, embedding_model, top_k=2)
            for i, res in enumerate(results, start=1):
                logger.info(f"  Top-{i} 相似度距离：{res['similarity_distance']:.4f}")
                logger.info(f"  对应PDF页码：第{res['page_number']}页")
                matched = res['text_chunk']
                # Trim long matches so the log stays readable.
                if len(matched) > 150:
                    logger.info(f"  匹配文本：{matched[:150]}...")
                else:
                    logger.info(f"  匹配文本：{matched}")
    finally:
        # 6. Release collection memory even when processing/search fails
        # (the original released only on the success path).
        milvus_collection.release()
        logger.info("\nMilvus集合已释放内存资源，程序结束")


if __name__ == "__main__":
    main()

