"""
vector_store.py是EduRAG系统的核心模块之一，封装了与Milvus向量数据库的交互逻辑。
它负责将文档转化为向量存储到数据库中，并提供高效的混合检索功能。
通过结合BGE-M3嵌入模型和重排序机制，该模块确保系统能够快速检索到与用户查询最相关的文档。
"""
import hashlib
import os
import sys
import torch
from typing import List, Optional, Dict, Any
from langchain_core.documents import Document

# 导入混合向量生成模型
from milvus_model.hybrid import BGEM3EmbeddingFunction
# 导入milvus相关类，用于操作Milvus向量数据库
from pymilvus import (MilvusClient, DataType, AnnSearchRequest,
                      WeightedRanker, MilvusException)
# 导入重排序模型
from sentence_transformers import CrossEncoder

# 导入基础模块配置和日志模块
from base.config import get_config
from base.logger import get_logger
from rag_qa.core.document_processor import process_documents

# 设置项目路径
def setup_project_paths() -> tuple[str, str, str]:
    """Compute this module's directory, its package root, and the project
    root, and make the latter two importable.

    Returns:
        ``(local_path, rag_qa_path, project_root)`` as absolute paths,
        where each subsequent entry is the parent directory of the
        previous one.
    """
    local_path = os.path.abspath(os.path.dirname(__file__))
    rag_qa_path = os.path.abspath(os.path.dirname(local_path))
    project_root = os.path.abspath(os.path.dirname(rag_qa_path))

    # Prepend (insert at 0) so project modules shadow any identically-named
    # installed packages; skip paths that are already registered.
    for path in [project_root, rag_qa_path]:
        if path not in sys.path:
            sys.path.insert(0, path)

    return local_path, rag_qa_path, project_root

# Initialize module-level paths (also registers them on sys.path so the
# base/ and rag_qa/ imports above resolve when run as a script).
local_path, rag_qa_path, project_root = setup_project_paths()

# Shared configuration and logger singletons used throughout this module.
config = get_config()
logger = get_logger()

class VectorStore:
    """Encapsulates interaction with the Milvus vector database.

    Stores document chunks as hybrid (dense + sparse) BGE-M3 embeddings and
    retrieves them via Milvus hybrid search fused with a WeightedRanker,
    followed by cross-encoder reranking.
    """

    # Metadata keys every inserted document chunk must carry.
    _REQUIRED_METADATA = ("parent_id", "parent_content", "source", "timestamp")

    def __init__(self,
                 collection_name: Optional[str] = None,
                 host: Optional[str] = None,
                 port: Optional[int] = None,
                 database: Optional[str] = None):
        """Connect to Milvus, load models, and ensure the collection exists.

        :param collection_name: collection to use; defaults to
            ``config.MILVUS_COLLECTION_NAME``.
        :param host: Milvus host; defaults to ``config.MILVUS_HOST``.
        :param port: Milvus port; defaults to ``config.MILVUS_PORT``.
        :param database: Milvus database name; defaults to
            ``config.MILVUS_DATABASE_NAME``.
        :raises FileNotFoundError: if a local model directory is missing.
        :raises MilvusException: if connecting or collection setup fails.
        """
        # 1. Resolve defaults lazily. The original bound config.* values as
        # parameter defaults, which are evaluated once at class-definition
        # time — config changes made after import were silently ignored.
        self.collection_name = collection_name if collection_name is not None else config.MILVUS_COLLECTION_NAME
        self.host = host if host is not None else config.MILVUS_HOST
        self.port = port if port is not None else config.MILVUS_PORT
        self.database = database if database is not None else config.MILVUS_DATABASE_NAME

        # 2. Logger shared with the rest of the module.
        self.logger = logger

        # 3. Prefer GPU when available for both the embedder and reranker.
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.logger.info(f"使用设备：{self.device}")

        # 4. Models first: the collection schema below needs the dense dim.
        self.reranker = self._init_reranker()
        self.embedding_function, self.dense_dim = self._init_embedding_function()

        # 5. Milvus client.
        self.client = self._init_milvus_client()

        # 6. Create the collection if absent, otherwise reuse it.
        self._create_or_load_collection()

    def _init_reranker(self) -> CrossEncoder:
        """Load the local bge-reranker-large cross-encoder.

        :raises FileNotFoundError: if the model directory is missing.
        """
        try:
            rerank_model_path = os.path.join(rag_qa_path, "models", "bge-reranker-large")
            if not os.path.exists(rerank_model_path):
                raise FileNotFoundError(f"重排序模型路径不存在: {rerank_model_path}")

            # Truncate query/passage pairs to the model's 512-token window.
            return CrossEncoder(
                rerank_model_path,
                device=self.device,
                tokenizer_kwargs={
                    "padding": "longest",
                    "truncation": True,
                    "max_length": 512,
                    "return_tensors": "pt"
                }
            )
        except Exception as e:
            self.logger.error(f"初始化重排序模型失败: {str(e)}")
            raise

    def _init_embedding_function(self) -> tuple[BGEM3EmbeddingFunction, int]:
        """Load the local BGE-M3 hybrid embedder.

        :return: ``(embedding_function, dense_dimension)``.
        :raises FileNotFoundError: if the model directory is missing.
        :raises ValueError: if the model reports no dense dimension.
        """
        try:
            bge_m3_model_path = os.path.join(rag_qa_path, "models", "bge-m3")
            if not os.path.exists(bge_m3_model_path):
                raise FileNotFoundError(f"BGE-M3模型路径不存在: {bge_m3_model_path}")

            embedding_func = BGEM3EmbeddingFunction(
                bge_m3_model_path,
                device=self.device,
                use_fp16=False
            )

            # The dense dimension is required to declare the FLOAT_VECTOR
            # field of the collection schema.
            dims = embedding_func.dim
            if 'dense' not in dims:
                raise ValueError("嵌入模型不包含稠密向量维度信息")

            self.logger.info(f"嵌入模型向量维度: {dims}")
            return embedding_func, dims['dense']
        except Exception as e:
            self.logger.error(f"初始化嵌入模型失败: {str(e)}")
            raise

    def _init_milvus_client(self) -> MilvusClient:
        """Open a connection to the configured Milvus database.

        :raises MilvusException: on connection failure.
        """
        try:
            client = MilvusClient(uri=f"http://{self.host}:{self.port}", db_name=self.database)
            self.logger.info(f"成功连接到Milvus数据库: {self.database}")
            return client
        except MilvusException as e:
            self.logger.error(f"连接Milvus失败: {str(e)}")
            raise

    def _create_or_load_collection(self) -> None:
        """Create the hybrid-vector collection if it does not exist yet.

        The schema stores the chunk text plus dense and sparse vectors, and
        carries parent-chunk fields so retrieval can surface wider context.

        :raises MilvusException: if schema/index/collection creation fails.
        """
        try:
            if not self.client.has_collection(self.collection_name):
                self.logger.info(f"向量集合 {self.collection_name} 不存在，正在创建...")

                # IDs are supplied by us (content hashes), hence auto_id=False.
                schema = self.client.create_schema(auto_id=False, enable_dynamic_field=True)
                schema.add_field(field_name="id", datatype=DataType.VARCHAR,
                                max_length=100, is_primary=True)
                schema.add_field(field_name="text", datatype=DataType.VARCHAR,
                                max_length=65535)
                schema.add_field(field_name="dense_vector", datatype=DataType.FLOAT_VECTOR,
                                dim=self.dense_dim)
                schema.add_field(field_name="sparse_vector", datatype=DataType.SPARSE_FLOAT_VECTOR)
                schema.add_field(field_name="parent_id", datatype=DataType.VARCHAR,
                                max_length=100)
                schema.add_field(field_name="parent_content", datatype=DataType.VARCHAR,
                                max_length=65535)
                schema.add_field(field_name="source", datatype=DataType.VARCHAR,
                                max_length=50)
                schema.add_field(field_name="timestamp", datatype=DataType.VARCHAR,
                                max_length=50)

                # Inner-product metric on both fields so the two search
                # requests in hybrid_search score on the same scale.
                index_params = self.client.prepare_index_params()
                index_params.add_index(
                    field_name="dense_vector",
                    index_name="dense_index",
                    index_type="IVF_FLAT",
                    metric_type="IP",
                    params={"nlist": 1024}
                )
                index_params.add_index(
                    field_name="sparse_vector",
                    index_name="sparse_index",
                    index_type="SPARSE_INVERTED_INDEX",
                    metric_type="IP",
                    params={"drop_ratio_build": 0.2}
                )

                self.client.create_collection(
                    self.collection_name,
                    schema=schema,
                    index_params=index_params
                )
                self.logger.info(f"向量集合 {self.collection_name} 创建成功")
            else:
                self.logger.info(f"向量集合 {self.collection_name} 已存在，正在加载...")
        except MilvusException as e:
            self.logger.error(f"操作集合失败: {str(e)}")
            raise

    def _generate_document_id(self, content: str) -> str:
        """Return a deterministic ID for *content*.

        MD5 is used purely as a content fingerprint (deduplication via
        upsert), not for security.
        """
        return hashlib.md5(content.encode("utf-8")).hexdigest()

    def _process_sparse_vector(self, sparse_matrix) -> Dict[int, float]:
        """Convert a 1-row scipy-style sparse matrix to Milvus dict format.

        :param sparse_matrix: object exposing ``indices`` and ``data``
            arrays (a CSR row slice from the BGE-M3 sparse output).
        :return: mapping of token id -> weight with plain Python types.
        """
        # Cast both keys and values so Milvus receives native int/float
        # rather than numpy scalars.
        return {int(token_id): float(value)
                for token_id, value in zip(sparse_matrix.indices, sparse_matrix.data)}

    def add_documents(self, documents: List[Document]) -> None:
        """Embed *documents* and upsert them into the collection.

        :param documents: chunks whose metadata must contain parent_id,
            parent_content, source and timestamp.
        :raises ValueError: if a document is missing a required metadata key.
        """
        if not documents:
            self.logger.warning("没有需要添加的文档")
            return

        try:
            # Validate metadata BEFORE embedding, so a malformed document
            # fails fast instead of after the expensive model call.
            for doc in documents:
                for key in self._REQUIRED_METADATA:
                    if key not in doc.metadata:
                        raise ValueError(f"文档元数据缺少必要字段: {key}")

            # One batched embedding call for all chunks.
            document_contents = [doc.page_content for doc in documents]
            embeddings = self.embedding_function(document_contents)

            vectors = []
            for index, doc in enumerate(documents):
                content = doc.page_content

                # Slice row `index` of the sparse matrix; [[index], :] keeps
                # it 2-D so .indices/.data refer to that single row.
                sparse_vector = self._process_sparse_vector(embeddings["sparse"][[index], :])

                vectors.append({
                    "id": self._generate_document_id(content),
                    "text": content,
                    "dense_vector": embeddings["dense"][index],
                    "sparse_vector": sparse_vector,
                    "parent_id": doc.metadata["parent_id"],
                    "parent_content": doc.metadata["parent_content"],
                    "source": doc.metadata["source"],
                    "timestamp": doc.metadata["timestamp"],
                })

            # Upsert so re-ingesting the same content overwrites by ID.
            if vectors:
                result = self.client.upsert(collection_name=self.collection_name, data=vectors)
                self.logger.info(
                    f"已向向量集合 {self.collection_name} 中添加 {result['upsert_count']} 条文档向量"
                )

        except Exception as e:
            self.logger.error(f"添加文档失败: {str(e)}")
            raise

    def hybrid_search_with_rerank(self,
                                query: str,
                                k: Optional[int] = None,
                                source_filter: Optional[str] = None) -> List[Dict[str, Any]]:
        """Hybrid (dense + sparse) retrieval followed by cross-encoder rerank.

        :param query: query text; blank queries return an empty list.
        :param k: number of chunks to return; defaults to
            ``config.RETRIEVAL_K`` (resolved at call time).
        :param source_filter: optional exact-match filter on the ``source``
            field.
        :return: the k most relevant hits, reranked by the cross-encoder.
        """
        if not query.strip():
            self.logger.warning("查询文本不能为空")
            return []

        # Lazy default: honour config changes made after import.
        if k is None:
            k = config.RETRIEVAL_K

        try:
            # Embed the query once for both search branches.
            query_embedding = self.embedding_function([query])
            query_dense_vector = query_embedding["dense"][0]
            sparse_query_vector = self._process_sparse_vector(query_embedding["sparse"][[0], :])

            # Exact-match filter on source, applied to both branches.
            filter_expr = f'source=="{source_filter}"' if source_filter else ''

            dense_request = AnnSearchRequest(
                data=[query_dense_vector],
                anns_field="dense_vector",
                param={'metric_type': 'IP', 'params': {'nprobe': 10}},
                limit=k,
                expr=filter_expr,
            )

            sparse_request = AnnSearchRequest(
                data=[sparse_query_vector],
                anns_field="sparse_vector",
                param={'metric_type': 'IP', 'params': {}},
                limit=k,
                expr=filter_expr,
            )

            # Semantic (dense) similarity dominates; lexical (sparse)
            # similarity contributes with weight 0.7.
            ranker = WeightedRanker(1.0, 0.7)
            results = self.client.hybrid_search(
                collection_name=self.collection_name,
                reqs=[dense_request, sparse_request],
                ranker=ranker,
                limit=k,
                output_fields=["text", "parent_id", "parent_content", "source", "timestamp"]
            )[0]

            # BUGFIX: the original method never used self.reranker despite
            # its name — rerank the fused candidates with the cross-encoder.
            # NOTE(review): assumes each hit exposes its output fields under
            # hit["entity"], as MilvusClient search results do — confirm
            # against the installed pymilvus version.
            if results:
                pairs = [[query, hit["entity"]["text"]] for hit in results]
                scores = self.reranker.predict(pairs)
                order = sorted(range(len(results)),
                               key=lambda i: float(scores[i]), reverse=True)
                results = [results[i] for i in order]

            self.logger.info(f"混合检索完成，返回 {len(results)} 条结果")
            return results

        except Exception as e:
            self.logger.error(f"检索失败: {str(e)}")
            raise

if __name__ == '__main__':
    try:
        # Bring up the store: connects to Milvus and loads both models.
        store = VectorStore()

        # Ingest the AI-subject corpus into the vector collection.
        ai_data_dir = os.path.join(rag_qa_path, "data", "ai_data")
        store.add_documents(process_documents(ai_data_dir))

        # Example query:
        # query = "AI学科的课程内容是什么?"
        # results = store.hybrid_search_with_rerank(query, source_filter="ai")
        # print(f"查询结果: {results}")

    except Exception as e:
        logger.error(f"程序运行失败: {str(e)}", exc_info=True)
        sys.exit(1)