"""
向量存储处理模块
使用 Milvus 实现文档向量的存储和检索
"""
import json
import logging
import os
import uuid
from typing import List, Dict, Any, Optional, Tuple

from pymilvus import (
    connections,
    Collection,
    CollectionSchema,
    FieldSchema,
    DataType,
    utility,
)
import openai
from langchain_core.documents import Document

# Module-level logging setup; basicConfig is a no-op if the root logger
# already has handlers, so importing this module is safe in larger apps.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class VectorStoreHandler:
    """
    Milvus-backed vector store handler.

    Responsibilities:
    1. Connect to a Milvus vector database.
    2. Create and manage the target collection.
    3. Embed text chunks (via the OpenAI API) and store them as vectors.
    4. Perform top-k similarity search.
    """

    def __init__(
        self,
        host: str = "localhost",
        port: int = 19530,
        user: str = "",
        password: str = "",
        collection_name: str = "poetry_rag_collection",
        embedding_model: str = "text-embedding-ada-002",
        embedding_dimension: int = 1536,
        openai_api_key: str = "",
        openai_api_base: str = "",
    ):
        """
        Initialize the handler and open the Milvus connection.

        :param host: Milvus server host
        :param port: Milvus server port
        :param user: Milvus username (optional)
        :param password: Milvus password (optional)
        :param collection_name: name of the collection to operate on
        :param embedding_model: embedding model name
        :param embedding_dimension: dimensionality of the embedding vectors
        :param openai_api_key: OpenAI API key
        :param openai_api_base: OpenAI API base URL (optional)
        :raises RuntimeError: if the Milvus connection cannot be established
        """
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.collection_name = collection_name
        self.embedding_model = embedding_model
        self.embedding_dimension = embedding_dimension

        # Configure the module-level OpenAI client. This class calls the
        # openai>=1.0 interface (`openai.embeddings.create`), whose base-URL
        # setting is `base_url`. The legacy v0 attribute `api_base` is not
        # read by that interface, so setting it would silently have no effect.
        if openai_api_key:
            openai.api_key = openai_api_key
        if openai_api_base:
            openai.base_url = openai_api_base

        self.collection: Optional[Collection] = None
        # Whether `self.collection` has been loaded into memory. Milvus
        # requires a loaded collection before searching; tracking this avoids
        # re-issuing load() on every query.
        self._loaded: bool = False
        self._connect_to_milvus()

    def _connect_to_milvus(self):
        """Open the 'default' connection to the Milvus server.

        :raises RuntimeError: if the connection attempt fails
        """
        try:
            connect_args: Dict[str, Any] = {
                "host": self.host,
                "port": self.port,
            }
            # Credentials are optional; only pass them when both are set.
            if self.user and self.password:
                connect_args["user"] = self.user
                connect_args["password"] = self.password

            connections.connect(alias="default", **connect_args)
            logger.info(f"成功连接到 Milvus: {self.host}:{self.port}")

        except Exception as e:
            logger.error(f"连接 Milvus 失败: {e}")
            raise RuntimeError(f"无法连接到 Milvus: {e}")

    def create_collection(
        self,
        dimension: Optional[int] = None,
        drop_if_exists: bool = False
    ):
        """
        Create the Milvus collection (schema: id / vector / text / metadata).

        :param dimension: vector dimension; defaults to the value given at init
        :param drop_if_exists: drop and recreate the collection if it exists
        :raises RuntimeError: if collection or index creation fails
        """
        if dimension is None:
            dimension = self.embedding_dimension

        try:
            if utility.has_collection(self.collection_name):
                if drop_if_exists:
                    logger.info(f"删除已存在的集合: {self.collection_name}")
                    utility.drop_collection(self.collection_name)
                else:
                    # Reuse the existing collection instead of recreating it.
                    logger.info(f"集合已存在: {self.collection_name}")
                    self.collection = Collection(self.collection_name)
                    return

            fields = [
                FieldSchema(
                    name="id",
                    dtype=DataType.VARCHAR,
                    is_primary=True,
                    max_length=36,  # UUID4 string length
                    description="文档块的唯一标识符"
                ),
                FieldSchema(
                    name="vector",
                    dtype=DataType.FLOAT_VECTOR,
                    dim=dimension,
                    description="文本的向量表示"
                ),
                FieldSchema(
                    name="text",
                    dtype=DataType.VARCHAR,
                    max_length=65535,
                    description="原始文本内容"
                ),
                FieldSchema(
                    name="metadata",
                    dtype=DataType.VARCHAR,
                    max_length=10000,
                    description="文档元数据（JSON 格式）"
                ),
            ]

            schema = CollectionSchema(
                fields=fields,
                description="RAG 文档向量集合"
            )

            self.collection = Collection(
                name=self.collection_name,
                schema=schema,
                using='default'
            )

            logger.info(f"成功创建集合: {self.collection_name}")

            # Build the vector index up front so searches are fast.
            self._create_index()

        except Exception as e:
            logger.error(f"创建集合失败: {e}")
            raise RuntimeError(f"无法创建集合: {e}")

    def _create_index(self):
        """Create an IVF_FLAT index (inner-product metric) on the vector field.

        :raises RuntimeError: if index creation fails
        """
        try:
            index_params = {
                "metric_type": "IP",  # inner product; "L2" or "COSINE" also possible
                "index_type": "IVF_FLAT",
                "params": {"nlist": 128},  # number of clusters for IVF
            }

            self.collection.create_index(
                field_name="vector",
                index_params=index_params
            )

            logger.info(f"成功为集合 {self.collection_name} 创建索引")

        except Exception as e:
            logger.error(f"创建索引失败: {e}")
            raise RuntimeError(f"无法创建索引: {e}")

    def load_collection(self):
        """Load the collection into memory (required before searching).

        :raises RuntimeError: if the collection is missing or loading fails
        """
        try:
            if self.collection is None:
                if utility.has_collection(self.collection_name):
                    self.collection = Collection(self.collection_name)
                else:
                    raise ValueError(f"集合不存在: {self.collection_name}")

            self.collection.load()
            self._loaded = True
            logger.info(f"成功加载集合到内存: {self.collection_name}")

        except Exception as e:
            logger.error(f"加载集合失败: {e}")
            raise RuntimeError(f"无法加载集合: {e}")

    def _ensure_loaded(self):
        """Lazily load the collection once so search calls don't re-issue load()."""
        if self.collection is None or not self._loaded:
            self.load_collection()

    def get_embedding(self, text: str) -> List[float]:
        """
        Embed a single text string.

        :param text: input text
        :return: embedding vector
        :raises RuntimeError: if the embedding API call fails
        """
        try:
            response = openai.embeddings.create(
                model=self.embedding_model,
                input=text
            )
            return response.data[0].embedding

        except Exception as e:
            logger.error(f"生成向量失败: {e}")
            raise RuntimeError(f"无法生成向量: {e}")

    def get_embeddings_batch(self, texts: List[str]) -> List[List[float]]:
        """
        Embed a batch of texts with a single API call.

        :param texts: list of input texts
        :return: list of embedding vectors, in input order
        :raises RuntimeError: if the embedding API call fails
        """
        try:
            response = openai.embeddings.create(
                model=self.embedding_model,
                input=texts
            )
            return [item.embedding for item in response.data]

        except Exception as e:
            logger.error(f"批量生成向量失败: {e}")
            raise RuntimeError(f"无法批量生成向量: {e}")

    def insert_documents(
        self,
        documents: List[Document],
        batch_size: int = 100
    ) -> List[str]:
        """
        Embed and insert a list of documents into the collection.

        :param documents: list of Document objects
        :param batch_size: number of documents embedded/inserted per batch
        :return: list of generated document IDs
        :raises RuntimeError: if embedding or insertion fails
        """
        if not documents:
            logger.warning("文档列表为空，跳过插入")
            return []

        try:
            # Make sure the collection exists before inserting.
            if self.collection is None:
                self.create_collection()

            inserted_ids: List[str] = []

            for i in range(0, len(documents), batch_size):
                batch = documents[i:i + batch_size]

                texts = [doc.page_content for doc in batch]
                # Store metadata as real JSON (the schema documents this field
                # as JSON); `str(dict)` would produce a Python repr that no
                # JSON parser can read back. `default=str` keeps non-JSON
                # metadata values (e.g. paths) from aborting the insert.
                metadatas = [
                    json.dumps(doc.metadata, ensure_ascii=False, default=str)
                    for doc in batch
                ]

                logger.info(f"正在生成向量: 批次 {i // batch_size + 1}, 文档数 {len(texts)}")
                vectors = self.get_embeddings_batch(texts)

                ids = [str(uuid.uuid4()) for _ in range(len(batch))]

                # Column order must match the schema: id, vector, text, metadata.
                entities = [
                    ids,
                    vectors,
                    texts,
                    metadatas,
                ]

                self.collection.insert(entities)
                inserted_ids.extend(ids)

                logger.info(f"成功插入批次 {i // batch_size + 1}: {len(batch)} 个文档")

            # Flush so the inserted data is persisted and countable.
            self.collection.flush()
            logger.info(f"总共插入 {len(inserted_ids)} 个文档到集合 {self.collection_name}")

            return inserted_ids

        except Exception as e:
            logger.error(f"插入文档失败: {e}")
            raise RuntimeError(f"无法插入文档: {e}")

    @staticmethod
    def _format_hits(hits, output_fields: List[str]) -> List[Dict[str, Any]]:
        """Convert one Milvus Hits object into a list of plain result dicts."""
        formatted: List[Dict[str, Any]] = []
        for hit in hits:
            result: Dict[str, Any] = {
                "id": hit.id,
                "score": hit.score,
                "distance": hit.distance,
            }
            # Copy only the fields the hit actually carries.
            for field in output_fields:
                if hasattr(hit.entity, field):
                    result[field] = getattr(hit.entity, field)
            formatted.append(result)
        return formatted

    def search_similar(
        self,
        query_text: str,
        top_k: int = 5,
        output_fields: Optional[List[str]] = None
    ) -> List[Dict[str, Any]]:
        """
        Find the documents most similar to a query text.

        :param query_text: query text
        :param top_k: number of results to return
        :param output_fields: fields to include in each result
            (defaults to id/text/metadata)
        :return: list of result dicts with content, metadata and similarity score
        :raises RuntimeError: if the search fails
        """
        try:
            self._ensure_loaded()

            query_vector = self.get_embedding(query_text)

            if output_fields is None:
                output_fields = ["id", "text", "metadata"]

            # Metric must match the index metric ("IP", see _create_index).
            search_params = {
                "metric_type": "IP",
                "params": {"nprobe": 10}
            }

            results = self.collection.search(
                data=[query_vector],
                anns_field="vector",
                param=search_params,
                limit=top_k,
                output_fields=output_fields
            )

            # One query vector -> results contains exactly one Hits object.
            formatted_results: List[Dict[str, Any]] = []
            for hits in results:
                formatted_results.extend(self._format_hits(hits, output_fields))

            logger.info(f"搜索完成，返回 {len(formatted_results)} 个结果")
            return formatted_results

        except Exception as e:
            logger.error(f"搜索失败: {e}")
            raise RuntimeError(f"无法执行搜索: {e}")

    def search_similar_batch(
        self,
        query_texts: List[str],
        top_k: int = 5,
        output_fields: Optional[List[str]] = None
    ) -> List[List[Dict[str, Any]]]:
        """
        Run a similarity search for several query texts at once.

        :param query_texts: list of query texts
        :param top_k: number of results to return per query
        :param output_fields: fields to include in each result
        :return: one result list per query, in input order
        :raises RuntimeError: if the search fails
        """
        try:
            self._ensure_loaded()

            query_vectors = self.get_embeddings_batch(query_texts)

            if output_fields is None:
                output_fields = ["id", "text", "metadata"]

            search_params = {
                "metric_type": "IP",
                "params": {"nprobe": 10}
            }

            results = self.collection.search(
                data=query_vectors,
                anns_field="vector",
                param=search_params,
                limit=top_k,
                output_fields=output_fields
            )

            all_formatted_results = [
                self._format_hits(hits, output_fields) for hits in results
            ]

            logger.info(f"批量搜索完成，处理 {len(query_texts)} 个查询")
            return all_formatted_results

        except Exception as e:
            logger.error(f"批量搜索失败: {e}")
            raise RuntimeError(f"无法执行批量搜索: {e}")

    def delete_by_ids(self, ids: List[str]) -> int:
        """
        Delete documents by primary key.

        :param ids: document IDs to delete
        :return: number of IDs submitted for deletion
        :raises RuntimeError: if the delete fails
        """
        try:
            if not ids:
                logger.warning("ID 列表为空，跳过删除")
                return 0

            # Guard against calling delete before the collection handle exists.
            if self.collection is None:
                self.load_collection()

            # json.dumps renders the list as `["a", "b"]`, which is a valid
            # Milvus boolean expression and properly escapes quotes in IDs
            # (a raw Python list repr would not).
            expr = f"id in {json.dumps(ids)}"

            self.collection.delete(expr)
            self.collection.flush()

            logger.info(f"成功删除 {len(ids)} 个文档")
            return len(ids)

        except Exception as e:
            logger.error(f"删除文档失败: {e}")
            raise RuntimeError(f"无法删除文档: {e}")

    def get_collection_stats(self) -> Dict[str, Any]:
        """
        Return basic statistics about the collection.

        :return: dict with collection name, entity count and description
        :raises RuntimeError: if the collection cannot be accessed
        """
        try:
            if self.collection is None:
                self.load_collection()

            return {
                "collection_name": self.collection_name,
                "num_entities": self.collection.num_entities,
                "description": self.collection.description,
            }

        except Exception as e:
            logger.error(f"获取统计信息失败: {e}")
            raise RuntimeError(f"无法获取统计信息: {e}")

    def close(self):
        """Release the collection from memory and close the Milvus connection.

        Best-effort: failures are logged, not raised.
        """
        try:
            if self.collection is not None:
                self.collection.release()
                self._loaded = False
            connections.disconnect("default")
            logger.info("成功关闭 Milvus 连接")
        except Exception as e:
            logger.error(f"关闭连接失败: {e}")

def create_vector_store_from_config(config: Dict[str, Any]) -> VectorStoreHandler:
    """
    Build a VectorStoreHandler from a flat configuration mapping.

    :param config: configuration dictionary; missing keys fall back to defaults
    :return: a connected VectorStoreHandler instance
    """
    # Constructor keyword -> (config key, default when the key is absent).
    settings = {
        "host": ("MILVUS_HOST", "localhost"),
        "port": ("MILVUS_PORT", 19530),
        "user": ("MILVUS_USER", ""),
        "password": ("MILVUS_PASSWORD", ""),
        "collection_name": ("MILVUS_COLLECTION_NAME", "poetry_rag_collection"),
        "embedding_model": ("EMBEDDING_MODEL", "text-embedding-ada-002"),
        "embedding_dimension": ("EMBEDDING_DIMENSION", 1536),
        "openai_api_key": ("OPENAI_API_KEY", ""),
        "openai_api_base": ("OPENAI_API_BASE", ""),
    }
    kwargs = {
        name: config.get(key, default)
        for name, (key, default) in settings.items()
    }
    return VectorStoreHandler(**kwargs)


