"""
向量存储与检索模块
负责文档的向量化存储和相似性检索
"""

import numpy as np
from typing import List, Dict, Any, Tuple
from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType, utility
import redis
import json
from base.config import config
from base.logger import logger


class VectorStore:
    """Vector storage and retrieval backed by Milvus, with a Redis read cache.

    Persists (doc_id, content, embedding, metadata) rows in a Milvus
    collection and caches each document's textual payload in Redis so
    callers can fetch a document by doc_id without touching Milvus.
    """

    def __init__(self) -> None:
        # Collection name and embedding dimension come from config, with
        # fallbacks so the class works against a minimal config file.
        self.collection_name = config.get('milvus', 'collection_name', fallback='edu_rag_collection')
        self.dimension = config.getint('embedding', 'dimension', fallback=768)
        self.redis_client = None  # set by _init_connections(); None disables caching
        self.collection = None    # set by _init_collection()
        self._init_connections()
        self._init_collection()

    def _init_connections(self) -> None:
        """Connect to Milvus and Redis; logs and re-raises on failure."""
        try:
            # Milvus connection, registered under the default alias.
            milvus_host = config.get('milvus', 'host', fallback='localhost')
            milvus_port = config.getint('milvus', 'port', fallback=19530)

            connections.connect(
                alias="default",
                host=milvus_host,
                port=milvus_port
            )
            logger.info(f"已连接到 Milvus: {milvus_host}:{milvus_port}")

            # Redis connection. decode_responses=True makes cached JSON come
            # back as str rather than bytes.
            # NOTE(review): redis.Redis() is lazy and does not actually open a
            # connection here, so this "connected" log can be optimistic —
            # consider a ping() if a hard failure at startup is desired.
            redis_host = config.get('redis', 'host', fallback='localhost')
            redis_port = config.getint('redis', 'port', fallback=6379)
            redis_db = config.getint('redis', 'db', fallback=0)

            self.redis_client = redis.Redis(
                host=redis_host,
                port=redis_port,
                db=redis_db,
                decode_responses=True
            )
            logger.info(f"已连接到 Redis: {redis_host}:{redis_port}")

        except Exception as e:
            logger.error(f"数据库连接失败: {e}")
            raise

    def _init_collection(self) -> None:
        """Load the Milvus collection if it exists, otherwise create it."""
        try:
            if utility.has_collection(self.collection_name):
                self.collection = Collection(self.collection_name)
                logger.info(f"加载现有集合: {self.collection_name}")
            else:
                self._create_collection()

        except Exception as e:
            logger.error(f"集合初始化失败: {e}")
            raise

    def _create_collection(self) -> None:
        """Create the Milvus collection and its vector index.

        Schema: auto-generated INT64 primary key, plus doc_id / content /
        embedding / metadata. Field order here must match the column order
        used in insert_documents().
        """
        try:
            fields = [
                FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
                FieldSchema(name="doc_id", dtype=DataType.VARCHAR, max_length=100),
                FieldSchema(name="content", dtype=DataType.VARCHAR, max_length=65535),
                FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=self.dimension),
                # NOTE(review): metadata is stored as a JSON string; entries
                # longer than 1000 chars will be rejected by Milvus — confirm
                # this limit is large enough for real metadata payloads.
                FieldSchema(name="metadata", dtype=DataType.VARCHAR, max_length=1000)
            ]

            schema = CollectionSchema(fields, f"EduRAG collection for {self.collection_name}")
            self.collection = Collection(self.collection_name, schema)

            # IVF_FLAT with COSINE similarity; nlist=128 controls the number
            # of coarse clusters used by the index.
            index_params = {
                "metric_type": "COSINE",
                "index_type": "IVF_FLAT",
                "params": {"nlist": 128}
            }
            self.collection.create_index("embedding", index_params)

            logger.info(f"成功创建集合: {self.collection_name}")

        except Exception as e:
            logger.error(f"创建集合失败: {e}")
            raise

    def insert_documents(self, documents: List[Dict[str, Any]]) -> bool:
        """
        Insert documents into the vector database.

        Args:
            documents: List of documents; each must contain 'doc_id',
                'content' and 'embedding', and may contain 'metadata'.

        Returns:
            True on success (including the empty-input no-op), False on error.
        """
        try:
            if not documents:
                logger.warning("没有文档需要插入")
                return True

            # Column-major entity layout; order must match the non-auto-id
            # fields of the schema created in _create_collection().
            doc_ids = [doc['doc_id'] for doc in documents]
            contents = [doc['content'] for doc in documents]
            embeddings = [doc['embedding'] for doc in documents]
            metadatas = [json.dumps(doc.get('metadata', {})) for doc in documents]

            entities = [doc_ids, contents, embeddings, metadatas]
            self.collection.insert(entities)

            # Flush so the inserted rows are durable and visible to search.
            self.collection.flush()

            # Best-effort Redis caching; failures are logged, not raised.
            self._cache_documents(documents)

            logger.info(f"成功插入 {len(documents)} 个文档")
            return True

        except Exception as e:
            logger.error(f"插入文档失败: {e}")
            return False

    def search_similar(self, query_embedding: List[float], top_k: int = 5,
                      similarity_threshold: float = 0.7) -> List[Dict[str, Any]]:
        """
        Similarity search over stored embeddings.

        Args:
            query_embedding: Query vector (must match the collection dimension).
            top_k: Maximum number of hits to request from Milvus.
            similarity_threshold: Minimum COSINE score for a hit to be kept
                (higher score = more similar).

        Returns:
            List of matching documents with doc_id, content, parsed metadata
            and score; empty list on error.
        """
        try:
            # Ensure the collection is loaded into memory before searching.
            self.collection.load()

            search_params = {
                "metric_type": "COSINE",
                "params": {"nprobe": 10}
            }

            results = self.collection.search(
                data=[query_embedding],
                anns_field="embedding",
                param=search_params,
                limit=top_k,
                output_fields=["doc_id", "content", "metadata"]
            )

            # results[0] is the hit list for our single query vector; keep
            # only hits at or above the similarity threshold.
            similar_docs = []
            for hit in results[0]:
                if hit.score >= similarity_threshold:
                    doc = {
                        'doc_id': hit.entity.get('doc_id'),
                        'content': hit.entity.get('content'),
                        'metadata': json.loads(hit.entity.get('metadata', '{}')),
                        'score': hit.score
                    }
                    similar_docs.append(doc)

            logger.info(f"检索到 {len(similar_docs)} 个相似文档")
            return similar_docs

        except Exception as e:
            logger.error(f"相似性检索失败: {e}")
            return []

    def _cache_documents(self, documents: List[Dict[str, Any]]) -> None:
        """Cache document payloads in Redis (best effort, 1-hour TTL)."""
        try:
            if not self.redis_client:
                return

            for doc in documents:
                cache_key = f"doc:{doc['doc_id']}"
                cache_data = {
                    'content': doc['content'],
                    'metadata': doc.get('metadata', {})
                }
                self.redis_client.setex(
                    cache_key,
                    3600,  # expire after 1 hour
                    json.dumps(cache_data)
                )

        except Exception as e:
            # Caching is an optimization; failure must not break insertion.
            logger.warning(f"缓存文档失败: {e}")

    def get_cached_document(self, doc_id: str) -> Dict[str, Any]:
        """Fetch a document payload from the Redis cache.

        Returns {} when caching is disabled, the key is missing, or the
        lookup fails.
        """
        try:
            if not self.redis_client:
                return {}

            cache_key = f"doc:{doc_id}"
            cached_data = self.redis_client.get(cache_key)

            if cached_data:
                return json.loads(cached_data)

        except Exception as e:
            logger.warning(f"获取缓存文档失败: {e}")

        return {}

    def delete_collection(self) -> None:
        """Drop the Milvus collection if it exists; errors are logged only."""
        try:
            if utility.has_collection(self.collection_name):
                utility.drop_collection(self.collection_name)
                logger.info(f"已删除集合: {self.collection_name}")
        except Exception as e:
            logger.error(f"删除集合失败: {e}")

    def get_collection_stats(self) -> Dict[str, Any]:
        """Return basic collection statistics ({} on error or no collection)."""
        try:
            if not self.collection:
                return {}

            self.collection.load()
            stats = {
                'name': self.collection_name,
                'num_entities': self.collection.num_entities,
                'dimension': self.dimension
            }

            return stats

        except Exception as e:
            logger.error(f"获取集合统计信息失败: {e}")
            return {}