"""
向量数据库管理模块（使用Milvus Lite）
"""
import os
import sys
from typing import List, Dict
import numpy as np
from datetime import datetime

# Add the project root directory to the Python path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from pymilvus import (
    connections,
    Collection,
    CollectionSchema,
    FieldSchema,
    DataType,
    utility
)
from config import VECTOR_DB_CONFIG
from utils.logger import setup_logger

logger = setup_logger(__name__)

class VectorStore:
    def __init__(self):
        """初始化向量存储"""
        self.collection_name = VECTOR_DB_CONFIG['collection_name']
        self.dimension = VECTOR_DB_CONFIG['dimension']
        self.collection = None
        
        try:
            # 确保milvus_data目录存在
            milvus_path = os.path.join(os.getcwd(), "milvus_data")
            os.makedirs(milvus_path, exist_ok=True)
            
            # 设置数据库文件路径
            db_path = os.path.join(milvus_path, "milvus.db")
            
            # 连接到Milvus Lite
            connections.connect(
                alias="default",
                uri=db_path,  # 使用具体的.db文件路径
                local_path=milvus_path
            )
            logger.info("成功连接到Milvus Lite")
            
            # 确保collection存在
            self._ensure_collection()
            
        except Exception as e:
            logger.error(f"初始化Milvus Lite失败: {str(e)}")
            raise

    def _ensure_collection(self):
        """确保collection存在，如果不存在则创建"""
        try:
            # 检查collection是否存在
            if not utility.has_collection(self.collection_name):
                # 定义字段，使用JSON字段存储元数据以支持多种格式
                fields = [
                    FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
                    FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=65535),
                    FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=self.dimension),
                    FieldSchema(name="metadata", dtype=DataType.JSON)  # 使用JSON存储所有元数据
                ]

                # 创建collection
                schema = CollectionSchema(fields=fields, description="Enterprise Knowledge Base")
                self.collection = Collection(self.collection_name, schema)

                # 创建索引
                index_params = {
                    "metric_type": VECTOR_DB_CONFIG['metric_type'],
                    "index_type": VECTOR_DB_CONFIG['index_type'],
                    "params": VECTOR_DB_CONFIG['index_params']
                }
                self.collection.create_index("embedding", index_params)
                self.collection.load()
            else:
                self.collection = Collection(self.collection_name)
                self.collection.load()
        except Exception as e:
            logger.error(f"确保collection存在时出错: {e}")
            raise

    def _get_collection(self) -> Collection:
        """获取collection，如果不存在则创建"""
        if self.collection is None:
            if utility.has_collection(self.collection_name):
                self.collection = Collection(self.collection_name)
                self.collection.load()
            else:
                self._ensure_collection()
        return self.collection

    def add_documents(self, documents, embeddings):
        """添加文档到向量库"""
        try:
            collection = self._get_collection()
            
            # 准备数据，确保元数据格式统一
            data = []
            for doc, embedding in zip(documents, embeddings):
                # 构建标准化的元数据
                metadata = doc.get('metadata', {})
                if not isinstance(metadata, dict):
                    metadata = {'content': metadata}
                
                # 确保基本字段存在
                metadata.update({
                    'source': metadata.get('source', ''),
                    'page': metadata.get('page', 0),
                    'type': metadata.get('type', 'unknown'),
                    'created_at': metadata.get('created_at', str(datetime.now()))
                })
                
                data.append({
                    'text': doc['text'],
                    'embedding': embedding,
                    'metadata': metadata
                })
            
            # 插入数据
            collection.insert(data)
            collection.flush()
            return True
        except Exception as e:
            logger.error(f"添加文档到向量库时出错: {e}")
            return False

    def search_similar(self, query_vector, top_k=5, min_score=0.3, filter_expr=None):
        """搜索相似文档，支持相似度阈值和过滤条件"""
        try:
            collection = self._get_collection()
            
            search_params = {
                "metric_type": VECTOR_DB_CONFIG['metric_type'],
                "params": VECTOR_DB_CONFIG['search_params']
            }
            
            # 构建搜索请求
            search_kwargs = {
                "data": [query_vector],
                "anns_field": "embedding",
                "param": search_params,
                "limit": top_k * 2,  # 检索更多结果以便过滤
                "output_fields": ["text", "metadata"]
            }
            
            # 如果有过滤条件，转换为JSON路径表达式
            if filter_expr:
                # 替换简单的字段访问为JSON路径访问
                for field in ['source', 'page', 'type']:
                    filter_expr = filter_expr.replace(
                        f'{field} ==',
                        f'metadata["{field}"] =='
                    )
                search_kwargs["expr"] = filter_expr
            
            # 执行搜索
            results = collection.search(**search_kwargs)
            
            similar_docs = []
            for hits in results:
                for hit in hits:
                    # 只保留相似度高于阈值的结果
                    if hit.distance >= min_score:
                        doc = {
                            'text': hit.entity.text,
                            'metadata': hit.entity.metadata,  # 保留完整的元数据
                            'similarity': hit.distance
                        }
                        similar_docs.append(doc)
                        
                    if len(similar_docs) >= top_k:
                        break
            
            return similar_docs
        except Exception as e:
            logger.error(f"搜索相似文档时出错: {e}")
            return []

    def clear(self):
        """清空向量库"""
        try:
            if utility.has_collection(self.collection_name):
                utility.drop_collection(self.collection_name)
                logger.info(f"已清空向量库: {self.collection_name}")
                self.collection = None
            return True
        except Exception as e:
            logger.error(f"清空向量库时出错: {e}")
            return False

    def get_page_segments(self, source: str, page: int) -> List[str]:
        """获取指定页面的所有文本段落"""
        try:
            collection = self._get_collection()
            
            # 构建JSON路径表达式
            expr = f'metadata["source"] == "{source}" && metadata["page"] == {page}'
            
            # 执行查询
            results = collection.query(
                expr=expr,
                output_fields=["text"],
                consistency_level="Strong"
            )
            
            # 提取文本段落
            segments = []
            for result in results:
                if 'text' in result:
                    segments.append(result['text'])
            
            return segments
            
        except Exception as e:
            logger.error(f"获取页面段落时出错: {e}")
            return []
