"""
向量数据库集成模块
使用ChromaDB作为向量存储，负责文档的存储、索引和相似性搜索
"""

import os
import uuid
import logging
from typing import List, Dict, Any, Optional, Tuple
import chromadb
from chromadb.config import Settings
from langchain_community.vectorstores import Chroma
from langchain.schema import Document
import numpy as np
import time
import hashlib
import json
from functools import lru_cache
import threading
from concurrent.futures import ThreadPoolExecutor

import torch  # 添加PyTorch导入



logger = logging.getLogger(__name__)


class VectorStoreManager:
    """向量数据库管理器"""
    
    def __init__(self, 
                 persist_directory: str = "./chroma_db",
                 embedding_model: str = None,
                 embedding_manager = None,
                 collection_name: str = "rag_documents"):
        """
        Initialize the vector store manager.

        Sets up (in order) the embedding model, a persistent Chroma client
        and collection, the TTL caches, a background thread pool, and the
        performance counters. Construction touches disk (creates
        persist_directory) and may load an embedding model, so it can be
        slow.

        Args:
            persist_directory: Directory where ChromaDB persists its data.
            embedding_model: Embedding model name or local path (optional;
                falls back to the EMBEDDING_MODEL environment variable).
            embedding_manager: Pre-built embedding manager (optional; when
                given it is used verbatim instead of loading a model here).
            collection_name: Name of the Chroma collection to use.
        """
        self.persist_directory = persist_directory
        self.collection_name = collection_name
        self.embedding_manager = embedding_manager
        
        # Read configuration from the environment; the default points at a
        # local model directory so no unexpected download happens.
        env_embedding_model = os.getenv("EMBEDDING_MODEL", "./models/bge-large-zh-v1.5")
        self.embedding_model = embedding_model or env_embedding_model
        self.embedding_type = os.getenv("EMBEDDING_TYPE", "huggingface")
        
        logger.info("初始化向量存储管理器...")
        logger.info(f"  持久化目录: {persist_directory}")
        logger.info(f"  集合名称: {collection_name}")
        logger.info(f"  嵌入模型: {self.embedding_model}")
        
        # Make sure the persistence directory exists before Chroma opens it.
        os.makedirs(persist_directory, exist_ok=True)
        
        # Load/attach the embedding model first; the vector store needs it.
        self._initialize_embeddings()
        
        # Create the persistent Chroma client (telemetry disabled).
        logger.info("正在初始化Chroma客户端...")
        client_start = time.time()
        self.client = chromadb.PersistentClient(
            path=persist_directory,
            settings=Settings(anonymized_telemetry=False)
        )
        client_time = time.time() - client_start
        logger.info(f"Chroma客户端初始化完成 (耗时: {client_time:.2f}s)")
        
        # Bind the LangChain Chroma wrapper to the client/collection.
        self.vector_store = None
        self._initialize_vector_store()
        
        # TTL-based query/embedding caches.
        self._initialize_cache()
        
        # Thread pool plus background warm-up query.
        self._initialize_async_processing()
        
        # Running performance counters; guarded by self.cache_lock elsewhere.
        self.performance_stats = {
            'total_queries': 0,
            'cache_hits': 0,
            'avg_query_time': 0.0,
            'last_10_queries': []
        }
    
    def _initialize_embeddings(self):
        """
        Initialize the embedding model.

        Uses the externally supplied embedding manager when one was passed
        to the constructor; otherwise selects a HuggingFace model from the
        EMBEDDING_MODEL_TYPE environment variable, possibly overridden by
        the detected hardware (GPU memory / CPU-only), preferring a local
        copy under ./models. Falls back to a basic CPU model on failure.

        Raises:
            Exception: only if both the selected model and the fallback
                model fail to load (the fallback helper re-raises).
        """
        logger.info("正在初始化嵌入模型...")
        start_time = time.time()
        
        # An externally managed embedder takes precedence over local loading.
        if self.embedding_manager:
            logger.info("使用传入的嵌入管理器")
            self.embeddings = self.embedding_manager
            
            # Placeholder config: the real dimension/device are owned by the
            # external manager and may differ.
            self.current_model_config = {
                "name": "external-embedding-manager",
                "dimension": 1024,
                "description": "外部嵌入管理器",
                "hub_name": "external"
            }
            self.device = "auto"
            
            init_time = time.time() - start_time
            logger.info(f"嵌入模型初始化完成 (耗时: {init_time:.2f}s)")
            return

        try:
            # Supported embedding models. BUGFIX: the original dict declared
            # "bge-large-zh-v1.5" twice; duplicate keys silently override.
            embedding_configs = {
                "bge-large-zh-v1.5": {
                    "name": "bge-large-zh-v1.5",
                    "dimension": 1024,
                    "description": "BAAI中文优化大模型",
                    "hub_name": "BAAI/bge-large-zh-v1.5"
                },
                "bge-base-zh-v1.5": {
                    "name": "bge-base-zh-v1.5",
                    "dimension": 768,
                    "description": "BAAI中文优化基础模型",
                    "hub_name": "BAAI/bge-base-zh-v1.5"
                },
                "m3e-base": {
                    "name": "m3e-base",
                    "dimension": 768,
                    "description": "MokaAI中英双语模型",
                    "hub_name": "moka-ai/m3e-base"
                },
                "all-MiniLM-L6-v2": {
                    "name": "all-MiniLM-L6-v2",
                    "dimension": 384,
                    "description": "基础多语言模型",
                    "hub_name": "sentence-transformers/all-MiniLM-L6-v2"
                }
            }
            
            # Pick the requested model, defaulting to the base Chinese model.
            model_selection = os.getenv("EMBEDDING_MODEL_TYPE", "bge-base-zh-v1.5")
            selected_config = embedding_configs.get(model_selection, embedding_configs["bge-base-zh-v1.5"])
            
            logger.info(f"选择嵌入模型: {selected_config['description']} ({selected_config['name']})")
            
            # Auto-detect hardware; this may override the selection so the
            # model fits the device.
            if torch.cuda.is_available():
                device = "cuda"
                gpu_memory = torch.cuda.get_device_properties(0).total_memory / 1024**3  # GB
                
                if gpu_memory < 4:
                    # BUGFIX: the original picked the *large* model here while
                    # logging a downgrade; use the lightweight model instead.
                    selected_config = embedding_configs["all-MiniLM-L6-v2"]
                    logger.warning(f"GPU内存不足({gpu_memory:.1f}GB)，降级使用: {selected_config['name']}")
                elif gpu_memory < 8:
                    selected_config = embedding_configs["bge-base-zh-v1.5"]
                else:
                    selected_config = embedding_configs["bge-large-zh-v1.5"]
                    
                logger.info(f"检测到CUDA，使用GPU加速 ({gpu_memory:.1f}GB)")
            else:
                device = "cpu"
                # Lightweight model on CPU to keep latency reasonable.
                selected_config = embedding_configs["bge-base-zh-v1.5"]
                logger.warning("未检测到CUDA，使用CPU模式，选择轻量级模型")
            
            # Resolve a local copy AFTER hardware detection, so the path
            # matches the model we actually ended up selecting (the original
            # resolved the path before the override and could pick the wrong
            # directory). Guard against a None configured path.
            local_model_path = None
            possible_local_paths = [
                f"./models/{selected_config['name']}",
                f"./models/{selected_config['name']}/sentence-transformers/{selected_config['name']}",
                self.embedding_model
            ]
            for path in possible_local_paths:
                if path and os.path.exists(path) and os.path.exists(os.path.join(path, "config.json")):
                    local_model_path = path
                    logger.info(f"使用本地模型: {local_model_path}")
                    break
            
            # LangChain-compatible embedding wrapper.
            from langchain_community.embeddings import HuggingFaceEmbeddings
            
            model_name = local_model_path or selected_config["hub_name"]
            model_kwargs = {
                'device': device,
                'trust_remote_code': True
            }
            encode_kwargs = {
                'normalize_embeddings': True,
                'batch_size': 32 if device == "cuda" else 8  # larger batches on GPU
            }
            
            # BGE models expect a retrieval-instruction prefix on queries.
            if "bge" in selected_config["name"]:
                encode_kwargs['query_instruction'] = "为这个句子生成表示以用于检索相关文章："
            
            self.embeddings = HuggingFaceEmbeddings(
                model_name=model_name,
                model_kwargs=model_kwargs,
                encode_kwargs=encode_kwargs,
                cache_folder="./models"
            )
            
            # Remember what was loaded for cache keys and diagnostics.
            self.current_model_config = selected_config
            self.device = device
            
            load_time = time.time() - start_time
            logger.info(f"嵌入模型初始化完成 (耗时: {load_time:.2f}s)")
            logger.info(f"模型维度: {selected_config['dimension']}, 设备: {device}")

        except Exception as e:
            logger.error(f"初始化嵌入模型失败: {str(e)}")
            logger.info("回退到基础模型...")
            # BUGFIX: do not re-raise after a *successful* fallback; the
            # fallback helper itself raises if it also fails.
            self._fallback_to_basic_model()

    def _fallback_to_basic_model(self):
        """Load the lightweight multilingual model on CPU as a last resort.

        Raises:
            Exception: re-raised if even the basic model fails to load.
        """
        logger.info("正在回退到基础嵌入模型...")
        try:
            from langchain_community.embeddings import HuggingFaceEmbeddings

            hub_name = "sentence-transformers/all-MiniLM-L6-v2"
            self.embeddings = HuggingFaceEmbeddings(
                model_name=hub_name,
                model_kwargs={'device': 'cpu'},
                encode_kwargs={'normalize_embeddings': True}
            )
            self.current_model_config = {
                "name": "all-MiniLM-L6-v2",
                "dimension": 384,
                "description": "基础多语言模型",
                "hub_name": hub_name
            }
            self.device = "cpu"
            logger.info("回退到基础模型成功")
        except Exception as e:
            logger.error(f"回退到基础模型失败: {e}")
            raise

    def _initialize_cache(self):
        """Set up the TTL-based query cache and the embedding cache."""
        logger.info("正在初始化缓存机制...")

        # Cached search results, keyed by an MD5 of the query parameters.
        self.query_cache = {}
        self.cache_lock = threading.RLock()

        # Cached embeddings, bounded at embedding_cache_size entries.
        self.embedding_cache = {}
        self.embedding_cache_size = 10000

        # Cached entries expire after this many seconds (one hour).
        self.cache_ttl = 3600

        logger.info("缓存机制初始化完成")

    def _initialize_async_processing(self):
        """Create the worker thread pool and kick off background warm-up."""
        logger.info("正在初始化异步处理...")

        # Pool shared by batch search and background tasks.
        self.executor = ThreadPoolExecutor(max_workers=4)

        # Warm the search stack so the first real query is faster.
        self._preload_common_queries()

        logger.info("异步处理初始化完成")

    def _preload_common_queries(self):
        """Schedule a dummy search on the pool to warm up the search stack."""
        def _warmup():
            # Best effort: a failed warm-up only logs a warning.
            try:
                self.similarity_search("测试", k=1)
                logger.info("预加载完成")
            except Exception as exc:
                logger.warning(f"预加载失败: {exc}")

        self.executor.submit(_warmup)

    def _get_cache_key(self, query: str, k: int = 4, **kwargs) -> str:
        """生成缓存键"""
        cache_data = {
            'query': query,
            'k': k,
            'embedding_model': self.current_model_config.get('name', 'default'),
            'kwargs': kwargs
        }
        return hashlib.md5(json.dumps(cache_data, sort_keys=True).encode()).hexdigest()

    def _is_cache_valid(self, timestamp: float) -> bool:
        """检查缓存是否有效"""
        return time.time() - timestamp < self.cache_ttl

    def _cleanup_expired_cache(self):
        """清理过期的缓存项"""
        try:
            with self.cache_lock:
                current_time = time.time()
                
                # 清理查询缓存
                expired_keys = []
                for key, value in self.query_cache.items():
                    if not self._is_cache_valid(value['timestamp']):
                        expired_keys.append(key)
                
                for key in expired_keys:
                    del self.query_cache[key]
                
                # 清理嵌入缓存
                expired_embedding_keys = []
                for key, value in self.embedding_cache.items():
                    if not self._is_cache_valid(value.get('timestamp', 0)):
                        expired_embedding_keys.append(key)
                
                for key in expired_embedding_keys:
                    del self.embedding_cache[key]
                
                if expired_keys or expired_embedding_keys:
                    logger.debug(f"清理了 {len(expired_keys)} 个查询缓存和 {len(expired_embedding_keys)} 个嵌入缓存")
                    
        except Exception as e:
            logger.warning(f"清理缓存时出错: {str(e)}")

    def _update_performance_stats(self, query_time: float, cache_hit: bool = False):
        """更新性能统计"""
        with self.cache_lock:
            self.performance_stats['total_queries'] += 1
            if cache_hit:
                self.performance_stats['cache_hits'] += 1
            else:
                self.performance_stats['total_search_time'] = self.performance_stats.get('total_search_time', 0) + query_time
            
            # 更新平均查询时间
            total_time = self.performance_stats['avg_query_time'] * (self.performance_stats['total_queries'] - 1)
            self.performance_stats['avg_query_time'] = (total_time + query_time) / self.performance_stats['total_queries']
            
            # 记录最近10次查询
            self.performance_stats['last_10_queries'].append({
                'time': query_time,
                'cache_hit': cache_hit,
                'timestamp': time.time()
            })
            
            # 保持只记录最近10次
            if len(self.performance_stats['last_10_queries']) > 10:
                self.performance_stats['last_10_queries'] = self.performance_stats['last_10_queries'][-10:]

    def get_performance_stats(self) -> Dict[str, Any]:
        """获取性能统计信息"""
        with self.cache_lock:
            stats = self.performance_stats.copy()
            stats['cache_hit_rate'] = (
                stats['cache_hits'] / stats['total_queries'] * 100 
                if stats['total_queries'] > 0 else 0
            )
            stats['cache_size'] = len(self.query_cache)
            stats['embedding_cache_size'] = len(self.embedding_cache)
            return stats

    def batch_similarity_search(self, queries: List[str], k: int = 4, use_cache: bool = True) -> List[List[Document]]:
        """
        Run several similarity searches in parallel on the thread pool.

        Args:
            queries: Query strings to search for.
            k: Number of documents to return per query.
            use_cache: Whether each search may consult the query cache.

        Returns:
            One list of documents per query, in the same order as *queries*;
            a query that fails or times out contributes an empty list.
        """
        if not queries:
            return []

        logger.info(f"开始批量搜索 {len(queries)} 个查询")
        started = time.time()

        # Fan out: one pool task per query.
        pending = [
            self.executor.submit(self.similarity_search, query, k, None, use_cache)
            for query in queries
        ]

        # Fan in, preserving input order; failures degrade to empty results.
        gathered = []
        for task in pending:
            try:
                gathered.append(task.result(timeout=30))  # 30s cap per query
            except Exception as exc:
                logger.error(f"批量搜索中的单个查询失败: {str(exc)}")
                gathered.append([])

        elapsed = time.time() - started
        logger.info(f"批量搜索完成，处理 {len(queries)} 个查询 (总耗时: {elapsed:.2f}s)")

        return gathered

    def clear_cache(self) -> None:
        """Empty both the query cache and the embedding cache."""
        with self.cache_lock:
            for cache in (self.query_cache, self.embedding_cache):
                cache.clear()
            logger.info("缓存已清理")

    def get_cache_stats(self) -> Dict[str, int]:
        """Return current cache sizes and the embedding-cache capacity."""
        sizes = {
            'query_cache_size': len(self.query_cache),
            'embedding_cache_size': len(self.embedding_cache),
        }
        sizes['max_cache_size'] = self.embedding_cache_size
        return sizes
    
    def _initialize_vector_store(self):
        """Bind the LangChain Chroma wrapper to the current collection.

        Raises:
            Exception: re-raised if the Chroma wrapper cannot be created.
        """
        try:
            logger.info("正在初始化Chroma向量存储...")
            started = time.time()

            self.vector_store = Chroma(
                client=self.client,
                collection_name=self.collection_name,
                embedding_function=self.embeddings,
                persist_directory=self.persist_directory
            )

            # Report how many documents the collection already holds; a
            # missing/unreadable collection just means we start empty.
            try:
                count = self.client.get_collection(self.collection_name).count()
                logger.info(f"向量存储初始化完成 (耗时: {time.time() - started:.2f}s)")
                logger.info(f"当前集合中包含 {count} 个文档")
            except Exception:
                logger.info(f"向量存储初始化完成 (耗时: {time.time() - started:.2f}s)")
                logger.info("集合为空，准备接收新文档")

        except Exception as e:
            logger.error(f"初始化向量存储失败: {str(e)}")
            raise
    
    def add_documents(self, 
                     documents: List[Dict[str, Any]], 
                     ids: Optional[List[str]] = None) -> List[str]:
        """
        Add documents to the vector store in batches.

        Args:
            documents: Dicts with 'page_content' and optional 'metadata'.
            ids: Optional explicit document IDs; positions without an ID
                get a generated UUID.

        Returns:
            The IDs of the stored documents.

        Raises:
            Exception: re-raised from the underlying store on failure.
        """
        if not documents:
            return []

        started = time.time()
        logger.info(f"开始添加 {len(documents)} 个文档到向量数据库...")

        def _sanitize(meta: Dict[str, Any]) -> Dict[str, Any]:
            # Chroma metadata must be scalar: keep str/int/float/bool,
            # turn None into "", and stringify everything else.
            clean = {}
            for key, value in meta.items():
                if isinstance(value, (str, int, float, bool)):
                    clean[key] = value
                elif value is None:
                    clean[key] = ""
                else:
                    clean[key] = str(value)
            return clean

        try:
            # Embedding batch size, configurable via the environment.
            batch_size = int(os.getenv('EMBEDDING_BATCH_SIZE', '8'))

            # Build LangChain documents and a parallel list of IDs.
            langchain_docs = [
                Document(page_content=doc['page_content'],
                         metadata=_sanitize(doc.get('metadata', {})))
                for doc in documents
            ]
            doc_ids = [
                ids[i] if ids and i < len(ids) else str(uuid.uuid4())
                for i in range(len(documents))
            ]

            # Insert batch by batch to bound memory usage.
            stored_ids = []
            for offset in range(0, len(langchain_docs), batch_size):
                batch_docs = langchain_docs[offset:offset + batch_size]
                batch_ids = doc_ids[offset:offset + batch_size]

                self.vector_store.add_documents(
                    documents=batch_docs,
                    ids=batch_ids
                )
                stored_ids.extend(batch_ids)

                # Release GPU memory between batches (skip after the last).
                if offset + batch_size < len(langchain_docs) and torch.cuda.is_available():
                    torch.cuda.empty_cache()

            logger.info(f"成功添加 {len(documents)} 个文档到向量存储 (耗时: {time.time() - started:.2f}s)")
            return stored_ids

        except Exception as e:
            logger.error(f"添加文档到向量存储失败: {str(e)}")
            raise
    
    def add_texts(self, 
                  texts: List[str], 
                  metadatas: Optional[List[Dict[str, Any]]] = None,
                  ids: Optional[List[str]] = None) -> List[str]:
        """
        Add raw texts (with optional per-text metadata) to the vector store.

        Args:
            texts: Text contents to store.
            metadatas: Optional metadata dict per text.
            ids: Optional explicit document IDs.

        Returns:
            The IDs of the stored documents.

        Raises:
            Exception: re-raised from add_documents on failure.
        """
        if not texts:
            return []

        try:
            # Pair each text with its metadata (empty dict when absent)
            # and delegate to add_documents.
            payload = [
                {
                    'page_content': text,
                    'metadata': metadatas[i] if metadatas and i < len(metadatas) else {}
                }
                for i, text in enumerate(texts)
            ]
            return self.add_documents(payload, ids)

        except Exception as e:
            logger.error(f"添加文本到向量存储失败: {str(e)}")
            raise
    
    def similarity_search(self, 
                         query: str, 
                         k: int = 4,
                         filter_dict: Optional[Dict[str, Any]] = None,
                         use_cache: bool = True) -> List[Document]:
        """
        Similarity search with TTL-cache support.

        Cached results are stored as plain dicts (content + metadata) and
        rebuilt into Document objects on a hit, so cached hits return new
        Document instances, not the originals.

        Args:
            query: Query text.
            k: Number of results to return.
            filter_dict: Optional metadata filter; also mixed into the
                cache key.
            use_cache: Whether to consult/populate the query cache.

        Returns:
            List of relevant documents.

        Raises:
            ValueError: if the vector store has not been initialized.
            Exception: re-raised from the underlying search on failure.
        """
        if not self.vector_store:
            raise ValueError("向量存储未初始化")
        
        start_time = time.time()
        
        try:
            cache_key = self._get_cache_key(query, k, **(filter_dict or {}))
            
            # Fast path: serve from the cache when a fresh entry exists.
            if use_cache:
                with self.cache_lock:
                    cached_result = self.query_cache.get(cache_key)
                    if cached_result and self._is_cache_valid(cached_result['timestamp']):
                        # Cache hit: counts toward the stats with near-zero time.
                        self._update_performance_stats(time.time() - start_time, cache_hit=True)
                        logger.info(f"缓存命中: {query} (耗时: {(time.time() - start_time)*1000:.1f}ms)")
                        
                        # Rebuild Document objects from the cached dicts.
                        docs = []
                        for doc_data in cached_result['documents']:
                            doc = Document(
                                page_content=doc_data['content'],
                                metadata=doc_data['metadata']
                            )
                            docs.append(doc)
                        return docs
            
            # Slow path: run the real vector search.
            logger.info(f"正在搜索相关文档: {query}")
            results = self.vector_store.similarity_search(
                query=query,
                k=k,
                filter=filter_dict
            )
            search_time = time.time() - start_time
            
            # Store a serializable copy of the results in the cache.
            if use_cache:
                with self.cache_lock:
                    self.query_cache[cache_key] = {
                        'documents': [
                            {
                                'content': doc.page_content,
                                'metadata': doc.metadata
                            }
                            for doc in results
                        ],
                        'timestamp': time.time()
                    }
                    
                    # Opportunistically evict expired entries while holding
                    # the lock.
                    self._cleanup_expired_cache()
            
            # Record timing for the non-cached path.
            self._update_performance_stats(search_time, cache_hit=False)
            
            logger.info(f"找到 {len(results)} 个相关文档 (耗时: {search_time:.2f}s)")
            return results
            
        except Exception as e:
            logger.error(f"相似性搜索失败: {str(e)}")
            raise
    
    def similarity_search_with_score(self, 
                                    query: str, 
                                    k: int = 4,
                                    filter_dict: Optional[Dict[str, Any]] = None) -> List[Tuple[Document, float]]:
        """
        Similarity search that also returns each document's score.

        Args:
            query: Query text.
            k: Number of results to return.
            filter_dict: Optional metadata filter.

        Returns:
            List of (document, score) tuples.

        Raises:
            Exception: re-raised from the underlying search on failure.
        """
        try:
            scored = self.vector_store.similarity_search_with_score(
                query=query,
                k=k,
                filter=filter_dict
            )
            logger.info(f"找到 {len(scored)} 个相关文档及分数")
            return scored
        except Exception as e:
            logger.error(f"带分数的相似性搜索失败: {str(e)}")
            raise
    
    def max_marginal_relevance_search(self, 
                                    query: str, 
                                    k: int = 4,
                                    fetch_k: int = 20,
                                    lambda_mult: float = 0.5) -> List[Document]:
        """
        Maximal marginal relevance (MMR) search.

        Args:
            query: Query text.
            k: Number of results to return.
            fetch_k: Number of candidates fetched before re-ranking.
            lambda_mult: Diversity trade-off in [0, 1] (passed through to
                the underlying store).

        Returns:
            List of relevant documents.

        Raises:
            Exception: re-raised from the underlying search on failure.
        """
        try:
            picked = self.vector_store.max_marginal_relevance_search(
                query=query,
                k=k,
                fetch_k=fetch_k,
                lambda_mult=lambda_mult
            )
            logger.info(f"MMR搜索找到 {len(picked)} 个相关文档")
            return picked
        except Exception as e:
            logger.error(f"MMR搜索失败: {str(e)}")
            raise
    
    def delete_documents(self, ids: List[str]) -> bool:
        """
        Delete documents by ID.

        Args:
            ids: IDs of the documents to remove.

        Returns:
            True on success, False if the underlying delete raised.
        """
        try:
            self.vector_store.delete(ids=ids)
        except Exception as e:
            logger.error(f"删除文档失败: {str(e)}")
            return False
        logger.info(f"成功删除 {len(ids)} 个文档")
        return True
    
    def update_document(self, 
                       doc_id: str, 
                       text: str, 
                       metadata: Optional[Dict[str, Any]] = None) -> bool:
        """
        Replace a document's content/metadata (delete, then re-add).

        NOTE(review): this is not atomic — a failure after the delete can
        leave the document missing; confirm whether that is acceptable.

        Args:
            doc_id: ID of the document to update.
            text: New page content.
            metadata: New metadata (defaults to empty).

        Returns:
            True on success, False if any step raised.
        """
        try:
            # Remove the old version, then insert the replacement under
            # the same ID.
            self.delete_documents([doc_id])
            self.add_documents(
                [{'page_content': text, 'metadata': metadata or {}}],
                [doc_id]
            )
            logger.info(f"成功更新文档: {doc_id}")
            return True
        except Exception as e:
            logger.error(f"更新文档失败: {str(e)}")
            return False
    
    def get_all_documents(self) -> List[Document]:
        """Fetch every document currently stored in the collection.

        Returns:
            All non-empty documents with scalar-only metadata; an empty
            list on error or when the collection is empty.
        """
        try:
            collection = self.client.get_collection(self.collection_name)
            if collection.count() == 0:
                return []

            payload = collection.get()
            contents = payload.get('documents', [])
            metadatas = payload.get('metadatas', [])

            documents = []
            # zip() stops at the shorter list, matching lengths safely.
            for content, meta in zip(contents, metadatas):
                if not content:  # skip empty entries
                    continue
                # Keep only scalar metadata values.
                scalar_meta = {
                    key: value
                    for key, value in (meta or {}).items()
                    if isinstance(value, (str, int, float, bool))
                }
                documents.append(Document(page_content=content, metadata=scalar_meta))

            logger.info(f"从向量存储中获取了 {len(documents)} 个文档")
            return documents

        except Exception as e:
            logger.error(f"获取所有文档失败: {e}")
            return []

    def get_document_count(self) -> int:
        """
        Return the number of documents in the current collection.

        Returns:
            Document count, or 0 if the collection cannot be read.
        """
        try:
            return self.client.get_collection(self.collection_name).count()
        except Exception as e:
            logger.error(f"获取文档数量失败: {str(e)}")
            return 0
    
    def get_collection_stats(self) -> Dict[str, Any]:
        """
        Summarize the current collection.

        Returns:
            Dict with collection name, document count, embedding model and
            persist directory; an empty dict on failure.
        """
        try:
            total = self.client.get_collection(self.collection_name).count()
        except Exception as e:
            logger.error(f"获取统计信息失败: {str(e)}")
            return {}
        return {
            'collection_name': self.collection_name,
            'total_documents': total,
            'embedding_model': self.embedding_model,
            'persist_directory': self.persist_directory
        }
    
    def clear_collection(self) -> bool:
        """
        Drop and recreate the current collection, removing all documents.

        Returns:
            True on success, False if the drop or re-init raised.
        """
        try:
            self.client.delete_collection(self.collection_name)
            self._initialize_vector_store()
        except Exception as e:
            logger.error(f"清空集合失败: {str(e)}")
            return False
        logger.info(f"成功清空集合: {self.collection_name}")
        return True
    
    def list_collections(self) -> List[str]:
        """
        List the names of all collections known to the Chroma client.

        Returns:
            Collection names; an empty list on failure.
        """
        try:
            return [collection.name for collection in self.client.list_collections()]
        except Exception as e:
            logger.error(f"获取集合列表失败: {str(e)}")
            return []
    
    def create_collection(self, collection_name: str) -> bool:
        """
        Create a new (empty) collection.

        Args:
            collection_name: Name of the collection to create.

        Returns:
            True on success, False if creation raised.
        """
        try:
            self.client.create_collection(collection_name)
        except Exception as e:
            logger.error(f"创建集合失败: {str(e)}")
            return False
        logger.info(f"成功创建集合: {collection_name}")
        return True
    
    def switch_collection(self, collection_name: str) -> bool:
        """
        Switch the manager to a different collection.

        Args:
            collection_name: Name of the collection to switch to.

        Returns:
            True on success, False if re-initialization failed — in which
            case the previous collection name is restored.
        """
        previous = self.collection_name
        try:
            self.collection_name = collection_name
            self._initialize_vector_store()
            logger.info(f"已切换到集合: {collection_name}")
            return True
        except Exception as e:
            # BUGFIX: the original left self.collection_name pointing at a
            # collection it never opened. Restore the previous name; the old
            # vector_store binding is still intact because
            # _initialize_vector_store only reassigns it on success.
            self.collection_name = previous
            logger.error(f"切换集合失败: {str(e)}")
            return False


class VectorStoreQuery:
    """Convenience query helpers layered on top of a VectorStoreManager."""

    def __init__(self, vector_store_manager: VectorStoreManager):
        """
        Initialize the query helper.

        Args:
            vector_store_manager: Manager instance that performs the
                actual searches.
        """
        self.vector_store = vector_store_manager

    def search_by_source(self, source: str, k: int = 4) -> List[Document]:
        """
        Search documents restricted to a given source.

        Args:
            source: Document source (also used as the query text).
            k: Number of results to return.

        Returns:
            Matching documents.
        """
        return self.vector_store.similarity_search(
            query=source,
            k=k,
            filter_dict={"source": source}
        )

    def search_by_file_type(self, file_type: str, k: int = 4) -> List[Document]:
        """
        Search documents restricted to a file extension.

        Args:
            file_type: File type (e.g. pdf, txt, docx); also used as the
                query text.
            k: Number of results to return.

        Returns:
            Matching documents.
        """
        return self.vector_store.similarity_search(
            query=file_type,
            k=k,
            filter_dict={"file_extension": file_type}
        )

    def get_document_by_chunk_id(self, chunk_id: int) -> List[Document]:
        """
        Look up the document stored under a given chunk ID.

        Args:
            chunk_id: Chunk identifier stored in the metadata.

        Returns:
            A list containing at most one matching document.
        """
        return self.vector_store.similarity_search(
            query="",
            k=1,
            filter_dict={"chunk_id": chunk_id}
        )


# Usage example / smoke test
if __name__ == "__main__":
    # Build a manager with default settings.
    manager = VectorStoreManager()

    # Two tiny sample documents.
    sample_docs = [
        {
            'page_content': '这是第一个测试文档。内容很短。',
            'metadata': {'source': 'test1.txt', 'file_extension': '.txt'}
        },
        {
            'page_content': '这是第二个测试文档。内容稍微长一点，用于测试向量存储功能。',
            'metadata': {'source': 'test2.txt', 'file_extension': '.txt'}
        }
    ]

    # Ingest the samples and show their generated IDs.
    doc_ids = manager.add_documents(sample_docs)
    print(f"添加的文档ID: {doc_ids}")

    # Collection statistics.
    stats = manager.get_collection_stats()
    print(f"集合统计: {stats}")

    # Plain similarity search.
    results = manager.similarity_search("测试文档", k=2)
    print(f"搜索结果: {len(results)} 个文档")

    # Scored similarity search.
    results_with_score = manager.similarity_search_with_score("测试文档", k=2)
    for doc, score in results_with_score:
        print(f"文档: {doc.page_content[:20]}..., 分数: {score}")

    # Cleanup (left disabled on purpose).
    # manager.clear_collection()