"""
向量存储管理器

这个模块负责管理文档嵌入和检索功能。
支持ChromaDB作为向量数据库，提供语义搜索功能。
"""

import logging
import os
from typing import List, Dict, Any, Optional, Tuple
from pathlib import Path

from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter

from config.settings import settings
from .embedding_manager import embedding_manager

logger = logging.getLogger(__name__)

class VectorStoreManager:
    """
    向量存储管理器类
    
    负责管理文档的向量化存储、检索和相似度搜索。
    支持文档添加、删除、更新和查询功能。
    """
    
    def __init__(self):
        """初始化向量存储管理器"""
        self.vector_store = None
        self.text_splitter = None
        self._initialize_components()
    
    def _initialize_components(self):
        """初始化组件"""
        logger.info("正在初始化向量存储管理器...")
        try:
            self._initialize_text_splitter()
            self._initialize_vector_store()
            logger.info("向量存储管理器初始化完成")
        except Exception as e:
            logger.error(f"向量存储管理器初始化失败: {e}")
    
    def _initialize_text_splitter(self):
        """初始化文本分割器"""
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=settings.CHUNK_SIZE,
            chunk_overlap=settings.CHUNK_OVERLAP,
            length_function=len,
            separators=["\n\n", "\n", " ", ""]
        )
        logger.info(f"已初始化文本分割器 (chunk_size={settings.CHUNK_SIZE}, overlap={settings.CHUNK_OVERLAP})")
    
    def _initialize_vector_store(self):
        """初始化向量存储"""
        if not embedding_manager.is_ready():
            logger.warning("嵌入管理器未就绪，跳过向量存储初始化")
            return
        
        try:
            # 确保持久化目录存在
            persist_directory = Path(settings.CHROMA_PERSIST_DIRECTORY)
            persist_directory.mkdir(parents=True, exist_ok=True)
            
            # 获取当前嵌入模型
            embedding_model = embedding_manager.get_current_model()
            if not embedding_model:
                logger.warning("无法获取嵌入模型，跳过向量存储初始化")
                return
            
            # 初始化ChromaDB
            self.vector_store = Chroma(
                collection_name=settings.CHROMA_COLLECTION_NAME,
                embedding_function=embedding_model,
                persist_directory=str(persist_directory)
            )
            
            logger.info(f"已初始化向量存储: {settings.CHROMA_COLLECTION_NAME}")
            
        except Exception as e:
            logger.error(f"初始化向量存储失败: {str(e)}")
    
    def add_documents(self, documents: List[Document], **kwargs) -> bool:
        """
        添加文档到向量存储
        
        Args:
            documents (List[Document]): 文档列表
            **kwargs: 额外参数
            
        Returns:
            bool: 是否添加成功
        """
        if not self.is_available():
            logger.error("向量存储不可用")
            return False
        
        if not documents:
            logger.warning("没有文档需要添加")
            return True
        
        try:
            # 分割文档
            split_docs = []
            for doc in documents:
                chunks = self.text_splitter.split_documents([doc])
                split_docs.extend(chunks)
            
            # 添加到向量存储
            self.vector_store.add_documents(split_docs, **kwargs)
            
            logger.info(f"已添加 {len(documents)} 个文档 ({len(split_docs)} 个块)")
            return True
            
        except Exception as e:
            logger.error(f"添加文档失败: {str(e)}")
            return False
    
    def add_texts(self, texts: List[str], metadatas: Optional[List[Dict]] = None, **kwargs) -> bool:
        """
        添加文本到向量存储
        
        Args:
            texts (List[str]): 文本列表
            metadatas (Optional[List[Dict]]): 元数据列表
            **kwargs: 额外参数
            
        Returns:
            bool: 是否添加成功
        """
        if not self.vector_store:
            logger.error("向量存储未初始化")
            return False
        
        if not texts:
            logger.warning("没有文本需要添加")
            return True
        
        try:
            # 分割文本
            split_texts = []
            split_metadatas = []
            
            for i, text in enumerate(texts):
                chunks = self.text_splitter.split_text(text)
                split_texts.extend(chunks)
                
                # 复制元数据到每个块
                if metadatas and i < len(metadatas):
                    chunk_metadata = metadatas[i].copy()
                    split_metadatas.extend([chunk_metadata] * len(chunks))
                else:
                    split_metadatas.extend([{}] * len(chunks))
            
            # 添加到向量存储
            self.vector_store.add_texts(
                texts=split_texts,
                metadatas=split_metadatas if split_metadatas else None,
                **kwargs
            )
            
            logger.info(f"已添加 {len(texts)} 个文本 ({len(split_texts)} 个块)")
            return True
            
        except Exception as e:
            logger.error(f"添加文本失败: {str(e)}")
            return False
    
    def similarity_search(self, query: str, k: int = 4, **kwargs) -> List[Document]:
        """
        相似度搜索
        
        Args:
            query (str): 查询文本
            k (int): 返回结果数量
            **kwargs: 额外参数
            
        Returns:
            List[Document]: 相似文档列表
        """
        if not self.is_available():
            logger.error("向量存储不可用")
            return []
        
        try:
            results = self.vector_store.similarity_search(query, k=k, **kwargs)
            logger.debug(f"相似度搜索返回 {len(results)} 个结果")
            return results
            
        except Exception as e:
            logger.error(f"相似度搜索失败: {str(e)}")
            return []
    
    def similarity_search_with_score(self, query: str, k: int = 4, **kwargs) -> List[Tuple[Document, float]]:
        """
        带分数的相似度搜索
        
        Args:
            query (str): 查询文本
            k (int): 返回结果数量
            **kwargs: 额外参数
            
        Returns:
            List[Tuple[Document, float]]: 文档和分数的元组列表
        """
        if not self.vector_store:
            logger.error("向量存储未初始化")
            return []
        
        try:
            results = self.vector_store.similarity_search_with_score(query, k=k, **kwargs)
            logger.debug(f"带分数的相似度搜索返回 {len(results)} 个结果")
            return results
            
        except Exception as e:
            logger.error(f"带分数的相似度搜索失败: {str(e)}")
            return []
    
    def delete_documents(self, ids: List[str]) -> bool:
        """
        删除文档
        
        Args:
            ids (List[str]): 文档ID列表
            
        Returns:
            bool: 是否删除成功
        """
        if not self.vector_store:
            logger.error("向量存储未初始化")
            return False
        
        try:
            self.vector_store.delete(ids)
            logger.info(f"已删除 {len(ids)} 个文档")
            return True
            
        except Exception as e:
            logger.error(f"删除文档失败: {str(e)}")
            return False
    
    def get_collection_info(self) -> Dict[str, Any]:
        """
        获取集合信息
        
        Returns:
            Dict[str, Any]: 集合信息
        """
        if not self.vector_store:
            return {"error": "向量存储未初始化"}
        
        try:
            # 尝试获取集合统计信息
            count = 0
            collection_name = settings.CHROMA_COLLECTION_NAME
            
            # 使用更安全的方式获取文档数量
            if hasattr(self.vector_store, '_collection') and self.vector_store._collection:
                try:
                    count = self.vector_store._collection.count()
                except Exception as e:
                    logger.warning(f"无法获取文档数量: {e}")
                    # 尝试通过搜索获取数量
                    try:
                        # 执行一个简单的搜索来测试向量存储是否工作
                        test_results = self.vector_store.similarity_search("test", k=1)
                        count = "可用但无法获取确切数量"
                    except Exception:
                        count = "未知"
            
            return {
                "collection_name": collection_name,
                "collection_count": count,
                "persist_directory": settings.CHROMA_PERSIST_DIRECTORY,
                "embedding_model": embedding_manager.get_model_info().get('model_name', '未知'),
                "vector_store_type": type(self.vector_store).__name__,
                "is_available": True
            }
            
        except Exception as e:
            logger.error(f"获取集合信息失败: {str(e)}")
            return {"error": str(e)}
    
    def clear_collection(self) -> bool:
        """
        清空集合
        
        Returns:
            bool: 是否清空成功
        """
        if not self.vector_store:
            logger.error("向量存储未初始化")
            return False
        
        try:
            # 获取所有文档ID并删除
            collection = self.vector_store._collection
            all_docs = collection.get()
            
            if all_docs['ids']:
                collection.delete(ids=all_docs['ids'])
                logger.info(f"已清空集合，删除了 {len(all_docs['ids'])} 个文档")
            else:
                logger.info("集合已经是空的")
            
            return True
            
        except Exception as e:
            logger.error(f"清空集合失败: {str(e)}")
            return False
    
    def is_available(self) -> bool:
        """
        检查向量存储是否可用
        
        Returns:
            bool: 是否可用
        """
        return self.vector_store is not None

# Module-level singleton instance of the vector store manager.
vector_store_manager = VectorStoreManager() 