"""
向量存储模块
处理文档的向量化存储和检索，支持本地和OpenAI嵌入模型
"""
import os
from typing import List, Dict, Any, Optional, Union, Tuple
from pathlib import Path
import logging
import uuid
from datetime import datetime

from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceEmbeddings
from langchain.embeddings.base import Embeddings
from langchain.schema import Document as LangchainDocument
from pydantic import BaseModel, Field
import torch

from config import settings, ModelType
from document_processor import DocumentChunk

# 配置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class VectorStoreConfig(BaseModel):
    """Configuration for the Chroma-backed vector store."""

    # Directory where Chroma persists its data; defaults to the app setting.
    persist_directory: str = Field(default=settings.persist_directory)
    # Embedding model name used when running in OpenAI mode.
    embedding_model: str = Field(default="text-embedding-3-small")
    # Name of the Chroma collection documents are stored under.
    collection_name: str = Field(default="documents")

class VectorStoreManager:
    """Vector store manager.

    Wraps a persistent Chroma collection: builds the embedding backend
    (OpenAI or local HuggingFace, selected by ``settings.model_type``),
    ingests document chunks, runs similarity search, and deletes entries.
    """

    def __init__(self, config: Optional[VectorStoreConfig] = None):
        """
        Initialize the vector store manager.

        Args:
            config: Vector store configuration; a default-constructed
                ``VectorStoreConfig`` is used when omitted.
        """
        self.config = config or VectorStoreConfig()
        self.embeddings = self._get_embeddings()
        self.vector_store = self._get_vector_store()

    def _get_embeddings(self) -> Embeddings:
        """Create the embedding model selected by ``settings.model_type``.

        Returns:
            ``OpenAIEmbeddings`` in OpenAI mode, otherwise a local
            ``HuggingFaceEmbeddings`` instance.

        Raises:
            Exception: re-raised from the underlying constructor on failure.
        """
        try:
            if settings.model_type == ModelType.OPENAI:
                return OpenAIEmbeddings(
                    # getattr with a default replaces the hasattr ternary.
                    model=getattr(settings, 'openai_embedding_model', 'text-embedding-3-small'),
                    openai_api_key=settings.openai_api_key,
                    openai_api_base=settings.openai_api_base
                )
            # Local embedding model (HuggingFace / sentence-transformers).
            logger.info(f"正在加载本地嵌入模型: {settings.local_embedding_model}")
            # Fix: pass a device *string*; the old code mixed an int (0) with
            # "cpu", relying on implicit conversion downstream.
            device = "cuda" if torch.cuda.is_available() and settings.device == "cuda" else "cpu"
            return HuggingFaceEmbeddings(
                model_name=settings.local_embedding_model,
                model_kwargs={'device': device},
                # Normalized embeddings make cosine/inner-product scores comparable.
                encode_kwargs={'normalize_embeddings': True}
            )
        except Exception as e:
            logger.error(f"初始化嵌入模型失败: {str(e)}")
            raise

    def _get_vector_store(self) -> Chroma:
        """Open (or create) the persistent Chroma collection.

        Raises:
            Exception: re-raised from Chroma initialization on failure.
        """
        try:
            # Make sure the persistence directory exists before Chroma opens it.
            os.makedirs(self.config.persist_directory, exist_ok=True)
            return Chroma(
                collection_name=self.config.collection_name,
                embedding_function=self.embeddings,
                persist_directory=self.config.persist_directory
            )
        except Exception as e:
            logger.error(f"初始化向量存储失败: {str(e)}")
            raise

    def add_documents(self, chunks: List[DocumentChunk], **kwargs) -> List[str]:
        """
        Add document chunks to the vector store.

        Args:
            chunks: Document chunks to ingest.
            **kwargs: Extra arguments (accepted for interface stability).

        Returns:
            The generated storage IDs, one per chunk (empty list for
            empty input).

        Raises:
            Exception: wraps any storage-layer failure, chained to the cause.
        """
        if not chunks:
            return []

        documents: List[LangchainDocument] = []
        ids: List[str] = []

        for chunk in chunks:
            # Unique storage ID; chunk/document IDs fall back to fresh UUIDs.
            doc_id = str(uuid.uuid4())
            chunk_id = chunk.chunk_id or str(uuid.uuid4())
            document_id = chunk.document_id or str(uuid.uuid4())

            # Start from the chunk's own metadata, then enforce required keys.
            metadata: Dict[str, Any] = dict(chunk.metadata) if chunk.metadata else {}
            metadata.update({
                "chunk_id": chunk_id,
                "document_id": document_id,
                "content_type": getattr(chunk, "content_type", "text"),
                "source": metadata.get("source", "unknown"),
                # isoformat() already returns str; the old str(...) was redundant.
                "timestamp": metadata.get("timestamp", datetime.now().isoformat())
            })

            documents.append(
                LangchainDocument(page_content=chunk.content, metadata=metadata)
            )
            ids.append(doc_id)

        try:
            # BUG FIX: do not pass ``metadatas`` alongside ``documents``.
            # VectorStore.add_documents derives metadatas from the documents
            # themselves and forwards extra kwargs to add_texts, so an
            # explicit ``metadatas`` kwarg raises
            # "got multiple values for argument 'metadatas'".
            self.vector_store.add_documents(documents=documents, ids=ids)
            self.vector_store.persist()
            logger.info(f"成功添加 {len(documents)} 个文档块到向量存储")
            return ids
        except Exception as e:
            error_msg = f"添加文档到向量存储失败: {str(e)}"
            logger.error(error_msg)
            raise Exception(error_msg) from e

    def similarity_search(
        self,
        query: str,
        k: int = 4,
        filter: Optional[Dict[str, Any]] = None,
        **kwargs
    ) -> List[Tuple[LangchainDocument, float]]:
        """
        Run a similarity search against the store.

        Args:
            query: Query text.
            k: Number of results to return.
            filter: Metadata filter passed through to Chroma.
            **kwargs: Extra arguments forwarded to the store.

        Returns:
            Raw ``(document, score)`` tuples from Chroma; formatting is
            the caller's responsibility.

        Raises:
            Exception: re-raised from the underlying search on failure.
        """
        try:
            return self.vector_store.similarity_search_with_score(
                query=query,
                k=k,
                filter=filter,
                **kwargs
            )
        except Exception as e:
            logger.error(f"相似性搜索失败: {str(e)}")
            raise

    def search_documents(
        self,
        query: str,
        top_k: int = 4,
        filter: Optional[Dict[str, Any]] = None,
        **kwargs
    ) -> List[Dict[str, Any]]:
        """
        Search documents and return formatted results.

        Args:
            query: Query text.
            top_k: Number of results to return.
            filter: Metadata filter passed through to Chroma.
            **kwargs: Extra arguments forwarded to the store.

        Returns:
            Dicts with ``content``, ``metadata`` and ``score`` keys.

        Raises:
            Exception: re-raised from the underlying search on failure.
        """
        try:
            results = self.similarity_search(
                query=query,
                k=top_k,
                filter=filter,
                **kwargs
            )
            return [
                {
                    "content": doc.page_content,
                    "metadata": doc.metadata,
                    "score": float(score)
                }
                for doc, score in results
            ]
        except Exception as e:
            logger.error(f"搜索文档失败: {str(e)}")
            raise

    def delete_documents(
        self,
        ids: Optional[List[str]] = None,
        filter: Optional[Dict[str, Any]] = None
    ) -> bool:
        """
        Delete documents by ID or by metadata filter.

        Args:
            ids: Document IDs to delete (takes precedence over ``filter``).
            filter: Metadata filter selecting documents to delete.

        Returns:
            True on success; False when neither selector was given or an
            error occurred (errors are logged, not raised).
        """
        try:
            if ids:
                self.vector_store.delete(ids=ids)
            elif filter:
                self.vector_store.delete(where=filter)
            else:
                logger.warning("未提供ids或filter，将不会删除任何文档")
                return False

            self.vector_store.persist()
            return True
        except Exception as e:
            logger.error(f"删除文档失败: {str(e)}")
            return False

    def get_collection_stats(self) -> Dict[str, Any]:
        """
        Report basic statistics for the underlying collection.

        Returns:
            Dict with ``name``, ``count`` and ``metadata`` keys, or a
            dict with an ``error`` key on failure.

        Note:
            Reads Chroma's private ``_collection`` attribute; this may
            break across langchain/chromadb versions.
        """
        try:
            collection = self.vector_store._collection
            if not collection:
                return {"error": "Collection not found"}
            return {
                "name": collection.name,
                "count": collection.count(),
                "metadata": collection.metadata
            }
        except Exception as e:
            logger.error(f"获取集合统计信息失败: {str(e)}")
            return {"error": str(e)}
