"""
向量数据库（ChromaDB）连接和管理
用于知识库的向量存储和检索
"""
from typing import Optional, List, Dict, Any
import os
from pathlib import Path
import chromadb
from chromadb.config import Settings
from chromadb.api import ClientAPI
from chromadb.api.models.Collection import Collection
from langchain_openai import OpenAIEmbeddings
from langchain_community.embeddings import HuggingFaceEmbeddings, DashScopeEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import DirectoryLoader, TextLoader
from langchain.docstore.document import Document
from loguru import logger

from app.config import settings


class VectorDBManager:
    """Manager for the ChromaDB-backed knowledge-base vector store.

    Lifecycle: call init() once to create the client, embedding model, and
    collection; close() on shutdown. All other methods assume init() has
    succeeded.
    """

    def __init__(self):
        # Resources are created lazily in init() so importing this module
        # has no side effects.
        self.client: Optional[ClientAPI] = None
        self.collection: Optional[Collection] = None
        # LangChain embeddings object; set by _init_embedding().
        self.embedding_function = None

    def init(self) -> None:
        """Initialize the ChromaDB client, embedding model, and collection.

        Raises:
            Exception: re-raised after logging when any step fails.
        """
        try:
            # Ensure the persistence directory exists.
            persist_dir = Path(settings.CHROMA_PERSIST_DIRECTORY)
            persist_dir.mkdir(parents=True, exist_ok=True)

            # Create the persistent ChromaDB client.
            self.client = chromadb.PersistentClient(
                path=str(persist_dir),
                settings=Settings(
                    anonymized_telemetry=False,  # disable anonymous telemetry
                    allow_reset=True,  # permit client.reset()
                ),
            )

            # The embedding model must be ready before documents are added
            # or queried.
            self._init_embedding()

            # Get or create the collection. "hnsw:space": "cosine" selects
            # cosine distance (ChromaDB defaults to l2), which is what the
            # relevance-score formula in search() assumes. NOTE: a
            # collection persisted before this setting existed keeps its
            # original metric — rebuild to switch.
            self.collection = self.client.get_or_create_collection(
                name=settings.CHROMA_COLLECTION_NAME,
                metadata={
                    "description": "电商客服知识库",
                    "hnsw:space": "cosine",
                },
            )

            logger.info(
                f"ChromaDB初始化成功: {persist_dir}, 集合: {settings.CHROMA_COLLECTION_NAME}"
            )
            logger.info(f"当前集合文档数量: {self.collection.count()}")

        except Exception as e:
            logger.error(f"ChromaDB初始化失败: {e}")
            raise

    def _init_embedding(self) -> None:
        """Create the embedding model selected by settings.EMBEDDING_PROVIDER.

        Supported providers: "openai", "dashscope", "local" (HuggingFace).

        Raises:
            ValueError: unknown provider, or a required API key is missing.
        """
        try:
            provider = settings.EMBEDDING_PROVIDER.lower()
            logger.info(f"使用Embedding提供商: {provider}")

            if provider == "openai":
                # OpenAI embeddings (API key required).
                if not settings.OPENAI_API_KEY:
                    raise ValueError("使用OpenAI需要配置OPENAI_API_KEY")
                logger.info(f"使用OpenAI Embeddings模型: {settings.OPENAI_EMBEDDING_MODEL}")
                self.embedding_function = OpenAIEmbeddings(
                    model=settings.OPENAI_EMBEDDING_MODEL,
                    openai_api_key=settings.OPENAI_API_KEY,
                )

            elif provider == "dashscope":
                # Tongyi Qianwen (DashScope) embeddings (API key required).
                if not settings.DASHSCOPE_API_KEY:
                    raise ValueError("使用千问需要配置DASHSCOPE_API_KEY")
                logger.info(f"使用千问Embeddings模型: {settings.DASHSCOPE_EMBEDDING_MODEL}")
                self.embedding_function = DashScopeEmbeddings(
                    model=settings.DASHSCOPE_EMBEDDING_MODEL,
                    dashscope_api_key=settings.DASHSCOPE_API_KEY,
                )

            elif provider == "local":
                # Local HuggingFace model on CPU; vectors are normalized so
                # cosine similarity is meaningful.
                logger.info(f"使用本地Embeddings模型: {settings.LOCAL_EMBEDDING_MODEL}")
                self.embedding_function = HuggingFaceEmbeddings(
                    model_name=settings.LOCAL_EMBEDDING_MODEL,
                    model_kwargs={"device": "cpu"},
                    encode_kwargs={"normalize_embeddings": True},
                )

            else:
                raise ValueError(f"不支持的Embedding提供商: {provider}")

            logger.info("✅ Embedding模型初始化成功")

        except Exception as e:
            logger.error(f"Embedding模型初始化失败: {e}")
            raise

    def close(self) -> None:
        """Close the vector database connection.

        ChromaDB's PersistentClient persists automatically, so there is
        nothing to release explicitly; this only logs.
        """
        logger.info("ChromaDB连接已关闭")

    # ==================== Document loading and chunking ====================

    def load_documents_from_directory(self, directory: str) -> List[Document]:
        """Load all Markdown files under a directory (recursively).

        Args:
            directory: path of the document directory.

        Returns:
            Loaded documents; empty list on failure.
        """
        try:
            logger.info(f"开始加载文档目录: {directory}")

            # Only Markdown files are loaded, as UTF-8 text.
            loader = DirectoryLoader(
                directory,
                glob="**/*.md",
                loader_cls=TextLoader,
                loader_kwargs={"encoding": "utf-8"},
                show_progress=True,
            )
            documents = loader.load()

            logger.info(f"成功加载 {len(documents)} 个文档")
            return documents

        except Exception as e:
            logger.error(f"加载文档失败: {e}")
            return []

    def split_documents(self, documents: List[Document]) -> List[Document]:
        """Split documents into overlapping chunks for embedding.

        Args:
            documents: raw documents.

        Returns:
            Chunked documents; empty list on failure.
        """
        try:
            # Separators are ordered from strongest break (paragraph) to
            # weakest; Chinese punctuation is included so CJK text splits at
            # sentence boundaries.
            text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=settings.CHUNK_SIZE,
                chunk_overlap=settings.CHUNK_OVERLAP,
                length_function=len,
                separators=["\n\n", "\n", "。", "！", "？", "；", "，", " ", ""],
            )

            chunks = text_splitter.split_documents(documents)
            logger.info(f"文档分割完成，共 {len(chunks)} 个块")
            return chunks

        except Exception as e:
            logger.error(f"文档分割失败: {e}")
            return []

    # ==================== Vector storage ====================

    def add_documents(self, documents: List[Document]) -> bool:
        """Embed and store documents in the vector database.

        Args:
            documents: documents to add.

        Returns:
            True on success, False otherwise.
        """
        try:
            if not documents:
                logger.warning("没有文档需要添加")
                return False

            texts = [doc.page_content for doc in documents]
            metadatas = [doc.metadata for doc in documents]
            # Offset IDs by the current collection size so repeated calls
            # never reuse IDs (previously IDs always started at doc_0 and
            # collided with earlier batches); consistent with add_texts().
            current_count = self.collection.count()
            ids = [f"doc_{current_count + i}" for i in range(len(documents))]

            # Generate embeddings for all chunks in one batch.
            logger.info("正在生成文档embeddings...")
            embeddings = self.embedding_function.embed_documents(texts)

            self.collection.add(
                embeddings=embeddings,
                documents=texts,
                metadatas=metadatas,
                ids=ids,
            )

            logger.info(f"成功添加 {len(documents)} 个文档到向量数据库")
            return True

        except Exception as e:
            logger.error(f"添加文档失败: {e}")
            return False

    def add_texts(
        self, texts: List[str], metadatas: Optional[List[Dict]] = None
    ) -> bool:
        """Embed and store plain texts in the vector database.

        Args:
            texts: texts to add.
            metadatas: optional per-text metadata, parallel to texts.

        Returns:
            True on success, False otherwise.
        """
        try:
            if not texts:
                return False

            embeddings = self.embedding_function.embed_documents(texts)

            # Offset IDs by the current count so they stay unique across calls.
            current_count = self.collection.count()
            ids = [f"text_{current_count + i}" for i in range(len(texts))]

            # Pass metadatas through unchanged: ChromaDB accepts None, while
            # recent versions reject empty per-item dicts.
            self.collection.add(
                embeddings=embeddings,
                documents=texts,
                metadatas=metadatas,
                ids=ids,
            )

            logger.info(f"成功添加 {len(texts)} 个文本")
            return True

        except Exception as e:
            logger.error(f"添加文本失败: {e}")
            return False

    # ==================== Vector retrieval ====================

    def search(
        self,
        query: str,
        n_results: int = 5,
        where: Optional[Dict] = None,
        where_document: Optional[Dict] = None,
    ) -> List[Dict[str, Any]]:
        """Search the collection for documents relevant to a query.

        Args:
            query: query text.
            n_results: number of results to return.
            where: metadata filter.
            where_document: document-content filter.

        Returns:
            List of dicts with keys "content", "metadata", "distance", and
            "relevance_score" (0-1, higher is more relevant); empty list on
            failure.
        """
        try:
            # Embed the query with the same model used for documents.
            query_embedding = self.embedding_function.embed_query(query)

            results = self.collection.query(
                query_embeddings=[query_embedding],
                n_results=n_results,
                where=where,
                where_document=where_document,
                include=["documents", "metadatas", "distances"],
            )

            formatted_results = []
            if results["documents"] and results["documents"][0]:
                for i, doc in enumerate(results["documents"][0]):
                    distance = results["distances"][0][i] if results["distances"] else 0

                    # With cosine distance the range is [0, 2]; map to a 0-1
                    # relevance score via relevance = max(0, 1 - d/2).
                    relevance_score = max(0, 1 - distance / 2.0)

                    formatted_results.append(
                        {
                            "content": doc,
                            "metadata": results["metadatas"][0][i] if results["metadatas"] else {},
                            "distance": distance,
                            "relevance_score": relevance_score,
                        }
                    )

            logger.info(f"搜索完成，找到 {len(formatted_results)} 个相关文档")
            return formatted_results

        except Exception as e:
            logger.error(f"搜索失败: {e}")
            return []

    def similarity_search(
        self, query: str, k: int = 5, threshold: float = 0.7
    ) -> List[Dict[str, Any]]:
        """Similarity search that drops low-relevance results.

        Args:
            query: query text.
            k: number of candidates to retrieve.
            threshold: minimum relevance score (0-1) to keep a result.

        Returns:
            Search results whose relevance_score >= threshold.
        """
        results = self.search(query, n_results=k)

        # Debug: log the relevance score of each retrieved candidate.
        if results:
            logger.info(f"检索到的文档相关度分数:")
            for i, r in enumerate(results[:5], 1):
                score = r.get("relevance_score", 0)
                content_preview = r.get("content", "")[:60].replace('\n', ' ')
                logger.info(f"  文档{i}: 分数={score:.4f} | {content_preview}...")

        # Drop results below the relevance threshold.
        filtered_results = [
            r for r in results if r.get("relevance_score", 0) >= threshold
        ]

        if len(results) > 0 and len(filtered_results) == 0:
            logger.warning(f"⚠️ 所有{len(results)}个文档都被阈值{threshold}过滤掉了！最高分数: {results[0].get('relevance_score', 0):.4f}")

        return filtered_results

    # ==================== Collection management ====================

    def get_collection_stats(self) -> Dict[str, Any]:
        """Return name/count/metadata of the collection ({} on failure)."""
        try:
            return {
                "name": self.collection.name,
                "count": self.collection.count(),
                "metadata": self.collection.metadata,
            }
        except Exception as e:
            logger.error(f"获取集合统计信息失败: {e}")
            return {}

    def clear_collection(self) -> bool:
        """Delete and recreate the collection, removing all documents."""
        try:
            self.client.delete_collection(name=settings.CHROMA_COLLECTION_NAME)
            # Recreate with the same settings used in init() so the distance
            # metric stays cosine.
            self.collection = self.client.create_collection(
                name=settings.CHROMA_COLLECTION_NAME,
                metadata={
                    "description": "电商客服知识库",
                    "hnsw:space": "cosine",
                },
            )
            logger.info("集合已清空")
            return True
        except Exception as e:
            logger.error(f"清空集合失败: {e}")
            return False

    def delete_by_ids(self, ids: List[str]) -> bool:
        """Delete documents by ID; returns True on success."""
        try:
            self.collection.delete(ids=ids)
            logger.info(f"成功删除 {len(ids)} 个文档")
            return True
        except Exception as e:
            logger.error(f"删除文档失败: {e}")
            return False

    # ==================== Knowledge-base building ====================

    def build_knowledge_base(self, force_rebuild: bool = False) -> bool:
        """Build the knowledge base from settings.KNOWLEDGE_BASE_PATH.

        Loads Markdown files, chunks them, and stores the chunks. When the
        collection already has documents, the build is skipped unless
        force_rebuild is True.

        Args:
            force_rebuild: when True, clear existing data and rebuild.

        Returns:
            True on success (or skip), False otherwise.
        """
        try:
            # Skip when data already exists and no rebuild was requested.
            current_count = self.collection.count()
            if current_count > 0 and not force_rebuild:
                logger.info(f"知识库已存在 ({current_count} 个文档)，跳过构建")
                return True

            # Wipe existing data before a forced rebuild.
            if force_rebuild and current_count > 0:
                logger.info("强制重建，清空现有数据...")
                self.clear_collection()

            knowledge_base_path = settings.KNOWLEDGE_BASE_PATH
            if not os.path.exists(knowledge_base_path):
                logger.error(f"知识库目录不存在: {knowledge_base_path}")
                return False

            documents = self.load_documents_from_directory(knowledge_base_path)
            if not documents:
                logger.error("没有加载到任何文档")
                return False

            chunks = self.split_documents(documents)
            if not chunks:
                logger.error("文档分割失败")
                return False

            success = self.add_documents(chunks)
            if success:
                logger.info(
                    f"知识库构建完成！共 {len(chunks)} 个文档块，"
                    f"来自 {len(documents)} 个源文档"
                )
            return success

        except Exception as e:
            logger.error(f"构建知识库失败: {e}")
            return False


# Module-level singleton shared by the helper functions below.
vector_db_manager = VectorDBManager()


# Health probe for the vector database
def check_vector_db_health() -> bool:
    """Return True when the vector DB client and collection are usable."""
    manager = vector_db_manager
    # Not initialized yet (or init failed) — unhealthy.
    if not manager.client or not manager.collection:
        return False
    try:
        # A count() round-trip proves the collection is reachable.
        manager.collection.count()
    except Exception as e:
        logger.error(f"向量数据库健康检查失败: {e}")
        return False
    return True


# Startup hook: initialize the vector database
def init_vector_db() -> None:
    """Initialize the global vector database manager (client, embeddings, collection)."""
    vector_db_manager.init()


# Shutdown hook: close the vector database
def close_vector_db() -> None:
    """Close the global vector database manager (ChromaDB persists automatically)."""
    vector_db_manager.close()




