import os
import logging
from typing import List, Optional, Dict, Any, Callable
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_openai import OpenAIEmbeddings
from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.vectorstores import Chroma, FAISS
import uuid
import chromadb
from chromadb.config import Settings
from langchain.vectorstores.base import VectorStore
from dotenv import load_dotenv

# Module-level logger for this file
logger = logging.getLogger(__name__)

# Load environment variables from a .env file (e.g. DEEPSEEK_API_KEY)
load_dotenv()

class VectorStoreManager:
    """Vector store manager: handles document embedding and vector-database operations.

    Supports two embedding backends ("openai" via a DeepSeek-compatible endpoint,
    or "ollama") and two vector-store backends ("chroma" or "faiss").
    """

    def __init__(
        self,
        embedding_model: str = "ollama",
        vector_store_type: str = "chroma",
        persist_directory: str = "./vector_db",
        model_name: str = "qwen3:4b",
        **kwargs
    ):
        """
        Initialize the vector store manager.

        :param embedding_model: embedding backend type ("openai" or "ollama")
        :param vector_store_type: vector store backend type ("chroma" or "faiss")
        :param persist_directory: directory used to persist the vector store
        :param model_name: model name passed to the embedding backend
        :param kwargs: extra keyword arguments forwarded to the embedding constructor
        :raises ValueError: if the embedding backend type is unsupported or the
            required API key environment variable is missing
        """
        self.embedding_model = embedding_model
        self.vector_store_type = vector_store_type
        self.persist_directory = persist_directory
        self.model_name = model_name
        # Lazily created on first use (add/search); None until then.
        self.vector_store: Optional[VectorStore] = None
        self.embeddings: Optional[Embeddings] = self._initialize_embeddings(**kwargs)

    def _initialize_embeddings(self, **kwargs) -> Embeddings:
        """Create the embedding model for the configured backend.

        :raises ValueError: on an unknown backend or missing DEEPSEEK_API_KEY.
        """
        if self.embedding_model == "openai":
            # OpenAI-compatible embeddings served by DeepSeek.
            api_key = os.getenv("DEEPSEEK_API_KEY")
            if not api_key:
                raise ValueError("使用DeepSeek嵌入模型需要设置DEEPSEEK_API_KEY环境变量")
            return OpenAIEmbeddings(
                openai_api_key=api_key,
                openai_api_base="https://api.deepseek.cn/v1",
                model=self.model_name,
                **kwargs
            )
        elif self.embedding_model == "ollama":
            # Locally served Ollama embeddings.
            return OllamaEmbeddings(
                model=self.model_name,
                **kwargs
            )
        else:
            raise ValueError(f"不支持的嵌入模型类型: {self.embedding_model}")

    def _initialize_vector_store(self, documents: Optional[List[Document]] = None, **kwargs) -> VectorStore:
        """Create (or load) the vector store for the configured backend.

        :param documents: optional documents to seed a new store with; when
            omitted, an existing persisted store is opened instead
        :param kwargs: extra keyword arguments forwarded to the store constructor
        :raises ValueError: if embeddings were not initialized or the backend
            type is unsupported
        """
        if not self.embeddings:
            raise ValueError("嵌入模型未初始化")

        # Ensure the persistence directory exists before either backend touches it.
        os.makedirs(self.persist_directory, exist_ok=True)

        if self.vector_store_type == "chroma":
            # Telemetry opt-out must be supplied at construction time via
            # Settings; assigning to the client's private attributes afterwards
            # has no effect. LangChain's Chroma wrapper creates or fetches the
            # collection itself, so no manual get_or_create_collection (or
            # embedding-function adapter) is needed here.
            client = chromadb.PersistentClient(
                path=self.persist_directory,
                settings=Settings(anonymized_telemetry=False),
            )

            if documents:
                # Seed a new store from the given documents, logging timing.
                import time
                start_time = time.time()
                logger.info("Creating Chroma vector store from %d documents...", len(documents))

                vector_store = Chroma.from_documents(
                    documents=documents,
                    embedding=self.embeddings,
                    client=client,
                    collection_name="documents",
                    persist_directory=self.persist_directory,
                    **kwargs
                )

                logger.info(
                    "Chroma vector store created from %d documents in %.2f s",
                    len(documents), time.time() - start_time,
                )
                return vector_store
            else:
                # No documents: open the existing collection directly.
                return Chroma(
                    client=client,
                    collection_name="documents",
                    embedding_function=self.embeddings,
                    **kwargs
                )
        elif self.vector_store_type == "faiss":
            if documents:
                return FAISS.from_documents(
                    documents=documents,
                    embedding=self.embeddings,
                    **kwargs
                )
            else:
                # Loading a FAISS index requires opting in to pickle
                # deserialization; only safe for indexes we wrote ourselves.
                return FAISS.load_local(
                    folder_path=self.persist_directory,
                    embeddings=self.embeddings,
                    allow_dangerous_deserialization=True,
                    **kwargs
                )
        else:
            raise ValueError(f"不支持的向量存储类型: {self.vector_store_type}")

    def add_documents(self, documents: List[Document], **kwargs) -> List[str]:
        """
        Add documents to the vector store, creating the store on first use.

        :param documents: documents to add; each gets a UUID in metadata['id']
            if it does not already carry one
        :return: list of document ids (metadata 'id' values); empty for empty input
        """
        # Guard: the return expression below reads each document's metadata,
        # and the previous implementation indexed documents[0], which raised
        # IndexError for an empty list.
        if not documents:
            return []

        # Ensure every document has a stable unique id in its metadata.
        for doc in documents:
            if hasattr(doc, 'metadata') and 'id' not in doc.metadata:
                doc.metadata['id'] = str(uuid.uuid4())

        if not self.vector_store:
            self.vector_store = self._initialize_vector_store(documents, **kwargs)
        else:
            self.vector_store.add_documents(documents=documents, **kwargs)

        # Chroma >= 0.4 persists automatically; only other backends need an
        # explicit (best-effort) persist call.
        if self.vector_store_type != "chroma" and hasattr(self.vector_store, 'persist'):
            try:
                self.vector_store.persist()
            except Exception:
                # Persistence is best-effort here; add_documents already succeeded.
                pass

        return [doc.metadata.get('id') for doc in documents if hasattr(doc, 'metadata')]

    def similarity_search(self, query: str, k: int = 4, **kwargs) -> List[Document]:
        """
        Similarity search.

        :param query: query text
        :param k: number of results to return
        :return: list of retrieved documents
        """
        if not self.vector_store:
            self.vector_store = self._initialize_vector_store()
        return self.vector_store.similarity_search(query=query, k=k, **kwargs)

    def similarity_search_with_score(self, query: str, k: int = 4, **kwargs) -> List[tuple[Document, float]]:
        """
        Similarity search returning relevance scores.

        :param query: query text
        :param k: number of results to return
        :return: list of (document, score) tuples; lower scores mean more
            similar for distance-based backends
        """
        if not self.vector_store:
            self.vector_store = self._initialize_vector_store()
        return self.vector_store.similarity_search_with_score(query=query, k=k, **kwargs)

    def persist(self) -> None:
        """Persist the vector store.

        Chroma >= 0.4 persists automatically, so this is a no-op for the
        "chroma" backend; it is kept for backward compatibility and for
        backends that expose an explicit persist() method.
        """
        if not self.vector_store:
            return

        # Chroma persists automatically; nothing to do.
        if self.vector_store_type == "chroma":
            return

        # Other backends: call persist() if available, logging failures.
        if hasattr(self.vector_store, 'persist'):
            try:
                self.vector_store.persist()
            except Exception as e:
                logger.error(f"持久化向量存储失败: {str(e)}")

    def clear(self) -> None:
        """Drop the underlying collection (if supported) and reset the store."""
        if self.vector_store:
            if hasattr(self.vector_store, 'delete_collection'):
                self.vector_store.delete_collection()
            self.vector_store = None

    def save_local(self, folder_path: Optional[str] = None) -> None:
        """Save the vector store to a local directory.

        :param folder_path: target directory; defaults to persist_directory
        :raises ValueError: if the vector store has not been initialized
        """
        if not self.vector_store:
            raise ValueError("向量存储未初始化")

        path = folder_path or self.persist_directory
        os.makedirs(path, exist_ok=True)

        # FAISS exposes save_local; other backends may only offer persist().
        if hasattr(self.vector_store, 'save_local'):
            self.vector_store.save_local(folder_path=path)
        elif hasattr(self.vector_store, 'persist'):
            self.vector_store.persist()

    def delete_document(self, document_id: str) -> bool:
        """
        Delete a document from the vector store by id.

        :param document_id: either the 'id' stored in document metadata or the
            store's internal vector id
        :return: True if a document was deleted, False otherwise
        :raises NotImplementedError: for backends without deletion support
        """
        if not self.vector_store:
            self.vector_store = self._initialize_vector_store()

        # Chroma backend (accessed through the wrapper's private collection).
        if hasattr(self.vector_store, '_collection'):
            try:
                # 1. Match on the 'id' key stored in metadata. Chroma where
                #    filters address metadata keys directly (consistent with
                #    find_duplicate_documents), not via a 'metadata.' prefix.
                results = self.vector_store._collection.get(where={'id': document_id})
                if results['ids']:
                    self.vector_store._collection.delete(ids=results['ids'])
                    self.persist()
                    return True

                # 2. Fall back to treating document_id as Chroma's internal id.
                #    collection.get(ids=...) returns an empty result (it does
                #    not raise) for unknown ids, so inspect the payload rather
                #    than relying on an exception.
                results = self.vector_store._collection.get(ids=[document_id])
                if results['ids']:
                    self.vector_store._collection.delete(ids=[document_id])
                    self.persist()
                    return True
                return False
            except Exception as e:
                logger.error(f"删除文档失败: {str(e)}")
                return False
        # FAISS backend: would require an id-to-index mapping we do not keep.
        elif hasattr(self.vector_store, 'delete'):
            logger.warning("FAISS删除文档未实现: %s", document_id)
            return False
        else:
            raise NotImplementedError(f"不支持的向量存储类型: {type(self.vector_store)}")

    def delete_documents(self, document_ids: List[str]) -> int:
        """
        Delete documents in bulk.

        :param document_ids: document ids to delete
        :return: number of documents actually deleted
        """
        deleted_count = 0
        for doc_id in document_ids:
            if self.delete_document(doc_id):
                deleted_count += 1
        return deleted_count

    def list_documents(self) -> List[Dict[str, Any]]:
        """
        List all documents with their metadata.

        :return: list of dicts with keys 'id', 'vector_id' and 'metadata'
        :raises NotImplementedError: for backends without listing support
        """
        if not self.vector_store:
            self.vector_store = self._initialize_vector_store()

        if hasattr(self.vector_store, '_collection'):
            try:
                results = self.vector_store._collection.get()
                logger.debug("vector store holds %d documents", len(results['ids']))

                documents = []
                for i, metadata in enumerate(results['metadatas']):
                    try:
                        doc_info = {
                            'vector_id': results['ids'][i],  # store-internal id
                            'metadata': metadata or {}
                        }
                        # Prefer the id we stored in metadata; fall back to the
                        # internal vector id.
                        if isinstance(metadata, dict) and 'id' in metadata:
                            doc_info['id'] = metadata['id']
                        else:
                            doc_info['id'] = results['ids'][i]
                        documents.append(doc_info)
                    except Exception:
                        # Malformed metadata entry: keep the document listed
                        # under its internal id with empty metadata.
                        logger.warning("malformed metadata at index %d: %r", i, metadata)
                        documents.append({
                            'vector_id': results['ids'][i],
                            'id': results['ids'][i],
                            'metadata': {}
                        })

                return documents
            except Exception as e:
                logger.error(f"获取文档列表失败: {str(e)}")
                return []
        else:
            raise NotImplementedError(f"不支持的向量存储类型: {type(self.vector_store)} ")

    def list_document_ids(self) -> List[str]:
        """
        List all document ids.

        :return: list of document ids
        """
        documents = self.list_documents()
        return [doc['id'] for doc in documents]

    def find_duplicate_documents(self, threshold: float = 0.95) -> List[List[str]]:
        """
        Find near-duplicate documents.

        :param threshold: similarity threshold; documents scoring above it are
            considered duplicates
        :return: groups of duplicate document ids
        """
        if not self.vector_store:
            self.vector_store = self._initialize_vector_store()

        doc_ids = self.list_document_ids()
        if not doc_ids:
            return []

        duplicate_groups = []
        processed_ids = set()

        # Run a similarity search seeded with each document's own content.
        for doc_id in doc_ids:
            if doc_id in processed_ids:
                continue

            try:
                if hasattr(self.vector_store, '_collection'):
                    results = self.vector_store._collection.get(where={'id': doc_id})
                    if not results['ids']:
                        continue
                    document_content = results['documents'][0]
                else:
                    # Other backends: content lookup not supported here.
                    continue

                similar_docs = self.similarity_search_with_score(document_content, k=10)

                # Distance-style scores: lower means more similar, so a score
                # below (1 - threshold) counts as a duplicate.
                duplicates = [doc_id]
                for doc, score in similar_docs:
                    if score < (1.0 - threshold) and doc.metadata.get('id') != doc_id:
                        similar_doc_id = doc.metadata.get('id')
                        if similar_doc_id and similar_doc_id not in processed_ids:
                            duplicates.append(similar_doc_id)
                            processed_ids.add(similar_doc_id)

                if len(duplicates) > 1:
                    duplicate_groups.append(duplicates)

                processed_ids.add(doc_id)
            except Exception as e:
                logger.error(f"查找重复文档时出错: {str(e)}")
                continue

        return duplicate_groups

    def remove_duplicates(self, threshold: float = 0.95) -> int:
        """
        Remove duplicate documents, keeping one per group.

        :param threshold: similarity threshold
        :return: number of documents deleted
        """
        duplicate_groups = self.find_duplicate_documents(threshold)
        deleted_count = 0

        for group in duplicate_groups:
            # Keep the first document of each group, delete the rest.
            for doc_id in group[1:]:
                if self.delete_document(doc_id):
                    deleted_count += 1

        return deleted_count

if __name__ == "__main__":
    # Demo: load documents, chunk them, index them into Chroma, then query.
    from loaders import DocumentLoader
    from text_splitter import DocumentSplitter

    try:
        # Step 1: read raw documents from the examples directory.
        print("加载文档...")
        doc_loader = DocumentLoader()
        raw_docs = doc_loader.load_documents_from_directory("../../examples/docs")
        print(f"加载文档数量: {len(raw_docs)}")

        # Step 2: split into overlapping chunks for embedding.
        print("分割文档...")
        doc_splitter = DocumentSplitter(chunk_size=500, chunk_overlap=100)
        doc_chunks = doc_splitter.split_documents(raw_docs)
        print(f"分割后文档块数量: {len(doc_chunks)}")

        # Step 3: set up a Chroma-backed manager using Ollama embeddings.
        print("初始化向量存储...")
        manager = VectorStoreManager(
            embedding_model="ollama",
            vector_store_type="chroma",
            persist_directory="./vector_db_chroma",
            model_name="qwen3:4b",
        )

        # Step 4: index the chunks.
        print("添加文档到向量存储...")
        chunk_ids = manager.add_documents(doc_chunks)
        print(f"添加文档ID数量: {len(chunk_ids)}")

        # Step 5: persist to disk (no-op for modern Chroma).
        manager.persist()
        print("向量存储已持久化")

        # Step 6: run a sample similarity query and show the top hits.
        query = "什么是RAG技术?"
        print(f"检索相似文档: {query}")
        hits = manager.similarity_search(query, k=3)
        for idx, hit in enumerate(hits):
            print(f"\n结果 {idx+1}:")
            print(f"来源: {hit.metadata.get('source', '未知')}")
            print(f"内容: {hit.page_content[:200]}...")

    except Exception as e:
        print(f"运行示例时出错: {str(e)}")