from datetime import datetime

from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEmbeddings
from config.settings import settings
from langchain_core.documents import Document
import os

class VectorStoreService:
    """Manage a FAISS vector store backed by HuggingFace embeddings.

    The index is persisted under ``settings.VECTOR_DB_PATH`` and reloaded on
    construction when it already exists on disk. ``self.vector_store`` is
    ``None`` until an index has been created or loaded.
    """

    def __init__(self):
        print(settings.MODEL_CACHE_PATH)
        self.embeddings = HuggingFaceEmbeddings(
            model_name=settings.EMBEDDING_MODEL,
            cache_folder=settings.MODEL_CACHE_PATH,  # local model cache directory
        )
        print("获取存储对象")
        # May be None when no index has been persisted yet.
        self.vector_store = self.load_vector_store()

    def create_vector_store(self, documents):
        """Build a new FAISS index from ``documents`` and persist it.

        Args:
            documents (list): LangChain ``Document`` objects to index.
        """
        self.vector_store = FAISS.from_documents(documents, self.embeddings)
        self.vector_store.save_local(settings.VECTOR_DB_PATH)
        # BUGFIX: log after the save actually happened, not before.
        print("已保存向量库" + settings.VECTOR_DB_PATH)

    def load_vector_store(self):
        """Load the persisted vector store, or return None when absent.

        Returns:
            FAISS | None: The loaded store, or ``None`` if the directory or
            its ``index.faiss`` file does not exist.
        """
        if not os.path.exists(settings.VECTOR_DB_PATH):
            print(f"向量库目录不存在: {settings.VECTOR_DB_PATH}")
            return None
        index_file = os.path.join(settings.VECTOR_DB_PATH, "index.faiss")
        if not os.path.exists(index_file):
            print(f"向量库索引文件不存在: {index_file}")
            return None
        print(f"加载向量库: {settings.VECTOR_DB_PATH}")
        # allow_dangerous_deserialization: the pickle file was produced
        # locally by this service, so deserializing it is trusted here.
        return FAISS.load_local(
            settings.VECTOR_DB_PATH,
            self.embeddings,
            allow_dangerous_deserialization=True,
        )

    def similarity_search(self, query, k=3):
        """Return the ``k`` most similar documents, or [] when uninitialized.

        Args:
            query (str): Query text.
            k (int): Number of results to return.
        """
        if not self.vector_store:
            return []
        return self.vector_store.similarity_search(query, k=k)

    def similarity_search_qa(self, query, k=3, filter_metadata=None):
        """Retrieve similar questions and combine each with its stored answer.

        Args:
            query (str): Query question.
            k (int): Number of results to return.
            filter_metadata (dict | None): Optional metadata filter forwarded
                to FAISS ``similarity_search``.

        Returns:
            list: ``Document`` objects whose content is
            "问题：<question>\\n回答：<answer>" when an ``answer`` key exists in
            the hit's metadata, otherwise the original content unchanged.
        """
        if not self.vector_store:
            return []

        # Only forward the filter when one was supplied (an empty dict is
        # deliberately treated the same as "no filter", as before).
        if filter_metadata:
            hits = self.vector_store.similarity_search(query, k=k, filter=filter_metadata)
        else:
            hits = self.vector_store.similarity_search(query, k=k)

        combined_docs = []
        for hit in hits:
            answer = hit.metadata.get("answer", "")
            if answer:
                content = f"问题：{hit.page_content}\n回答：{answer}"
            else:
                # No stored answer: keep the original content as-is.
                content = hit.page_content
            combined_docs.append(
                Document(page_content=content, metadata=hit.metadata)
            )
        return combined_docs

    def add_documents(self, documents):
        """Add ``Document`` objects to the store and persist the index.

        Args:
            documents (list): Documents to add.

        Raises:
            ValueError: If the vector store has not been initialized.
        """
        # BUGFIX: previously raised AttributeError on an uninitialized store;
        # now fails explicitly, consistent with add_texts/add_qa_pairs.
        if not self.vector_store:
            raise ValueError("向量存储未初始化，请先创建向量存储")
        self.vector_store.add_documents(documents)
        self.vector_store.save_local(settings.VECTOR_DB_PATH)
        print(f"已向向量库添加 {len(documents)} 个新文档块")

    @staticmethod
    def _base_metadata(source, category, product_type, created_by):
        """Build the metadata dict shared by add_texts/add_qa_pairs entries."""
        now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        return {
            "source": source,
            "category": category,
            "product_type": product_type,
            "valid_from": now,
            "valid_to": "2099-12-31",  # sentinel "never expires" date
            "added_by": created_by,
            "added_date": now,
        }

    def add_texts(self, texts, created_by="admin", category="answer",
                  source="manual_addition", product_type="qnn_zd"):
        """Add raw texts to the vector store and persist the index.

        Args:
            texts (list): Text strings to embed and store.
            created_by (str): Creator recorded in metadata.
            category (str): Category recorded in metadata.
            source (str): Source recorded in metadata.
            product_type (str): Product type recorded in metadata.

        Returns:
            list: IDs of the added entries.

        Raises:
            ValueError: If the vector store has not been initialized.
        """
        if not self.vector_store:
            raise ValueError("向量存储未初始化，请先创建向量存储")
        base = self._base_metadata(source, category, product_type, created_by)
        # One independent copy per text so later mutation cannot alias.
        metadatas = [dict(base) for _ in texts]
        added_ids = self.vector_store.add_texts(texts, metadatas)
        self.vector_store.save_local(settings.VECTOR_DB_PATH)
        print(f"已向向量库添加 {len(texts)} 个新文本")
        return added_ids

    def add_qa_pairs(self, questions: list, answers: list, created_by="api_user",
                     category="qa_pair", source="api", product_type="qnn_zd"):
        """Add QA pairs: questions are embedded, answers live in metadata.

        Args:
            questions (list): Question strings (the indexed content).
            answers (list): Answer strings, one per question.
            created_by (str): Creator recorded in metadata.
            category (str): Category recorded in metadata.
            source (str): Source recorded in metadata.
            product_type (str): Product type recorded in metadata.

        Returns:
            list: IDs of the added entries.

        Raises:
            ValueError: If the store is uninitialized or the two lists
                differ in length.
        """
        if not self.vector_store:
            raise ValueError("向量存储未初始化，请先创建向量存储")
        if len(questions) != len(answers):
            raise ValueError("问题列表和答案列表长度必须相同")

        base = self._base_metadata(source, category, product_type, created_by)
        # Pair each question with its answer via metadata (positional match).
        metadatas = [
            {**base, "type": "qa_pair", "answer": answer}
            for answer in answers
        ]
        added_ids = self.vector_store.add_texts(questions, metadatas)
        self.vector_store.save_local(settings.VECTOR_DB_PATH)
        print(f"已向向量库添加 {len(questions)} 个问答对")
        return added_ids

    def list_all_documents(self, limit=None):
        """List stored documents, optionally capped at ``limit`` entries.

        Args:
            limit (int | None): Maximum number of documents to return;
                ``None`` returns all.

        Returns:
            list: Dicts with ``id``, ``content`` and ``metadata`` keys.

        Raises:
            ValueError: If the vector store has not been initialized.
        """
        if not self.vector_store:
            raise ValueError("向量存储未初始化")

        # NOTE: reads the private ``_dict`` of the in-memory docstore; FAISS
        # exposes no public enumeration API.
        doc_items = list(self.vector_store.docstore._dict.items())
        # BUGFIX: `if limit:` treated limit=0 as "no limit"; test for None.
        if limit is not None:
            doc_items = doc_items[:limit]

        return [
            {
                "id": doc_id,
                "content": document.page_content,
                "metadata": document.metadata,
            }
            for doc_id, document in doc_items
        ]

    def delete_document_by_id(self, doc_id: str):
        """Delete a single document by ID and persist the index.

        Args:
            doc_id (str): ID of the document to delete.

        Returns:
            bool: True on success, False when deletion failed.

        Raises:
            ValueError: If the vector store has not been initialized.
        """
        if not self.vector_store:
            raise ValueError("向量存储未初始化")
        try:
            self.vector_store.delete([doc_id])
            self.vector_store.save_local(settings.VECTOR_DB_PATH)
            print(f"已删除文档 ID: {doc_id}")
            return True
        except Exception as e:
            # Best-effort delete: report the failure (e.g. unknown ID)
            # instead of propagating.
            print(f"删除文档时出错: {str(e)}")
            return False

    def delete_documents_by_ids(self, doc_ids: list):
        """Delete multiple documents by ID and persist the index.

        Args:
            doc_ids (list): IDs of the documents to delete.

        Returns:
            int: Number of documents deleted (0 on failure or empty input).

        Raises:
            ValueError: If the vector store has not been initialized.
        """
        if not self.vector_store:
            raise ValueError("向量存储未初始化")
        if not doc_ids:
            print("文档ID列表为空")
            return 0
        try:
            self.vector_store.delete(doc_ids)
            self.vector_store.save_local(settings.VECTOR_DB_PATH)
            deleted_count = len(doc_ids)
            print(f"已删除 {deleted_count} 个文档")
            return deleted_count
        except Exception as e:
            # Best-effort delete: report and signal "nothing deleted".
            print(f"删除文档时出错: {str(e)}")
            return 0