import os
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEmbeddings
from torch.xpu import device
from sentence_transformers import SentenceTransformer
from DataloaderModule import DataloaderModule


class EmbeddingStorageModule:
    """Generate embeddings for loaded documents and manage a local FAISS vector store.

    Wraps a HuggingFace BGE embedding model and provides create / persist /
    load / similarity-search operations over a FAISS index on disk.
    """

    def __init__(self, data_loader, faiss_index_path="faiss_index", device="cuda"):
        """
        Args:
            data_loader: object exposing a ``documents`` list of LangChain
                Document instances (e.g. a DataloaderModule after loading).
            faiss_index_path: directory used to persist/load the FAISS index.
            device: torch device string for the embedding model
                (default ``"cuda"``; pass ``"cpu"`` when no GPU is available).
        """
        self.data_loader = data_loader  # provider of already-loaded documents
        # Chinese BGE embedding model served via sentence-transformers.
        self.embeddings = HuggingFaceEmbeddings(
            model_name=r"model/bge-large-zh",
            model_kwargs={
                'device': device,
            }
        )
        self.faiss_index_path = faiss_index_path  # on-disk location of the FAISS index
        self.vector_store = None  # lazily created/loaded FAISS store

    def generate_embeddings(self):
        """Embed every loaded document and return the list of vectors.

        Returns:
            list[list[float]]: one embedding vector per document, in order.

        Uses ``embed_documents`` so the backend can batch all texts in one
        call instead of issuing one ``embed_query`` call per document.
        """
        documents = self.data_loader.documents
        print(f"正在为{len(documents)}个文档生成嵌入向量")
        return self.embeddings.embed_documents(
            [doc.page_content for doc in documents]
        )

    def create_faiss_vector_store(self):
        """Build a FAISS store from the loaded documents and persist it to disk."""
        print("开始创建向量存储...")
        self.vector_store = FAISS.from_documents(self.data_loader.documents, self.embeddings)
        print("持久化向量到本地磁盘...")
        self.vector_store.save_local(self.faiss_index_path)
        print(f"FAISS向量存储已保存到{self.faiss_index_path}")

    def load_faiss_vector_store(self):
        """Load a previously persisted FAISS store from disk, if present."""
        if os.path.exists(self.faiss_index_path):
            print(f"加载本地FAISS向量存储{self.faiss_index_path}")
            # The index is produced by this same class, so deserializing the
            # pickled docstore is trusted here; never enable this flag for
            # indexes obtained from untrusted sources.
            self.vector_store = FAISS.load_local(
                self.faiss_index_path,
                self.embeddings,
                allow_dangerous_deserialization=True
            )
        else:
            print(f"未检测到向量存储文件{self.faiss_index_path}")

    def search_similar_documents(self, query, top_k=10):
        """Return the page contents of the ``top_k`` documents most similar to ``query``.

        Args:
            query: natural-language query string.
            top_k: number of results to retrieve (default 10).

        Returns:
            list[str]: page contents of the matched documents, most similar
            first; empty list when no vector store has been created or loaded.
        """
        if not self.vector_store:
            print(f"FAISS向量存储未加载，请先创建或加载存储")
            return []
        similar_docs = self.vector_store.similarity_search(query, k=top_k)
        print(f"查询{query}问题的相关内容:\n")
        for i, doc in enumerate(similar_docs):
            # Preview only the first 200 characters of each hit.
            print(f"文档{i+1}:\n{doc.page_content[:200]}\n\n")

        return [doc.page_content for doc in similar_docs]

if __name__ == "__main__":
    # Load the source documents and report what was ingested.
    loader = DataloaderModule(directory_path="./data")
    loader.load_all_documents(markdown_path="./data")
    loader.display_summary()

    # Set up the embedding/storage module, then build and persist the index.
    storage = EmbeddingStorageModule(loader)
    storage.generate_embeddings()
    storage.create_faiss_vector_store()

    # Reload the persisted index and run a similarity search against it.
    storage.load_faiss_vector_store()
    query = "YOLOv8"
    data = storage.search_similar_documents(query=query, top_k=3)
    print(data)

