from langchain_community.llms import Tongyi
# 文档加载
from langchain_community.document_loaders import TextLoader
# 文档切分
from langchain.text_splitter import CharacterTextSplitter


# Faiss相关
from langchain.embeddings import CacheBackedEmbeddings
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.storage import LocalFileStore
from langchain.chains import RetrievalQA

class FAISSDBUtils():
    """Utility wrapper around a FAISS vector store.

    Uses DashScope embeddings with a local file-backed embedding cache,
    and supports adding documents, similarity search, and retrieval-QA
    via a Tongyi LLM.
    """

    def __init__(self):
        # Embedding model (DashScope; requires DASHSCOPE_API_KEY in env).
        self.embeddings = DashScopeEmbeddings()
        # File-backed store that caches computed embedding vectors.
        # NOTE(review): '/cache/' is an absolute root-level path — confirm
        # this is intentional and writable in the deployment environment.
        self.store = LocalFileStore('/cache/')
        # Embedder that checks the cache before calling the remote API;
        # namespaced by model name so different models don't collide.
        self.cached_embdder = CacheBackedEmbeddings.from_bytes_store(
            self.embeddings, self.store, namespace=self.embeddings.model
        )

    def add(self, chunks, key):
        """Build a FAISS index from document chunks and persist it.

        Args:
            chunks: pre-split documents (list of langchain Documents)
            key: local path under which the index is saved
        """
        db = FAISS.from_documents(chunks, self.cached_embdder)
        db.save_local(key)

    def search(self, qa, count, key):
        """Run a similarity search against a saved FAISS index.

        Args:
            qa: query text
            count: number of results to return (top-k)
            key: local path the index was saved under

        Returns:
            List of the `count` most similar Documents.
        """
        # allow_dangerous_deserialization is required to load pickled
        # FAISS indexes; only safe because we load our own saved files.
        db = FAISS.load_local(key, self.cached_embdder, allow_dangerous_deserialization=True)
        res = db.similarity_search(qa, k=count)
        return res

    def retrieval(self, qa, key):
        """Answer a question with retrieval-augmented QA over a saved index.

        Args:
            qa: the question to answer
            key: local path the index was saved under

        Returns:
            The chain's result dict (answer under the 'result' key).
        """
        db = FAISS.load_local(key, self.cached_embdder, allow_dangerous_deserialization=True)
        llm = Tongyi()
        # BUGFIX: the chain previously shadowed the `qa` parameter, so
        # `qa.invoke(qa)` passed the chain object itself as the query
        # instead of the question string. Use a distinct name.
        chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=db.as_retriever())
        res = chain.invoke(qa)
        return res