from langchain.chains.retrieval_qa.base import RetrievalQA
from langchain.embeddings import CacheBackedEmbeddings
from langchain.storage import LocalFileStore
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.llms.tongyi import Tongyi
from langchain_community.vectorstores import FAISS
from langchain_text_splitters import CharacterTextSplitter
#
# store = LocalFileStore('./cache/')
# embeddings = DashScopeEmbeddings()
# cached_embeddings = CacheBackedEmbeddings.from_bytes_store(embeddings, store, namespace=embeddings.model)
# doc = TextLoader("new1.txt").load()
# text_spliter = CharacterTextSplitter('\n', chunk_size=100, chunk_overlap=0)
# chunk = text_spliter.split_documents(doc)
# db = FAISS.from_documents(chunk, cached_embeddings)
#
# key = "new_index"
#
# db.save_local(key)
#
#
# store = LocalFileStore('./cache/')
#
# # 实例化向量嵌入器
# embeddings = DashScopeEmbeddings()
# # 支持缓存的嵌入器
# cached_embeddings = CacheBackedEmbeddings.from_bytes_store(embeddings, store,namespace=embeddings.model)
# key = 'new_index'  # 唯一的索引
# db = FAISS.load_local(key, cached_embeddings, allow_dangerous_deserialization=True)
# res = db.similarity_search('政策讨论怎么样？',k=3)
# # print(res)
#
# prompt = ""
# for i in res:
#     prompt += i.page_content + "\n"
#
# print(prompt)
#
# prompt += '政策讨论怎么样？' + "\n"
#
#
# llm = Tongyi()
#
# ret = llm.invoke(prompt)
#
# print(ret)


# store = LocalFileStore('./cache/')
# embeddings = DashScopeEmbeddings()
# cached_embeddings = CacheBackedEmbeddings.from_bytes_store(embeddings, store,namespace=embeddings.model)
# key = 'new_index'  # 唯一的索引
# llm = Tongyi()
# db = FAISS.load_local(key, cached_embeddings, allow_dangerous_deserialization=True)
# qa = RetrievalQA.from_chain_type(llm=llm,chain_type="stuff",retriever=db.as_retriever())
# q_res = qa.invoke("政策怎么样？")
# print(q_res)