import os 
# 1.Load 导入Document Loaders
from langchain_community.document_loaders import PyPDFLoader
# 导入 Docx2txtLoader
from langchain_community.document_loaders import Docx2txtLoader
# 导入 TextLoader
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_openai.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Qdrant
from langchain_openai.chat_models import ChatOpenAI
# 1. Load: read every supported document in the source directory.
base_dir = 'OneFlower'  # directory holding the source documents
# Map lowercase file extensions to their loader classes; add new formats here.
LOADERS = {
    '.pdf': PyPDFLoader,
    '.docx': Docx2txtLoader,
    '.txt': TextLoader,
}
documents = []
for file in os.listdir(base_dir):
    # Build the full path to the file.
    file_path = os.path.join(base_dir, file)
    # Match extensions case-insensitively so e.g. "report.PDF" is not skipped.
    ext = os.path.splitext(file)[1].lower()
    loader_cls = LOADERS.get(ext)
    if loader_cls is not None:
        documents.extend(loader_cls(file_path).load())
# 2. Split: break the loaded documents into chunks suitable for embedding
# and vector storage (200 chars per chunk, 10 chars of overlap).
text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=10)
chunked_documents = text_splitter.split_documents(documents)

# 3. Store: embed each chunk with OpenAI's embedding model and index it
# in an in-memory Qdrant collection.
vectorstore = Qdrant.from_documents(
    chunked_documents,               # the chunked documents
    OpenAIEmbeddings(),              # OpenAI embedding model
    location=":memory:",             # keep the index in memory
    collection_name="my_documents",  # name of the Qdrant collection
)
# 4. Retrieval: set up the LLM, the multi-query retriever, and the QA chain.
import logging  # logging utility
from langchain.retrievers.multi_query import MultiQueryRetriever  # MultiQueryRetriever tool
from langchain.chains import RetrievalQA  # RetrievalQA chain

# Configure logging so the alternative queries generated by
# MultiQueryRetriever are visible at INFO level.
logging.basicConfig()
logging.getLogger('langchain.retrievers.multi_query').setLevel(logging.INFO)

# Chat model (OpenAI GPT-3.5) used both to expand the query and to answer it.
llm = ChatOpenAI(
    model_name="gpt-3.5-turbo",
    max_tokens=100,
    verbose=True,
)

# MultiQueryRetriever rephrases the user question into several variants and
# merges the documents retrieved for each one.
retriever_from_llm = MultiQueryRetriever.from_llm(
    retriever=vectorstore.as_retriever(),
    llm=llm,
)

# RetrievalQA chain: retrieve relevant chunks, then ask the LLM to answer.
qa_chain = RetrievalQA.from_chain_type(llm, retriever=retriever_from_llm)
# .invoke() replaces the deprecated Chain.__call__ interface.
result = qa_chain.invoke({"query": "病假的薪资怎么发"})
print(result)
 