from langchain_community.chat_models import ChatOpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import TextLoader
from langchain_community.document_loaders import CSVLoader
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.document_loaders import Docx2txtLoader
from langchain_community.embeddings import ModelScopeEmbeddings
from langchain_community.vectorstores import Qdrant
from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain.chains import RetrievalQA

import os

# Connect to a locally deployed OpenAI-compatible chat service.
# The API key is a placeholder ("none"): the local server performs no auth,
# but the client still requires the field to be set.
_chat_params = dict(
    streaming=True,
    verbose=True,
    callbacks=[],
    openai_api_key="none",
    openai_api_base="http://127.0.0.1:8000/v1",
    model_name="qwen/Qwen1.5-0.5B-Chat",
    temperature=0,  # deterministic output for QA
)
model = ChatOpenAI(**_chat_params)


# Load raw documents from the local `./files` directory.
#
# Each supported extension maps to its loader class. The original if/elif
# chain had no else branch, so an unsupported extension either raised
# NameError (on the first file) or silently reused the loader left over from
# the previous iteration, loading the wrong file's content — unsupported
# files are now skipped explicitly.
_EXT_LOADERS = {
    ".pdf": PyPDFLoader,
    ".docx": Docx2txtLoader,
    ".txt": TextLoader,
    ".csv": CSVLoader,
}

base_dir = "./files"  # directory holding the source documents
documents = []
for file in os.listdir(base_dir):
    # Full path to the file
    file_path = os.path.join(base_dir, file)
    # Case-insensitive extension lookup (also matches e.g. ".PDF")
    loader_cls = _EXT_LOADERS.get(os.path.splitext(file)[1].lower())
    if loader_cls is None:
        continue  # unsupported file type — skip rather than crash or reuse stale state
    documents.extend(loader_cls(file_path).load())

print(f"documents content: {documents}")

# Split the loaded documents into small overlapping chunks so each piece
# fits the embedding model's input and retrieval stays fine-grained.
_CHUNK_SIZE = 200
_CHUNK_OVERLAP = 10
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=_CHUNK_SIZE,
    chunk_overlap=_CHUNK_OVERLAP,
)
chunked_documents = text_splitter.split_documents(documents)

# Build the embedding model — a ModelScope Chinese sentence-embedding model.
model_id = "damo/nlp_corom_sentence-embedding_chinese-base"
embeddings = ModelScopeEmbeddings(model_id=model_id)


# Index the chunked documents into an in-memory Qdrant collection,
# embedding each chunk with the ModelScope model built above.
# (location=":memory:" keeps everything in-process; nothing is persisted.)
vectorstore = Qdrant.from_documents(
    chunked_documents,
    embeddings,
    location=":memory:",
    collection_name="documents",
)

# Wrap the vector store in a MultiQueryRetriever: the LLM rewrites each user
# question into several variants so retrieval covers more phrasings.
retriever_from_llm = MultiQueryRetriever.from_llm(
    retriever=vectorstore.as_retriever(),
    llm=model,
)

# Build the RetrievalQA chain on top of the multi-query retriever.
qa_chain = RetrievalQA.from_chain_type(llm=model, retriever=retriever_from_llm)
