from langchain_community.vectorstores import Chroma
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.llms import OpenAI
from langchain.chains import VectorDBQA
from langchain_community.document_loaders import DirectoryLoader
from langchain_openai import OpenAIEmbeddings
import os

# SECURITY(review): a live-looking API key is hardcoded and committed to source.
# It should be rotated and loaded from the environment / a secrets manager
# instead of being set here — anyone with repo access can use (and exhaust) it.
os.environ['OPENAI_API_KEY'] = 'sk-ioXCJ3W2IQlpDzHu268aEeF7CcAf4b1f9f8c46A248C74a9d'
# Routes all OpenAI SDK traffic through a third-party proxy endpoint.
# NOTE(review): this silently overwrites any externally configured key/URL.
os.environ['OPENAI_BASE_URL'] = 'https://api.xiaoai.plus/v1'

if __name__ == '__main__':
    # Load every .txt file (recursively) from the content folder.
    # Raw string: the original 'D:\work\...' relied on invalid escape
    # sequences (\w, \P, \c) surviving literally, which is deprecated.
    loader = DirectoryLoader(r'D:\work\PriceProject\python\langchain\content', glob='**/*.txt')
    # Each file becomes one Document object.
    documents = loader.load()

    # Split documents into chunks of at most 200 characters, no overlap,
    # so each chunk fits comfortably in the LLM context when "stuffed".
    text_splitter = CharacterTextSplitter(chunk_size=200, chunk_overlap=0)
    split_docs = text_splitter.split_documents(documents)

    # Embeddings client for vectorizing the chunks.
    # Fix: 'gpt-3.5-turbo' is a chat model and is rejected by the
    # embeddings endpoint; use OpenAI's embedding model instead.
    embeddings = OpenAIEmbeddings()
    embeddings.model = 'text-embedding-ada-002'
    # Embed the chunks and store them in an in-memory Chroma vector store
    # for similarity search at query time.
    docsearch = Chroma.from_documents(split_docs, embeddings)

    # Build the QA chain: retrieve relevant chunks from the vector store,
    # "stuff" them into the prompt, and let the LLM answer.
    # return_source_documents=True includes the retrieved chunks in the result.
    qa = VectorDBQA.from_chain_type(
        llm=OpenAI(),
        chain_type="stuff",
        vectorstore=docsearch,
        return_source_documents=True,
    )
    # Ask a question grounded in the loaded .txt content.
    result = qa({"query": "江小白为什么不行了？"})
    print(result)