# LangChain framework example --- document question-answering system
import os

from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import CSVLoader
from langchain.vectorstores import DocArrayInMemorySearch
from IPython.display import display, Markdown
from langchain.indexes import VectorstoreIndexCreator
from langchain.embeddings import OpenAIEmbeddings

# SECURITY: the original hard-coded a live-looking OpenAI secret key in
# source. Keys committed to source control must be revoked and rotated.
# Read the key from the environment instead; it stays empty when unset.
api_key = os.environ.get('OPENAI_API_KEY', '')
os.environ['OPENAI_API_KEY'] = api_key
# Path to the CSV catalog used as the document corpus for the QA demos.
file = 'OutdoorClothingCatalog_1000.csv'

def get_langchain_qa_1():
    """One-shot document QA: build an in-memory vector index over the CSV
    catalog and answer a single markdown-table query against it."""
    csv_loader = CSVLoader(file_path=file, encoding='utf-8')

    # VectorstoreIndexCreator wires loading, splitting, embedding and
    # vector storage together behind a single call.
    vector_index = VectorstoreIndexCreator(
        vectorstore_cls=DocArrayInMemorySearch
    ).from_loaders([csv_loader])

    question = "Please list all your shirts with sun protection \
    in a table in markdown and summarize each one."
    answer = vector_index.query(question)
    display(Markdown(answer))

def get_langchain_qa_2():
    """Step-by-step document QA over the CSV catalog.

    Walks through the individual LangChain building blocks (loader,
    embeddings, vector store, retriever, chat model, RetrievalQA chain)
    that ``VectorstoreIndexCreator`` otherwise hides behind one call.
    Requires OPENAI_API_KEY and network access; prints/display()s results.
    """
    # Approach 1: document QA implemented in explicit steps.
    # 1. Load the file with a document loader.
    loader = CSVLoader(file_path=file, encoding='utf-8')
    docs = loader.load()
    print(docs[2])  # spot-check one loaded document
    # 2. Create OpenAI embeddings and probe the embedding dimensionality.
    embeddings = OpenAIEmbeddings()
    embed = embeddings.embed_query("Hi my name is Harrison")
    print(len(embed))
    print(embed[:5])
    # 3. Store the documents + embeddings in an in-memory vector store
    #    (a production system would typically use a persistent vector DB).
    db = DocArrayInMemorySearch.from_documents(
        docs,
        embeddings
    )
    # 4. Create a retriever and the chat language model.
    retriever = db.as_retriever()
    chat = ChatOpenAI(api_key=api_key, temperature=0.9)
    # Stuff every document into a single prompt. Iterate the documents
    # directly rather than indexing with range(len(docs)).
    qdocs = "".join(doc.page_content for doc in docs)
    response = chat.call_as_llm(f"{qdocs} Question: Please list all your \
    shirts with sun protection in a table in markdown and summarize each one.")
    display(Markdown(response))
    # 5. Wrap the retriever + LLM into a LangChain RetrievalQA chain.
    qa_stuff = RetrievalQA.from_chain_type(
        llm=chat,
        chain_type="stuff",
        retriever=retriever,
        verbose=True
    )
    # 6. Run a test query through the chain.
    query = "Please list all your shirts with sun protection in a table \
    in markdown and summarize each one."
    response = qa_stuff.run(query)
    display(Markdown(response))

    # Approach 2: LangChain can also do all of the above in one line.
    # index = VectorstoreIndexCreator(
    #     vectorstore_cls=DocArrayInMemorySearch,
    #     embedding=embeddings,
    # ).from_loaders([loader])
    # response = index.query(query, llm=chat)


if __name__ == '__main__':
    # Run the step-by-step variant; the one-shot variant is kept for reference.
    # get_langchain_qa_1()
    get_langchain_qa_2()