 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
import os

from dotenv import load_dotenv, find_dotenv
from langchain.retrievalQA import RetrievalQA
from langchain.chat_models import ChatOpenAi
from langchain.document_loaders import CSVLoader
# 向量存储的索引
from langchain.indexs import VectorstoreIndexCreator
# 内存中，向量存储的一种模式：文档数组？
from langchain.Vectorstore import DocArrayInMemorySearch
# 接收文档，并基于文档创建问题和答案书
from langchain.evaluation.qa import QAGenerationChain
from langchain.evaluation.qa import QAEvalChain
import langchain
langchain.debug = True

# read local .env file
_ = load_dotenv(find_dotenv()) 

# Load the inspection-report rows.
# NOTE(review): the path has an .xlsx extension but is read with CSVLoader,
# which parses plain CSV text — confirm the file really is CSV, or switch to
# an Excel-capable loader.
file = "/Users/mamingchao/Downloads/巡检结果明细-PH-SSP-mysql-230823163104.xlsx"
loader = CSVLoader(file_path=file)
data = loader.load()

# Build an in-memory vector index from the loader.
# Keyword is vectorstore_cls (lowercase v) and the builder method is
# from_loaders, which takes a *list* of loaders.
index = VectorstoreIndexCreator(
    vectorstore_cls=DocArrayInMemorySearch
).from_loaders([loader])

# temperature=0.0 for deterministic answers during evaluation.
llm = ChatOpenAI(temperature=0.0)

# RetrievalQA must be built via the from_chain_type factory — the plain
# constructor does not accept llm/chain_type keyword arguments.
qa = RetrievalQA.from_chain_type(
    llm=llm,
    # Attribute is "vectorstore" (original had "vectorestore").
    retriever=index.vectorstore.as_retriever(),
    # Chain-type names are lowercase: "stuff", "map_reduce", ...
    chain_type="stuff",
    chain_type_kwargs={
        # Correct spelling: document_separator (not "seperator").
        "document_separator": "<<<<>>>>"
    },
)



# Hand-written evaluation examples: each is a {query, answer} pair.
examples = [
    {
        "query": "Do the cozy Pullover set has side pocket?",
        "answer": "yes",
    },
    {
        "query": "What's collection is the Ultra-Lofty 850",
        "answer": "The DownTek collection",
    },
]

# QAGenerateChain (not QAGenerationChain) produces a {query, answer} pair
# from each document fed to it.
example_gen_chain = QAGenerateChain.from_llm(ChatOpenAI())
# Returns one parsed query/answer dict per input document.
new_examples = example_gen_chain.apply_and_parse([{"doc": t} for t in data[:5]])

# Evaluate both the hand-written and the generated examples.
# (The original reset `examples` to [] before extending, silently
# dropping the manual examples from evaluation.)
examples += new_examples

# Smoke-test the chain on a single query before batch evaluation.
qa.run(examples[0]["query"])

# Run the QA chain over every example; each prediction dict carries the
# input keys plus the chain's "result".
predictions = qa.apply(examples)


# Grade each prediction against its reference answer with an LLM judge.
eval_chain = QAEvalChain.from_llm(llm)
graded_output = eval_chain.evaluate(examples, predictions)

# Report every example: the question, the reference answer, the chain's
# answer, and the judge's verdict.
for idx, (pred, grade) in enumerate(zip(predictions, graded_output)):
    print(f"Example{idx}:")
    print("question:" + pred['query'])
    print("real answer:" + pred['answer'])
    print("predict answer:" + pred['result'])
    print("predict grade:" + grade['text'])



