# LangChain framework example --- Evaluation

import os
import langchain
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import CSVLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.vectorstores import DocArrayInMemorySearch
from langchain.evaluation.qa import QAGenerateChain
from langchain.evaluation.qa import QAEvalChain


# SECURITY: never hard-code API keys in source control — the previous
# revision embedded a live-looking OpenAI key here. Read it from the
# environment instead (export OPENAI_API_KEY before running).
api_key = os.environ.get('OPENAI_API_KEY', '')
# CSV catalog that will be loaded and indexed for retrieval QA.
file = 'OutdoorClothingCatalog_1000.csv'


def get_langchain_evaluation(csv_path=file):
    """Build a RetrievalQA chain over a CSV catalog, generate QA examples
    with an LLM, and grade the chain's predictions with QAEvalChain.

    Args:
        csv_path: Path to the CSV file to index. Defaults to the
            module-level ``file`` constant for backward compatibility.

    Side effects:
        Performs network calls to the OpenAI API (requires OPENAI_API_KEY)
        and prints predictions and per-example grades to stdout.
    """
    loader = CSVLoader(file_path=csv_path, encoding='utf-8')
    data = loader.load()

    # Build an in-memory vector index over the CSV rows.
    index = VectorstoreIndexCreator(
        vectorstore_cls=DocArrayInMemorySearch
    ).from_loaders([loader])

    # temperature=0.0 keeps answers deterministic, which matters for grading.
    chat = ChatOpenAI(api_key=api_key, temperature=0.0)
    qa = RetrievalQA.from_chain_type(
        llm=chat,  # language model
        chain_type="stuff",  # stuff all retrieved docs into a single prompt
        retriever=index.vectorstore.as_retriever(),  # retriever over the index
        verbose=True,  # log chain execution
        chain_type_kwargs={
            "document_separator": "<<<<>>>>>"  # separator between retrieved docs
        }
    )

    # Hand-written ground-truth examples.
    examples = [
        {
            "query": "Do the Cozy Comfort Pullover Set\
            have side pockets?",
            "answer": "Yes"
        },
        {
            "query": "What collection is the Ultra-Lofty \
            850 Stretch Down Hooded Jacket from?",
            "answer": "The DownTek collection"
        }
    ]
    # Use LangChain's QA-generation chain to produce extra question/answer
    # pairs from the first five documents.
    # NOTE(review): newer LangChain releases deprecate apply_and_parse and
    # wrap each generated pair under a "qa_pairs" key — confirm against the
    # installed version before upgrading.
    example_gen_chain = QAGenerateChain.from_llm(ChatOpenAI())
    new_examples = example_gen_chain.apply_and_parse(
        [{"doc": t} for t in data[:5]]
    )
    examples += new_examples

    # Chain debugging: flip to True to trace intermediate chain steps.
    langchain.debug = False
    qa.run(examples[0]["query"])

    # Run the QA chain over every example, then grade with an LLM judge.
    predictions = qa.apply(examples)
    print(predictions)
    # Reuse the existing deterministic chat model; the original code built a
    # second identical ChatOpenAI instance here for no reason.
    eval_chain = QAEvalChain.from_llm(chat)
    graded_outputs = eval_chain.evaluate(examples, predictions)
    for i in range(len(examples)):
        print(f"Example {i}:")
        print("Question: " + predictions[i]['query'])
        print("Real Answer: " + predictions[i]['answer'])
        print("Predicted Answer: " + predictions[i]['result'])
        print("Predicted Grade: " + graded_outputs[i]['text'])
        print()

# Script entry point: run the full evaluation pipeline when executed directly.
if __name__ == '__main__':
    get_langchain_evaluation()
