from langchain.chains import RetrievalQA  # 检索QA链，在文档上进行检索
from langchain.chat_models import ChatOpenAI  # openai模型
from langchain.document_loaders import CSVLoader  # 文档加载器，采用csv格式存储
from langchain.indexes import VectorstoreIndexCreator #导入向量存储索引创建器
from langchain.vectorstores import DocArrayInMemorySearch #向量存储
import pandas as pd
from langchain.indexes import VectorstoreIndexCreator
from langchain_openai.embeddings import AzureOpenAIEmbeddings  # 导入嵌入模型
from openai import RateLimitError
from langchain_openai import AzureChatOpenAI
from tool import get_azure_endpoint,get_api_key,get_api_version

from langchain.evaluation.qa import QAGenerateChain  # QA generation chain: takes documents and creates a question/answer pair from each one
# Below we adapt the template from langchain.evaluation.qa.generate_prompt, appending "please answer in Chinese" at the end
from langchain.output_parsers.regex import RegexParser
from langchain.prompts import PromptTemplate
from langchain.base_language import BaseLanguageModel
from typing import Any

if __name__ == '__main__':
    # Load the Chinese product data; CSVLoader turns each CSV row into one Document.
    file = './product_data.csv'
    loader = CSVLoader(file_path=file)
    data = loader.load()
    # Preview the raw CSV with pandas to sanity-check the input.


    test_data = pd.read_csv(file, skiprows=0)
    print(test_data.head())

    # Azure OpenAI embedding model used to vectorize the documents.
    embeddings = AzureOpenAIEmbeddings(
        azure_endpoint=get_azure_endpoint().rstrip('/'),  # strip any trailing slash; keep only the base URL
        model="text-embedding-3-small",
        api_key=get_api_key(),
        api_version=get_api_version()
    )
    # Build an in-memory vector index over the loaded documents using the
    # embeddings above; DocArrayInMemorySearch keeps everything in process memory.
    index = VectorstoreIndexCreator(
        vectorstore_cls=DocArrayInMemorySearch, embedding=embeddings).from_loaders([loader])

    # Chat model used both to answer queries and to generate QA examples.
    llm = AzureChatOpenAI(
        azure_endpoint=get_azure_endpoint().rstrip('/'),  # strip any trailing slash; keep only the base URL
        azure_deployment="gpt-4o",
        model_name="gpt-4o",
        openai_api_version=get_api_version(),
        openai_api_key=get_api_key(),
        openai_api_type="azure",
        temperature=0.0,  # deterministic output, appropriate for evaluation
    )

    # Retrieval QA chain. "stuff" concatenates all retrieved documents into a
    # single prompt; the separator below marks document boundaries in that prompt.
    qa = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=index.vectorstore.as_retriever(),
        verbose=True,
        chain_type_kwargs={
            "document_separator": "<<<<>>>>>"
        }
    )

    # Inspect two sample documents from the loaded data.
    print(data[10])
    print(data[11])

    # Hand-written evaluation examples (query/answer pairs in Chinese).
    examples = [{
        "query": "高清电视机怎么进行护理?",
        "answer": "使用干布清洁。"},
        {
            "query": "旅行背包有内外袋吗?", "answer": "有。"
        }]

    # Prompt for generating QA pairs from a document. The trailing instruction
    # (a runtime string, left verbatim) asks the model to answer in Chinese.
    template = """You are a teacher coming up with questions to ask on a quiz.
    Given the following document, please generate a question and answer based on that
    document.
    Example Format:
    <Begin Document>
    ...
    <End Document>
    QUESTION: question here
    ANSWER: answer here
    These questions should be detailed and be based explicitly on information in the
    document. Begin!
    <Begin Document> {doc}
    <End Document> 请使用中文输出，注意query和answer不要换
    """

    # Parse the model's "QUESTION: ... ANSWER: ..." output into query/answer fields.
    output_parser = RegexParser(
        regex=r"QUESTION: (.*?)\nANSWER: (.*)", output_keys=["query", "answer"]
    )
    PROMPT = PromptTemplate(
        input_variables=["doc"], template=template, output_parser=output_parser
    )


    # Subclass QAGenerateChain so it uses the Chinese-output prompt defined above
    class ChineseQAGenerateChain(QAGenerateChain):
        """QAGenerateChain variant hard-wired to the Chinese-output PROMPT.

        Identical to the stock chain except that ``from_llm`` injects the
        module-level ``PROMPT`` (which instructs the model to answer in Chinese).
        """

        @classmethod
        def from_llm(cls, llm: BaseLanguageModel, **kwargs: Any) -> QAGenerateChain:
            """Construct the chain from *llm*, fixing the prompt to ``PROMPT``."""
            return cls(prompt=PROMPT, llm=llm, **kwargs)


    example_gen_chain = ChineseQAGenerateChain.from_llm(llm)  # build the example-generation chain on top of the chat model
    # Generate QA examples from the first five documents.
    new_examples = example_gen_chain.apply([{"doc": t} for t in data[:5]])
    # Inspect the generated examples.
    print(new_examples)

    print(new_examples[0])

    print(data[0])

    # Flatten the generated items and append them to the hand-written examples.
    # NOTE(review): this assumes each item is a single-key dict whose value is a
    # {"query": ..., "answer": ...} mapping (e.g. {"qa_pairs": {...}}) — confirm
    # against the installed langchain version's QAGenerateChain output format.
    examples += [v for item in new_examples for k, v in item.items()]
    import langchain
    langchain.debug = True  # turn on verbose chain tracing for the run below
    response = qa.run(examples[0]["query"])
    print(response)