import argparse
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers.generation import GenerationConfig
from langchain.prompts import ChatPromptTemplate
import json
from tqdm import tqdm

# RAG prompt, v2 (Chinese — the model is prompted in Chinese). It instructs
# the model to answer strictly from the retrieved passages, to reply
# "都不相关，我不知道" ("none are relevant, I don't know") when nothing
# relevant is found, and — new in v2 vs. the original prompt — to keep the
# answer as concise as possible.
rag_prompt = \
    """给定问题：{question}
检索结果：{retrieval_answer}
请阅读理解上面多个检索结果，正确地回答问题。只能根据相关的检索结果或者知识回答，禁止编造；如果没有相关结果，请回答“都不相关，我不知道”。要求回答尽可能精简，只回答问题直接相关的内容。"""

# Template with two input variables: {question} and {retrieval_answer}.
rag_template = ChatPromptTemplate.from_template(rag_prompt)


def llm_answer(model_path, retrieval_path, answer_path, gpu_id):
    """Answer each question's retrieved passages with the LLM and save JSON.

    Reads ``retrieval_path`` (a JSON list of dicts with keys ``question`` and
    ``answer_1``..``answer_3``), asks the model to answer the question from
    each retrieved passage separately via the module-level ``rag_template``,
    overwrites ``answer_1``..``answer_3`` in place with the model responses,
    and dumps the updated list to ``answer_path``.

    Args:
        model_path: HuggingFace checkpoint path/directory.
        retrieval_path: Input JSON file with questions and retrieval results.
        answer_path: Output JSON file for the model answers.
        gpu_id: Device-map string for model placement, e.g. ``"cuda:0"``.
    """
    tokenizer = AutoTokenizer.from_pretrained(
        model_path, trust_remote_code=True)
    # bf16=True and model.chat(...) are custom-code model features
    # (Qwen-style) — hence trust_remote_code on every from_pretrained call.
    # .eval() is applied once here (the original called it twice).
    model = AutoModelForCausalLM.from_pretrained(
        model_path, device_map=gpu_id, trust_remote_code=True, bf16=True).eval()
    model.generation_config = GenerationConfig.from_pretrained(
        model_path, trust_remote_code=True)

    with open(retrieval_path, "r", encoding="utf-8") as f:
        questions_list = json.load(f)

    for question in tqdm(questions_list):
        question_text = question["question"]
        # Query the model once per retrieved passage; each call starts from
        # an empty chat history so the three answers are independent.
        for key in ("answer_1", "answer_2", "answer_3"):
            prompt = rag_template.format(
                question=question_text, retrieval_answer=question[key])
            response, _ = model.chat(tokenizer, prompt, history=[])
            question[key] = response

    # Open the output only after generation finishes so a crash mid-run does
    # not leave a truncated/empty answer file behind.
    with open(answer_path, "w", encoding="utf-8") as f2:
        json.dump(questions_list, f2, ensure_ascii=False)


def parse_args():
    """Build the CLI parser and return the parsed arguments.

    All four options are strings: model checkpoint path, retrieval input
    file, answer output file, and the device string for model placement.
    """
    parser = argparse.ArgumentParser()
    # (flag, default, help) — every option is a plain string argument.
    options = [
        ('--model_path', 'model.pth', 'Path to the model file'),
        ('--retrieval_path', 'retrieval.txt', 'Path to the retrieval file'),
        ('--answer_path', 'answer.txt', 'Path to the answer file'),
        ('--gpu_id', 'cuda:0', 'GPU ID to use'),
    ]
    for flag, default, description in options:
        parser.add_argument(flag, type=str, default=default, help=description)
    return parser.parse_args()


if __name__ == '__main__':
    # Parse the CLI, echo the configuration, then run the answering pipeline.
    args = parse_args()
    for value in (args.model_path, args.retrieval_path,
                  args.answer_path, args.gpu_id):
        print(value)
    llm_answer(args.model_path, args.retrieval_path,
               args.answer_path, args.gpu_id)
