import os
import time
import torch
import datasets
from random import sample
from utils import result_cal
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
)


# ---------------------------------------------------------------------------
# Build an in-context-learning prompt for MRPC.
# ---------------------------------------------------------------------------

# MRPC asks whether two sentences are semantically equivalent — it is a
# paraphrase-detection task, not NLI (the original task string looked like a
# copy-paste from an NLI script).
task = "Paraphrase Detection"
queries = ""
# GLUE MRPC label semantics: 1 = equivalent (paraphrase), 0 = not equivalent.
# The previous mapping was inverted, which would have labeled every
# demonstration example with the wrong answer.
label_map = {0: "Not equivalent.", 1: "Equivalent."}

data = datasets.load_dataset("glue", name="mrpc", cache_dir="/home/glf/data/cache", split="train")

# Use 3 random training examples as demonstrations, motivating the model to
# generate further training examples in the same format.
# Read the template ONCE, outside the loop — the original re-opened and
# re-read the same file on every iteration.
with open("./template/MRPC_template.txt", "r", encoding="utf-8") as mrpc_template_file:
    mrpc_template = mrpc_template_file.read()

for i in sample(range(len(data)), 3):
    # Fill the template with one (sentence1, sentence2, label) triple and
    # append it to the running block of demonstrations.
    queries += mrpc_template.format(
        sentence1=data[i]["sentence1"],
        sentence2=data[i]["sentence2"],
        label=label_map[data[i]["label"]],
    ) + "\n"

# Use the ICL template file to construct the full ICL prompt, which needs the
# task name and the concrete demonstration queries.
with open("./template/ICL_template.txt", "r", encoding="utf-8") as icl_template_file:
    icl_content = icl_template_file.read().format(
        task=task,
        queries=queries,
    )

# Timestamp the prompt file. Use '-' instead of ':' in the time part — ':'
# is illegal in filenames on Windows and awkward in shells.
cur_time = time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime())
# Make sure the output directory exists so the open() below cannot fail on a
# fresh checkout.
os.makedirs("./prompt", exist_ok=True)
prompt_path = f"./prompt/mrpc_prompt_{cur_time}.txt"
with open(prompt_path, 'w', encoding='utf-8') as prompt_out:
    prompt_out.write(icl_content)


# ---------------------------------------------------------------------------
# Perform in-context learning with Llama-2 to generate instructions.
# ---------------------------------------------------------------------------

# NOTE(review): device index 1 is hard-coded — confirm the target machine has
# a second GPU, or parameterize via an env var.
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")

# Fail with an actionable message instead of a bare KeyError when the
# HuggingFace access token is not configured.
use_auth_token = os.environ.get("USE_AUTH_TOKEN")
if not use_auth_token:
    raise RuntimeError(
        "Environment variable USE_AUTH_TOKEN must be set to a HuggingFace "
        "access token with access to meta-llama/Llama-2-7b-chat-hf."
    )

model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-chat-hf",
    cache_dir="/home/glf/data/cache",
    use_auth_token=use_auth_token,
).to(device)

tokenizer = AutoTokenizer.from_pretrained(
    "meta-llama/Llama-2-7b-chat-hf",
    cache_dir="/home/glf/data/cache",
    use_auth_token=use_auth_token,
)

# Read the prompt back, then close the handle BEFORE the (long) generation.
# The original also reused the name ``f`` for both the input and the output
# file handle, shadowing the still-open prompt file.
with open(prompt_path, "r", encoding="utf-8") as prompt_file:
    prompt = prompt_file.read()

# result_cal is a project helper; presumably it returns (responses, extra)
# with responses[0] being the generated text — TODO confirm in utils.py.
response, _ = result_cal(model, tokenizer, prompt, temperature=1, max_new_tokens=3000)

# Make sure the output directory exists so the write cannot fail on a fresh
# checkout.
os.makedirs("./instruction", exist_ok=True)
output_file_path = f"./instruction/mrpc_instructions_{cur_time}.txt"
with open(output_file_path, "w", encoding="utf-8") as instruction_file:
    instruction_file.write(response[0])
