import datasets
from datasets import load_dataset
from logging import log
from transformers import AutoTokenizer, AutoModelForCausalLM
import argparse
from tqdm import tqdm
from transformers.utils import logging

# Silence everything below ERROR from the transformers library.
logging.get_logger("transformers").setLevel(logging.ERROR)

# Command-line interface: base model path (for the tokenizer), checkpoint
# to evaluate, and which dataset split to score.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--model",
    type=str,
    default="/media/zhaogang/4T2-2(大语言模型)/HuggingFace/models/iiBcai/gpt2",
    help="the model to fine-tune",
)
parser.add_argument(
    "--ckpt",
    type=str,
    default="output/checkpoint-2000",
    help="checkpoint path",
)
parser.add_argument(
    "--split",
    type=str,
    default="validation",
    help="data split",
)
args = parser.parse_args()

# Dataset configuration: HF dataset name and its text/label column names.
DATASET_NAME = "rotten_tomatoes"
DATA_BODY_KEY = "text"
DATA_LABEL_KEY = "label"

# Load the fine-tuned weights from the checkpoint directory.
model = AutoModelForCausalLM.from_pretrained(args.ckpt)

# Switch to inference mode (disables dropout / other training-only layers).
model.eval()

# Load the tokenizer.
# NOTE(review): the tokenizer is loaded from --model rather than --ckpt —
# presumably the checkpoint directory does not contain tokenizer files; the
# two must refer to the same vocabulary for the label ids below to be valid.
tokenizer = AutoTokenizer.from_pretrained(args.model)

# Load the dataset and select the split requested on the command line.
raw_datasets = load_dataset(DATASET_NAME)
eval_dataset = raw_datasets[args.split]

# Label words, index-aligned with the dataset's integer labels
# (assumes 0 = negative, 1 = positive for rotten_tomatoes — TODO confirm).
named_labels = ['neg', 'pos']

# First token id of each label word. The loop below generates exactly one
# token, so only the first sub-token of each label can ever be predicted.
# Iterate the labels directly instead of indexing via range(len(...)).
label_ids = [
    tokenizer(label, add_special_tokens=False)["input_ids"][0]
    for label in named_labels
]

correct = 0
total = 0

# Pull both columns once, then walk them in lockstep — avoids re-slicing
# the dataset on every iteration of the original range(len(...)) loop.
texts = eval_dataset[DATA_BODY_KEY]
labels = eval_dataset[DATA_LABEL_KEY]

for text, label_index in tqdm(zip(texts, labels), total=len(texts)):
    prompt = f"{text} Sentiment: "
    inputs = tokenizer(prompt, add_special_tokens=False, return_tensors="pt")
    label = label_ids[label_index]

    # Greedy decoding of a single token: the appended token id is the
    # model's sentiment prediction.
    output = model.generate(**inputs, do_sample=False, max_new_tokens=1)

    # .item() converts the 0-dim tensor to a plain int so the equality
    # check yields a Python bool instead of a tensor.
    predict = output[0][-1].item()
    if label == predict:
        correct += 1
    total += 1

# Guard the ratio against an empty split (ZeroDivisionError otherwise).
if total:
    print(f"Accuracy: {correct}/{total}")
    print(f"Accuracy: {correct / total}")
else:
    print("No examples to evaluate.")
