#############################################################################
# 本脚本展示使用HuggingFace的Bert模型进行训练后的动态量化, 代码参考如下链接:
# https://percent4.github.io/archives/
#
# 
#                                                       Author： Xian Yang
#                                                     Date Time： 2023.11.27
##############################################################################

import time
import torch
import datasets
import numpy as np


# transformers utilities: the auto tokenizer and the padding-aware data collator
from transformers    import AutoTokenizer, DataCollatorWithPadding
from transformers    import AutoModelForSequenceClassification
from transformers    import Trainer, TrainingArguments
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

# Maximum token length used when truncating inputs at tokenization time.
MAX_LENGTH = 300

# Run on the first GPU when available, otherwise fall back to CPU.
device     = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Path to a fine-tuned sequence-classification checkpoint (plain string —
# the original used an f-string with no placeholders, which was a no-op).
checkpoint = "../outputs/imdb_trainer/checkpoint-2346"
# Load the fine-tuned classifier and move it to the selected device.
model      = AutoModelForSequenceClassification.from_pretrained(checkpoint).to(device)

# Tokenizer saved alongside the checkpoint, so vocab/config match the model.
tokenizer  = AutoTokenizer.from_pretrained(checkpoint)

def tokenize_function(sample, max_length=None):
    """Tokenize the ``'text'`` field of a dataset example or batch.

    Args:
        sample: a mapping with a ``'text'`` key (a single example or,
            with ``batched=True`` in ``Dataset.map``, a batch of texts).
        max_length: optional truncation length; defaults to the
            module-level ``MAX_LENGTH`` constant. (Previously this was
            hard-coded to the literal ``300``, silently duplicating
            ``MAX_LENGTH``.)

    Returns:
        The tokenizer's encoding dict (``input_ids``, ``attention_mask``, ...).
    """
    if max_length is None:
        # Resolve the default lazily so the module-level constant is the
        # single source of truth for the truncation length.
        max_length = MAX_LENGTH
    return tokenizer(sample['text'], max_length=max_length, truncation=True)

# Batch-tokenize the IMDB dataset: download/load all splits, then map the
# tokenizer over them in batches for speed.
raw_datasets       = datasets.load_dataset('imdb')     
tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
# Collator that dynamically pads each batch to its longest sequence at
# load time (instead of padding everything to MAX_LENGTH up front).
data_collator      = DataCollatorWithPadding(tokenizer=tokenizer)

# Debug print of the container type returned by load_dataset/map
# (presumably a DatasetDict — not shown by this file; confirm if relied on).
print(type(tokenized_datasets))


# NOTE(review): the commented-out block below is a disabled per-row inference
# timing loop. It references `test_df`, which is not defined anywhere in this
# file — it would need to be constructed (e.g. a pandas DataFrame built from
# the IMDB test split) before this code could be re-enabled.
# s_time = time.time()
# true_labels, pred_labels = [], [] 
# for i, row in test_df.iterrows():
#     row_s_time = time.time()
#     true_labels.append(row["label"])
#     encoded_text = tokenizer(row['text'], max_length=MAX_LENGTH, truncation=True, padding=True, return_tensors='pt').to(device)
#     # print(encoded_text)
#     logits = model(**encoded_text)
#     label_id = np.argmax(logits[0].detach().cpu().numpy(), axis=1)[0]
#     pred_labels.append(label_id)
#     print(i, (time.time() - row_s_time)*1000, label_id)

# print("avg time: ", (time.time() - s_time) * 1000 / test_df.shape[0])