from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
import numpy
from dataset_extractor import *
from sklearn.metrics import f1_score
import torch_directml as torch_dml

# Load the tokenizer and the fine-tuned 202-class legal-text classifier from
# local checkpoints, then build the evaluation DataLoader.
tokenizer = AutoTokenizer.from_pretrained('data/bert-base-chinese')
model_cls = AutoModelForSequenceClassification.from_pretrained('data/bert-base-legal-chinese-epoch-3', num_labels=202)
# extract() comes from dataset_extractor; presumably returns a dict with
# "content" (list of strings) and "label" (list of ints) — TODO confirm.
test_pair = extract("data/dataset/cail/exercise_contest/data_test.json")

from torch.utils.data import TensorDataset, DataLoader

batch_size = 50
# Tokenize the whole test split up front; pad/truncate to BERT's 512-token limit.
test_batch = tokenizer(test_pair["content"], max_length=512, truncation=True, padding="max_length", return_tensors="pt")
# Renamed from `test`: the old name was shadowed (overwritten) by the
# `test()` evaluation function defined later in this file.
test_dataset = TensorDataset(test_batch["input_ids"], test_batch["attention_mask"], torch.tensor(test_pair["label"]))
# shuffle=False: evaluation order does not matter and must be deterministic.
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

device = torch_dml.device()


def test(model):
    """Evaluate `model` on the test split and print the macro-averaged F1.

    Iterates `test_dataloader` batch by batch on `device`, collects the
    argmax predictions and the gold labels, and prints
    `f1_score(y_true, y_pred, average="macro")` over all 202 classes.

    Args:
        model: a sequence-classification model whose forward pass returns
            an object with a `.logits` attribute of shape (batch, 202).
    """
    model.to(device)
    model.eval()
    # Collect per-batch arrays and concatenate once at the end.
    # (The old code seeded numpy.zeros([]) and numpy.append-ed in the loop,
    # which both injected a spurious leading label 0 into y_true/y_predict
    # and was O(n^2) in total copies.)
    pred_batches = []
    true_batches = []

    with torch.no_grad():
        for step, batch in enumerate(test_dataloader):
            if step % 10 == 0 and not step == 0:
                print("step: ", step)
            b_input_ids = batch[0].to(device)
            b_input_mask = batch[1].to(device)
            b_labels = batch[2].to(device)

            # No `labels=` argument: we only need logits for evaluation, so
            # skip the unused loss computation.
            outputs = model(b_input_ids,
                            token_type_ids=None,
                            attention_mask=b_input_mask)

            pred_batches.append(torch.argmax(outputs.logits, dim=1).cpu().numpy())
            true_batches.append(b_labels.cpu().numpy())

    if not pred_batches:  # empty dataloader: nothing to score
        print("no test batches")
        return

    y_predict = numpy.concatenate(pred_batches).astype(int)
    y_true = numpy.concatenate(true_batches).astype(int)
    # print(y_true)
    # print(y_predict)
    # sklearn's signature is f1_score(y_true, y_pred, ...); the original call
    # had the two arrays swapped.
    print(f1_score(y_true, y_predict, labels=range(202), average="macro"))


# Runs the full evaluation as a module-level side effect.
# NOTE(review): consider guarding with `if __name__ == "__main__":` so that
# importing this file does not trigger an evaluation run — confirm no caller
# relies on the import-time behavior first.
test(model_cls)
