import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import BertForSequenceClassification

import config
import data


def main():
    """Evaluate a fine-tuned BERT classifier on the dev split and print accuracy.

    Loads the saved model from disk, runs one forward pass per dev example
    in the contradiction dataset, and reports how many argmax predictions
    match the gold labels.
    """
    # Fall back to CPU so the script still runs on machines without a GPU.
    device = "cuda" if torch.cuda.is_available() else "cpu"

    model = BertForSequenceClassification.from_pretrained(
        R"G:\temp\content_eval")
    model.eval()
    model.to(device)

    eval_dataset = data.ContradictionDataset(config.dev_csv_file_path)
    eval_loader = DataLoader(eval_dataset, batch_size=1, shuffle=False)

    count = 0
    with torch.no_grad():
        for batch_data in tqdm(eval_loader, ascii=True):
            output = model(
                input_ids=batch_data["input_ids"].to(device),
                attention_mask=batch_data["attention_mask"].to(device),
                token_type_ids=batch_data["token_type_ids"].to(device))
            # Take argmax over the class dimension explicitly; the bare
            # torch.argmax(logits) form only worked because batch_size == 1.
            predictions = torch.argmax(output.logits, dim=-1)
            count += (predictions.cpu() == batch_data["labels"]).sum().item()
    # NOTE(review): the reported figure is accuracy (correct / total),
    # not precision; the message text is kept as-is for log compatibility.
    print(F"count is: {count}, precision: {count / len(eval_dataset):.2%}")


# Run the evaluation only when executed as a script, not on import.
if __name__ == "__main__":
    main()
