import torch
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from torch.utils.data import DataLoader
from transformers import (AdamW, BertForSequenceClassification, Trainer,
                          TrainingArguments, get_linear_schedule_with_warmup)

import config
import data


def compute_metrics(pred):
    """Compute overall accuracy plus per-class precision/recall/F1.

    Args:
        pred: an ``EvalPrediction``-like object with ``label_ids``
            (gold labels) and ``predictions`` (logits of shape
            ``[n_samples, n_classes]`` — assumed; TODO confirm against
            the Trainer's eval output).

    Returns:
        dict with ``"accuracy"`` (float) and ``"f1"`` / ``"precision"`` /
        ``"recall"`` as plain Python lists, one entry per class.
    """
    labels = pred.label_ids
    preds = pred.predictions.argmax(-1)
    # average=None -> one score per class instead of a single average.
    precision, recall, f1, _ = precision_recall_fscore_support(labels,
                                                               preds,
                                                               average=None)
    acc = accuracy_score(labels, preds)
    # Convert numpy arrays to lists: Trainer logs metrics as JSON, and
    # numpy arrays are not JSON-serializable.
    return {
        "accuracy": acc,
        "f1": f1.tolist(),
        "precision": precision.tolist(),
        "recall": recall.tolist(),
    }


def main():
    """Fine-tune BERT for contradiction classification.

    Loads the train/dev CSV datasets, builds a BERT sequence classifier
    with discriminative learning rates (small for the pretrained encoder,
    larger for the freshly initialized classification head), and trains
    it with the HuggingFace Trainer.
    """
    train_dataset = data.ContradictionDataset(config.train_csv_file_path)
    val_dataset = data.ContradictionDataset(config.dev_csv_file_path)

    model = BertForSequenceClassification.from_pretrained(
        config.bert_pretrained_folder, num_labels=len(config.categories))

    # Per-group hyperparameters. The optimizer reads the key "lr" from
    # each param group; the previous key "learning_rate" was silently
    # stored unused, so both groups fell back to the default lr below.
    optimizer_grouped_parameters = [{
        "params": model.bert.parameters(),
        "lr": 1e-5,
        "weight_decay": 0.0001
    }, {
        "params": model.classifier.parameters(),
        "lr": 1e-3,
        "weight_decay": 0.01
    }]
    # lr=5e-5 is only the default for groups that do not set their own.
    optimizer = AdamW(optimizer_grouped_parameters, lr=5e-5)

    epoch = 3
    batch_size = 4
    # Ceil division: a partial final batch still consumes a training step.
    total_steps = -(-len(train_dataset) // batch_size) * epoch
    lr_scheduler = get_linear_schedule_with_warmup(optimizer,
                                                   num_warmup_steps=500,
                                                   num_training_steps=total_steps)

    # NOTE: learning_rate / warmup_steps / weight_decay here are ignored
    # because a custom (optimizer, scheduler) pair is handed to Trainer;
    # they are kept for the record only.
    training_args = TrainingArguments(output_dir="results",
                                      num_train_epochs=epoch,
                                      learning_rate=5e-5,
                                      per_device_train_batch_size=batch_size,
                                      per_device_eval_batch_size=batch_size,
                                      warmup_steps=500,
                                      weight_decay=0.001,
                                      logging_dir="log",
                                      logging_steps=100)

    trainer = Trainer(model=model,
                      args=training_args,
                      train_dataset=train_dataset,
                      eval_dataset=val_dataset,
                      tokenizer=train_dataset.tokenizer,
                      compute_metrics=compute_metrics,
                      optimizers=(optimizer, lr_scheduler))
    trainer.train()
    # trainer.evaluate()


if __name__ == "__main__":
    main()
