import os
import pandas as pd
from datasets import Dataset
from transformers import AutoTokenizer, T5ForConditionalGeneration, Trainer, TrainingArguments


# Run configuration: GPU pinning, sequence lengths, base checkpoint, output paths.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # restrict training to the first GPU
input_max_length = 232   # max tokens for the 'src' column
output_max_length = 21   # max tokens for the 'target' column
base_model = 't5-small'
wandb_run_name = 't5-small-commit-message-bs8-232-21-uncased'
output_dir = f'output/models/{wandb_run_name}'

# Load the dataset
def _load_split(path, low_memory=True):
    """Read one CSV split, drop rows with missing values, return a Dataset.

    `preserve_index=False` matters: after `dropna` the pandas index is
    non-contiguous, and `Dataset.from_pandas` would otherwise carry it
    along as a spurious '__index_level_0__' column.
    """
    frame = pd.read_csv(path, low_memory=low_memory)
    frame.dropna(axis=0, how='any', inplace=True)
    return Dataset.from_pandas(frame, preserve_index=False)


train_dataset = _load_split('datasets/commit_message/train-uncased.csv', low_memory=False)
val_dataset = _load_split('datasets/commit_message/valid-uncased.csv')
print(train_dataset)
print(val_dataset)

# Tokenization
# Load the tokenizer and pretrained T5 checkpoint named by `base_model`
# (fetched from the Hugging Face hub / local cache on first use).
tokenizer = AutoTokenizer.from_pretrained(base_model)
model = T5ForConditionalGeneration.from_pretrained(base_model)


def tokenize(batch):
    """Tokenize a batch of examples for seq2seq training.

    Args:
        batch: mapping with 'src' (input text) and 'target' (commit-message
            text) lists, as produced by `Dataset.map(batched=True)`.

    Returns:
        The tokenized inputs with a 'labels' key added. Padding positions in
        the labels are replaced with -100, the ignore_index of PyTorch's
        CrossEntropyLoss, so the loss is not computed on padding tokens.
    """
    tokenized_input = tokenizer(batch['src'], padding='max_length', truncation=True, max_length=input_max_length)
    tokenized_label = tokenizer(batch['target'], padding='max_length', truncation=True, max_length=output_max_length)
    # Mask out padding in the labels; leaving the real pad token id here would
    # make the model waste capacity learning to predict padding.
    tokenized_input['labels'] = [
        [token if token != tokenizer.pad_token_id else -100 for token in label]
        for label in tokenized_label['input_ids']
    ]
    return tokenized_input


# Tokenize both splits in chunks of 512 examples, then expose only the columns
# the model consumes, as numpy arrays.
train_dataset = train_dataset.map(tokenize, batched=True, batch_size=512)
val_dataset = val_dataset.map(tokenize, batched=True, batch_size=512)

model_columns = ['input_ids', 'attention_mask', 'labels']
for split in (train_dataset, val_dataset):
    split.set_format('numpy', columns=model_columns)


# Train the model
training_args = TrainingArguments(
    output_dir=output_dir,
    run_name=wandb_run_name,  # name of the wandb run
    num_train_epochs=10,
    learning_rate=0.001,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    # Evaluate and checkpoint on a step schedule, keeping at most one checkpoint.
    evaluation_strategy='steps',
    eval_steps=2500,   # how often to run evaluation on the val set
    save_steps=2500,   # how often to save a checkpoint
    save_total_limit=1,
    # Offload eval predictions from the GPU every step (the higher, the more vRAM used).
    eval_accumulation_steps=1,
    # Computing only the loss (no other metrics) during eval uses less RAM.
    prediction_loss_only=True,
    remove_unused_columns=True,  # drop dataset columns the model does not accept
    logging_steps=1000,          # how often to log loss to wandb
    logging_first_step=False,    # do not log the very first training step
    # After training, reload the checkpoint with the lowest eval loss.
    load_best_model_at_end=True,
    metric_for_best_model="loss",
    greater_is_better=False,  # best model is the one with the LOWEST loss
)

# Fine-tune on the training split while evaluating on the validation split,
# then persist the final (best) model to `output_dir`.
trainer = Trainer(
    args=training_args,
    model=model,
    train_dataset=train_dataset,
    eval_dataset=val_dataset,
)
trainer.train()
trainer.save_model(output_dir)
