"""
Utility functions for training and validating models.
"""

import time
import torch

import torch.nn as nn
from torch.utils.data import dataset

from tqdm import tqdm
from ssea.utils import correct_predictions


def train(model,
          dataloader,
          optimizer,
          criterion,
          epoch_number,
          max_gradient_norm,
          max_premises_length,
          max_hypothesis_length,
          batch_size):
    """
    Train a model for one epoch on some input data with a given optimizer and
    criterion.

    Args:
        model: A torch module that must be trained on some input data.
        dataloader: A DataLoader object to iterate over the training data.
        optimizer: A torch optimizer to use for training on the input model.
        criterion: A loss criterion to use for training.
        epoch_number: The number of the epoch for which training is performed.
        max_gradient_norm: Max. norm for gradient norm clipping.
        max_premises_length: Fixed length the batch of premises is reshaped
            to, i.e. premises become a (batch_size, max_premises_length)
            tensor.
        max_hypothesis_length: Fixed length the batch of hypotheses is
            reshaped to.
        batch_size: Number of examples per batch. Batches are assumed to be
            exactly this size for the reshapes to succeed.

    Returns:
        epoch_time: The total time necessary to train the epoch.
        epoch_loss: The training loss computed for the epoch.
        epoch_accuracy: The accuracy computed for the epoch.
    """
    # Switch the model to train mode.
    model.train()
    device = model.device

    epoch_start = time.time()
    batch_time_avg = 0.0
    running_loss = 0.0
    correct_preds = 0

    # Fix: the progress-bar label used a hard-coded local `epoch = 0`
    # (always printing "epoch 1"); use the epoch_number argument instead.
    tqdm_batch_iterator = tqdm(dataloader,
                               desc='Training epoch ' + str(epoch_number))

    for batch_index, batch in enumerate(tqdm_batch_iterator):
        batch_start = time.time()

        # Flatten the batch tensors to the fixed (batch_size, max_len)
        # shapes the model expects.
        premises = torch.reshape(batch['premises'],
                                 (batch_size, max_premises_length))
        premises_adjacency_list = batch['premises_adjacency_list']
        premises_lengths = batch['premises_lengths']
        hypotheses = torch.reshape(batch['batch_hypotheses'],
                                   (batch_size, max_hypothesis_length))
        hypotheses_adjacency_list = batch["hypotheses_adjacency_list"]
        hypotheses_lengths = batch['hypotheses_lengths']
        labels = torch.tensor(batch["labels"]).to(device)

        optimizer.zero_grad()
        # The model receives the lengths twice: once as a tensor on the
        # device and once as the raw (Python) sequence.
        logits, probs = model(premises.to(device),
                              premises_adjacency_list.to(device),
                              torch.tensor(premises_lengths).to(device),
                              premises_lengths,
                              hypotheses.to(device),
                              hypotheses_adjacency_list.to(device),
                              torch.tensor(hypotheses_lengths).to(device),
                              hypotheses_lengths)

        loss = criterion(logits, labels)
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), max_gradient_norm)
        optimizer.step()

        # Fix: the original accumulated batch time and loss TWICE per batch
        # (once before and once after updating the progress bar), which
        # inflated both the displayed stats and the returned epoch_loss.
        batch_time_avg += time.time() - batch_start
        running_loss += loss.item()
        correct_preds += correct_predictions(probs, labels)

        description = "Avg. batch proc. time: {:.4f}s, loss: {:.4f}"\
                      .format(batch_time_avg / (batch_index + 1),
                              running_loss / (batch_index + 1))
        tqdm_batch_iterator.set_description(description)

    epoch_time = time.time() - epoch_start
    epoch_loss = running_loss / len(dataloader)
    epoch_accuracy = correct_preds / len(dataloader.dataset)

    return epoch_time, epoch_loss, epoch_accuracy


def validate(model, dataloader, criterion, batch_size,
             max_dev_premises_length, max_dev_hypothesis_length):
    """
    Compute the loss and accuracy of a model on some validation dataset.

    Args:
        model: A torch module for which the loss and accuracy must be
            computed.
        dataloader: A DataLoader object to iterate over the validation data.
        criterion: A loss criterion to use for computing the loss.
        batch_size: Number of examples per batch. Batches are assumed to be
            exactly this size for the reshapes to succeed.
        max_dev_premises_length: Fixed length the batch of premises is
            reshaped to.
        max_dev_hypothesis_length: Fixed length the batch of hypotheses is
            reshaped to.

    Returns:
        epoch_time: The total time to compute the loss and accuracy on the
            entire validation set.
        epoch_loss: The loss computed on the entire validation set,
            averaged over the batches that produced a finite loss.
        epoch_accuracy: The accuracy computed on the entire validation set,
            excluding batches whose loss was NaN.
    """
    # Switch to evaluate mode.
    model.eval()
    device = model.device
    epoch_start = time.time()
    running_loss = 0.0
    running_accuracy = 0.0
    skipped_batches = 0

    # Deactivate autograd for evaluation.
    with torch.no_grad():
        for batch in dataloader:
            premises = torch.reshape(batch['premises'],
                                     (batch_size, max_dev_premises_length))
            premises_adjacency_list = batch['premises_adjacency_list']
            premises_lengths = batch['premises_lengths']
            hypotheses = torch.reshape(batch['batch_hypotheses'],
                                       (batch_size, max_dev_hypothesis_length))
            hypotheses_adjacency_list = batch["hypotheses_adjacency_list"]
            hypotheses_lengths = batch['hypotheses_lengths']
            labels = torch.tensor(batch["labels"]).to(device)

            logits, probs = model(premises.to(device),
                                  premises_adjacency_list.to(device),
                                  torch.tensor(premises_lengths).to(device),
                                  premises_lengths,
                                  hypotheses.to(device),
                                  hypotheses_adjacency_list.to(device),
                                  torch.tensor(hypotheses_lengths).to(device),
                                  hypotheses_lengths)

            # NOTE(review): the 1e-10 added to the logits looks like a
            # leftover numerical-stability hack; kept as-is since the exact
            # criterion is not visible here — confirm whether it is needed.
            loss = criterion(logits + 1e-10, labels)
            # Fix: replace the `x != x` NaN trick with the explicit
            # torch.isnan check. NaN batches are excluded from the averages.
            if torch.isnan(loss):
                skipped_batches += 1
                continue
            running_loss += loss.item()
            running_accuracy += correct_predictions(probs, labels)

    epoch_time = time.time() - epoch_start
    valid_batches = len(dataloader) - skipped_batches
    # Fix: guard against a ZeroDivisionError when every batch produced a
    # NaN loss (the original also printed the skip count as a debug
    # leftover, which is removed here).
    if valid_batches <= 0:
        return epoch_time, 0.0, 0.0
    epoch_loss = running_loss / valid_batches
    epoch_accuracy = running_accuracy / (len(dataloader.dataset)
                                         - skipped_batches * batch_size)

    return epoch_time, epoch_loss, epoch_accuracy
