import warnings
import time
import torch
from sklearn.metrics import f1_score
import numpy as np
from torch.optim import lr_scheduler
from tensorboardX import SummaryWriter
from common.configs.path import paths

# Module-level TensorBoard writer shared by train(); all runs log into ./Resultlog.
writer = SummaryWriter('./Resultlog')


def fxn():
    """Emit a single DeprecationWarning (used to demonstrate warning suppression)."""
    warnings.warn(message="deprecated", category=DeprecationWarning)


# Run fxn() once at import time with its DeprecationWarning silenced;
# the filter change is scoped to this block by catch_warnings().
with warnings.catch_warnings():
    warnings.filterwarnings("ignore")
    fxn()


def adjust_learning_rate(args, optimizer, epoch):
    """Set every param group's LR to the initial LR decayed by 10x every 10 epochs.

    Fixed docstring: the code decays every 10 epochs (``epoch // 10``),
    not every 30 as previously documented.

    Args:
        args: namespace providing ``learning_rate`` (the initial LR).
        optimizer: optimizer whose ``param_groups`` are updated in place.
        epoch: current epoch index (0-based).
    """
    lr = args.learning_rate * (0.1 ** (epoch // 10))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr


def train(args, model, optimizer, loss_fn, train_dataloader, val_dataloader=None, device=torch.device('cpu'), epochs=10, patience=5):
    """Run the training loop, with optional per-epoch validation and early stopping.

    Args:
        args: namespace providing ``model``, ``epochs``, ``learning_rate``,
            ``gram``, ``word_Embedding`` and ``savecp`` (used for the
            checkpoint filename and EarlyStopping).
        model: the torch module to train.
        optimizer: optimizer over ``model``'s parameters.
        loss_fn: criterion mapping (logits, labels) to a scalar loss.
        train_dataloader: yields (input_ids, labels) batches.
        val_dataloader: optional validation loader; when given, enables
            metric tracking, LR scheduling, and early stopping.
        device: device the batches are moved to.
        epochs: maximum number of training epochs.
        patience: early-stopping patience in epochs.

    Returns:
        Tuple ``(best_f1, best_accuracy)`` over all validation epochs
        (both 0 if no validation loader was given).
    """
    # Best validation metrics seen so far.
    best_accuracy = 0
    best_f1 = 0

    # Checkpoint filename encodes the run's hyper-parameters.
    # Fixed: epochs now fills the [ep] slot and the learning rate the [lr]
    # slot (they were swapped), and the redundant trailing .format() is gone.
    path = '{}_[ep]{}_[lr]{}_[gram]{}_[Emd]{}.model'.format(
        args.model,
        args.epochs,
        str(round(args.learning_rate, 4)).replace('.', ''),
        args.gram,
        args.word_Embedding)
    early_stopping = EarlyStopping(
        path=path, savecp=args.savecp, patience=patience, verbose=False)

    # Start training loop
    print(f"{'Epoch':^7} | {'Train Loss':^12} | {'Val Loss':^10} | {'Val Acc':^9} | {'Val F1':^10} | {'Learning Rate':^10} | {'Elapsed':^9}")
    print("-"*87)

    valid_epochs_loss = []

    # Halve the LR whenever validation loss plateaus for 2 epochs.
    scheduler = lr_scheduler.ReduceLROnPlateau(
        optimizer, 'min', factor=0.5, patience=2, min_lr=0.0001)

    for epoch_i in range(epochs):
        # =======================================
        #               Training
        # =======================================

        # Tracking time and loss
        t0_epoch = time.time()
        total_loss = 0

        # Put the model into training mode (enables dropout etc.).
        model.train()

        for step, batch in enumerate(train_dataloader):
            # Move the batch to the target device.
            b_input_ids, b_labels = tuple(t.to(device) for t in batch)

            # Zero out any previously calculated gradients.
            model.zero_grad()

            # Forward pass (inputs are already on `device`; only the dtype
            # cast is needed).
            logits = model(b_input_ids.long())

            # Compute loss and accumulate the loss values.
            loss = loss_fn(logits, b_labels)
            total_loss += loss.item()

            # Fixed: retain_graph=True kept every batch's autograd graph
            # alive, growing memory each step for no benefit; a plain
            # backward() is all that is needed here.
            loss.backward()

            # Update parameters.
            optimizer.step()

        learning_rate = optimizer.param_groups[-1]['lr']

        # Average loss over the entire training data.
        avg_train_loss = total_loss / len(train_dataloader)

        writer.add_scalar("Train Loss", avg_train_loss, epoch_i)

        # Log weight distributions for TensorBoard inspection.
        for name, weight in model.named_parameters():
            writer.add_histogram(name, weight, epoch_i)

        # =======================================
        #               Evaluation
        # =======================================
        if val_dataloader is not None:
            # After each training epoch, measure performance on the
            # validation set.
            val_loss, val_accuracy, val_f1 = evaluate(
                model, val_dataloader, loss_fn, device)

            # Track the best metrics.
            best_accuracy = max(best_accuracy, val_accuracy)
            best_f1 = max(best_f1, val_f1)

            valid_epochs_loss.append(val_loss)

            # Print performance for this epoch.
            time_elapsed = time.time() - t0_epoch
            scheduler.step(val_loss)
            print(f"{epoch_i + 1:^7} | {avg_train_loss:^12.6f} | {val_loss:^10.6f} | {val_accuracy:^9.2f} | {val_f1:^9.4f} | {learning_rate:^9.4f} | {time_elapsed:^9.2f}")

            # Fixed: this bookkeeping previously ran even without a
            # validation loader, crashing on the empty loss list
            # (np.mean([]) and valid_epochs_loss[-1]).
            avg_valid_loss = float(np.mean(valid_epochs_loss))
            writer.add_scalar("Valid Loss", avg_valid_loss, epoch_i)

            early_stopping(
                val_loss=valid_epochs_loss[-1], model=model)
            if early_stopping.early_stop:
                print("Early stopping")
                break

    print("\n")
    print(
        f"Training complete! Best results: [f1] {best_f1:.2f} [accuracy] {best_accuracy:.2f}%.")
    return best_f1, best_accuracy


def evaluate(model, val_dataloader, loss_fn, device):
    """Measure the model's performance on the validation set.

    Args:
        model: the torch module to evaluate (switched to eval mode here).
        val_dataloader: yields (input_ids, labels) batches.
        loss_fn: criterion mapping (logits, labels) to a scalar loss.
        device: device the batches are moved to.

    Returns:
        Tuple ``(val_loss, val_accuracy, val_f1)`` — each the mean of the
        per-batch values (accuracy in percent, F1 is macro-averaged).

    NOTE(review): averaging per-batch macro-F1 is not the same as a
    dataset-level F1; kept as-is to preserve existing behavior.
    """
    # Evaluation mode disables dropout etc. during inference.
    model.eval()

    # Per-batch metric accumulators.
    val_accuracy = []
    val_f1 = []
    val_loss = []

    for batch in val_dataloader:
        # Move the batch to the target device.
        b_input_ids, b_labels = tuple(t.to(device) for t in batch)

        # No gradients are needed during evaluation.
        with torch.no_grad():
            # Fixed: dropped the redundant second .to(device) — the batch
            # was already moved above; only the dtype cast is needed.
            logits = model(b_input_ids.long())
            loss = loss_fn(logits, b_labels)
        val_loss.append(loss.item())

        # Predicted class = argmax over the logit dimension.
        preds = torch.argmax(logits, dim=1).flatten()

        # Per-batch accuracy (percent) and macro F1.
        accuracy = (preds == b_labels).cpu().numpy().mean() * 100
        f1_ = f1_score(b_labels.cpu().numpy(),
                       preds.cpu().numpy(), average="macro")

        val_accuracy.append(accuracy)
        val_f1.append(f1_)

    # Average the per-batch metrics over the validation set.
    return np.mean(val_loss), np.mean(val_accuracy), np.mean(val_f1)


class EarlyStopping():
    """Stop training when validation loss stops improving.

    Tracks the best (lowest) validation loss seen so far; after ``patience``
    consecutive non-improving epochs, sets ``early_stop`` to True. When
    ``savecp`` is set, the model is checkpointed on every improvement.
    """

    def __init__(self, path, savecp, patience=7, verbose=False, delta=0):
        """
        Args:
            path: checkpoint filename (saved under common/models/).
            savecp: whether to save a checkpoint on each improvement.
            patience: epochs to wait after the last improvement.
            verbose: print a message when a checkpoint is saved.
            delta: minimum score change that counts as an improvement.
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        # Fixed: np.Inf was removed in NumPy 2.0; np.inf is the canonical name.
        self.val_loss_min = np.inf
        self.delta = delta
        self.path = path
        self.savecp = savecp

    def __call__(self, val_loss, model):
        # Higher score == lower loss, so improvements raise the score.
        score = -val_loss
        if self.best_score is None:
            # First epoch: everything is an "improvement".
            self.best_score = score
            if self.savecp:
                self.save_checkpoint(val_loss, model)
        elif score < self.best_score + self.delta:
            # No improvement: count towards the patience budget.
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            # Improvement: record it and reset the counter.
            self.best_score = score
            if self.savecp:
                self.save_checkpoint(val_loss, model)
            self.counter = 0

    def save_checkpoint(self, val_loss, model):
        """Save ``model`` and remember the new minimum validation loss.

        Fixed: the parameters were declared as ``(model, val_loss)`` while
        every call site passes ``(val_loss, model)`` positionally — so the
        float loss was being torch.save()'d as the "model" and the model
        object stored as ``val_loss_min``. The signature now matches the
        call sites.
        """
        if self.verbose:
            print(
                f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}).  Saving model ...')
        torch.save(model, r'common/models/' + self.path)
        self.val_loss_min = val_loss
