import os
import gc
import time
import copy
import pandas as pd
import numpy as np
import torch

from torch import nn
from torch.optim import lr_scheduler
import torch.optim as optim
from tqdm import tqdm
from torcheval.metrics.functional import binary_auroc, multiclass_auroc
from sklearn.metrics import roc_auc_score
from collections import defaultdict
from sklearn.model_selection import StratifiedGroupKFold

from utils import set_seed, print_trainable_parameters
from datasets import prepare_loaders
from models import setup_model


def fetch_scheduler(optimizer, CONFIG):
    """Build the LR scheduler named by CONFIG['scheduler'].

    Args:
        optimizer: the optimizer whose learning rate will be scheduled.
        CONFIG: dict with key 'scheduler' (one of 'CosineAnnealingLR',
            'CosineAnnealingWarmRestarts', or None) plus the scheduler's
            own hyper-parameters ('T_max' / 'T_0', 'min_lr').

    Returns:
        A torch.optim.lr_scheduler instance, or None when CONFIG['scheduler']
        is None.

    Raises:
        ValueError: for an unrecognized scheduler name.  (The original code
        fell through every branch and crashed with UnboundLocalError.)
    """
    name = CONFIG['scheduler']
    if name == 'CosineAnnealingLR':
        return lr_scheduler.CosineAnnealingLR(optimizer, T_max=CONFIG['T_max'],
                                              eta_min=CONFIG['min_lr'])
    elif name == 'CosineAnnealingWarmRestarts':
        return lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=CONFIG['T_0'],
                                                        eta_min=CONFIG['min_lr'])
    elif name is None:
        return None
    raise ValueError(f"Unknown scheduler: {name!r}")


def criterion(outputs, targets, task):
    """Sum-reduced loss for one task.

    Tasks 2018/2019 are multi-class (cross-entropy on raw logits); every
    other task is binary (BCE, so `outputs` must already be probabilities).
    """
    if task in (2018, 2019):
        loss_fn = nn.CrossEntropyLoss(reduction='sum')
        return loss_fn(outputs, targets)
    loss_fn = nn.BCELoss(reduction='sum')
    return loss_fn(outputs, targets.type(torch.float))


def metric_function(output, target, task):
    """AUROC for one task as a Python float.

    Tasks 2018 and 2019 are multi-class (7 and 9 classes respectively);
    any other task id is scored with binary AUROC.
    """
    num_classes = {2018: 7, 2019: 9}.get(task)
    if num_classes is None:
        return binary_auroc(input=output, target=target).item()
    return multiclass_auroc(input=output, target=target, num_classes=num_classes).item()


def calculate(outputs, targets, tasks, CONFIG):
    """Accumulate loss and sample-weighted AUROC over every task in the batch.

    Args:
        outputs: dict mapping task id -> model output tensor for the whole batch.
        targets: 1-D tensor of labels for the whole batch.
        tasks: 1-D tensor giving each sample's task id.
        CONFIG: dict with key 'tasks' listing all task ids.

    Returns:
        (loss, auroc): `loss` is the summed (unreduced by batch) loss tensor;
        `auroc` is sum over tasks of per-task AUROC * per-task sample count,
        so dividing by total sample count yields a weighted-average AUROC.

    Bug fix: the AUROC contribution was previously weighted by
    `task_idx.shape[0]` (always the FULL batch size), so with more than one
    task per batch the weights summed to a multiple of the batch size and the
    epoch AUROC could exceed 1. Weight by the per-task sample count instead.
    """
    loss = torch.tensor(0.0, device=targets.device, dtype=torch.float)
    auroc = 0.0
    for task in CONFIG['tasks']:
        task_idx = tasks == task
        n_task = int(task_idx.sum().item())  # samples of this task in the batch
        if n_task == 0:
            continue
        loss += criterion(outputs[task][task_idx], targets[task_idx], task)
        auroc += metric_function(outputs[task][task_idx], targets[task_idx], task) * n_task
    return loss, auroc


def train_one_epoch(model, optimizer, scheduler, dataloader, device, epoch, CONFIG):
    """Run one training epoch; returns (mean loss per sample, weighted AUROC).

    The scheduler (if any) is stepped once per BATCH, not per epoch — the
    CONFIG T_max/T_0 values are presumably sized accordingly (confirm).
    """
    model.train()

    dataset_size = 0
    running_loss = 0.0
    running_auroc = 0.0
    # Fix: initialize so an empty dataloader cannot raise UnboundLocalError
    # at the return statement.
    epoch_loss = 0.0
    epoch_auroc = 0.0

    bar = tqdm(enumerate(dataloader), total=len(dataloader))
    for step, data in bar:
        images = data['image'].to(device, dtype=torch.float)
        targets = data['target'].to(device, dtype=torch.long)
        tasks = data['task'].to(device, dtype=torch.long)

        batch_size = images.size(0)

        outputs = model(images)

        # `loss` comes back sum-reduced; normalize by batch size before backprop.
        loss, auroc = calculate(outputs, targets, tasks, CONFIG)
        loss /= batch_size

        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        if scheduler is not None:
            scheduler.step()

        # Undo the per-batch normalization so running_loss is a plain sum.
        running_loss += (loss.item() * batch_size)
        running_auroc += auroc
        dataset_size += batch_size

        epoch_loss = running_loss / dataset_size
        epoch_auroc = running_auroc / dataset_size

        bar.set_postfix(Epoch=epoch, Train_Loss=epoch_loss, Train_Auroc=epoch_auroc,
                        LR=optimizer.param_groups[0]['lr'])
    gc.collect()

    return epoch_loss, epoch_auroc


@torch.inference_mode()
def valid_one_epoch(model, dataloader, device, epoch, optimizer, CONFIG):
    """Run one validation epoch; returns (mean loss per sample, weighted AUROC).

    `optimizer` is only used to display the current LR in the progress bar.
    """
    model.eval()

    dataset_size = 0
    running_loss = 0.0
    running_auroc = 0.0
    # Fix: initialize so an empty dataloader cannot raise UnboundLocalError
    # at the return statement.
    epoch_loss = 0.0
    epoch_auroc = 0.0

    bar = tqdm(enumerate(dataloader), total=len(dataloader))
    for step, data in bar:
        images = data['image'].to(device, dtype=torch.float)
        targets = data['target'].to(device, dtype=torch.long)
        tasks = data['task'].to(device, dtype=torch.long)

        batch_size = images.size(0)

        outputs = model(images)

        loss, auroc = calculate(outputs, targets, tasks, CONFIG)

        # NOTE: validation loss is accumulated sum-reduced (not divided by
        # batch_size first as in training), then multiplied by batch_size —
        # preserved as-is from the original; the displayed scale differs from
        # the train loss by a factor of batch_size.
        running_loss += (loss.item() * batch_size)
        running_auroc += auroc
        dataset_size += batch_size

        epoch_loss = running_loss / dataset_size
        epoch_auroc = running_auroc / dataset_size

        bar.set_postfix(Epoch=epoch, Valid_Loss=epoch_loss, Valid_Auroc=epoch_auroc,
                        LR=optimizer.param_groups[0]['lr'])

    gc.collect()

    return epoch_loss, epoch_auroc


def run_training(train_loader, valid_loader, model, optimizer, scheduler, CONFIG,
        model_folder=None, tolerance_max=15, best_epoch_score_def=-np.inf, start_epoch=1):
    """Train for CONFIG['epochs'] epochs with early stopping and checkpointing.

    Args:
        train_loader / valid_loader: dataloaders for the two phases.
        model, optimizer, scheduler: training objects; `scheduler` may be None.
        CONFIG: dict providing 'seed', 'epochs', 'device', 'save_period'.
        model_folder: if given, 'best' weights, periodic checkpoints and a
            log.txt are written there.
        tolerance_max: early-stop after this many epochs without improvement.
        best_epoch_score_def: initial best validation AUROC.
        start_epoch: first epoch number (1-based, inclusive).

    Returns:
        (model, history): model reloaded with the best weights, and a dict of
        per-epoch metric lists.
    """
    seed = CONFIG['seed']
    num_epochs = CONFIG['epochs']
    device = CONFIG['device']
    save_period = CONFIG['save_period']
    set_seed(seed)
    start = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_epoch_score = best_epoch_score_def
    history = defaultdict(list)
    tolerance = 0

    for epoch in range(start_epoch, num_epochs + 1):
        if tolerance > tolerance_max:
            break
        gc.collect()
        train_epoch_loss, train_epoch_auroc = train_one_epoch(model, optimizer, scheduler, train_loader,
                                                              device, epoch, CONFIG)

        val_epoch_loss, val_epoch_auroc = valid_one_epoch(model, valid_loader, device, epoch, optimizer, CONFIG)

        # Fix: scheduler.get_lr() is deprecated (and wrong outside of step())
        # and crashes when scheduler is None; the optimizer's param group
        # always holds the LR actually in effect.
        current_lr = optimizer.param_groups[0]['lr']

        history['Train Loss'].append(train_epoch_loss)
        history['Valid Loss'].append(val_epoch_loss)
        history['Train AUROC'].append(train_epoch_auroc)
        history['Valid AUROC'].append(val_epoch_auroc)
        history['lr'].append(current_lr)
        if model_folder is not None:
            with open(os.path.join(model_folder, 'log.txt'), 'a', encoding='utf-8') as file:
                file.write(f'Epoch:{epoch}, Train Loss:{train_epoch_loss}, Valid Loss:{val_epoch_loss}, Train AUROC:{train_epoch_auroc}, Valid AUROC:{val_epoch_auroc}, lr:{current_lr}\n')

        if best_epoch_score <= val_epoch_auroc:
            tolerance = 0
            print(f"Validation AUROC Improved ({best_epoch_score} ---> {val_epoch_auroc})")
            best_epoch_score = val_epoch_auroc
            best_model_wts = copy.deepcopy(model.state_dict())
            if model_folder is not None:
                torch.save(model.state_dict(), os.path.join(model_folder, 'best'))
        else:
            tolerance += 1
        if epoch % save_period == 0:
            if model_folder is not None:
                checkpoint = {
                    'model': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    # Fix: guard against scheduler=None (fetch_scheduler may
                    # legitimately return None).
                    'scheduler': scheduler.state_dict() if scheduler is not None else None,
                    'epoch': epoch,
                }
                torch.save(checkpoint, os.path.join(model_folder, f'epoch_{epoch}.pth'))

        print()

    end = time.time()
    time_elapsed = end - start
    print('Training complete in {:.0f}h {:.0f}m {:.0f}s'.format(
        time_elapsed // 3600, (time_elapsed % 3600) // 60, (time_elapsed % 3600) % 60))
    print("Best AUROC: {:.4f}".format(best_epoch_score))
    model.load_state_dict(best_model_wts)
    return model, history
