from sklearn import multiclass
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR, CosineAnnealingLR
from torch.cuda.amp import autocast, GradScaler
import numpy as np
import pandas as pd
from collections import Counter, OrderedDict
import time
import os
from tqdm import tqdm
import torch.backends.cudnn as cudnn
from utils.metrics import SegMeter, epoch_log, AverageMeter, accuracy
from utils.trainer.distributed_utils import reduce_value, is_main_process, get_rank
from utils.helper import cm_analysis
from models.optimizer import RAdam, RangerQH, RangerLars
from models.scheduler import CosineAnnealingScheduler
from adabelief_pytorch import AdaBelief
from timm.data.mixup import Mixup
from sklearn.metrics import roc_auc_score
import logging
logger = logging.getLogger('ClsModel.Train')


class Cls_Trainer(object):
    '''Takes care of training and validation of a classification model.

    Responsibilities:
      * builds the optimizer and (optional) LR scheduler from configuration,
      * runs mixed-precision (AMP) train/val epochs, optionally with
        mixup/cutmix augmentation,
      * tracks loss / top-1 / top-2 accuracy histories per phase,
      * saves the latest and best checkpoints, draws the latest confusion
        matrix, and early-stops on the configured metric,
      * when gpu_type == 2 (distributed training), reduces the monitored
        metric across ranks.
    '''

    def __init__(self, dataloaders, model, device, gpu_type, optimizer,
                criterion, out_dir, ymap, class_num, epoch, lr, earlystop_patience, 
                lr_scheduler_name, init_indics, args):
        '''Build the trainer.

        Args:
            dataloaders (dict): phase name ('train'/'val') -> DataLoader.
            model (nn.Module): network to optimize.
            device (torch.device): device to run on.
            gpu_type (int): parallel mode; 2 means distributed (DDP) training.
            optimizer (str): 'adam' | 'sgd' | 'radam' | 'adabelief' |
                'rangerqh' | 'rangerlars'.
            criterion: loss callable taking (logits, targets).
            out_dir (str): directory for checkpoints and plots.
            ymap (dict): class index -> human-readable label.
            class_num (int): number of classes.
            epoch (int): total number of epochs to train.
            lr (float): initial learning rate.
            earlystop_patience (int): epochs without improvement before stop.
            lr_scheduler_name (str): 'ReduceLROnPlateau' | 'CosineAnnealingLR'
                | anything else for no scheduler.
            init_indics (float): initial value for the monitored metric
                (useful when resuming from a checkpoint).
            args: namespace carrying the remaining hyper-parameters
                (weight_decay, condition, betas_for_optim, eps_for_optim,
                mixup settings, scheduler factor/patience, model name, ...).

        Raises:
            ValueError: if `optimizer` is not one of the supported names.
        '''
        self.out_dir = out_dir
        self.lr = lr
        self.weight_decay = args.weight_decay
        self.num_epochs = epoch
        self.patience = earlystop_patience
        # Metric that drives early stopping / "best" checkpoint selection:
        # one of 'loss' | 'acc' | 'auc_ovr' | 'auc_ovo'.
        self.condition = args.condition.lower()
        self.phases = ["train", "val"]
        self.class_num = class_num
        self.lr_scheduler_name = lr_scheduler_name
        self.betas_for_optim = args.betas_for_optim
        self.eps_for_optim = args.eps_for_optim
        self.device = device
        self.gpu_type = gpu_type

        self.cuda = torch.cuda.is_available()
        if self.cuda:
            # Fixed input sizes: let cudnn autotune the fastest kernels.
            cudnn.benchmark = True

        self.net = model
        self.criterion = criterion

        if optimizer == 'adam':
            self.optimizer = optim.Adam(self.net.parameters(), lr=self.lr,
                                        weight_decay=self.weight_decay, amsgrad=True)
        elif optimizer == 'sgd':
            self.optimizer = optim.SGD(self.net.parameters(), lr=self.lr,
                                       momentum=0.9, weight_decay=self.weight_decay)
        elif optimizer == 'radam':
            self.optimizer = RAdam(self.net.parameters(), lr=self.lr,
                                   weight_decay=self.weight_decay)
        elif optimizer == 'adabelief':
            self.optimizer = AdaBelief(self.net.parameters(), lr=self.lr,
                                       weight_decay=self.weight_decay,
                                       eps=self.eps_for_optim, betas=self.betas_for_optim,
                                       weight_decouple=True, rectify=True)
        elif optimizer == 'rangerqh':
            self.optimizer = RangerQH(self.net.parameters(), lr=self.lr,
                                      weight_decay=self.weight_decay,
                                      eps=self.eps_for_optim, betas=self.betas_for_optim)
        elif optimizer == 'rangerlars':
            self.optimizer = RangerLars(self.net.parameters(), lr=self.lr,
                                        weight_decay=self.weight_decay,
                                        eps=self.eps_for_optim, betas=self.betas_for_optim)
        else:
            # Previously an unknown name silently left self.optimizer unset and
            # crashed later with AttributeError; fail fast instead.
            raise ValueError(
                "Unsupported optimizer '%s'; expected one of adam, sgd, radam, "
                "adabelief, rangerqh, rangerlars." % optimizer)

        if self.lr_scheduler_name == 'ReduceLROnPlateau':
            # 'max' mode: it is stepped with validation top-1 accuracy (see start()).
            self.scheduler = ReduceLROnPlateau(self.optimizer, 'max', factor=args.scheduler_factor,
                                               min_lr=1e-06, patience=args.scheduler_patience, verbose=True)
        elif self.lr_scheduler_name == 'CosineAnnealingLR':
            self.scheduler = CosineAnnealingLR(optimizer=self.optimizer, eta_min=0.000001,
                                               T_max=self.num_epochs, verbose=True)
        else:
            self.scheduler = None

        self.dataloaders = dataloaders
        self.model_name = args.model
        self.ymap = ymap

        self.use_mixup = args.use_mixup
        if self.use_mixup:
            self.mixup_fn = Mixup(
                mixup_alpha=args.mixup_alpha, cutmix_alpha=args.cutmix_alpha, cutmix_minmax=None,
                prob=args.mix_prob, switch_prob=0.5, mode='batch',
                label_smoothing=0.0, num_classes=class_num)

        # Per-phase metric histories; one entry appended per epoch.
        self.losses = {phase: [] for phase in self.phases}
        self.acc1s = {phase: [] for phase in self.phases}
        self.acc2s = {phase: [] for phase in self.phases}
        self.auc_ovr = {phase: [] for phase in self.phases}
        self.auc_ovo = {phase: [] for phase in self.phases}

        # Best-so-far trackers used for checkpointing and early stopping.
        self.best_epoch = -1
        self.best_loss = float("inf")
        self.best_acc = 0
        self.best_auc_ovr = 0
        self.best_auc_ovo = 0

        self.init_indics = init_indics

        # Seed the monitored metric with the externally supplied starting value
        # (e.g. the best value from a checkpoint being resumed).
        if self.condition == 'loss':
            self.best_loss = self.init_indics
        elif self.condition == 'acc':
            self.best_acc = self.init_indics
        elif self.condition == 'auc_ovr':
            self.best_auc_ovr = self.init_indics
        elif self.condition == 'auc_ovo':
            # Bug fix: this branch used to overwrite best_auc_ovr instead.
            self.best_auc_ovo = self.init_indics

    def forward(self, images, targets):
        '''Run one forward pass and compute the loss under autocast (AMP).

        Args:
            images: input batch tensor.
            targets: list of target tensors; only targets[0] (class indices)
                is consumed here, but every element is moved to the device.

        Returns:
            (loss, outputs): scalar loss tensor and the raw network logits.
        '''
        if self.cuda:
            images = images.to(self.device)  # .cuda(non_blocking=True)
            for j in range(len(targets)):
                targets[j] = targets[j].to(self.device)  #.cuda(non_blocking=True)

        if self.use_mixup and self.net.training:
            # Mixup produces *soft* (float) label vectors; do NOT cast them to
            # long — that truncated the mixed probabilities and defeated mixup.
            images, targets = self.mixup_fn(images, targets[0])
        else:
            # one_hot already returns int64; the explicit cast keeps the
            # previous dtype contract for the criterion.
            targets = nn.functional.one_hot(targets[0], num_classes=self.class_num).long()
        with autocast():
            outputs = self.net(images)
            losses = self.criterion(outputs, targets)

        return losses, outputs

    def early_stop(self, epoch, cur_val_memo):
        '''Decide whether training should stop early.

        Stops only when the patience window has elapsed since the best epoch
        AND the current validation value of the monitored metric has not
        improved on the best one.

        Args:
            epoch (int): current epoch number.
            cur_val_memo (dict): current validation metrics with keys
                'loss', 'acc', 'auc_ovr', 'auc_ovo'.

        Returns:
            bool: True to stop training now.
        '''
        # Still inside the patience window: never stop.
        if epoch - self.best_epoch < self.patience:
            return False
        condition = self.condition.lower()
        if condition == 'loss':
            # Lower is better for loss.
            return cur_val_memo['loss'] > self.best_loss
        if condition == 'acc':
            return cur_val_memo['acc'] <= self.best_acc
        if condition == 'auc_ovr':
            return cur_val_memo['auc_ovr'] <= self.best_auc_ovr
        if condition == 'auc_ovo':
            return cur_val_memo['auc_ovo'] <= self.best_auc_ovo
        # Unknown condition: never early-stop.
        return False

    def update_metrics(self, loss, acc, auc_ovr, auc_ovo, epoch):
        '''Update the best-so-far trackers after a validation epoch.

        NOTE: only top-1 accuracy currently updates best_epoch; loss/AUC
        based selection is intentionally disabled.

        Returns:
            (is_best, best_score): whether this epoch became the best epoch,
            and the metric value that made it best (-1 if no improvement).
        '''
        best_score = -1
        if acc > self.best_acc:
            self.best_acc = acc
            if self.condition == 'acc':
                self.best_epoch = epoch
                best_score = acc

        return epoch == self.best_epoch, best_score

    def add_epoch_result(self, phase, epoch_loss, top1, top2):
        '''Append this epoch's metrics to the per-phase histories.

        In distributed mode (gpu_type == 2) the metric that drives model
        selection is averaged across ranks for the validation phase before
        being stored.

        Args:
            phase (str): 'train' or 'val'.
            epoch_loss (float): mean loss over the epoch's batches.
            top1, top2: AverageMeter instances holding top-1 / top-2 accuracy.

        Returns:
            The (possibly rank-reduced) epoch loss.
        '''
        if phase == 'train' or self.gpu_type != 2:
            self.losses[phase].append(epoch_loss)
            self.acc1s[phase].append(float(np.squeeze(top1.avg.cpu().numpy())))
            self.acc2s[phase].append(float(np.squeeze(top2.avg.cpu().numpy())))
            # AUC computation is currently disabled; keep zero placeholders so
            # the history lists stay index-aligned with losses/accuracies.
            self.auc_ovr[phase].append(0)
            self.auc_ovo[phase].append(0)
        else:
            # Distributed validation: reduce only the monitored metric.
            if self.condition == 'loss':
                epoch_loss = reduce_value(torch.tensor(epoch_loss).cuda(), average=True)
            self.losses[phase].append(epoch_loss)

            cur_acc1 = top1.avg.cpu().numpy()
            if self.condition == 'acc':
                cur_acc1 = reduce_value(torch.tensor(cur_acc1).cuda(), average=True)
            self.acc1s[phase].append(cur_acc1)

            self.acc2s[phase].append(float(np.squeeze(top2.avg.cpu().numpy())))
            self.auc_ovr[phase].append(0)
            self.auc_ovo[phase].append(0)
        return epoch_loss

    def iterate(self, epoch, num_epochs, phase, scaler):
        '''Run a single epoch over the given phase.

        Args:
            epoch (int): current epoch number (1-based).
            num_epochs (int): total epochs (logging only).
            phase (str): 'train' (with backprop) or 'val'.
            scaler (GradScaler): AMP gradient scaler shared across epochs.

        Returns:
            (epoch_loss, y_pred, y_true): mean loss over batches and the
            flat per-sample predicted / true class indices.
        '''
        if is_main_process():
            start = time.strftime("%H:%M:%S")
            logger.info(f"Starting epoch: {epoch}/{num_epochs} | phase: {phase} | ⏰: {start}")
        self.net.train(phase == "train")
        dataloader = self.dataloaders[phase]
        if phase == "train" and self.gpu_type == 2:
            # Reshuffle the distributed sampler so each epoch sees a new order.
            dataloader.batch_sampler.sampler.set_epoch(epoch)
        running_loss = 0.0
        top1 = AverageMeter('Acc@1', ":6.2f")
        top2 = AverageMeter('Acc@1', ":6.2f")
        y_pred = []
        y_true = []
        out_probs = None
        total_batches = 0
        # Only the main process shows a progress bar.
        if is_main_process():
            loader = enumerate(tqdm(dataloader))
        else:
            loader = enumerate(dataloader)
        for itr, (images, targets, _) in loader:
            total_batches += 1
            loss, outputs = self.forward(images, targets)
            target = targets[0].cpu()

            if phase == "train":
                scaler.scale(loss).backward()
                scaler.step(self.optimizer)
                scaler.update()

                self.optimizer.zero_grad()

            if self.gpu_type == 2:
                # Average the loss across DDP ranks for consistent logging.
                loss = reduce_value(loss, average=True)

            running_loss += loss.item()
            outputs = outputs.detach().cpu()

            acc1, acc2 = accuracy(outputs, target, topk=(1, 2))
            _, preds = outputs.float().topk(1, 1, True, True)
            preds = torch.squeeze(preds.t())
            y_pred.extend(preds.numpy())
            y_true.extend(target.numpy())
            top1.update(acc1[0], images.size(0))
            top2.update(acc2[0], images.size(0))
            # Accumulate softmax probabilities (kept for future AUC computation).
            if out_probs is None:
                out_probs = torch.softmax(outputs.float(), dim=1).numpy()
            else:
                out_probs = np.concatenate([out_probs, torch.softmax(outputs.float(), dim=1).numpy()], axis=0)

        if self.device != torch.device("cpu"):
            torch.cuda.synchronize(self.device)
        epoch_loss = running_loss / total_batches

        epoch_loss = self.add_epoch_result(phase, epoch_loss, top1, top2)

        return epoch_loss, y_pred, y_true

    def start(self):
        '''Run the full training loop.

        Each epoch: train, snapshot model state (main process), validate,
        step the LR scheduler, save latest/best checkpoints, and check the
        early-stopping criterion.

        Returns:
            float: the best top-1 accuracy observed.
        '''
        scaler = GradScaler()
        self.optimizer.zero_grad()
        for epoch in range(1, self.num_epochs + 1):
            self.iterate(epoch=epoch, num_epochs=self.num_epochs, phase="train", scaler=scaler)
            # Drop any cached classification vector so it is not serialized
            # into the checkpoint below.
            if 'classification_vector' in self.net.__dict__:
                del self.net.classification_vector
            if is_main_process():
                state = {
                    "epoch": epoch,
                    "best_loss": self.best_loss,
                    # DDP wraps the model in .module; unwrap before saving.
                    "state_dict": self.net.state_dict() if self.gpu_type != 2 else self.net.module.state_dict(),
                    "ymap": self.ymap,
                }
            with torch.no_grad():
                val_loss, y_pred, y_true = self.iterate(epoch=epoch, num_epochs=self.num_epochs, phase="val", scaler=scaler)
                if self.lr_scheduler_name == 'ReduceLROnPlateau':
                    # Plateau scheduler monitors validation top-1 accuracy ('max' mode).
                    self.scheduler.step(self.acc1s['val'][-1])
                elif self.lr_scheduler_name == 'CosineAnnealingLR':
                    self.scheduler.step()

            if is_main_process():
                torch.save(state, self.out_dir + "/%s_latest_epoch.pth" % self.model_name)
                if self.gpu_type != 2:
                    cm_analysis(y_true, y_pred, os.path.join(self.out_dir, 'confusion_matrix_latest.png'),
                                np.arange(len(self.ymap.keys())), ymap=self.ymap)

            if self.early_stop(epoch,
                {'loss': val_loss, 'acc': self.acc1s['val'][-1], 'auc_ovr': self.auc_ovr['val'][-1], 'auc_ovo': self.auc_ovo['val'][-1]}):
                if is_main_process():
                    logger.info('Early Stopping !!! Best Epoch is %d .' % self.best_epoch)
                break

            is_best_model, best_score = self.update_metrics(val_loss, self.acc1s['val'][-1], self.auc_ovr['val'][-1], self.auc_ovo['val'][-1], epoch)
            if is_best_model and is_main_process():
                torch.save(state, self.out_dir + "/%s_best_%s.pth" % (self.model_name, self.condition))
                logger.info('New optimal found (%s %.4f), state saved.' % (self.condition, best_score))

            if is_main_process():
                logger.info('Train Loss: %.4f, Acc-1: %.4f, Acc-2: %.4f, Auc-ovr: %.4f, Auc-ovo: %.4f.' % (
                    self.losses['train'][-1],
                    self.acc1s['train'][-1],
                    self.acc2s['train'][-1],
                    self.auc_ovr['train'][-1],
                    self.auc_ovo['train'][-1]
                    ))
                logger.info('Val Loss: %.4f, Acc-1: %.4f, Acc-2: %.4f, Auc-ovr: %.4f, Auc-ovo: %.4f.' % (
                    self.losses['val'][-1],
                    self.acc1s['val'][-1],
                    self.acc2s['val'][-1],
                    self.auc_ovr['val'][-1],
                    self.auc_ovo['val'][-1]
                    ))

                print()
        return self.best_acc
