# from msilib.schema import Condition
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR
import numpy as np
import pandas as pd
from collections import Counter, OrderedDict
import time
from tqdm import tqdm
import torch.backends.cudnn as cudnn
from torch.cuda.amp import autocast, GradScaler
from utils.metrics import SegMeter, epoch_log
from utils.trainer.distributed_utils import reduce_value, is_main_process
from models.optimizer import RAdam
from models.scheduler import CosineAnnealingScheduler
from adabelief_pytorch import AdaBelief
# import nni
import logging
logger = logging.getLogger('SegModel.Train')


class Seg_Trainer(object):
    '''Training / validation driver for a segmentation model.

    Handles optimizer and LR-scheduler setup, optional mixed precision (AMP),
    gradient accumulation, per-epoch checkpointing, metric bookkeeping and
    early stopping on a configurable condition ('loss', 'dice' or 'iou').
    ``gpu_type == 2`` marks DistributedDataParallel runs, where the condition
    metric is additionally averaged across processes.
    '''

    def __init__(self, dataloaders, model, device, gpu_type, criterion, out_dir, args):
        '''
        Args:
            dataloaders: dict mapping phase name ('train' / 'val') to a DataLoader.
            model: the segmentation network (DDP-wrapped when gpu_type == 2).
            device: torch device to run on.
            gpu_type: parallelism mode; 2 means DistributedDataParallel.
            criterion: loss function called as criterion(outputs, targets).
            out_dir: directory where checkpoints are written.
            args: namespace providing batch_size, lr, epochs, use_amp,
                optimizer, weight_decay, scheduler_factor, patience,
                model, backbone and condition.
        '''
        self.batch_size = args.batch_size
        # Gradient accumulation is currently disabled: one optimizer step
        # per batch.  Kept as a knob so iterate() stays accumulation-aware.
        self.accumulation_steps = 1
        self.out_dir = out_dir
        self.lr = args.lr
        self.num_epochs = args.epochs
        self.use_amp = args.use_amp
        self.device = device
        self.gpu_type = gpu_type

        self.phases = ["train", "val"]
        self.cuda = torch.cuda.is_available()
        if self.cuda:
            # Let cuDNN benchmark conv algorithms (fastest for fixed shapes).
            cudnn.benchmark = True

        self.net = model
        self.criterion = criterion
        if args.optimizer == 'adam':
            self.optimizer = optim.Adam(self.net.parameters(), lr=self.lr, weight_decay=args.weight_decay, amsgrad=True)
        elif args.optimizer == 'sgd':
            self.optimizer = optim.SGD(self.net.parameters(), lr=self.lr, momentum=0.9, weight_decay=args.weight_decay)
        elif args.optimizer == 'radam':
            self.optimizer = RAdam(self.net.parameters(), lr=self.lr, weight_decay=args.weight_decay)
        elif args.optimizer == 'adabelief':
            self.optimizer = AdaBelief(self.net.parameters(), lr=self.lr, weight_decay=args.weight_decay, eps=1e-16, betas=(0.9, 0.999), weight_decouple=True, rectify=True)
        else:
            raise ValueError('Could Only Support optimizer in [Adam, SGD, RAdam, AdaBelief].')
        # LR drops when the monitored quantity stops improving; 'max' mode
        # because start() feeds it the validation IoU (higher is better).
        self.scheduler = ReduceLROnPlateau(self.optimizer, 'max', factor=args.scheduler_factor, min_lr=1e-06, patience=args.patience, verbose=True)

        self.dataloaders = dataloaders
        # Per-phase history of epoch-level metrics (plain floats).
        self.losses = {phase: [] for phase in self.phases}
        self.iou_scores = {phase: [] for phase in self.phases}
        self.dice_scores = {phase: [] for phase in self.phases}
        self.model_name = args.model + '_' + args.backbone

        # Early-stop / model-selection condition: 'loss', 'dice' or 'iou'.
        self.condition = args.condition.lower()
        self.patience = args.patience
        self.best_epoch = -1
        self.best_loss = float("inf")
        self.best_dice = 0
        self.best_iou = 0

    def early_stop(self, epoch, cur_val_memo):
        '''Return True when training should stop.

        Stops only when at least ``patience`` epochs have elapsed since the
        best epoch AND the current validation value of the configured
        condition metric is worse than the best seen so far.

        Args:
            epoch: current (1-based) epoch number.
            cur_val_memo: dict with this epoch's 'loss', 'dice', 'iou'.
        '''
        if epoch - self.best_epoch < self.patience:
            return False
        if self.condition == 'loss':
            return cur_val_memo['loss'] > self.best_loss
        elif self.condition == 'dice':
            return cur_val_memo['dice'] < self.best_dice
        elif self.condition == 'iou':
            return cur_val_memo['iou'] < self.best_iou
        # Unknown condition: never early-stop.
        return False

    def update_metrics(self, loss, dice_score, iou_score, epoch):
        '''Fold this epoch's validation results into the running bests.

        All three bests are tracked independently, but ``best_epoch`` only
        advances when the configured condition metric improves.

        Returns:
            (is_best_epoch, best_score): is_best_epoch is True when the
            condition metric improved this epoch; best_score is its new best
            value, or -1 when it did not improve.
        '''
        best_score = -1
        if loss < self.best_loss:
            self.best_loss = loss
            if self.condition == 'loss':
                self.best_epoch = epoch
                best_score = loss
        if dice_score > self.best_dice:
            self.best_dice = dice_score
            if self.condition == 'dice':
                self.best_epoch = epoch
                best_score = dice_score
        if iou_score > self.best_iou:
            self.best_iou = iou_score
            if self.condition == 'iou':
                self.best_epoch = epoch
                best_score = iou_score
        return epoch == self.best_epoch, best_score

    def forward(self, images, targets):
        '''Run one forward pass and compute the loss under optional autocast.

        Returns:
            (loss, outputs) — both still on the compute device.
        '''
        if self.cuda:
            images = images.to(self.device)
            targets = targets.to(self.device)

        # autocast is a no-op when AMP is disabled.
        with autocast(enabled=self.use_amp):
            outputs = self.net(images)
            loss = self.criterion(outputs, targets)

        return loss, outputs

    def add_epoch_result(self, phase, epoch_loss, dice, iou):
        '''Record epoch metrics; in DDP, average the condition metric across ranks.

        Only the metric used for early stopping / model selection is reduced,
        so every rank agrees on the stopping decision without paying for extra
        communication on the other metrics (those stay rank-local).

        Returns the (possibly reduced) epoch loss as a float.
        '''
        if phase == 'train' or self.gpu_type != 2:
            self.losses[phase].append(float(epoch_loss))
            self.dice_scores[phase].append(float(dice))
            self.iou_scores[phase].append(float(iou))
        else:
            # Cast reduced values back to float so the history lists hold a
            # consistent type (the original appended raw CUDA tensors here).
            if self.condition == 'loss':
                epoch_loss = float(reduce_value(torch.tensor(epoch_loss).cuda(), average=True))
            self.losses[phase].append(float(epoch_loss))

            if self.condition == 'dice':
                dice = float(reduce_value(torch.tensor(dice).cuda(), average=True))
            self.dice_scores[phase].append(float(dice))

            if self.condition == 'iou':
                iou = float(reduce_value(torch.tensor(iou).cuda(), average=True))
            self.iou_scores[phase].append(float(iou))

        return epoch_loss

    def iterate(self, epoch, phase, scaler):
        '''Run one epoch of the given phase; return the mean epoch loss.

        Args:
            epoch: current epoch number (used for logging / DDP reshuffling).
            phase: 'train' (with backward/step) or 'val' (forward only; the
                caller wraps this in torch.no_grad()).
            scaler: GradScaler used when AMP is enabled.
        '''
        meter = SegMeter(phase, epoch)

        if is_main_process():
            start = time.strftime("%H:%M:%S")
            logger.info(f"Starting epoch: {epoch} | phase: {phase} | ⏰: {start}")
        # train(True) for 'train'; train(False) == eval() for 'val'.
        self.net.train(phase == "train")
        dataloader = self.dataloaders[phase]
        if phase == "train" and self.gpu_type == 2:
            # Reseed the distributed sampler so each epoch gets a new shuffle.
            dataloader.batch_sampler.sampler.set_epoch(epoch)
        running_loss = 0.0
        total_batches = len(dataloader)
        self.optimizer.zero_grad()

        # Progress bar only on the main process to keep DDP logs clean.
        loader = tqdm(dataloader) if is_main_process() else dataloader

        for itr, (images, targets) in enumerate(loader):
            loss, outputs = self.forward(images, targets)
            # Scale down so accumulated gradients sum to the true batch loss.
            loss = loss / self.accumulation_steps
            if phase == "train":
                if self.use_amp:
                    scaler.scale(loss).backward()
                    if (itr + 1) % self.accumulation_steps == 0:
                        scaler.step(self.optimizer)
                        scaler.update()
                        self.optimizer.zero_grad()
                else:
                    loss.backward()
                    if (itr + 1) % self.accumulation_steps == 0:
                        self.optimizer.step()
                        self.optimizer.zero_grad()
            if self.gpu_type == 2:
                # Average the batch loss across ranks for consistent logging.
                loss = reduce_value(loss, average=True)
            running_loss += loss.item()
            # Metrics are computed on CPU copies; gradients are not needed.
            # NOTE(review): targets stay on the device while outputs move to
            # CPU — presumably SegMeter.update handles that; confirm.
            outputs = outputs.detach().cpu()
            meter.update(targets, outputs)

        if self.gpu_type == 2:
            torch.cuda.synchronize(self.device)
        # Undo the accumulation scaling to report the mean per-batch loss.
        epoch_loss = (running_loss * self.accumulation_steps) / total_batches
        dice, iou = epoch_log(phase, epoch, epoch_loss, meter)

        epoch_loss = self.add_epoch_result(phase, epoch_loss, dice, iou)
        return float(epoch_loss)

    def start(self):
        '''Run the full loop: train + validate each epoch, step the scheduler
        on validation IoU, checkpoint (latest + best), and early-stop.'''
        # The scaler is inert when AMP is disabled.
        scaler = GradScaler(enabled=self.use_amp)
        for epoch in range(1, self.num_epochs + 1):
            self.iterate(epoch, "train", scaler)
            with torch.no_grad():
                val_loss = self.iterate(epoch, "val", scaler)
                # ReduceLROnPlateau monitors validation IoU ('max' mode).
                self.scheduler.step(self.iou_scores['val'][-1])

            val_dice = self.dice_scores['val'][-1]
            val_iou = self.iou_scores['val'][-1]

            # Update the running bests BEFORE building the checkpoint so the
            # saved best_* fields reflect this epoch's results (previously
            # they were one epoch stale).  The early-stop decision is
            # unchanged by this reordering: an improvement resets best_epoch
            # to the current epoch, which keeps early_stop() False exactly as
            # the old "current beats best" comparison did.
            is_best_epoch, best_score = self.update_metrics(val_loss, val_dice, val_iou, epoch)

            if is_main_process():
                state = {
                    "epoch": epoch,
                    "best_loss": self.best_loss,
                    "best_dice": self.best_dice,
                    "best_iou": self.best_iou,
                    # Unwrap the DDP module so the checkpoint loads without DDP.
                    "state_dict": self.net.module.state_dict() if (self.gpu_type == 2) else self.net.state_dict(),
                }
                torch.save(state, self.out_dir + "/%s_latest_epoch.pth"%self.model_name)
                if is_best_epoch:
                    torch.save(state, self.out_dir + "/%s_best_%s.pth"%(self.model_name, self.condition))
                    logger.info('New optimal found (%s %.4f), state saved.' %( self.condition, best_score))

            if self.early_stop(epoch,
                {'loss': val_loss, 'dice': val_dice, 'iou': val_iou}):
                if is_main_process():
                    logger.info('Early Stopping !!! Best Epoch is %d .' % self.best_epoch)
                break

            if is_main_process():
                logger.info('Train Loss: %.4f, Dice: %.4f, IOU: %.4f .' % (
                    self.losses['train'][-1],
                    self.dice_scores['train'][-1],
                    self.iou_scores['train'][-1]
                    ))
                logger.info('Val Loss: %.4f, Dice: %.4f, IOU: %.4f .' % (
                    self.losses['val'][-1],
                    self.dice_scores['val'][-1],
                    self.iou_scores['val'][-1]
                    ))

                print()
