# system modules ...
import os

# torch modules ...
import torch
from torch.nn import Module
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader, Dataset
from torch.optim import Optimizer

# self modules ...
from utils.progressbar import ProgressBar


# <class AverageMeter/>
class AverageMeter(object):
    """Computes and stores the current value, running sum, count, and average.

    Attributes (kept non-public, accessed as ``_avg`` etc. by the trainer):
    - _val:  last value passed to update()
    - _vals: every value passed to update() since the last reset()
    - _sum:  weighted running sum (val * n accumulated)
    - _cnt:  total weight (n accumulated)
    - _avg:  _sum / _cnt
    """
    def __init__(self):
        # Delegate to reset() so the initial state and the post-reset state
        # cannot drift apart (previously the two bodies were duplicated).
        self.reset()

    def __repr__(self):
        return """AverageMeter(avg={}, sum={}, cnt={}, curr_val={})""".format(self._avg, self._sum, self._cnt, self._val)

    # __str__ previously duplicated __repr__ verbatim; delegate instead.
    def __str__(self):
        return self.__repr__()

    def reset(self):
        """Clear all accumulated statistics back to zero."""
        self._vals = []
        self._val = 0
        self._avg = 0
        self._sum = 0
        self._cnt = 0

    def update(self, val, n=1):
        """Record ``val`` with weight ``n`` (e.g. the batch size) and refresh the average."""
        self._val = val
        self._vals.append(val)
        self._sum += val * n
        self._cnt += n
        self._avg = self._sum / self._cnt
# </class AverageMeter>


# <class BaseTrainer/>
class BaseTrainer(object):
    """
    BaseTrainer
    -----------------------------------------------------------------------------------
    Drives a full train / validate (/ optional evaluate) loop: for each epoch it
    trains the network, validates it, logs losses and the learning rate to
    tensorboard, steps the LR scheduler, and checkpoints the model whenever the
    validation 'total_loss' improves. Subclasses must implement the
    ``__get_*_batch_preds__`` / ``__get_*_losses__`` / ``__eval__`` /
    ``__if_stop_trainning__`` hooks.

    - train_loader (torch.utils.data.DataLoader or torch.utils.data.Dataset): Train data loader.
    - val_loader (torch.utils.data.DataLoader or torch.utils.data.Dataset): Val data loader.
    - eval_loader (torch.utils.data.DataLoader or torch.utils.data.Dataset): Test data loader (optional).
    - network (torch.nn.Module): Network to train.
    - criterion (nn.Module): Loss calculation.
    - optimizer (torch.optim.Optimizer): Optimizer for train.
    - scheduler (torch.optim.lr_scheduler._LRScheduler): Method to adjust the learning rate based on the number of epochs.
    - epochs (int): Number of epochs for train.
    - device (string): Training device.
    - tbx_writer (tensorboardX.SummaryWriter or torch.utils.tensorboard.SummaryWriter in version 1.1): A summary writer of tensorboard.
    - checkpoints_folder (string): Path to store the checkpoints.
    """
    # <method: __init__>
    def __init__(
        self, 
        train_loader=None, 
        val_loader=None, 
        eval_loader=None, 
        network=None, 
        criterion=None, 
        optimizer=None, 
        scheduler=None, 
        epochs=1000, 
        device='cpu', 
        tbx_writer=None, 
        checkpoints_folder='./checkpoints/'
        ):
        # Validate the required collaborators up front.
        # NOTE(review): asserts are stripped under `python -O`; kept as-is so
        # callers that catch AssertionError keep working.
        assert(isinstance(train_loader, (Dataset, DataLoader)))
        assert(isinstance(val_loader, (Dataset, DataLoader)))
        assert(isinstance(eval_loader, (Dataset, DataLoader)) if eval_loader is not None else True)
        assert(isinstance(network, Module))
        assert(isinstance(criterion, Module))
        assert(isinstance(optimizer, Optimizer))
        assert(isinstance(tbx_writer, SummaryWriter))
        # store collaborators ...
        self._train_loader = train_loader
        self._val_loader = val_loader
        self._eval_loader = eval_loader
        self._network = network
        self._criterion = criterion
        self._optimizer = optimizer
        self._lr_scheduler = scheduler
        self._epochs = epochs
        self._device = device
        self._tbx_writer = tbx_writer
        self._checkpoints_folder = checkpoints_folder
        # training state ...
        self._best_val_loss = 1e32          # sentinel: any real val loss beats this
        self._if_trained = False
        self._if_save_checkpoint_every_epoch = False
        # move the network to the target device and make sure the checkpoint
        # folder exists before training starts ...
        if self._device:
            self._network = self._network.to(self._device)
        # end-if
        if not os.path.exists(self._checkpoints_folder):
            os.makedirs(self._checkpoints_folder)
        # end-if
    # <method: __init__>

    # <method: __get_val_batch_preds__>
    def __get_val_batch_preds__(self, batch_val_data, *args, **kwargs):
        """This method must be overloaded. Overload it to run the network on a
        validation batch and return its predictions."""
        raise NotImplementedError
    # <method: __get_val_batch_preds__>

    # <method: __get_val_losses__>
    def __get_val_losses__(self, batch_preds, batch_val_data, *args, **kwargs):
        """This method must be overloaded. Overload it to compute the validation
        losses; must return a dict of scalar tensors containing key 'total_loss'."""
        raise NotImplementedError
    # <method: __get_val_losses__>

    # <method: __get_train_batch_preds__>
    def __get_train_batch_preds__(self, batch_train_data, *args, **kwargs):
        """This method must be overloaded. Overload it to run the network on a
        train batch and return its predictions."""
        raise NotImplementedError
    # <method: __get_train_batch_preds__>

    # <method: __get_train_losses__>
    def __get_train_losses__(self, batch_preds, batch_train_data, *args, **kwargs):
        """This method must be overloaded. Overload it to compute the training
        losses; must return a dict of scalar tensors containing key 'total_loss'."""
        raise NotImplementedError
    # <method: __get_train_losses__>

    # <method: __eval__>
    def __eval__(self, epoch, *args, **kwargs):
        """This method must be overloaded. Called once per epoch when an
        eval_loader was supplied (network is already in eval() mode)."""
        raise NotImplementedError
    # <method: __eval__>

    # <method: __if_stop_trainning__>
    def __if_stop_trainning__(self, *args, **kwargs):
        """This method must be overloaded. Return True to stop training early."""
        raise NotImplementedError
    # <method: __if_stop_trainning__>

    # <method: __val_an_epoch__>
    def __val_an_epoch__(self, epoch, *args, **kwargs):
        """Run one validation epoch; returns a dict of AverageMeter objects
        (always containing key 'total_loss'). May be overloaded."""
        losses_val = {'total_loss': AverageMeter()}
        pbar = ProgressBar(Title = "Val "+str(epoch), Total = len(self._val_loader))
        with torch.no_grad():
            for batch_idx, batch_data in enumerate(self._val_loader):
                if not isinstance(batch_data, (tuple, list, dict)):
                    raise RuntimeError("Unavailable batch_data in enumerate(self._val_loader).")
                # end-if
                batch_preds = self.__get_val_batch_preds__(batch_data, *args, **kwargs)
                losses_dict = self.__get_val_losses__(batch_preds, batch_data, *args, **kwargs)
                if 'total_loss' not in losses_dict:
                    raise RuntimeError("self.__get_val_losses__ must return a dict that contained a key-loss-pair using 'total_loss' as a key.")
                # end-if
                # lazily create a meter for every loss key the subclass reports ...
                for key in losses_dict:
                    if key not in losses_val:
                        losses_val[key] = AverageMeter()
                    # end-if
                # end-for
                pbar_postfix_str = ""
                for key in losses_dict:
                    # BUGFIX: weight by the VAL loader's batch size (was
                    # self._train_loader.batch_size, which skews the running
                    # average whenever the two loaders differ).
                    losses_val[key].update(losses_dict[key].item(), self._val_loader.batch_size)
                    pbar_postfix_str += "[{0}: {1:.3f}] ".format(key, losses_val[key]._avg)
                # end-for
                pbar.set_postfix_str(pbar_postfix_str)
                pbar.update()
            # end-for
        pbar.close()
        return losses_val
    # <method: __val_an_epoch__>

    # <method: __train_an_epoch__>
    def __train_an_epoch__(self, epoch, *args, **kwargs):
        """Run one training epoch (forward, backward, optimizer step per batch);
        returns a dict of AverageMeter objects (always containing key
        'total_loss'). May be overloaded."""
        losses_train = {'total_loss': AverageMeter()}
        pbar = ProgressBar(Title = "Train "+str(epoch), Total = len(self._train_loader))
        for batch_idx, batch_data in enumerate(self._train_loader):
            if not isinstance(batch_data, (tuple, list, dict)):
                raise RuntimeError("Unavailable batch_data in enumerate(self._train_loader).")
            # end-if
            self._optimizer.zero_grad()
            batch_preds = self.__get_train_batch_preds__(batch_data, *args, **kwargs)
            losses_dict = self.__get_train_losses__(batch_preds, batch_data, *args, **kwargs)
            if 'total_loss' not in losses_dict:
                raise RuntimeError("self.__get_train_losses__ must return a dict that contained a key-loss-pair using 'total_loss' as a key.")
            # end-if
            losses_dict['total_loss'].backward()
            self._optimizer.step()
            # lazily create a meter for every loss key the subclass reports ...
            for key in losses_dict:
                if key not in losses_train:
                    losses_train[key] = AverageMeter()
                # end-if
            # end-for
            pbar_postfix_str = ""
            for key in losses_dict:
                # NOTE(review): non-positive loss values are skipped here,
                # presumably to ignore inactive loss terms — confirm intent.
                if losses_dict[key].item() > 0:
                    losses_train[key].update(losses_dict[key].item(), self._train_loader.batch_size)
                # end-if
                pbar_postfix_str += "[{0}: {1:.3f}] ".format(key, losses_train[key]._avg)
            # end-for
            pbar.set_postfix_str(pbar_postfix_str)
            pbar.update()
        # end-for
        pbar.close()
        return losses_train
    # <method: __train_an_epoch__>

    # <method: __get_best_trained_network__>
    def __get_best_trained_network__(self, state_dict_name = 'best_valloss_checkpoint.pth.tar'):
        """Load the named checkpoint back into self._network, move it to the
        trainer's device, and return it."""
        sd = torch.load(os.path.join( self._checkpoints_folder, state_dict_name ))
        self._network.to( sd['device'] )
        # BUGFIX: __call__ saves the weights under key 'state_dict', but this
        # previously read sd['net'] and always raised KeyError. Fall back to
        # the legacy 'net' key in case older checkpoints used it.
        self._network.load_state_dict( sd['state_dict'] if 'state_dict' in sd else sd['net'] )
        if self._device:
            self._network.to(self._device)
        # end-if
        return self._network
    # <method: __get_best_trained_network__>

    # <method: __run__>
    def __call__(self, *args, **kwargs):
        """Run the whole training schedule; returns True once finished.

        The loop runs epochs+1 iterations: the final iteration only evaluates
        the (fully trained) network and then breaks before training again.
        """
        for epoch in range(self._epochs + 1):    # loop over the dataset multiple times
            # 
            # eval ...
            if self._eval_loader is not None:
                self._network.eval()
                self.__eval__(epoch, *args, **kwargs)
            # end-if
            if epoch == self._epochs:
                break
            # end-if
            if self.__if_stop_trainning__(*args, **kwargs):
                print("Train stop.")
                break
            # end-if
            # 
            # log the current learning rate of every param group ...
            for param in self._optimizer.param_groups:
                lr=param["lr"]
                print("""lr(epoch-{}) : {}""".format(epoch, lr))
                if self._tbx_writer is not None:
                    self._tbx_writer.add_scalar("Learning Rate", lr, epoch)
                # end-if
            # end-for
            # train ...
            self._network.train()
            losses_train = self.__train_an_epoch__(epoch, *args, **kwargs)
            if 'total_loss' not in losses_train:
                raise RuntimeError("'total_loss' not in losses_train.keys()")
            # end-if
            # collapse each meter to its average (a plain float) and log it ...
            for loss_key in list(losses_train.keys()):
                if isinstance(losses_train[loss_key], AverageMeter):
                    losses_train[loss_key] = losses_train[loss_key]._avg
                # end-if
                if self._tbx_writer is not None:
                    self._tbx_writer.add_scalar("""Train<{}>""".format(loss_key), losses_train[loss_key], epoch)
                # end-if
            # end-for
            # 
            # val ...
            self._network.eval()
            with torch.no_grad():
                losses_val = self.__val_an_epoch__(epoch, *args, **kwargs)
                if 'total_loss' not in losses_val:
                    raise RuntimeError("'total_loss' not in losses_val.keys()")
                # end-if
                for loss_key in list(losses_val.keys()):
                    if isinstance(losses_val[loss_key], AverageMeter):
                        losses_val[loss_key] = losses_val[loss_key]._avg
                    # end-if
                    if self._tbx_writer is not None:
                        self._tbx_writer.add_scalar("""Val<{}>""".format(loss_key), losses_val[loss_key], epoch)
                    # end-if
                # end-for
            # end-with
            # update learning rate scheduler ... 
            if self._lr_scheduler:
                # ReduceLROnPlateau steps on a metric, every other scheduler on
                # the epoch index (passing epoch is deprecated in newer torch,
                # kept for exact backward-compatible behavior).
                if self._lr_scheduler.__class__.__name__ == 'ReduceLROnPlateau':
                    self._lr_scheduler.step(losses_val['total_loss'])
                else:
                    self._lr_scheduler.step(epoch)
                # end-if
            # end-if
            # 
            # save params and best-val-loss-checkpoint ...
            # Move all weights to CPU so checkpoints load on any machine.
            net_sd = self._network.state_dict()
            for key in list(net_sd.keys()):
                net_sd[key] = net_sd[key].to('cpu')
            # end-for
            # NOTE(review): the whole Optimizer object is pickled here (not its
            # state_dict); kept for checkpoint-format compatibility.
            checkpoint_dict = {
                'device': 'cpu',
                'state_dict': net_sd,
                'losses_train': losses_train,
                'losses_val': losses_val,
                'optimizer':self._optimizer
                }
            if self._if_save_checkpoint_every_epoch:
                torch.save(checkpoint_dict, os.path.join( self._checkpoints_folder, """checkpoint-{}.pth.tar""".format(epoch) ))
            # end-if
            if losses_val['total_loss'] < self._best_val_loss:
                torch.save(checkpoint_dict, os.path.join( self._checkpoints_folder, "best_valloss_checkpoint.pth.tar" ))
                self._best_val_loss = losses_val['total_loss']
            # end-if
        # end-for
        self._if_trained = True
        return self._if_trained
    # <method: __run__>
# </class BaseTrainer>