import os
import oneflow as torch
import math
import time
import logging
from oasr.train.trainer import BaseTrainer
from oasr.train.utils import AverageMeter, Summary, map_to_cuda, AuxiliaryLossAverageMeter

logger = logging.getLogger(__name__)


class EpochTrainer(BaseTrainer):
    """Epoch-based trainer built on :class:`BaseTrainer`.

    Runs ``params['train']['epochs']`` epochs of training, optionally
    evaluates on a dev set after every epoch, and performs checkpointing,
    logging, and visualization only on rank 0.
    """

    def __init__(self, params, model, optimizer, scheduler, is_visual=False, expdir='./exp', ngpu=1,
                 parallel_mode='dp', local_rank=0, is_debug=False, keep_last_n_chkpt=30):
        """Initialize the trainer.

        All arguments are forwarded to :class:`BaseTrainer`; see its
        documentation for their meaning.
        """
        super().__init__(
            params, model, optimizer, scheduler, is_visual=is_visual, expdir=expdir,
            ngpu=ngpu, parallel_mode=parallel_mode, local_rank=local_rank, is_debug=is_debug,
            keep_last_n_chkpt=keep_last_n_chkpt)

        # Total number of epochs to run, taken from the training config.
        self.total_epochs = params['train']['epochs']

    def train(self, train_loader, dev_loader=None):
        """Run the full training loop over ``self.total_epochs`` epochs.

        Args:
            train_loader: loader wrapper exposing ``set_epoch(epoch)`` and a
                ``.loader`` attribute yielding ``(uttids, inputs, targets)``.
            dev_loader: optional validation loader wrapper; when given, the
                model is evaluated after every epoch and the checkpoint with
                the lowest dev loss is saved as ``model.best.pt``.
        """
        self.training_infos.append('Beginning to Train...')
        epochs = self.params['train']['epochs']
        TrainLossNote = Summary()
        DevLossNote = Summary()
        for epoch in range(self.from_epoch, self.total_epochs):

            # Propagate the epoch to the sampler/loader (reshuffling) and to
            # the model (epoch-dependent behavior); with DataParallel the
            # real model lives under .module.
            train_loader.set_epoch(epoch)
            if self.ngpu > 1:
                self.model.module.set_epoch(epoch)
            else:
                self.model.set_epoch(epoch)

            train_loss = self.train_one_epoch(epoch, train_loader.loader)
            TrainLossNote.update(epoch, train_loss)
            self.scheduler.epoch()
            self.global_epoch += 1

            if self.local_rank == 0:
                logger.info('-*Train-Epoch-%d/%d*-, AvgLoss:%.5f' % (epoch, epochs, train_loss))
                self.training_infos.append('-*Train-Epoch-%d/%d*-, AvgLoss:%.5f' % (epoch, epochs, train_loss))

                self.save_model(epoch)
                self.save_optimizer_state_dict()
                # auto clear the redundant checkpoints to save memory
                self.clear_checkpoint(epoch)

            if self.is_visual and self.local_rank == 0:
                self.visulizer.add_scalar('train_epoch_loss', train_loss, epoch)

            if dev_loader is not None:
                dev_loss = self.eval(dev_loader.loader)
                DevLossNote.update(epoch, dev_loss)
                if self.local_rank == 0:
                    logger.info('-*Eval-Epoch-%d/%d*-, AvgLoss:%.5f' % (epoch, epochs, dev_loss))
                    self.training_infos.append('-*Eval-Epoch-%d/%d*-, AvgLoss:%.5f' % (epoch, epochs, dev_loss))

                # BUGFIX: DevLossNote already contains this epoch's loss, so
                # when dev_loss IS the new best, DevLossNote.best()[1] equals
                # dev_loss and the old strict '<' never fired — model.best.pt
                # was never written. '<=' saves on every new (or tied) best.
                # (Assumes Summary.best() returns (epoch, min_loss) — holds
                # given how it is consumed in the summary section below.)
                if dev_loss <= DevLossNote.best()[1] and self.local_rank == 0:
                    self.save_model('model.best.pt')
                    logger.info('Update the best checkpoint!')

        self.optimizer.zero_grad()  # clean the residual grad

        if self.local_rank == 0:
            logger.info('Training Summary:')
            BEST_T_EPOCH, BEST_T_LOSS = TrainLossNote.best()
            logger.info('At the %d-st epoch of training, the model performs best (Loss:%.5f)!' % (BEST_T_EPOCH, BEST_T_LOSS))
            self.training_infos.append('At the %d-st epoch of training, the model performs best (Loss:%.5f)!' % (BEST_T_EPOCH, BEST_T_LOSS))

            if dev_loader is not None:
                BEST_E_EPOCH, BEST_E_LOSS = DevLossNote.best()
                logger.info('At the %d-st epoch of validation, the model performs best (Loss:%.5f)!' % (BEST_E_EPOCH, BEST_E_LOSS))
                self.training_infos.append('At the %d-st epoch of validation, the model performs best (Loss:%.5f)!' % (BEST_E_EPOCH, BEST_E_LOSS))

            if self.is_visual:
                self.visulizer.close()

    def train_one_epoch(self, epoch, train_loader):
        """Train for one epoch with gradient accumulation.

        Args:
            epoch: current epoch index (used only for logging).
            train_loader: iterable yielding ``(uttids, inputs, targets)``
                batches; ``inputs`` is a dict containing at least 'inputs'.

        Returns:
            The running mean loss (``self.mean_loss.mean()``) after the epoch.
        """
        self.model.train()
        batch_steps = len(train_loader)

        step_loss = AverageMeter()
        auxiliary_loss = AuxiliaryLossAverageMeter()
        span = 0
        total_examples = 0
        for step, (uttids, inputs, targets) in enumerate(train_loader):

            self.global_training_step += 1

            if self.ngpu > 0:
                inputs = map_to_cuda(inputs)
                targets = map_to_cuda(targets)

            total_examples += len(uttids)
            start = time.time()

            # loss: tensor
            # aux_loss: dict {loss1: value1, loss2: value2}
            self.model.forward_hook(self.scheduler.global_step, self.scheduler.global_epoch)
            loss, aux_loss = self.model(inputs, targets)

            # Scale by accum_steps so accumulated gradients average correctly.
            loss = torch.mean(loss) / self.accum_steps
            loss.backward()
            end = time.time()
            span += (end - start)

            if self.get_rank() == 0:
                step_loss.update(loss.item() * self.accum_steps, inputs['inputs'].size(0))
                auxiliary_loss.update(aux_loss, self.accum_steps, inputs['inputs'].size(0))

            if self.global_training_step % self.accum_steps == 0:
                if self.local_rank == 0:
                    self.mean_loss.update(step_loss.avg)

                grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_clip)

                if self.grad_noise > 0.0:
                    # Inject Gaussian gradient noise; skip params that never
                    # received a gradient this step (p.grad is None).
                    for p in self.model.parameters():
                        if p.requires_grad and p.grad is not None:
                            noise = torch.normal(0, self.grad_noise, p.grad.shape, device=loss.device)
                            p.grad += noise / self.accum_steps

                if math.isnan(grad_norm.numpy()):
                    # Consistency fix: use the module-level `logger`, not the
                    # bare `logging` root logger.
                    logger.warning('Grad norm is NAN. DO NOT UPDATE MODEL!')
                else:
                    self.scheduler.step()
                    self.optimizer.step()
                self.optimizer.zero_grad()

                if self.is_visual and self.local_rank == 0:
                    self.visulizer.add_scalar('train_loss', loss.item(), self.scheduler.global_step)
                    self.visulizer.add_scalar('lr', self.scheduler.lr, self.scheduler.global_step)

                if self.scheduler.global_step % self.log_interval == 0 and self.local_rank == 0:
                    process = (step + 1) / batch_steps * 100
                    # Guard against a zero-length span (very fast steps / coarse
                    # timer) when computing throughput.
                    examples_per_sec = total_examples / max(span, 1e-6)
                    print_info = "-Training-Epoch-%d(%.5f%%), Global Step:%d, lr:%.8f, Loss:%.5f, AvgLoss: %.5f, %sCoverNExamples: %d, Run Time:%.3f, Examples/Sec: %.3f" \
                        % (epoch, process, self.scheduler.global_step, self.scheduler.lr, step_loss.avg, self.mean_loss.mean(), auxiliary_loss.avg_infos, \
                           total_examples, span, examples_per_sec)
                    logger.info(print_info)

                    span = 0
                    total_examples = 0

                step_loss.reset()
                auxiliary_loss.reset()

            # Debug mode: bail out early after ~30 batches.
            if self.is_debug and step > 30:
                break

        return self.mean_loss.mean()

    def auto_set_start_point(self, model_chkpt_name):
        """Resume bookkeeping from a checkpoint file name.

        Expects a name like ``model.<epoch>.pt``; the epoch number is the
        second-to-last dot-separated field. Sets the next epoch to resume
        from and re-derives the learning rate via the scheduler.
        """
        epoch = int(model_chkpt_name.split('.')[-2])
        self.global_epoch = epoch + 1
        self.from_epoch = epoch + 1
        self.scheduler.global_epoch = epoch + 1
        self.scheduler.set_lr()
        logger.info('Set the from_epoch to %d!' % (epoch + 1))
