import os
import glob
import oneflow as torch
import math
import time
import logging
from oasr.train.trainer import BaseTrainer
from oasr.train.utils import AverageMeter, map_to_cuda, AuxiliaryLossAverageMeter


logger = logging.getLogger(__name__)


class StepTrainer(BaseTrainer):
    """Step-based trainer: trains for a fixed number of optimizer steps
    (rather than epochs), restarting the data loader whenever it is
    exhausted. Supports gradient accumulation over ``accum_steps``
    micro-batches per optimizer step.

    Requires a step-wise LR scheduler (``scheduler.stepwise`` must be True).
    """

    def __init__(self, params, model, optimizer, scheduler, is_visual=False, expdir='./exp', ngpu='1',
                 parallel_mode='dp', local_rank=0, is_debug=False, keep_last_n_chkpt=30):
        super().__init__(params, model, optimizer, scheduler, is_visual=is_visual, expdir=expdir,
                         ngpu=ngpu, parallel_mode=parallel_mode, local_rank=local_rank, is_debug=is_debug,
                         keep_last_n_chkpt=keep_last_n_chkpt)

        # This trainer counts optimizer steps, so a per-step LR schedule is mandatory.
        assert scheduler.stepwise is True

        self.total_steps = params['train']['max_steps']
        # If eval_freq is absent, choose a value larger than max_steps so the
        # modulo check below never fires (evaluation effectively disabled).
        self.eval_freq = params['train'].get('eval_freq', self.total_steps + 1)
        self.save_freq = params['train']['save_freq']

    def train(self, train_loader, dev_loader=None):
        """Run the step-based training loop.

        Args:
            train_loader: wrapper exposing ``.loader``, an iterable yielding
                ``(uttids, inputs, targets)`` batches; restarted on exhaustion.
            dev_loader: optional validation loader, evaluated every
                ``self.eval_freq`` optimizer steps.
        """
        self.model.train()
        self.training_infos.append('Begining to Train...')

        step_loss = AverageMeter()
        auxiliary_loss = AuxiliaryLossAverageMeter()

        span = 0.0            # wall-clock seconds in forward/backward since last log
        total_examples = 0    # utterances processed since last log
        batch_iter = iter(train_loader.loader)
        logger.info('[DataLoader]-Training-Epoch-%d-DataLoader ReStart!' % self.global_epoch)

        # One iteration == one micro-batch; an optimizer update happens every
        # `accum_steps` micro-batches (gradient accumulation).
        for step in range(self.from_step, self.total_steps * self.accum_steps):
            try:
                uttids, inputs, targets = next(batch_iter)
            except StopIteration:
                # Loader exhausted: count one epoch and restart it.
                self.global_epoch += 1
                logger.info('[DataLoader]-Training-Epoch-%d-DataLoader ReStart!' % self.global_epoch)
                batch_iter = iter(train_loader.loader)
                uttids, inputs, targets = next(batch_iter)

            if self.ngpu > 0:
                inputs = map_to_cuda(inputs)
                targets = map_to_cuda(targets)

            total_examples += len(uttids)
            start = time.time()

            # loss: scalar tensor
            # aux_loss: dict {loss1: value1, loss2: value2}
            self.model.forward_hook(self.scheduler.global_step, self.scheduler.global_epoch)
            loss, aux_loss = self.model(inputs, targets)

            # Divide by accum_steps so accumulated gradients average correctly.
            loss = torch.mean(loss) / self.accum_steps
            loss.backward()
            span += time.time() - start

            if self.get_rank() == 0:
                # Un-scale so the reported loss matches the true per-batch loss.
                step_loss.update(loss.item() * self.accum_steps, inputs['inputs'].size(0))
                auxiliary_loss.update(aux_loss, self.accum_steps, inputs['inputs'].size(0))

            if (step + 1) % self.accum_steps == 0:
                if self.local_rank == 0:
                    self.mean_loss.update(step_loss.avg)

                grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_clip)

                if self.grad_noise > 0.0:
                    # Gaussian gradient noise, scaled like the accumulated grads.
                    for p in self.model.parameters():
                        if p.requires_grad:
                            noise = torch.normal(0, self.grad_noise, p.grad.shape, device=loss.device)
                            p.grad += noise / self.accum_steps

                if math.isnan(grad_norm.numpy()):
                    # Skip the update entirely on a NaN gradient norm.
                    logger.warning('Grad norm is NAN. DO NOT UPDATE MODEL!')
                else:
                    self.scheduler.step()
                    self.optimizer.step()
                self.optimizer.zero_grad()

                if self.is_visual and self.local_rank == 0:
                    self.visulizer.add_scalar('train_loss', loss.item(), self.scheduler.global_step)
                    self.visulizer.add_scalar('lr', self.scheduler.lr, self.scheduler.global_step)

                if self.scheduler.global_step % self.log_interval == 0 and self.local_rank == 0:
                    # Guard against a zero span on coarse clocks.
                    rate = total_examples / span if span > 0 else float('inf')
                    print_info = "-Training-Step: %d/%d, lr:%.8f, Loss:%.5f, AvgLoss: %.5f, CoverNExamples: %d, Run Time:%.3f, Examples/Sec: %.3f" \
                        % (self.scheduler.global_step, self.total_steps, self.scheduler.lr,
                           step_loss.avg, self.mean_loss.mean(), total_examples, span, rate)
                    # Auxiliary losses are appended once here (previously they
                    # were duplicated inside the format string as well).
                    print_info += auxiliary_loss.avg_infos
                    logger.info(print_info)

                    span = 0.0
                    total_examples = 0

                step_loss.reset()
                auxiliary_loss.reset()

                if self.local_rank == 0 and self.scheduler.global_step % self.save_freq == 0:
                    self.save_model(save_name='model.step.%d.pt' % self.scheduler.global_step)
                    self.save_optimizer_state_dict()
                    # auto clear the redundant checkpoints to save memory
                    self.clear_step_checkpoint()

                if dev_loader is not None and self.scheduler.global_step % self.eval_freq == 0:
                    dev_loss = self.eval(dev_loader)
                    if self.local_rank == 0:
                        # Report optimizer steps (not micro-batch steps) so the
                        # counter matches total_steps' units.
                        logger.info('-*Eval-Step-%d/%d*-, AvgLoss:%.5f'
                                    % (self.scheduler.global_step, self.total_steps, dev_loss))

        self.optimizer.zero_grad()  # clean the residual grad

        if self.is_visual:
            self.visulizer.close()
        logger.info('The training process is FINISHED!')

    def clear_step_checkpoint(self):
        """Delete all but the newest ``keep_last_n_chkpt`` step checkpoints in expdir.

        Assumes every matching file is named 'model.step.<N>.pt' — TODO confirm
        no other 'model*pt' files can appear in expdir.
        """
        chkpts = glob.glob(os.path.join(self.expdir, 'model*pt'))

        if len(chkpts) > self.keep_last_n_chkpt:
            # Extract <N> from 'model.step.<N>.pt' and sort newest-first.
            step_ids = sorted((int(path.split('.')[-2]) for path in chkpts), reverse=True)
            for old_id in step_ids[self.keep_last_n_chkpt:]:
                save_name = 'model.step.%d.pt' % old_id
                os.remove(os.path.join(self.expdir, save_name))
                logger.info('Delete the checkpoint %s to save memory!' % os.path.join(self.expdir, save_name))
        else:
            logger.info('There are no any checkpoints to be cleaned!')

    def auto_set_start_point(self, model_chkpt_name):
        """Resume bookkeeping from a checkpoint named 'model.step.<N>.pt'.

        Rescales the saved optimizer-step count <N> into micro-batch steps
        (times accum_steps) for the training loop, and restores the
        scheduler's step counter and learning rate.
        """
        step = int(model_chkpt_name.split('.')[-2])
        self.global_training_step = step * self.accum_steps + 1
        self.from_step = step * self.accum_steps
        self.scheduler.global_step = step + 1
        self.scheduler.set_lr()
        logger.info('Set the from_step to %d!' % (step + 1))
