import os
import oneflow as torch
import logging
import oneflow.distributed as dist
from oasr.train.utils import MeanLoss, map_to_cuda#Visulizer, 

logger = logging.getLogger(__name__)


class BaseTrainer(object):
    """Base trainer: wraps a model/optimizer/scheduler triple and provides
    multi-GPU setup (DP/DDP), checkpoint save/load/cleanup, and evaluation.

    Subclasses are expected to implement the actual training loop and
    ``auto_set_start_point``.
    """

    def __init__(self, params, model, optimizer, scheduler, is_visual=False,
                 expdir='./', ngpu=1, parallel_mode='dp', local_rank=0, is_debug=False,
                 keep_last_n_chkpt=30, from_epoch=0, from_step=0) -> None:
        """
        Args:
            params: nested config dict; reads ``params['train'][...]`` keys
                (``accum_steps``, ``grad_noise``, ``clip_grad``, ``load_model``).
            model: network to train; must expose ``save_checkpoint(params, path)``
                and (for partial loads) ``frontend``/``encoder``/``decoder``/``assistor``.
            optimizer: training optimizer (state_dict is checkpointed).
            scheduler: LR scheduler; ``global_step``/``global_epoch`` are checkpointed.
            is_visual: reserved for the (currently disabled) visualizer.
            expdir: directory where checkpoints are written.
            ngpu: number of GPUs; 0 means CPU training.
            parallel_mode: 'dp' or 'ddp' when ``ngpu > 1``.
            local_rank: process rank used for DDP initialization.
            is_debug: debug flag stored for subclasses.
            keep_last_n_chkpt: number of most recent epoch checkpoints to retain.
            from_epoch: epoch to resume from (recorded for subclasses).
            from_step: step to resume from (recorded for subclasses).
        """
        super().__init__()

        self.params = params
        self.model = model
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.expdir = expdir
        self.is_visual = is_visual
        self.is_debug = is_debug

        self.ngpu = ngpu
        self.parallel_mode = parallel_mode
        self.local_rank = local_rank
        self.from_epoch = from_epoch
        self.from_step = from_step

        self.accum_steps = params['train']['accum_steps']
        self.grad_noise = params['train']['grad_noise']

        self.grad_clip = params['train']['clip_grad']
        self.global_training_step = 0
        self.global_epoch = 0
        self.log_interval = 10
        self.mean_loss = MeanLoss()
        # Human-readable notes accumulated during setup; each entry is a string.
        self.training_infos = ['**Training INFO**']

        self.keep_last_n_chkpt = keep_last_n_chkpt

        # if self.is_visual and local_rank == 0:
        #     self.visulizer = Visulizer(log_dir=os.path.join(expdir, 'visual'))

        if self.params['train']['load_model']:
            self.load_model(self.params['train']['load_model'])
            logger.info('Load the checkpoint from %s' % self.params['train']['load_model'])
            self.training_infos.append('Load the checkpoint from %s' % self.params['train']['load_model'])

        if self.ngpu > 1:
            if self.parallel_mode == 'ddp':
                # `dist` is already imported at module level; no local re-import needed.
                dist.init_process_group(backend="nccl", init_method='env://',
                                        rank=local_rank, world_size=self.ngpu)
                self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[local_rank], output_device=local_rank)
                logger.info('[DDP] Use %d gpus for training!' % self.ngpu)

            elif self.parallel_mode == 'dp':
                self.model = torch.nn.DataParallel(self.model, device_ids=[i for i in range(self.ngpu)])
                logger.info('[DP] Use %d gpus for training!' % self.ngpu)

            else:
                # Use the module logger for consistency with the rest of this class.
                logger.warning('Please chose one of dp, ddp and hvd for parallel computing!')

        elif self.ngpu == 1:
            logger.info('Use only 1 gpu for training!')
        else:
            logger.info('Train the model in CPU!')

        if self.grad_noise > 0.0:
            logger.info('Apply Grad Noise mean = 0.0, std = %f' % self.grad_noise)
            # Bugfix: append the string itself, not a one-element list,
            # matching how every other entry of training_infos is stored.
            self.training_infos.append('Apply Grad Noise mean = 0.0, std = %f' % self.grad_noise)

    def eval(self, dev_loader):
        """Run one full pass over ``dev_loader`` and return the mean loss.

        Gradients are disabled for the forward passes. Returns 0.0 for an
        empty loader (the original raised NameError in that case).
        """
        self.model.eval()
        eval_loss = 0.0
        num_batches = 0
        with torch.no_grad():  # evaluation never needs autograd state
            for _, batch in (pair for _, pair in []) if False else ((b for _, b in dev_loader)):
                pass
        # NOTE: loop rewritten below without the dead construct above.
        eval_loss = 0.0
        num_batches = 0
        with torch.no_grad():
            for _, batch in dev_loader:
                if self.ngpu > 0:
                    batch = map_to_cuda(batch)
                loss = self.model(**batch)
                eval_loss += loss.item()
                num_batches += 1

        return eval_loss / num_batches if num_batches else 0.0

    def save_model(self, epoch=None, save_name=None):
        """Save a model checkpoint into ``self.expdir``.

        Args:
            epoch: epoch number used to build the default filename; required
                when ``save_name`` is None.
            save_name: explicit checkpoint filename; overrides ``epoch``.
        """
        if save_name is None:
            save_name = 'model.epoch.%d.pt' % epoch

        if self.ngpu > 1:
            # DP/DDP wraps the model; the real module lives in `.module`.
            self.model.module.save_checkpoint(self.params, os.path.join(self.expdir, save_name))
        else:
            self.model.save_checkpoint(self.params, os.path.join(self.expdir, save_name))
        logger.info('Save the model checkpoint as %s!' % save_name)

    def save_optimizer_state_dict(self, save_name=None):
        """Persist optimizer state plus scheduler progress counters.

        The default filename 'lastest_optimizer.pt' is kept as-is (even though
        misspelled) because external code may load it by this exact name.
        """
        if save_name is None:
            save_name = 'lastest_optimizer.pt'

        checkpoint = {
            'epoch': self.global_epoch,
            'global_step': self.scheduler.global_step,
            'global_epoch': self.scheduler.global_epoch,
            'optim': self.optimizer.state_dict()
        }

        torch.save(checkpoint, os.path.join(self.expdir, save_name))
        logger.info('Save the optimizer checkpoint!')

    def load_optimizer_state_dict(self, checkpoint):
        """Restore optimizer state from ``checkpoint`` (path).

        Scheduler counters are intentionally NOT restored here (see the
        commented lines); callers set them via ``auto_set_start_point``.
        """
        ochkpt = torch.load(checkpoint)
        #self.scheduler.global_step = ochkpt['global_step']
        # self.scheduler.global_epoch = ochkpt['global_epoch']
        self.optimizer.load_state_dict(ochkpt['optim'])
        logger.info('Load Optimizer State Dict from %s.' % checkpoint)

    def clear_checkpoint(self, epoch):
        """Delete the checkpoint that fell out of the retention window.

        After finishing ``epoch``, removes ``model.epoch.<epoch - N>.pt``
        where N is ``keep_last_n_chkpt``, keeping only the most recent N.
        """
        if epoch + 1 > self.keep_last_n_chkpt:
            save_name = 'model.epoch.%d.pt' % (epoch - self.keep_last_n_chkpt)
            if os.path.isfile(os.path.join(self.expdir, save_name)):
                os.remove(os.path.join(self.expdir, save_name))
                logger.info('Delete the checkpoint %s to save memory!' % os.path.join(self.expdir, save_name))
        else:
            logger.info('There are no any checkpoints to be cleaned!')

    def load_model(self, checkpoint):
        """Load model weights from ``checkpoint``.

        Accepts either a full 'model' state dict or per-component dicts
        ('frontend'/'encoder'/'decoder'/'ctc'); the 'ctc' weights go into
        ``self.model.assistor``.
        """
        chkpt = torch.load(checkpoint)
        if 'model' in chkpt:
            self.model.load_state_dict(chkpt['model'])
        else:
            if 'frontend' in chkpt:
                self.model.frontend.load_state_dict(chkpt['frontend'])
            if 'encoder' in chkpt:
                self.model.encoder.load_state_dict(chkpt['encoder'])
            if 'decoder' in chkpt:
                self.model.decoder.load_state_dict(chkpt['decoder'])
            if 'ctc' in chkpt:
                self.model.assistor.load_state_dict(chkpt['ctc'])
        logger.info('Load Saved Model State Dict from  %s.' % checkpoint)

    def auto_set_start_point(self, model_chkpt_name):
        """Resume-point setup; must be provided by subclasses."""
        raise NotImplementedError

    def get_rank(self):
        """Return this process's distributed rank (0 outside DDP)."""
        if self.parallel_mode == 'ddp':
            return dist.get_rank()
        else:
            return 0
