import oneflow as torch
import logging
import oneflow.nn as nn
import logging
from oasr.model.base import BaseModel
from oasr.frontend import BuildFrontEnd
from oasr.encoder import BuildEncoder
from oasr.decoder.nart import NonAutoregressiveDecoder as BuildDecoder
from oasr.model.ctc import CTCAssistor
from oasr.module.loss import MaskedLabelSmoothingLoss, KLDivergenceLoss
from oasr.data import MASK, PAD
from oasr.decoder.utils import get_transformer_decoder_mask as get_autoregressive_mask

logger = logging.getLogger(__name__)


class NonAutoregressiveModel(BaseModel):
    """Non-autoregressive (NAR) ASR model: frontend -> encoder -> NAR decoder.

    Training mode is selected via ``params['mode']``:
      - ``'mask-rate-ascend'``: target tokens are randomly replaced by MASK
        with a probability that grows linearly over training (per step or
        per epoch, see ``mask_ascend_stepwise``); the loss is computed only
        on the masked positions.
      - ``'two-step'``: trains a NAR pass (fully masked decoder input) and an
        AR pass (causal decoder mask) jointly, mixed by ``ar_weight``, with
        optional KL distillation from the AR to the NAR logits. With
        ``iterative_training`` the two passes alternate by step instead.
      - ``'full-mask'``: the decoder input is always fully masked.
      - ``'spike-triggered'``: reserved, not implemented.

    An optional CTC head over the encoder output is mixed into the loss with
    weight ``params['ctc_weight']``.
    """

    def __init__(self, params):
        """Build frontend/encoder/decoder and configure the training mode.

        Args:
            params: configuration dict; see the class docstring for the
                mode-specific keys it is expected to contain.
        """
        super(NonAutoregressiveModel, self).__init__()

        self.model_type = 'nart'
        self.params = params
        self.ctc_weight = params['ctc_weight']
        self.vector_as_input = params['decoder']['vector_as_input']

        self.frontend = BuildFrontEnd[params['frontend_type']](**params['frontend'])
        logger.info('Build a %s frontend!', params['frontend_type'])
        self.encoder = BuildEncoder[params['encoder_type']](**params['encoder'])
        logger.info('Build a %s encoder!', params['encoder_type'])
        self.decoder = BuildDecoder[params['decoder_type']](**params['decoder'])
        logger.info('Build a non-autoregressive %s decoder!', params['decoder_type'])

        # Label-smoothed cross entropy over decoder logits.
        self.crit = MaskedLabelSmoothingLoss(
            size=params['decoder']['vocab_size'],
            smoothing=params['smoothing']
        )

        if self.ctc_weight > 0.0:
            # Auxiliary CTC head on the encoder output.
            self.assistor = CTCAssistor(
                hidden_size=params['encoder']['d_model'],
                vocab_size=params['decoder']['vocab_size'])
            logger.info('Apply a CTC Assistor with weight %.2f', self.ctc_weight)

        self.mode = params['mode']  # mask-rate-ascend / two-step / full-mask / spike-triggered
        assert self.mode in ['mask-rate-ascend', 'two-step', 'full-mask', 'spike-triggered']

        # Iterative two-step training alternates NAR / AR updates by step.
        self.iterative_training = params.get('iterative_training', False)
        self.iter_freq = params.get('iter_freq', 0)
        if self.iterative_training and self.iter_freq <= 0:
            # Fail fast: IterativeTwoStepDecForward computes `step % iter_freq`,
            # which would raise ZeroDivisionError on the first forward otherwise.
            raise ValueError(
                'iter_freq must be a positive integer when iterative_training is enabled.')
        self.training_step = 0

        if self.mode == 'mask-rate-ascend':
            self.mask_init_prob = params['mask_init_prob']
            self.mask_ascend_ratio = params['mask_ascend_ratio']
            # True: ascend per training step; False: ascend per epoch.
            self.mask_ascend_stepwise = params['mask_ascend_stepwise']
            self.mask_prob = self.mask_init_prob
        elif self.mode == 'two-step':
            # When shared, the same decoder serves both NAR and AR passes.
            self.shared_decoder = params.get('shared_decoder', True)

            if self.shared_decoder:
                logger.info('Share the NAR Decoder And AR Decoder!')
            else:
                self.ar_decoder = BuildDecoder[params['decoder_type']](**params['decoder'])
                logger.info('Build an autoregressive %s decoder!', params['decoder_type'])

            if not self.iterative_training:
                # Joint training: loss = ar_weight * AR + (1 - ar_weight) * NAR.
                self.ar_weight = params.get('ar_weight', 0.5)
                logger.info('Apply Two Step Training with AR Weight %.2f.', self.ar_weight)
                self.distill_weight = params.get('distill_weight', 0.0)
                if self.distill_weight > 0.0:
                    self.kl_crit = KLDivergenceLoss()
                    logger.info('Apply KLDivergence (weight %.2f) to distill information from AR to NAR.', self.distill_weight)

    def forward_hook(self, step, epoch, **kwargs):
        """Update schedule-dependent state before a training step.

        Records the global step and, in 'mask-rate-ascend' mode, advances the
        mask probability linearly — driven by `step` when
        ``mask_ascend_stepwise`` is set, otherwise by `epoch`.
        """
        self.training_step = step
        if self.mode == 'mask-rate-ascend':
            progress = step if self.mask_ascend_stepwise else epoch
            self.mask_prob = self.mask_init_prob + self.mask_ascend_ratio * progress
            logger.debug('Set the mask prob to %.6f', self.mask_prob)
        return

    def FullMaskDecForward(self, targets, memory, memory_mask):
        """NAR pass with a fully masked decoder input; loss on all positions.

        `targets` includes a leading start token, which is stripped
        (``targets[:, 1:]``) for both the mask template and the loss target.
        """
        mask_in = torch.ones_like(targets[:, 1:]) * MASK
        logits, _ = self.decoder(mask_in, memory, memory_mask)

        return self.crit(logits, targets[:, 1:])

    def MaskRateAscendDecForward(self, targets, memory, memory_mask):
        """Randomly mask targets at the current `mask_prob`; loss on masked slots.

        The `~mask` passed to the criterion excludes the unmasked (still
        visible) positions from the loss.
        """
        targets_in = targets[:, 1:].clone()
        mask = torch.rand_like(targets_in.float()) < self.mask_prob
        targets_in.masked_fill_(mask, MASK)

        logits, _ = self.decoder(targets_in, memory, memory_mask)
        return self.crit(logits, targets[:, 1:], ~mask)

    def IterativeTwoStepDecForward(self, targets, memory, memory_mask):
        """Alternate NAR and AR updates: NAR every `iter_freq`-th step, AR otherwise."""
        target_out = targets[:, 1:]
        if self.training_step % self.iter_freq == 0:
            # NAR step: fully masked input, predict everything.
            mask_in = torch.ones_like(target_out) * MASK
            nar_logits, _ = self.decoder(mask_in, memory, memory_mask)
            loss = self.crit(nar_logits, target_out)
        else:
            # AR step: teacher forcing with a causal (autoregressive) mask.
            targets_in = targets[:, :-1]
            ar_mask = get_autoregressive_mask(targets_in)
            decoder = self.decoder if self.shared_decoder else self.ar_decoder
            ar_logits, _ = decoder(targets_in, memory, memory_mask, dec_mask=ar_mask)
            loss = self.crit(ar_logits, target_out)
        return loss

    def TwoStepDecForward(self, targets, memory, memory_mask):
        """Joint NAR + AR training pass with optional AR->NAR KL distillation.

        Skips the NAR pass when ``ar_weight == 1.0`` and the AR pass when
        ``ar_weight == 0.0``; distillation requires both passes to have run.
        """
        target_out = targets[:, 1:]
        mask_in = torch.ones_like(target_out) * MASK

        if self.ar_weight < 1.0:
            nar_logits, _ = self.decoder(mask_in, memory, memory_mask)
            loss_nar = self.crit(nar_logits, target_out)
        else:
            loss_nar = 0.0

        if self.ar_weight > 0.0:
            targets_in = targets[:, :-1]
            ar_mask = get_autoregressive_mask(targets_in)
            decoder = self.decoder if self.shared_decoder else self.ar_decoder
            ar_logits, _ = decoder(targets_in, memory, memory_mask, dec_mask=ar_mask)
            loss_ar = self.crit(ar_logits, target_out)
        else:
            loss_ar = 0.0

        loss = self.ar_weight * loss_ar + (1 - self.ar_weight) * loss_nar

        # Distill the AR distribution into the NAR one; only meaningful when
        # both passes produced logits (0 < ar_weight < 1). Padding is excluded.
        if self.distill_weight > 0 and 0.0 < self.ar_weight < 1.0:
            loss_kl = self.kl_crit(nar_logits, ar_logits, target_out == PAD)
            loss += self.distill_weight * loss_kl

        return loss

    def SpikeTriggeredDecForward(self, targets, memory, memory_mask):
        """Reserved training mode; not implemented yet."""
        raise NotImplementedError

    def forward(self, inputs, targets):
        """Compute the training loss for a batch.

        Args:
            inputs: dict with 'inputs' (acoustic features) and 'mask'.
            targets: dict with 'targets' (token ids, start token included)
                and 'targets_length'.

        Returns:
            (loss, metrics) where metrics is {'CTCLoss': float} when the CTC
            assistor is active, else None.
        """
        enc_inputs = inputs['inputs']
        enc_mask = inputs['mask']

        truth = targets['targets']
        truth_length = targets['targets_length']

        # 1. forward encoder
        enc_inputs, enc_mask = self.frontend(enc_inputs, enc_mask)
        memory, memory_mask, _ = self.encoder(enc_inputs, enc_mask)

        # 2. forward decoder (dispatch on training mode)
        if self.mode == 'mask-rate-ascend':
            loss = self.MaskRateAscendDecForward(truth, memory, memory_mask)
        elif self.mode == 'two-step' and not self.iterative_training:
            loss = self.TwoStepDecForward(truth, memory, memory_mask)
        elif self.mode == 'two-step' and self.iterative_training:
            loss = self.IterativeTwoStepDecForward(truth, memory, memory_mask)
        elif self.mode == 'full-mask':
            loss = self.FullMaskDecForward(truth, memory, memory_mask)
        else:
            raise NotImplementedError

        if self.ctc_weight > 0:
            # Use the mask returned by the encoder: CTC lengths must describe
            # `memory`, which the encoder may have produced under a different
            # mask than the frontend's `enc_mask`.
            loss_ctc = self.compute_ctc_loss(memory, memory_mask, truth[:, 1:], truth_length)
            return (1 - self.ctc_weight) * loss + self.ctc_weight * loss_ctc, {'CTCLoss': loss_ctc.item()}
        else:
            return loss, None

    def compute_ctc_loss(self, memory, memory_mask, targets_out, targets_length):
        """CTC loss of the assistor head; frame lengths come from `memory_mask`."""
        memory_length = torch.sum(memory_mask.squeeze(1), dim=-1)
        loss_ctc = self.assistor(memory, memory_length, targets_out, targets_length)
        return loss_ctc

    def save_checkpoint(self, params, name):
        """Serialize the config and model weights to `name`."""
        checkpoint = {
            'params': params,
            'model': self.state_dict()
            }

        torch.save(checkpoint, name)

    def set_epoch(self, epoch):
        """Per-epoch hook; currently log-only.

        The actual mask-probability update happens in `forward_hook`; this
        merely reports the current value in 'mask-rate-ascend' mode.
        """
        if self.mode == 'mask-rate-ascend':
            logger.info('Set the mask prob to %.6f', self.mask_prob)

