import oneflow as torch
import logging
import oneflow.nn as nn
import oneflow.nn.functional as F
from oasr.model.base import BaseModel
from oasr.frontend import BuildFrontEnd
from oasr.encoder import BuildEncoder as TransducerEncoder
from oasr.decoder.transducer import TransducerDecoder
from oasr.module.dropout import TimeDropout
from oasr.module.loss import LabelSmoothingLoss, CumulativeCrossEntropyLoss
from oasr.data import BLK, PAD

logger = logging.getLogger(__name__)

class JointNet(nn.Module):
    """Transducer joint network.

    Projects encoder and decoder states into a shared space, combines them
    additively through a tanh, and emits vocabulary logits.
    """

    def __init__(self, enc_state_dim, dec_state_dim, project_size, vocab_size, joint_norm=False):
        super(JointNet, self).__init__()

        # One projection per input stream into the shared joint space.
        self.enc_project_layer = nn.Linear(enc_state_dim, project_size)
        self.dec_project_layer = nn.Linear(dec_state_dim, project_size)

        self.joint_norm = joint_norm
        if self.joint_norm:
            # Optional LayerNorm applied to each projected stream.
            self.enc_norm = nn.LayerNorm(project_size)
            self.dec_norm = nn.LayerNorm(project_size)
            logger.info('Apply Layer Norm to the joint net!')

        self.output_layer = nn.Linear(project_size, vocab_size)

    def _project(self, enc_state, dec_state):
        # Shared projection (+ optional normalization) used by both the
        # full-lattice training path and the step-wise inference path.
        enc_state = self.enc_project_layer(enc_state)
        dec_state = self.dec_project_layer(dec_state)
        if self.joint_norm:
            enc_state = self.enc_norm(enc_state)
            dec_state = self.dec_norm(dec_state)
        return enc_state, dec_state

    def forward(self, enc_state, dec_state):
        """Full-lattice joint over every (t, u) pair.

        enc_state: (B, T, enc_state_dim), dec_state: (B, U, dec_state_dim);
        returns logits of shape (B, T, U, vocab_size).
        """
        enc_state, dec_state = self._project(enc_state, dec_state)

        t = enc_state.size(1)
        u = dec_state.size(1)

        # Tile both streams across the (T, U) grid before combining.
        enc_state = enc_state.unsqueeze(2).repeat([1, 1, u, 1])
        dec_state = dec_state.unsqueeze(1).repeat([1, t, 1, 1])

        return self.joint(enc_state, dec_state)

    def inference(self, enc_state, dec_state):
        """Single-step joint; both inputs must carry exactly one time step."""
        assert enc_state.dim() == 3 and enc_state.size(1) == 1
        assert dec_state.dim() == 3 and dec_state.size(1) == 1

        enc_state, dec_state = self._project(enc_state, dec_state)
        return self.joint(enc_state, dec_state)

    def joint(self, enc_state, dec_state):
        # Additive combination -> tanh -> output projection to vocab logits.
        return self.output_layer(torch.tanh(enc_state + dec_state))


class TransducerModel(BaseModel):
    """RNN-Transducer ASR model: frontend -> encoder for acoustics, a label
    decoder, and a JointNet producing per-(t, u) vocabulary logits.

    NOTE(review): `forward` currently optimizes a CTC loss computed on the
    *raw time-major inputs* while the joint-net log-probs are computed but
    unused — this looks like placeholder/debug code standing in for the
    declared warp-rnnt loss; confirm before relying on training behavior.
    """

    def __init__(self, params):
        """Build all sub-modules from a nested `params` dict with
        'frontend'/'encoder'/'decoder'/'joint' sub-configs plus optional
        training options (lookahead/lookback, lm_ce_weight, am_state_dropout).
        """
        super(TransducerModel, self).__init__()

        self.model_type = 'transducer'
        # Only the warp-rnnt loss type is currently supported.
        self.rnnt_loss_type = params.get('rnnt_loss_type', 'warprnnt')
        assert self.rnnt_loss_type in ['warprnnt']
        logger.info('Apply %s as RNNT Loss Function!' % self.rnnt_loss_type)

        self.frontend = BuildFrontEnd[params['frontend_type']](**params['frontend'])
        logger.info('Build a %s frontend!' % params['frontend_type'])

        self.encoder = TransducerEncoder[params['encoder_type']](**params['encoder'])
        logger.info('Build a %s encoder!' % params['encoder_type'])

        self.decoder = TransducerDecoder[params['decoder_type']](**params['decoder'])
        logger.info('Build a %s decoder!' % params['decoder_type'])

        self.joint = JointNet(
            enc_state_dim=self.encoder.output_size,
            dec_state_dim=self.decoder.output_size,
            project_size=params['joint']['project_size'],
            vocab_size=params['decoder']['vocab_size'],
            joint_norm=params['joint']['joint_norm']
        )

        # Optional 1-D conv mixing a window of encoder frames
        # (lookback_steps past + current + lookahead_steps future).
        if 'lookahead_steps' in params or 'lookback_steps' in params:
            self.apply_look_back_ahead = True
            self.lookahead_steps = params.get('lookahead_steps', 0)
            self.lookback_steps = params.get('lookback_steps', 0)
            logger.info('[Transducer] Apply %d lookahead steps and %d lookback steps' % (self.lookahead_steps, self.lookback_steps))
            self.lookahead_conv = nn.Conv1d(
                    in_channels=self.encoder.output_size,
                    out_channels=self.encoder.output_size,
                    kernel_size=self.lookback_steps + self.lookahead_steps + 1,
                    padding=0, stride=1, bias=False,
                    #groups=self.encoder.output_size
            )
        else:
            self.apply_look_back_ahead = False

        # Optional auxiliary LM cross-entropy task on the decoder states.
        self.lm_ce_weight = params.get('lm_ce_weight', 0.0)
        if self.lm_ce_weight > 0.0:
            self.lm_project_layer = nn.Linear(self.decoder.output_size, params['decoder']['vocab_size'])
            # Fixed typo 'Wegiht' -> 'Weight' in the log message.
            logger.info('Apply a LM Task (Weight: %.2f) for assisting the training with!' % self.lm_ce_weight)

            self.lm_ce_crit = LabelSmoothingLoss(
                params['decoder']['vocab_size'], smoothing=0.1
            )

        self.am_state_dropout = params.get('am_state_dropout', 0.0)
        # Fixed malformed format spec '%1f' (width-1 float) -> '%.1f'.
        logger.info('Set the am_state_dropout to %.1f' % self.am_state_dropout)
        self.am_dropout = TimeDropout(p=self.am_state_dropout)

    def forward(self, inputs, targets):
        """Compute the training loss for one batch.

        inputs: dict with 'inputs' (acoustic features) and 'mask'.
        targets: dict with 'targets' (token ids incl. leading/trailing
                 specials) and 'targets_length'.
        Returns (loss, aux_loss) where aux_loss is None or a dict of scalars.
        """
        enc_inputs = inputs['inputs']
        enc_mask = inputs['mask']
        # NOTE(review): `a` is the raw inputs made time-major; it is the
        # tensor the CTC loss below is applied to — not the joint-net
        # outputs. Presumably a temporary stand-in; confirm.
        a = enc_inputs.transpose(1, 0)
        a.requires_grad_(True)
        enc_states, enc_mask = self.encode(enc_inputs, enc_mask)
        targets_in = targets['targets'][:, :-1].clone()  # drop last token for decoder input

        if self.decoder.model_type == 'rnn':
            dec_states, _ = self.decoder(targets_in, None)
        else:
            dec_states = self.decoder(targets_in)
        logits = self.joint(enc_states, dec_states)

        # NOTE(review): computed but never used by any loss term below.
        log_probs = F.log_softmax(logits, dim=-1)

        targets_out = targets['targets'][:, 1:-1].clone()  # strip first and last tokens
        time_lens = torch.sum(enc_mask.squeeze(1), dim=-1).int()
        label_lens = targets['targets_length'].add(-1).int()  # renamed from misspelled 'lable_lens'

        loss = 0
        aux_loss = {}

        if self.lm_ce_weight:
            lm_ce_loss, _ = self.lm_ce_task(targets['targets'][:, 1:], dec_states)
            loss += self.lm_ce_weight * lm_ce_loss
            aux_loss['LM-CELoss'] = lm_ce_loss.item()
            aux_loss['LM-PPL'] = 2 ** lm_ce_loss.item()

        loss_mean = torch.nn.CTCLoss(blank=0, reduction='mean')
        loss_rnnt = loss_mean(a, targets_out.int(), time_lens, label_lens)
        loss += loss_rnnt

        return loss, None if len(aux_loss) == 0 else aux_loss

    def save_checkpoint(self, params, name):
        """Serialize the config and full state dict to file `name`."""
        checkpoint = {
            'params': params,
            'model': self.state_dict(),
        }

        torch.save(checkpoint, name)

    def encode(self, enc_inputs, enc_mask):
        """Run frontend + encoder; optionally mix frames with the
        lookahead/lookback conv. Returns (enc_states, enc_mask)."""
        enc_inputs, enc_mask = self.frontend(enc_inputs, enc_mask)
        enc_states, enc_mask, _ = self.encoder(enc_inputs, enc_mask)

        if self.apply_look_back_ahead:
            # Zero-pad the time axis so the conv output keeps length T.
            enc_states = F.pad(enc_states, pad=(0, 0, self.lookback_steps, self.lookahead_steps), value=0.0)
            enc_states = enc_states.transpose(1, 2)  # (B, C, T) for Conv1d
            enc_states = self.lookahead_conv(enc_states)
            enc_states = enc_states.transpose(1, 2)  # back to (B, T, C)

        return enc_states, enc_mask

    def decode(self, tokens, enc_states, hidden=None):
        """One decoding step: joint the decoder states for `tokens` with
        `enc_states`; returns (log-probs, updated rnn hidden or None)."""
        if self.decoder.model_type == 'rnn':
            dec_states, hidden = self.decoder(tokens, hidden)
        else:
            dec_states = self.decoder(tokens)
        logits = self.joint(enc_states, dec_states)
        return F.log_softmax(logits, dim=-1), hidden

    def lm_ce_task(self, target_out, dec_states):
        """Auxiliary LM task: project decoder states to vocab logits and
        score them with the label-smoothing criterion."""
        ce_logits = self.lm_project_layer(dec_states)
        ce_loss = self.lm_ce_crit(ce_logits, target_out)
        return ce_loss, ce_logits

    def ctc_task(self, enc_states, enc_length, targets_out, targets_length):
        # NOTE(review): self.ctc_assistor is never created in __init__ —
        # calling this raises AttributeError; confirm whether it is set up
        # elsewhere or this method is dead code.
        ctc_logits = self.ctc_assistor.compute_logits(enc_states)
        ctc_loss = self.ctc_assistor.compute_loss(ctc_logits, enc_length, targets_out, targets_length)
        return ctc_logits, ctc_loss

    def load_encoder(self, checkpoint):
        """Load only the encoder weights from a checkpoint dict."""
        self.encoder.load_state_dict(checkpoint['encoder'])

    def load_model(self, chkpt):
        """Load encoder/decoder/joint (and, if present, the lookahead conv)
        weights from a checkpoint dict."""
        self.encoder.load_state_dict(chkpt['encoder'])
        self.decoder.load_state_dict(chkpt['decoder'])
        self.joint.load_state_dict(chkpt['joint'])

        if 'look_ahead_conv' in chkpt:
            self.lookahead_conv.load_state_dict(chkpt['look_ahead_conv'])


