import oneflow as flow
import logging
import numpy as np
from oasr.recognize.base import Recognizer
from oasr.data import EOS, BOS, BLK

logger = logging.getLogger(__name__)


class TransformerRecognizer(Recognizer):
    """Batched beam-search decoder for a Transformer-based ASR model.

    Optionally fuses an external language model (``lm_weight``) and a CTC
    assistor head (``ctc_weight``) into the beam scores during search.
    """

    def __init__(self, model, lm=None, lm_weight=0.1, ctc_weight=0.0, beam_width=5, nbest=1,
                 max_len=50, idx2unit=None, penalty=0.0, lamda=5, ngpu=1, sort_by_avg_score=False):
        # Args:
        #   model: end-to-end model exposing `encoder`, `decoder.recognize` and,
        #       when ctc_weight > 0, `assistor.inference`.
        #   lm: optional external language model consumed via `self.lm_decode`.
        #   lm_weight: shallow-fusion weight for LM log-probs.
        #   ctc_weight: fusion weight for CTC prefix scores.
        #   beam_width: number of beams kept per utterance.
        #   nbest: number of hypotheses returned per utterance.
        #   max_len: maximum number of decoding steps.
        #   idx2unit: index-to-token map used by `nbest_translate` (base class).
        #   penalty, lamda: length-penalty parameters,
        #       lp = ((lamda + len) / (lamda + 1)) ** penalty.
        #   ngpu: forwarded to the base Recognizer.
        #   sort_by_avg_score: prune beams by length-averaged score instead of
        #       the raw cumulative score.
        super(TransformerRecognizer, self).__init__(model, idx2unit, lm, lm_weight, ngpu)

        self.beam_width = beam_width
        self.max_len = max_len

        self.penalty = penalty
        self.lamda = lamda
        self.nbest = nbest

        self.ctc_weight = ctc_weight
        self.lm_weight = lm_weight
        self.sort_by_avg_score = sort_by_avg_score

        if self.ctc_weight > 0.0:
            # Deferred import: the CTC scorer dependency is only needed when
            # joint CTC decoding is actually enabled.
            from sweet import CTCAssitorScorer
            self.ctc_scorer = CTCAssitorScorer(blank=BLK, eos=EOS, include_eos=True)
            logger.info('Apply joint ctc decodeing with CTC weight %.2f' % self.ctc_weight)

        if self.lm_weight > 0.0:
            logger.info('Apply language model with weight %.2f' % self.lm_weight)

        logger.info('Transformer Inference with penalty %.2f and lamda %d' % (self.penalty, self.lamda))

        if self.sort_by_avg_score:
            logger.info('Sort the beams by avg scores during inference!')

    def recognize(self, inputs, inputs_length):
        """Beam-search decode a batch of utterances.

        Args:
            inputs: batched acoustic features for the encoder.
            inputs_length: valid lengths of `inputs`.
        Returns:
            Tuple of (transcripts, nbest_scores): transcripts from
            `self.nbest_translate`, and a [batch, nbest] tensor of
            (optionally length-normalized) scores sorted best-first.
        """

        enc_states, enc_masks = self.encode(inputs, inputs_length)

        b, t, v = enc_states.size()

        # Tile encoder states/masks beam_width times along the batch axis so
        # every beam decodes against its own copy: [b*beam, t, v].
        beam_enc_states = enc_states.unsqueeze(1).repeat(
            [1, self.beam_width, 1, 1]).view(b * self.beam_width, t, v)
        beam_enc_mask = enc_masks.unsqueeze(1).repeat(
            [1, self.beam_width, 1, 1]).view(b * self.beam_width, 1, t)

        # Every beam starts from a single BOS token; lengths start at zero.
        preds = flow.ones([b * self.beam_width, 1],
                           dtype=flow.long, device=enc_states.device) * BOS
        preds_len = flow.zeros([b * self.beam_width], dtype=flow.long, device=enc_states.device)

        # Only the first beam of each utterance starts alive (score 0.0); the
        # rest start at -inf so step 1 expands exactly one hypothesis.
        scores = flow.FloatTensor(
            [0.0] + [-float('inf')] * (self.beam_width - 1))
        scores = scores.to(enc_states.device).repeat([b]).unsqueeze(1)
        ending_flag = flow.zeros_like(scores, dtype=flow.bool)

        hidden = None
        if self.ctc_weight > 0.0:
            # Compute frame-level CTC log-probs once, then tile them per beam
            # the same way as the encoder states.
            ctc_log_probs, ctc_seq_lens = self.model.assistor.inference(enc_states, enc_masks)
            cb, ct, cv = ctc_log_probs.size()
            ctc_log_probs = ctc_log_probs.unsqueeze(1).repeat([1, self.beam_width, 1, 1]).view(cb * self.beam_width, ct, cv)
            ctc_seq_lens = ctc_seq_lens.unsqueeze(1).repeat([1, self.beam_width]).view(-1)
            ctc_states = self.ctc_scorer.initial_state(ctc_log_probs, ctc_seq_lens)
        else:
            ctc_seq_lens = 0
            ctc_log_probs = None
            ctc_states = None

        with flow.no_grad():
            for _ in range(1, self.max_len+1):
                preds, hidden, scores, ending_flag, ctc_states, preds_len = self.decode_step(
                    preds, beam_enc_states, beam_enc_mask,
                    hidden, scores, ctc_log_probs, ctc_states, ctc_seq_lens, preds_len, ending_flag)

                # Stop early once every beam of every utterance has emitted EOS.
                if ending_flag.sum() == b * self.beam_width:
                    break

            scores = scores.view(b, self.beam_width)
            preds = preds.view(b, self.beam_width, -1)

            # Hypothesis length = number of non-EOS positions (BOS included).
            lengths = flow.sum(flow.ne(preds, EOS).float(), dim=-1)

            # Length penalty: normalize by ((lamda + len) / (lamda + 1)) ** penalty.
            if self.penalty:
                lp = flow.pow((self.lamda + lengths) /
                               (self.lamda + 1), self.penalty)
                scores /= lp

            sorted_scores, offset_indices = flow.sort(scores, dim=-1, descending=True)

            # Turn per-utterance beam ranks into flat [b*beam] row indices.
            base_indices = flow.arange(b, dtype=flow.long, device=offset_indices.get_device()) * self.beam_width
            base_indices = base_indices.unsqueeze(1).repeat([1, self.beam_width]).view(-1)
            preds = preds.view(b * self.beam_width, -1)
            indices = offset_indices.view(-1) + base_indices

            # Keep the nbest beams per utterance and strip the leading BOS.
            sorted_preds = preds[indices].view(b, self.beam_width, -1)
            nbest_preds = sorted_preds[:, :min(self.beam_width, self.nbest), 1:]
            nbest_scores = sorted_scores[:, :min(self.beam_width, self.nbest)]

        return self.nbest_translate(nbest_preds), nbest_scores

    def encode(self, inputs, inputs_length):
        """Run the acoustic encoder; returns (enc_states, enc_mask)."""
        enc_states, enc_mask = self.model.encoder(inputs, inputs_length)
        return enc_states, enc_mask

    def decode_step(self, preds, enc_state, enc_mask, hidden, scores, ctc_log_probs, ctc_states, ctc_seq_lens, preds_len, flag):
        """Advance every beam by one token.

        Args:
            preds: [batch*beam, cur_len] token prefixes (BOS-initial).
            enc_state, enc_mask: beam-tiled encoder outputs and mask.
            hidden: LM recurrent state, or None. NOTE(review): when set it is
                assumed to be an (h, c) tuple — confirm against the LM used.
            scores: [batch*beam, 1] cumulative beam scores.
            ctc_log_probs, ctc_states, ctc_seq_lens: CTC scorer inputs
                (None / 0 when ctc_weight == 0).
            preds_len: [batch*beam] current hypothesis lengths.
            flag: [batch*beam, 1] bool, True for beams that already ended.
        Returns:
            Updated (preds, hidden, scores, end_flag, ctc_states, preds_len).
        """

        batch_size = int(scores.size(0) / self.beam_width)

        # Decoder log-probs for the next token of every beam.
        batch_log_probs = self.model.decoder.recognize(
            preds, enc_state, enc_mask).detach()

        # Shallow LM fusion: add weighted LM log-probs.
        if self.lm is not None:
            batch_lm_log_probs, hidden = self.lm_decode(preds, hidden)
            batch_lm_log_probs = batch_lm_log_probs.squeeze(1)
            batch_log_probs = batch_log_probs + self.lm_weight * batch_lm_log_probs

        # Per-beam top-k -> beam_width^2 candidates per utterance.
        last_k_scores, last_k_preds = batch_log_probs.topk(self.beam_width)

        # Finished beams keep exactly one alive branch (increment 0, token EOS)
        # so their cumulative score stays frozen.
        last_k_scores = mask_finished_scores(last_k_scores, flag)
        last_k_preds = mask_finished_preds(last_k_preds, flag)   # [batch_size * beam_width, beam_width]

        # update scores
        scores = scores + last_k_scores

        if self.ctc_weight > 0.0:
            
            batch_ctc_scores, batch_ctc_states = self.ctc_scorer.score(ctc_log_probs, ctc_seq_lens, preds, preds_len, last_k_preds, ctc_states)
            batch_ctc_scores = mask_finished_scores(batch_ctc_scores, flag)
            scores += self.ctc_weight * batch_ctc_scores
        else:
            batch_ctc_states = None

        # Pruning: cut the beam_width^2 candidates back to beam_width per
        # utterance, ranking by length-averaged or cumulative score.
        if self.sort_by_avg_score:
            avg_scores = scores / (preds_len.float().unsqueeze(1) + 1)
            avg_scores = avg_scores.view(batch_size, self.beam_width * self.beam_width)
            _, offset_k_indices = flow.topk(avg_scores, k=self.beam_width)
        else:
            scores = scores.view(batch_size, self.beam_width * self.beam_width)
            _, offset_k_indices = flow.topk(scores, k=self.beam_width)
            

        device = scores.get_device()
        # Flat indices into the [batch*beam*beam] candidate axis.
        base_k_indices = flow.arange(batch_size, device=device).view(-1, 1).repeat([1, self.beam_width])
        base_k_indices *= self.beam_width ** 2
        best_k_indices = base_k_indices.view(-1) + offset_k_indices.view(-1)

        # update scores
        best_k_scores = flow.index_select(scores.view(-1), dim=-1, index=best_k_indices).view(-1, 1)

        # Gather the chosen tokens and the parent prefixes they extend
        # (candidate_index // beam_width recovers the parent beam row).
        best_k_preds = flow.index_select(last_k_preds.view(-1), dim=-1, index=best_k_indices)
        preds_symbol = flow.index_select(preds, dim=0, index=best_k_indices.div(self.beam_width).long())
        
        preds_symbol = flow.cat((preds_symbol, best_k_preds.view(-1, 1)), dim=1)

        # Re-align the LM recurrent state with the surviving beams.
        if hidden is not None:
            h = reselect_hidden(hidden[0], self.beam_width, best_k_indices)
            c = reselect_hidden(hidden[1], self.beam_width, best_k_indices)
            hidden = (h, c)

        if self.ctc_weight > 0.0:
            ctc_states = update_ctc_state(batch_ctc_states, best_k_indices)

        # A beam is finished once its newest token is EOS.
        end_flag = flow.eq(preds_symbol[:, -1], EOS).view(-1, 1)
        add_ones = flow.eq(preds_symbol[:, -1], EOS).view(-1, 1).long()
        
        # Length = non-EOS token count, plus one for a finished beam so its
        # first EOS is counted once (extra EOS padding is ignored by ne()).
        preds_len = flow.sum(preds_symbol.ne(EOS), dim=-1).view(-1) + add_ones.view(-1)

        return preds_symbol, hidden, best_k_scores, end_flag, ctc_states, preds_len


def mask_finished_scores(score, flag):
    """Freeze the scores of beams that have already finished.

    A finished beam keeps exactly one alive branch: its first candidate
    receives a zero increment while every other candidate is pushed to
    -inf, so the beam's cumulative score never changes again.

    Args:
        score: float tensor [batch * beam, beam] of candidate scores.
        flag: bool tensor [batch * beam, 1], True for finished beams.
    Returns:
        Masked score tensor with the same shape as `score`.
    """
    width = score.size(-1)
    not_done = flow.zeros_like(flag, dtype=flow.bool)
    if width > 1:
        # For finished rows: kill every candidate except the first...
        to_kill = flow.cat(
            (not_done, flag.repeat([1, width - 1])), dim=1)
        # ...and zero out that first candidate.
        to_zero = flow.cat(
            (flag.bool(), not_done.repeat([1, width - 1])), dim=1)
    else:
        to_kill = not_done
        to_zero = flag.bool()
    score = flow.masked_fill(score, to_kill, -float('inf'))
    return flow.masked_fill(score, to_zero, 0)


def mask_finished_preds(pred, flag):
    """Force every candidate of a finished sequence to be EOS.

    Args:
        pred: int tensor [batch * beam, beam] of candidate tokens.
        flag: bool tensor [batch * beam, 1], True for finished beams.
    Returns:
        `pred` with all candidates of finished beams replaced by EOS,
        same shape as the input.
    """
    done = flag.repeat([1, pred.size(-1)]).bool()
    return flow.masked_fill(pred, done, EOS)


def reselect_hidden(tensor, beam_width, indices):
    """Expand an RNN hidden state across beams and gather selected rows.

    Args:
        tensor: [n_layers, batch, hidden] recurrent state.
        beam_width: number of candidates expanded per row.
        indices: long tensor of flat candidate indices to keep.
    Returns:
        [n_layers, len(indices), hidden] contiguous state for the
        surviving beams.
    """
    layers, batch, dim = tensor.size()
    # (layers, batch, dim) -> (batch, beam, layers, dim) -> (batch*beam, layers, dim)
    expanded = tensor.transpose(0, 1).unsqueeze(1).repeat([1, beam_width, 1, 1])
    expanded = expanded.reshape(batch * beam_width, layers, dim)
    picked = flow.index_select(expanded, dim=0, index=indices)
    return picked.transpose(0, 1).contiguous()


def update_ctc_state(tensor, indices):
    """Gather the CTC scorer states of the surviving beam candidates.

    Args:
        tensor: [rows, candidates, time, dim] per-candidate CTC states.
        indices: long tensor of `rows` flat indices into the flattened
            (rows * candidates) axis.
    Returns:
        [rows, time, dim] states of the selected candidates.
    """
    rows, _, steps, width = tensor.size()
    flat = tensor.reshape(-1, steps, width)
    chosen = flow.index_select(flat, dim=0, index=indices)
    return chosen.reshape(rows, steps, width)
