import os
import oneflow as torch
from oasr.data import EOS, BOS, MASK
from oasr.recognize.base import Recognizer
import oneflow.nn.functional as F
from oasr.recognize.utils import mask_finished_scores, mask_finished_preds, reselect_hidden
from oasr.decoder.utils import get_transformer_decoder_mask as get_autoregressive_mask
from oasr.vis import mha_visualization_mean


def get_nart_mask(tokens, eos):
    """Return a boolean padding mask for NAR predictions.

    A position is masked (True) iff it comes *strictly after* the first
    ``eos`` token along the last dimension; the EOS position itself and
    everything before it stay unmasked (False).

    Args:
        tokens: LongTensor of token ids, typically (batch, beam, length).
        eos: int, the end-of-sentence token id.

    Returns:
        BoolTensor with the same shape as ``tokens``.
    """
    # Count how many EOS tokens appear at or before each position, then
    # shift right by one so the first EOS itself remains unmasked.
    # This replaces the original O(B*K*L) Python loop with vectorized ops.
    eos_seen = (tokens == eos).long().cumsum(dim=-1)
    mask = torch.zeros_like(eos_seen)
    mask[..., 1:] = eos_seen[..., :-1]
    return mask > 0


class NARTRecognizer(Recognizer):
    """Recognizer for non-autoregressive transformer (NART) models.

    Decodes all target positions in parallel from a fully-MASKed target
    sequence (first pass), and can optionally rescore the first-pass n-best
    hypotheses with an autoregressive decoder (second pass) when the model
    was trained in 'two-step' mode.
    """

    def __init__(self, model, lm=None, lm_weight=0.1, beam_width=5, nbest=1, max_len=60, idx2unit=None, ngpu=1,
                 mode='greedy', apply_two_pass_decode=False, ar_weight=1.0, is_visual=False, output_dir=None):
        super(NARTRecognizer, self).__init__(model, idx2unit, lm, lm_weight, ngpu)

        self.beam_width = beam_width
        self.max_len = max_len
        self.nbest = nbest
        self.mode = mode  # 'beam' selects beam_search, anything else greedy_search

        self.lm_weight = lm_weight
        self.ar_weight = ar_weight  # interpolation weight for AR rescoring

        # FIX: recognize() reads self.is_visual / self.output_dir, but these
        # arguments were previously never stored on the instance, raising
        # AttributeError at decode time whenever recognize() ran.
        self.is_visual = is_visual
        self.output_dir = output_dir

        self.attn_weights = {}

        # AR rescoring is only meaningful for models trained in 'two-step' mode.
        if self.model.mode == 'two-step' and apply_two_pass_decode:
            self.apply_two_pass_decode = True
        else:
            self.apply_two_pass_decode = False

    def encode(self, inputs, inputs_mask):
        """Run frontend + encoder; returns (memory, memory_mask, attn_weights)."""
        inputs, inputs_mask = self.model.frontend(inputs, inputs_mask)
        memory, memory_mask, enc_attn_weights = self.model.encoder(inputs, inputs_mask)
        return memory, memory_mask, enc_attn_weights

    def greedy_search(self, log_probs):
        """Pick the argmax token at every position in parallel.

        Args:
            log_probs: (batch, length, vocab) log-probabilities.

        Returns:
            preds: (batch, 1, length) LongTensor, zeroed after the first EOS.
            scores: (batch, 1) summed log-prob of the kept positions.
        """
        scores, preds = log_probs.topk(1, dim=-1)
        preds = preds.transpose(1, 2)
        # Zero out tokens/scores that fall after the first EOS per hypothesis.
        mask = get_nart_mask(preds, EOS)
        preds.masked_fill_(mask, 0.0)
        scores = scores.transpose(1, 2).masked_fill_(mask, 0.0)
        scores = torch.sum(scores, dim=-1)
        return preds, scores

    def beam_search(self, log_probs):
        """Frame-synchronous beam search over precomputed NAR log-probs.

        Because the NAR decoder already produced per-position distributions,
        the "time" loop here only combines those distributions (plus an
        optional LM) into beam hypotheses; no model re-evaluation happens.

        Args:
            log_probs: (batch, max_steps, vocab) log-probabilities.

        Returns:
            preds: (batch, beam_width, max_steps) LongTensor hypotheses.
            scores: (batch, beam_width) accumulated scores.
        """
        batch_size, max_steps, vocab_size = log_probs.size()
        device = log_probs.device
        # Only the first beam starts alive; the rest at -inf so the initial
        # expansion does not duplicate hypotheses.
        scores = torch.FloatTensor([0.0] + [-float('inf')] * (self.beam_width - 1)).to(device)
        scores = scores.repeat([batch_size]).unsqueeze(1)

        ending_flag = torch.zeros([batch_size * self.beam_width, 1], dtype=torch.bool, device=device)
        preds = torch.ones([batch_size * self.beam_width, 1], dtype=torch.long, device=device)
        hidden = None

        for t in range(max_steps):

            batch_log_probs = log_probs[:, t, :].unsqueeze(1).repeat([1, self.beam_width, 1])
            batch_log_probs = batch_log_probs.reshape(batch_size * self.beam_width, vocab_size)

            # Shallow-fusion with an external LM, if configured.
            if self.lm is not None:
                batch_lm_log_probs, hidden = self.lm_decode(preds, hidden)
                batch_lm_log_probs = batch_lm_log_probs.squeeze(1)
                batch_log_probs = batch_log_probs + self.lm_weight * batch_lm_log_probs

            last_k_scores, last_k_preds = batch_log_probs.topk(self.beam_width)

            # Finished hypotheses keep their score and emit only EOS.
            last_k_scores = mask_finished_scores(last_k_scores, ending_flag)
            last_k_preds = mask_finished_preds(last_k_preds, ending_flag)

            # update scores
            scores = scores + last_k_scores
            scores = scores.view(batch_size, self.beam_width * self.beam_width)

            # pruning: keep the best beam_width of beam_width^2 expansions
            scores, offset_k_indices = torch.topk(scores, k=self.beam_width)
            scores = scores.view(-1, 1)

            # Map (batch, offset-in-beam^2) back to flat indices into the
            # (batch*beam*beam,) expansion tensor.
            base_k_indices = torch.arange(batch_size, device=device).view(-1, 1).repeat([1, self.beam_width])
            base_k_indices *= self.beam_width ** 2
            best_k_indices = base_k_indices.view(-1) + offset_k_indices.view(-1)

            # update predictions
            best_k_preds = torch.index_select(last_k_preds.view(-1), dim=-1, index=best_k_indices)
            # flat index // beam_width = surviving parent row in preds
            best_k_indices = best_k_indices.float().div(self.beam_width).long()
            preds = torch.index_select(preds, dim=0, index=best_k_indices)
            preds = torch.cat((preds, best_k_preds.view(-1, 1)), dim=1)

            # Re-gather LM hidden states to follow the surviving beams.
            if hidden is not None and self.lm is not None:
                h = reselect_hidden(hidden[0], self.beam_width, best_k_indices)
                c = reselect_hidden(hidden[1], self.beam_width, best_k_indices)
                hidden = (h, c)

            # finished or not
            ending_flag = torch.eq(preds[:, -1], EOS).view(-1, 1)

        scores = scores.view(batch_size, self.beam_width)

        # Drop the initial dummy token column.
        return preds[:, 1:].reshape(batch_size, self.beam_width, -1), scores

    def nar_decode_from_mask(self, init_tokens, memory, memory_mask, nbest=1):
        """One parallel decoder pass from an all-MASK target, then search."""
        logits, fst_pass_attn_weights = self.model.decoder(init_tokens, memory, memory_mask)
        log_probs = F.log_softmax(logits, dim=-1)

        if self.mode == 'beam':
            preds, scores = self.beam_search(log_probs)
        else:
            preds, scores = self.greedy_search(log_probs)

        return preds, scores, fst_pass_attn_weights

    def nar_decode_from_vector(self, init_vectors, memory, memory_mask):
        raise NotImplementedError

    def ar_rescoring(self, preds, preds_mask, nar_scores, memory, memory_mask):
        """Rescore NAR hypotheses with the autoregressive decoder (teacher forcing).

        Args:
            preds (LongTensor): (batch, beam, max_lens) first-pass hypotheses.
            preds_mask (BoolTensor): True at positions after the first EOS.
            nar_scores (Tensor): (batch, beam) first-pass scores.
            memory (Tensor): (batch, time, model_size) encoder output.
            memory_mask (Tensor): encoder output mask.

        Returns:
            preds, scores (sorted best-first), second-pass attention weights,
            and the flat reordering indices used for the sort.
        """
        batch_size, beam_width, max_lens = preds.size()
        preds = preds.reshape(-1, max_lens)

        # Tile encoder memory so every beam hypothesis sees its utterance.
        _, max_time_len, model_size = memory.size()
        beam_memory = memory.unsqueeze(1).repeat([1, beam_width, 1, 1]).reshape([-1, max_time_len, model_size])
        beam_memory_mask = memory_mask.unsqueeze(1).repeat([1, beam_width, 1, 1]).reshape([-1, max_time_len])

        # FIX(naming): this pad is the BOS token prepended for teacher
        # forcing; it was previously misleadingly named "eos_pad".
        bos_pad = torch.ones([batch_size * beam_width, 1], dtype=torch.long) * BOS
        preds_in = torch.cat([bos_pad.to(preds.device), preds[:, :-1]], dim=-1)
        ar_mask = get_autoregressive_mask(preds_in)

        if self.model.shared_decoder:
            ar_logits, sec_pass_attn_weights = self.model.decoder(preds_in, beam_memory, beam_memory_mask, dec_mask=ar_mask)
        else:
            ar_logits, sec_pass_attn_weights = self.model.ar_decoder(preds_in, beam_memory, beam_memory_mask, dec_mask=ar_mask)
        # [batch * beam, max_lens, vocab_size]
        as_log_probs = F.log_softmax(ar_logits, dim=-1)
        vocab_size = as_log_probs.size(-1)

        # Gather the log-prob the AR decoder assigns to each predicted token.
        base_index = torch.arange(batch_size * beam_width * max_lens, device=memory.device)
        bias_index = preds.reshape(-1)

        index = base_index * vocab_size + bias_index
        ar_scores = torch.index_select(as_log_probs.reshape(-1), dim=-1, index=index)
        ar_scores = ar_scores.reshape(batch_size, beam_width, max_lens)
        # Positions after EOS must not contribute to the hypothesis score.
        ar_scores.masked_fill_(preds_mask, 0.0)

        ar_scores = torch.sum(ar_scores, dim=-1)

        # Interpolate NAR and AR scores, then sort hypotheses best-first.
        scores = (1 - self.ar_weight) * nar_scores + self.ar_weight * ar_scores

        scores, indices = torch.sort(scores, dim=-1, descending=True)
        base_index = torch.arange(batch_size, device=indices.device).unsqueeze(1).repeat([1, beam_width]) * beam_width
        index = base_index.reshape(-1) + indices.reshape(-1)

        preds = torch.index_select(preds, 0, index).reshape(batch_size, beam_width, max_lens)

        return preds, scores, sec_pass_attn_weights, index

    def recognize(self, inputs, inputs_mask, uttid):
        """Full decode pipeline: encode, NAR decode, optional AR rescoring.

        Returns:
            (translated n-best hypotheses, their scores).
        """
        memory, memory_mask, _ = self.encode(inputs, inputs_mask)

        if self.model.vector_as_input:
            raise NotImplementedError
        else:
            # First pass: decode from a fully-MASKed target of length max_len.
            init_tokens = torch.ones([memory.size(0), self.max_len], dtype=torch.long).to(memory.device) * MASK
            preds, scores, fst_pass_attn_weights = self.nar_decode_from_mask(init_tokens, memory, memory_mask)

        if self.apply_two_pass_decode:
            # Truncate all hypotheses to the longest unfinished prefix before
            # the (more expensive) AR rescoring pass.
            mask = get_nart_mask(preds, EOS)
            length = torch.max(torch.sum(~mask, dim=-1)).item()
            preds = preds[:, :, :length]
            mask = mask[:, :, :length]
            preds, scores, sec_pass_attn_weights, indices = self.ar_rescoring(preds, mask, scores, memory, memory_mask)

        nbest = max(min(self.nbest, preds.size(1)), 1)
        nbest_preds = preds[:, :nbest]
        nbest_scores = scores[:, :nbest]

        if self.is_visual:
            assert len(uttid) == 1
            layer_wise_attn_wise(fst_pass_attn_weights, self.output_dir, uttid[0], suffix='fst')
            if self.apply_two_pass_decode:
                layer_wise_attn_wise(sec_pass_attn_weights, self.output_dir, uttid[0], nbest, indices, suffix='sec')

        return self.nbest_translate(nbest_preds), nbest_scores


def layer_wise_attn_wise(attn_weight_dict, output_dir, uttid, nbest=1, indices=None, suffix='fst'):
    """Save mean-over-heads attention heatmaps for every decoder layer.

    For each layer, writes one 'src' (cross-attention) and one 'slf'
    (self-attention) image per n-best hypothesis into ``output_dir``, named
    ``<uttid>_<layer>_<src|slf>_<rank>_<suffix>.jpg``. If ``indices`` is
    given, hypotheses are first re-ordered along the batch dimension.
    """
    for layerid, layer_weights in attn_weight_dict.items():
        slf_weights = layer_weights['slf_attn_weights']
        src_weights = layer_weights['src_attn_weights']
        # Expect (batch, heads, query, key) for both attention tensors.
        assert src_weights.dim() == 4 and slf_weights.dim() == 4
        if indices is not None:
            # Follow the rescoring re-order so image rank matches hypothesis rank.
            slf_weights = torch.index_select(slf_weights, 0, indices)
            src_weights = torch.index_select(src_weights, 0, indices)

        for rank in range(nbest):
            for tag, weights in (('src', src_weights), ('slf', slf_weights)):
                filename = '_'.join([uttid, layerid, tag, str(rank), suffix + '.jpg'])
                mha_visualization_mean(
                    weights[rank].detach().cpu().numpy(),
                    save_name=os.path.join(output_dir, filename))