import os
import oneflow as torch
import math
import logging
import numpy as np
import oneflow.nn.functional as F
from oasr.recognize.base import Recognizer
from oasr.data import BLK, BOS, EOS
from dhelper import MergeDuplicatedPaths
from oasr.vis import rnnt_path_visualization

logger = logging.getLogger(__name__)


class TransducerRecognizer(Recognizer):
    """Greedy / beam-search decoder for neural-transducer (RNN-T style) ASR models.

    Optional features: external-LM shallow fusion, internal-LM self fusion
    (via ``model.lm_project_layer``), path fusion (merging duplicated label
    sequences while pruning), a one-step time constraint (advance exactly one
    encoder frame per decoding step), two-pass rescoring with
    ``model.second_decoder``, and decoding-path visualization.
    """

    def __init__(self, model, lm=None, lm_weight=0.1, beam_width=5, max_len=60, nbest=1,
                 idx2unit=None, self_fusion_weight=0.0, path_fusion=False,
                 one_step_constrain=False, ngpu=1, mode='beam', apply_rescoring=False,
                 is_visualized=False, output_dir=None):
        """Configure the recognizer.

        Args:
            model: transducer model exposing ``frontend``, ``encoder``,
                ``decoder`` and ``joint`` (optionally ``lm_project_layer``,
                ``second_decoder`` and look-back/ahead convolution).
            lm: optional external language model for shallow fusion.
            lm_weight: interpolation weight for external-LM log-probs.
            beam_width: number of hypotheses kept during beam search.
            max_len: extra decoding steps allowed beyond the encoder length.
            nbest: number of hypotheses returned per utterance.
            idx2unit: mapping from token index to output unit string.
            self_fusion_weight: weight of the internal-LM distribution; only
                effective when the model has ``lm_project_layer``.
            path_fusion: merge duplicated label sequences during pruning.
            one_step_constrain: advance one encoder frame every step.
            ngpu: forwarded to the base Recognizer.
            mode: 'greedy' or 'beam'.
            apply_rescoring: second-pass rescoring (needs ``second_decoder``).
            is_visualized: dump decoding-path pictures under ``output_dir``/pic.
            output_dir: base directory for visualization output; may be None.
        """
        super(TransducerRecognizer, self).__init__(model, idx2unit, lm, lm_weight, ngpu)

        self.nbest = nbest
        self.beam_width = beam_width
        self.max_len = max_len
        self.mode = mode
        # Self fusion only applies when the model exposes an internal LM head.
        self.self_fusion_weight = self_fusion_weight if hasattr(model, 'lm_project_layer') else 0.0
        if self.self_fusion_weight > 0.0:
            logger.info('Apply Self Fusion Weight %.2f During Inference!' % self.self_fusion_weight)
        self.one_step_constrain = one_step_constrain
        if self.one_step_constrain:
            logger.info('Apply One Step Constrain!')

        self.path_fusion = path_fusion
        if self.path_fusion:
            logger.info('Apply Path Fusion for Transducer Recognizer!')

        self.is_visualized = is_visualized
        if self.is_visualized:
            logger.info('[TransducerRecognizer] Apply Visualization During Inference')
        # BUGFIX: `output_dir` defaults to None; os.path.join(None, 'pic')
        # would raise. Only build/create the picture directory when given.
        self.vis_output_dir = os.path.join(output_dir, 'pic') if output_dir is not None else None
        if self.vis_output_dir is not None and not os.path.exists(self.vis_output_dir):
            os.makedirs(self.vis_output_dir)

        self.apply_rescoring = bool(apply_rescoring and hasattr(self.model, 'second_decoder'))
        if self.apply_rescoring:
            logger.info('[TransducerRecognizer] Apply Rescoring!')

    def recognize(self, inputs, inputs_mask, uttids=None):
        """Dispatch to the configured search mode.

        Returns:
            (nbest transcriptions per utterance, score tensor).
        Raises:
            ValueError: if ``self.mode`` is neither 'greedy' nor 'beam'.
        """
        if self.mode == 'greedy':
            return self.greed_search(inputs, inputs_mask, uttids)
        elif self.mode == 'beam':
            return self.beam_seach(inputs, inputs_mask, uttids)
        else:
            raise ValueError('Unknown decoding mode: %s' % self.mode)

    def greed_search(self, inputs, inputs_mask, uttids):
        """Greedy (best-path) transducer decoding for a single utterance.

        Walks the (time t, label u) lattice: a blank advances t, a non-blank
        emits a label (advances u) and keeps the decoder hidden state.
        Returns (1-best transcription, accumulated log-prob score).
        """

        assert inputs.size(0) == 1 and len(uttids) == 1

        with torch.no_grad():
            enc_states, _ = self.encode(inputs, inputs_mask)

        hidden = None
        preds = torch.ones([1, 1], dtype=torch.long).to(enc_states.device) * BLK
        scores = torch.zeros([1], dtype=torch.float).to(enc_states.device)

        points_recoder = []  # (t, u) path points for visualization
        t = 0
        u = torch.zeros([1], dtype=torch.long).to(enc_states.device)
        enc_lens = enc_states.size(1)
        # At most one lattice move per iteration; bound by frames + max_len labels.
        for _ in range(enc_lens + self.max_len):

            enc_state = enc_states[:, t].unsqueeze(1)
            # log_probs [b, 1, 1, v]
            with torch.no_grad():
                log_prob, new_hidden = self.decode_step(preds, u, enc_state, hidden)

            log_prob = log_prob.squeeze(1)

            if self.lm is not None:
                raise NotImplementedError

            score, best_pred = torch.topk(log_prob.detach().squeeze(1), 1, dim=-1)

            scores += score.squeeze(0)

            if self.one_step_constrain:
                t += 1

            if int(best_pred) == BLK:
                # Blank: move to the next frame (unless already advanced above).
                if not self.one_step_constrain:
                    t += 1
                if t >= int(enc_lens):
                    break
            else:
                # Non-blank: emit the label and adopt the new decoder state.
                u += 1
                preds = torch.cat([preds, best_pred], dim=-1)
                hidden = new_hidden

            if t >= int(enc_lens):
                break

            points_recoder.append((t, u.item()))

        # Close the path at the final frame for plotting.
        if t < enc_states.size(1) - 1:
            points_recoder.append((enc_states.size(1) - 1, u.item()))

        if self.is_visualized:
            rnnt_path_visualization(
                points_recoder, os.path.join(self.vis_output_dir, '%s-transducer.jpg' % uttids[0])
            )

        # Strip the leading BLK start token before mapping indices to units.
        return self.nbest_translate(preds.reshape(1, 1, -1)[:, :, 1:], u.unsqueeze(0)), scores.unsqueeze(0)

    def beam_seach(self, inputs, inputs_mask, uttids):
        """Batch-1 beam search over the transducer lattice.

        NOTE(review): the misspelled method name ('seach') is kept for
        backward compatibility with existing callers.

        Returns:
            (nbest transcriptions, scores[1, nbest]).
        """

        assert inputs.size(0) == 1

        enc_states, enc_mask = self.encode(inputs, inputs_mask)

        # Only the first beam starts alive; the others are -inf so the first
        # pruning step cannot select them.
        scores = torch.FloatTensor([0.0] + [-float('inf')] * max(0, self.beam_width - 1))
        scores = scores.unsqueeze(1).to(enc_states.device)

        preds = torch.ones([self.beam_width, 1], dtype=torch.long).to(enc_states.device) * BLK
        beam_u = torch.zeros([self.beam_width], dtype=torch.long).to(enc_states.device)  # labels emitted
        beam_t = torch.zeros([self.beam_width], dtype=torch.long).to(enc_states.device)  # frame index

        beam_status = torch.LongTensor([0] * self.beam_width).to(enc_states.device) > 0

        max_time_steps = enc_states.size(1)

        last_hidden = None if self.model.decoder.model_type != 'rnn' \
            else self.model.decoder.init_hidden_states(self.beam_width, preds.device)

        if self.lm is not None:
            if self.lm.model_type == 'transformer_lm':
                last_lm_hidden = None
            else:
                # BUGFIX: the initial LM hidden state was computed but never
                # assigned, leaving `last_lm_hidden` undefined for RNN LMs.
                last_lm_hidden = self.lm.init_hidden_states(self.beam_width, preds.device)

        # Recorded (t, u) points per beam for path visualization.
        points_recoder = torch.zeros([self.beam_width, 1, 2], dtype=torch.long).to(enc_states.device)

        for step in range(max_time_steps + self.max_len):
            with torch.no_grad():

                in_num_hyps = preds.size(0)
                # Trim the prediction history to the longest emitted prefix.
                keep_preds_len = min(torch.max(beam_u).item() + 1, preds.size(1))
                preds = preds[:, :keep_preds_len]

                enc_state = select_encoder_states_based_index(enc_states, beam_t)
                log_probs, cur_hidden = self.decode_step(preds, beam_u, enc_state.unsqueeze(1), last_hidden)
                if self.lm is not None:
                    # BUGFIX: this class defines `lm_decode`, not
                    # `lm_decode_with_index` (AttributeError before).
                    lm_log_probs, cur_lm_hidden = self.lm_decode(preds, beam_u, last_lm_hidden)
                    lm_log_probs = lm_log_probs.squeeze(1)
                    log_probs += self.lm_weight * lm_log_probs

                beam_scores, beam_preds = torch.topk(log_probs.squeeze(1).detach(), k=self.beam_width, dim=-1)
                # Accumulate scores, then flatten to
                # [in_num_hyps * beam_width] in hypothesis-major order.
                beam_accum_scores = scores + beam_scores
                beam_accum_scores = beam_accum_scores.view(-1)

                # A non-blank candidate advances u by one; blank keeps u.
                add_ones = torch.ones_like(beam_preds.view(-1), dtype=torch.long)
                blk_mask = beam_preds.view(-1) == BLK
                add_ones.masked_fill_(blk_mask, 0)

                # Expand beam_u to candidate order (hypothesis-major).
                beam_u = beam_u.unsqueeze(1).repeat([1, self.beam_width]).view(-1)
                beam_u += add_ones

                # BUGFIX: expand beam_t in the same hypothesis-major order as
                # beam_u; the old `.repeat([self.beam_width])` tiled the time
                # indices and attached them to the wrong hypotheses.
                beam_t = beam_t.unsqueeze(1).repeat([1, self.beam_width]).view(-1)
                if self.one_step_constrain:
                    beam_t += torch.ones_like(beam_preds.view(-1), dtype=torch.long)
                else:
                    beam_t += (1 - add_ones)  # blank moves to the next frame

                # Expand the hypotheses, append one slot, and write each
                # non-blank candidate label at its u position.
                preds = preds.unsqueeze(1).repeat([1, self.beam_width, 1])
                preds = preds.reshape([in_num_hyps * self.beam_width, -1])
                pad_value = torch.ones_like(beam_preds.view(-1, 1)) * BLK
                preds = torch.cat([preds, pad_value], dim=-1)
                preds = fill_tensor_based_index(preds, beam_u, beam_preds.view(-1), blank=BLK)

                if self.is_visualized:
                    points = torch.cat([beam_t.view(-1, 1), beam_u.view(-1, 1)], dim=-1)
                    # BUGFIX: expand the recorded paths to candidate order with
                    # repeat (a bare view cannot change the element count) and
                    # append the new point along the step dimension.
                    points_recoder = points_recoder.unsqueeze(1).repeat([1, self.beam_width, 1, 1]) \
                        .reshape([in_num_hyps * self.beam_width, -1, 2])
                    points_recoder = torch.cat([points_recoder, points.unsqueeze(1)], dim=1)

                # Pruning: keep the best `beam_width` candidates.
                if self.path_fusion:
                    preds, beam_u, scores, best_k_indices = MergeDuplicatedPaths(preds, beam_u, beam_accum_scores, topk=self.beam_width)
                    scores = scores.view(-1, 1)
                else:
                    scores, best_k_indices = torch.topk(beam_accum_scores, k=self.beam_width)
                    scores = scores.view(-1, 1)

                    preds = torch.index_select(preds, dim=0, index=best_k_indices)
                    beam_u = torch.index_select(beam_u, dim=0, index=best_k_indices)

                beam_t = torch.index_select(beam_t, dim=0, index=best_k_indices)
                # Clamp the frame index to the last valid encoder frame.
                over_boundary_mask = beam_t >= (max_time_steps - 1)
                beam_t.masked_fill_(over_boundary_mask, max_time_steps - 1)

                if self.model.decoder.model_type == 'rnn':
                    # Blank keeps the previous decoder state; non-blank adopts
                    # the freshly computed one.
                    mix_hidden = combine_curr_and_last_hidden(last_hidden, cur_hidden, beam_preds.view(-1), self.beam_width)
                    last_hidden = select_hidden(mix_hidden, best_k_indices)

                if self.lm is not None:
                    if self.lm.model_type != 'transformer_lm':
                        mix_lm_hidden = combine_curr_and_last_hidden(last_lm_hidden, cur_lm_hidden, beam_preds.view(-1), self.beam_width)
                        last_lm_hidden = select_hidden(mix_lm_hidden, best_k_indices)

                # A beam is finished once it emits blank on the last frame.
                new_token_equal_to_blk = torch.index_select(blk_mask, dim=0, index=best_k_indices)
                beam_status = (beam_t == max_time_steps - 1) & new_token_equal_to_blk

                if self.is_visualized:
                    points_recoder = torch.index_select(points_recoder, dim=0, index=best_k_indices)

                if beam_status.int().sum() == self.beam_width:
                    break

        if self.apply_rescoring:
            # NOTE: rescoring reorders `beam_u` in place, so the hypothesis
            # lengths stay aligned with the re-ranked `preds`.
            preds, scores = self.rescoring(preds, beam_u, enc_states, enc_mask)

        return self.nbest_translate(preds.unsqueeze(0)[:, :min(self.nbest, self.beam_width), 1:], beam_u.unsqueeze(0)), \
            scores.reshape(-1, self.beam_width)[:, :min(self.nbest, self.beam_width)]

    def decode_step(self, preds, preds_len, enc_state, hidden=None):
        """One joint-network step combining an encoder frame with the
        prediction-network state.

        Returns:
            (log-probs over the vocabulary, new decoder hidden state).
        """
        dec_state, hidden = self.decode(preds, preds_len, hidden)
        logits = self.joint(enc_state, dec_state)
        log_probs = F.log_softmax(logits, dim=-1)

        # Internal-LM self fusion: mix in the decoder-only label distribution.
        if self.self_fusion_weight > 0.0:
            log_probs += self.self_fusion_weight * F.log_softmax(self.model.lm_project_layer(dec_state), dim=-1)

        return log_probs, hidden

    def encode(self, inputs, inputs_mask):
        """Run frontend + encoder; optionally apply the look-back/ahead
        convolution over padded encoder states.

        Returns:
            (enc_states, enc_mask).
        """
        enc_inputs, enc_mask = self.model.frontend(inputs, inputs_mask)
        enc_states, enc_mask, _, _ = self.model.encoder.inference(enc_inputs, enc_mask)

        if self.model.apply_look_back_ahead:
            # Zero-pad the time axis so the 1-D conv sees full context windows.
            enc_states = F.pad(enc_states, pad=(0, 0, self.model.lookback_steps, self.model.lookahead_steps), value=0.0)
            enc_states = enc_states.transpose(1, 2)
            enc_states = self.model.lookahead_conv(enc_states)
            enc_states = enc_states.transpose(1, 2)

        return enc_states, enc_mask

    def decode(self, preds, preds_len, hidden=None):
        """Run the prediction network one step.

        For stateless/transformer decoders the full prefix is re-encoded and
        the state at `preds_len` is selected; for RNN decoders only the last
        token is fed together with the carried hidden state.
        """
        if self.model.decoder.model_type in ['transformer', 'state-less', 'conv-state-less']:
            dec_states = self.model.decoder.inference(preds)
            dec_states = select_tensor_based_index(dec_states, preds_len).unsqueeze(1)
        elif self.model.decoder.model_type == 'rnn':
            dec_input = select_tensor_based_index(preds, preds_len).unsqueeze(-1)
            dec_states, hidden = self.model.decoder.inference(dec_input, hidden)
        else:
            raise ValueError(self.model.decoder.model_type)
        return dec_states, hidden

    def joint(self, enc_state, dec_state):
        """Fuse encoder and prediction states through the joint network."""
        return self.model.joint.inference(enc_state, dec_state)

    def lm_decode(self, preds, index, hidden=None):
        """External-LM forward for shallow fusion.

        Transformer LMs re-score the whole prefix and the state at `index` is
        selected; RNN LMs consume only the token at `index` plus the hidden.
        """
        if self.lm.model_type == 'transformer_lm':
            log_probs = self.lm.predict(preds, last_frame=False)
            log_probs = select_tensor_based_index(log_probs, index)
        else:
            preds = select_tensor_based_index(preds, index).unsqueeze(-1)
            log_probs, hidden = self.lm.predict(preds, hidden)
        return log_probs, hidden

    def nbest_translate(self, nbest_preds, nbest_length):
        """Map token-index hypotheses to unit strings.

        Args:
            nbest_preds: [batch, nbest, len] token indices.
            nbest_length: [batch, nbest] valid lengths.
        Returns:
            list (per batch) of lists (per hypothesis) of space-joined units;
            EOS tokens are skipped.
        """
        assert nbest_preds.dim() == 3
        assert nbest_length.dim() == 2
        batch_size, nbest, lens = nbest_preds.size()
        results = []
        for b in range(batch_size):
            nbest_list = []
            for n in range(nbest):
                pred = []
                for i in range(nbest_length[b][n].item()):
                    token = int(nbest_preds[b, n, i])
                    if token == EOS:
                        continue
                    pred.append(self.idx2unit[token])
                nbest_list.append(' '.join(pred))
            results.append(nbest_list)
        return results

    def rescoring(self, preds, preds_length, memory, memory_mask):
        """Second-pass rescoring of beam hypotheses with ``model.second_decoder``.

        Each hypothesis is scored autoregressively against the encoder memory
        and the beam is re-ranked by the new scores. ``preds_length`` is
        reordered IN PLACE so the caller's length tensor stays aligned with
        the returned, re-ranked ``preds``.

        Returns:
            (preds[beam, max_lens] re-ranked, scores[beam, 1]).
        """

        beam_width, max_lens = preds.size()
        preds = preds.reshape(-1, max_lens)

        _, max_time_len, model_size = memory.size()
        # Tile the (batch-1) encoder memory for every beam entry.
        beam_memory = memory.unsqueeze(1).repeat([1, beam_width, 1, 1]).reshape([-1, max_time_len, model_size])
        beam_memory_mask = memory_mask.unsqueeze(1).repeat([1, beam_width, 1, 1]).reshape([-1, max_time_len])

        # Teacher-forcing input starts with BOS (replacing the leading BLK);
        # the target gets EOS inserted at each hypothesis length.
        bos_pad = torch.ones([beam_width, 1], dtype=torch.long) * BOS
        preds_in = torch.cat([bos_pad.to(preds.device), preds[:, 1:]], dim=-1)
        preds_out = insert_eos_based_length(preds[:, 1:], preds_length)
        ar_logits, _ = self.model.second_decoder(preds_in, beam_memory, beam_memory_mask)
        # [batch * beam, max_lens, vocab_size]
        as_log_probs = F.log_softmax(ar_logits, dim=-1)
        vocab_size = as_log_probs.size(-1)

        # Gather the log-prob of every target token via flat indexing.
        base_index = torch.arange(beam_width * max_lens, device=memory.device)
        bias_index = preds_out.reshape(-1)

        index = base_index * vocab_size + bias_index
        scores = torch.index_select(as_log_probs.reshape(-1), dim=-1, index=index)
        scores = scores.reshape(1, beam_width, max_lens)
        # Padding (blank) positions do not contribute to the sequence score.
        scores.masked_fill_(preds_out == BLK, 0.0)

        scores = torch.sum(scores, dim=-1)

        scores, indices = torch.sort(scores, dim=-1, descending=True)
        order = indices.reshape(-1)
        preds = preds[order]
        # BUGFIX: previously only `preds` was re-ranked, leaving the caller's
        # length tensor misaligned. Advanced indexing returns a copy, so the
        # in-place copy_ is safe.
        preds_length.copy_(preds_length[order])

        return preds, scores.reshape(-1, 1)


def select_tensor_based_index(tensor, index):
    """Pick, for each batch element b, the time slice tensor[b, index[b]].

    Args:
        tensor: [b, t] or [b, t, v].
        index: [b] long indices into the time dimension.
    Returns:
        [b] (2-D input) or [b, v] (3-D input) selected rows.
    """
    assert tensor.dim() >= 2
    assert index.dim() == 1

    num_batches = tensor.size(0)
    seq_len = tensor.size(1)

    # Flat offsets into the (batch * time) dimension.
    offsets = torch.arange(num_batches, device=tensor.device) * seq_len + index

    if tensor.dim() == 3:
        flat = tensor.reshape(num_batches * seq_len, tensor.size(-1))
    else:
        assert tensor.dim() == 2
        flat = tensor.reshape(num_batches * seq_len)

    return torch.index_select(flat, 0, offsets.long())


def select_encoder_states_based_index(tensor, index):
    """Pick, for each row b, the encoder frame tensor[b, index[b]].

    Args:
        tensor: [b, t, v] encoder states.
        index: [b] long frame indices.
    Returns:
        [b, v] selected frames.
    """
    assert tensor.dim() == 3
    assert index.dim() == 1

    b, t, v = tensor.size()

    # BUGFIX: the flat offset must be scaled by the time length `t`
    # (cf. select_tensor_based_index); the unscaled version only worked
    # for the batch-1 case.
    base_index = torch.arange(b, device=tensor.device) * t
    indices = base_index + index

    select_tensor = torch.index_select(tensor.reshape(b * t, v), 0, indices.long())
    return select_tensor


def fill_tensor_based_index(tensor, index, value, blank=BLK):
    """Write value[b] into tensor[b, index[b]] for every row whose value is
    not `blank`; blank rows are left untouched.

    Mutates and returns `tensor`.
    """
    assert tensor.dim() == 2
    assert value.dim() == 1
    assert value.size(0) == tensor.size(0)
    assert index.size(0) == value.size(0)
    assert tensor.size(1) >= int(torch.max(index))

    for row, (pos, val) in enumerate(zip(index, value)):
        if int(val) == blank:
            continue
        tensor[row, int(pos)] = val
    return tensor

def insert_eos_based_length(tokens, length):
    """Insert an EOS token into each row right after its valid length.

    Each row keeps its first length[b] tokens, gets EOS at position
    length[b], and the remaining tokens shifted one slot to the right
    (the sequence is first padded with one BLK column to make room).
    """
    num_rows = tokens.size(0)
    padded = F.pad(tokens, pad=(0, 1), value=BLK)
    out = torch.ones_like(padded) * EOS
    for row in range(num_rows):
        cut = length[row]
        out[row, :cut] = padded[row, :cut]
        out[row, cut + 1:] = padded[row, cut:-1]
    return out

def select_hidden(tensor_list, indices):
    """Select beam entries from a list of per-layer hidden states.

    Each list element is either an (h, c) tuple (LSTM) or a single tensor,
    laid out [num_layers, beam, hidden]; `indices` picks rows along the
    beam axis.
    """

    def select(tensor, indices):
        # Transpose to [beam, layers, hidden] so index_select works on dim 0.
        tensor = tensor.transpose(0, 1)
        new_tensor = torch.index_select(tensor, dim=0, index=indices)
        new_tensor = new_tensor.transpose(0, 1).contiguous()
        return new_tensor

    hidden = []
    for state in tensor_list:
        if isinstance(state, tuple):
            h = select(state[0], indices)
            c = select(state[1], indices)
            hidden.append((h, c))
        else:
            # BUGFIX: the old code called hidden.append(state, indices) —
            # list.append takes one argument, and the tensor was never
            # actually selected. Select it like the tuple branch.
            hidden.append(select(state, indices))

    return hidden


def combine_curr_and_last_hidden(last_hidden, cur_hidden, last_pred, beam_width):
    """Merge previous and current decoder states per expanded candidate.

    Candidates whose last prediction was blank keep the previous hidden
    state (a blank does not advance the prediction network); all others
    adopt the freshly computed state. Each state tensor, shaped
    [num_layers, num_hyps, hidden], is expanded hypothesis-major to
    [num_layers, num_hyps * beam_width, hidden].
    """

    def expand_masked(state, width, zero_mask):
        # Repeat each hypothesis `width` times, zeroing masked candidates.
        layers, _, dim = state.size()
        expanded = state.transpose(0, 1).unsqueeze(1)
        expanded = expanded.repeat([1, width, 1, 1]).reshape(-1, layers, dim)
        expanded.masked_fill_(zero_mask, 0.0)
        return expanded.transpose(0, 1)

    keep_last = (last_pred == BLK).reshape(-1, 1, 1)

    merged = []
    for last_state, cur_state in zip(last_hidden, cur_hidden):
        if isinstance(last_state, tuple) and isinstance(cur_state, tuple):
            # LSTM-style (h, c) pair: merge both components.
            h = expand_masked(last_state[0], beam_width, ~keep_last) \
                + expand_masked(cur_state[0], beam_width, keep_last)
            c = expand_masked(last_state[1], beam_width, ~keep_last) \
                + expand_masked(cur_state[1], beam_width, keep_last)
            merged.append((h, c))
        else:
            merged.append(expand_masked(last_state, beam_width, ~keep_last)
                          + expand_masked(cur_state, beam_width, keep_last))

    return merged