import torch
from torch import nn
import torch.nn.functional as F
from src.net.loss import nll_loss

from src.utils.build_model import build_encoder, build_decoder


class Transformer(nn.Module):
    def __init__(self, config):
        super(Transformer, self).__init__( )

        self.config = config
        self.encoder = build_encoder(config)
        self.decoder = build_decoder(config)
        self.nll_loss = nll_loss
        self.ctc_weight = config.enc.ctc_weight if config.enc.ctc_weight else 0.0
        self.ce_weight = 1.0 - self.ctc_weight
        self.encoder_project_layer = None

        # multask learning (loss_encoder)
        if self.ctc_weight > 0.0:
            self.ctc_loss = nn.CTCLoss()
            self.encoder_project_layer = nn.Sequential(nn.Tanh(),
                                                       nn.Linear(self.config.enc.output_size, self.config.vocab_size))

    def forward(self, enc_inputs, enc_inputs_len, tokens, tokens_length):
        enc_outputs, encode_output_lengths = self.encoder(enc_inputs, enc_inputs_len)

        tokens_with_bos, token_with_bos_lens = F.pad(tokens, pad=[1, 0, 0, 0], value=0), tokens_length.add(1)
        tokens_with_eos, token_with_eos_lens = F.pad(tokens, pad=[0, 1, 0, 0], value=0), tokens_length.add(1)

        memory = [enc_outputs, encode_output_lengths]
        dec_output = self.decoder(tokens_with_bos, memory)[0]
        dec_output = torch.nn.functional.log_softmax(dec_output, dim=-1)
        loss = self.nll_loss(dec_output, tokens_with_eos,
                             length=token_with_eos_lens) * self.ce_weight

        if self.ctc_weight > 0.0:
            ctc_output = self.encoder_project_layer(enc_outputs)
            ctc_output = torch.transpose(ctc_output, 0, 1)
            ctc_output = ctc_output.log_softmax(2)
            loss += self.ctc_loss(ctc_output, tokens.int(),
                                  encode_output_lengths, tokens_length.int()) * self.ctc_weight

        return loss

    def recognize(self, inputs, inputs_length):
        r"""
    Args:
        inputs: fbank feature
        inputs_length: input valid length
    Shape:
        Inputs:
        - inputs: :math:`(N, S, F)` where N is the batch size, S is the feature sequence length, E is
          the feature dimension.
        - inputs_length: :math:`(N)` where N is the batch size
        Outputs:
        - output_seq: list [N * T]. where T is the recognized char ids length, N is the batch size.
        """
        enc_outputs, encode_output_lengths = self.encoder(inputs, inputs_length)

        output_seq = []
        for i in range(inputs.shape[0]):
            memory = [enc_outputs[i].unsqueeze(0), encode_output_lengths[i].unsqueeze(0)]
            out = self.decoder.greedy_search(memory)
            output_seq.append(out)
        # shape ( b * len)
        # print("output_seq:", output_seq)

        return output_seq

    def get_ctc_post(self, inputs, inputs_length, apply_softmax=False):
        enc_states, output_lengths = self.encoder(inputs, inputs_length)
        encoder_output = self.encoder_project_layer(enc_states)
        if apply_softmax:
            encoder_output = self.softmax(encoder_output)

        return encoder_output, output_lengths

    def recognize_ctc(self, inputs, inputs_length):
        encoder_post, output_lengths = self.get_ctc_post(inputs, inputs_length)

        preds = torch.argmax(encoder_post, -1)

        ans = [[int(j) for j in i if j > 0]
               for i in preds]

        return ans

    def recognize_beam(self, inputs, inputs_length, beam=4, nbest=3):
        enc_outputs, encode_output_lengths = self.encoder(inputs, inputs_length)

        output_seq = []
        for i in range(inputs.shape[0]):
            memory = [enc_outputs[i].unsqueeze(0), encode_output_lengths[i].unsqueeze(0)]
            out = self.decoder.beam_search(memory, beam=beam, nbest=nbest)
            output_seq.append(out)
        # shape ( b * len)
        # print("output_seq:", output_seq)

        return output_seq


class Transformer_torch(nn.Module):
    """Encoder-decoder ASR model using torch-style decoder masks
    (``tgt_mask`` / ``tgt_key_padding_mask`` / ``memory_key_padding_mask``)."""

    def __init__(self, config):
        super(Transformer_torch, self).__init__()

        self.config = config
        self.encoder = build_encoder(config)
        self.decoder = build_decoder(config)
        self.nll_loss = nll_loss

    def forward(self, enc_X, enc_len, tokens, tokens_length):
        r"""Compute the NLL training loss for one batch.

        Args:
            enc_X: input features.
            enc_len: valid input lengths.
            tokens: target token ids.
            tokens_length: valid target lengths.
        Shape:
            Inputs:
            - enc_X: :math:`(N, S, E)` where N is the batch size, S is the
              source sequence length, E is the embedding dimension.
            - enc_len: :math:`(N)` where N is the batch size.
            - tokens: :math:`(N, T)` where N is the batch size, T is the
              target sequence length.
            - tokens_length: :math:`(N)` where N is the batch size.
            Outputs:
            - scalar loss tensor.
        """
        memory, encode_output_lengths = self.encoder(enc_X, enc_len)

        # Prepend id 0 as decoder-input start symbol, append id 0 as target
        # end symbol.
        # NOTE(review): assumes id 0 doubles as sos/eos — confirm against
        # the tokenizer/vocab definition.
        tokens_with_bos, token_with_bos_lens = F.pad(tokens, pad=[1, 0, 0, 0], value=0), tokens_length.add(1)
        tokens_with_eos, token_with_eos_lens = F.pad(tokens, pad=[0, 1, 0, 0], value=0), tokens_length.add(1)

        memory_key_padding_mask, tgt_key_padding_mask, tgt_mask = self._compute_dec_mask(memory, encode_output_lengths,
                                                                                         tokens_with_bos,
                                                                                         token_with_bos_lens)
        dec_output = self.decoder(tokens_with_bos, memory, tgt_mask=tgt_mask,
                                  tgt_key_padding_mask=tgt_key_padding_mask,
                                  memory_key_padding_mask=memory_key_padding_mask)
        dec_output = F.log_softmax(dec_output, dim=-1)

        return self.nll_loss(dec_output, tokens_with_eos,
                             length=token_with_eos_lens)

    def _compute_dec_mask(self, memory, encode_output_lengths, tokens_with_bos, token_with_bos_lens):
        """Build decoder masks: memory/target padding masks and causal mask.

        Bug fix: the causal ``tgt_mask`` was previously built as ``(S, S)``
        from the memory length, but the decoder's causal mask must be
        ``(T, T)`` over the target sequence — the old code broke (or
        mis-masked) whenever S != T.

        Returns:
            ``(memory_key_padding_mask, tgt_key_padding_mask, tgt_mask)`` —
            boolean tensors of shapes ``(N, S)``, ``(N, T)`` and ``(T, T)``;
            True marks positions to ignore.
        """
        # NOTE(review): assumes encoder memory is batch-first (N, S, E) so
        # that dim 1 is the source length — confirm encoder output layout.
        S = memory.shape[1]
        memory_key_padding_mask = torch.arange(S, device=memory.device)[None, :] >= encode_output_lengths[:, None]
        T = tokens_with_bos.shape[1]
        # Causal mask: position t must not attend to positions > t.
        tgt_mask = torch.triu(
            torch.ones(T, T, device=tokens_with_bos.device), diagonal=1).bool()
        tgt_key_padding_mask = torch.arange(T, device=tokens_with_bos.device)[None, :] >= token_with_bos_lens[:, None]
        return memory_key_padding_mask, tgt_key_padding_mask, tgt_mask

    def recognize(self, inputs, inputs_length):
        """Greedy decoding, one utterance at a time.

        Args:
            inputs: fbank features, shape ``(N, S, F)``.
            inputs_length: valid input lengths, shape ``(N,)``.

        Returns:
            List of decoded id sequences, one entry per utterance.
        """
        enc_outputs, encode_output_lengths = self.encoder(inputs, inputs_length)

        output_seq = []
        for i in range(inputs.shape[0]):
            # Per-layer cache slots (one None per decoder layer) for
            # incremental decoding state.
            state = [enc_outputs[i].unsqueeze(0), encode_output_lengths[i].unsqueeze(0),
                     [None] * self.decoder.num_layers]
            out = self.decoder.greedy_search(state)
            output_seq.append(out)

        return output_seq
