import torch
import torch.nn as nn
from src.net.attention import AdditiveAttention


class BaseDecoder(nn.Module):
    """Embedding -> (optionally packed) LSTM -> linear projection decoder.

    Args:
        vocab_size: size of the input token vocabulary.
        embed_size: dimensionality of the token embeddings.
        hidden_size: LSTM hidden state size.
        output_size: size of the projected output (typically the vocab size).
        n_layers: number of stacked LSTM layers.
        dropout: inter-layer LSTM dropout; only applied when n_layers > 1.
        share_weight: tie the embedding matrix to the output projection
            weight. Shape-compatible only when vocab_size == output_size
            and embed_size == hidden_size.
    """

    def __init__(self, vocab_size, embed_size, hidden_size, output_size, n_layers, dropout=0.2, share_weight=False):
        super(BaseDecoder, self).__init__()

        # Index 0 is the padding token: its embedding stays zero and
        # receives no gradient.
        self.embedding = nn.Embedding(vocab_size, embed_size, padding_idx=0)

        # BUG FIX: the LSTM consumes embedding vectors, so input_size must be
        # embed_size. The original `input_size=hidden_size` crashed whenever
        # embed_size != hidden_size.
        self.lstm = nn.LSTM(
            input_size=embed_size,
            hidden_size=hidden_size,
            num_layers=n_layers,
            batch_first=True,
            # PyTorch warns if dropout is set on a single-layer LSTM.
            dropout=dropout if n_layers > 1 else 0
        )

        self.output_proj = nn.Linear(hidden_size, output_size)

        if share_weight:
            # Weight tying between input embedding and output projection.
            self.embedding.weight = self.output_proj.weight

    def forward(self, inputs, length=None, hidden=None):
        """Run the decoder over a (padded) batch of token ids.

        Args:
            inputs: LongTensor of token ids, shape (batch, seq_len).
            length: optional 1-D tensor of true sequence lengths. When given,
                the batch is sorted and packed so the LSTM skips padding.
            hidden: optional initial LSTM state (h_0, c_0).

        Returns:
            (outputs, hidden): `outputs` has shape (batch, seq_len, output_size)
            (seq_len may shrink to max(length) when packing is used);
            `hidden` is the final LSTM state, in the caller's batch order.
        """
        embed_inputs = self.embedding(inputs)

        if length is not None:
            # pack_padded_sequence requires lengths in descending order;
            # sort here and restore the original order after the LSTM.
            sorted_seq_lengths, indices = torch.sort(length, descending=True)
            embed_inputs = embed_inputs[indices]
            embed_inputs = nn.utils.rnn.pack_padded_sequence(
                embed_inputs, sorted_seq_lengths.cpu(), batch_first=True)

        # Coalesce weights into one contiguous chunk; no-op on CPU, avoids a
        # non-contiguous-memory warning on CUDA.
        self.lstm.flatten_parameters()
        outputs, hidden = self.lstm(embed_inputs, hidden)

        if length is not None:
            _, desorted_indices = torch.sort(indices, descending=False)
            outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True)
            outputs = outputs[desorted_indices]
            # BUG FIX: the final (h, c) must be de-sorted too, otherwise its
            # batch rows do not line up with `outputs` for the caller.
            hidden = tuple(h[:, desorted_indices] for h in hidden)

        outputs = self.output_proj(outputs)

        return outputs, hidden


class LASDecoderBase(nn.Module):
    """Abstract base class for LAS (Listen-Attend-Spell) decoders.

    Concrete subclasses implement `forward`, consuming encoder output and
    target character ids and producing per-step vocabulary scores.
    """

    def __init__(self, **kwargs):
        super(LASDecoderBase, self).__init__(**kwargs)

    def forward(self, X, enc_states, enc_len):
        """Decode against encoder output; must be overridden.

        Arguments
        ---------
        X : tensor
            shape :(batch_size, num_chars)
        enc_states : tensor
            encoder states output
            shape :(batch_size, num_steps, enc_num_hidden)
        enc_len : tensor
            encoder states output length
            shape :(batch_size)

        Return
        ---------
        outputs : tensor
            shape :(num_chars, batch_size, vocab_size)

        Raises
        ------
        NotImplementedError
            always; subclasses provide the implementation.
        """
        raise NotImplementedError

class LASLstmDecoder(LASDecoderBase):
    """LSTM-based LAS decoder with additive attention over encoder states.

    Each decoding step queries the encoder states with the previous
    top-layer hidden state, concatenates the attention context with the
    embedded previous character, and feeds that to a batch-first LSTM.

    NOTE(review): the attention is built with decoder_hidden_size for both
    query and key sizes, and the context is concatenated where the LSTM
    expects encoder_hidden_size features -- this implicitly assumes
    enc_num_hidden == decoder_hidden_size; confirm against the encoder.
    """

    def __init__(self, vocab_size, embed_size, encoder_hidden_size, decoder_hidden_size, n_layers,
                 dropout=0, **kwargs):
        super(LASLstmDecoder, self).__init__(**kwargs)
        self.n_layers = n_layers
        self.decoder_hidden_size = decoder_hidden_size
        self.attention = AdditiveAttention(decoder_hidden_size, decoder_hidden_size,
                                           decoder_hidden_size, dropout)
        self.embedding = nn.Embedding(vocab_size, embed_size)
        # Step input is [attention context ; embedded previous character].
        self.rnn = nn.LSTM(embed_size + encoder_hidden_size, decoder_hidden_size, n_layers,
                           dropout=dropout, batch_first=True)
        self.dense = nn.Linear(decoder_hidden_size, vocab_size)

    def forward_step(self, inp, enc_states, enc_len, hs):
        """One step of the forward pass.

        Arguments
        ---------
        inp : tensor
            embedding of the previous character
            shape :(batch_size, embed_size)
        enc_states : tensor
            encoder states output
            shape :(batch_size, num_steps, enc_num_hidden)
        enc_len : tensor
            encoder states output length (used to mask the attention)
            shape :(batch_size)
        hs : tuple of tensor
            LSTM hidden state (h, c)
            shape :(num_layers, batch_size, decoder_hidden_size)

        Return
        ---------
        outputs : tensor
            vocabulary scores for this single step
            shape :(batch_size, 1, vocab_size)
        hs : tuple of tensor
            updated LSTM hidden state
        """

        # Attention query is the top layer's hidden state: (batch_size, 1, hidden).
        query = hs[0][-1].unsqueeze(1)
        # Shape of `context`: (`batch_size`, 1, `num_hiddens`)
        context = self.attention(query, enc_states, enc_states,
                                 enc_len)
        # Concatenate on the feature dimension
        cell_inp = torch.cat((context, torch.unsqueeze(inp, dim=1)), dim=-1)
        # Single LSTM step; cell_inp is (batch_size, 1, embed_size + num_hiddens).
        cell_out, hs = self.rnn(cell_inp, hs)

        outputs = self.dense(cell_out)

        return outputs, hs

    def forward(self, X, enc_states, enc_len):
        """Teacher-forced decode over a whole target sequence.

        Arguments
        ---------
        X : tensor
            target character ids
            shape :(batch_size, num_chars)
        enc_states : tensor
            encoder states output
            shape :(batch_size, num_steps, enc_num_hidden)
        enc_len : tensor
            encoder states output length
            shape :(batch_size)

        Return
        ---------
        outputs : tensor
            per-step vocabulary scores; concatenation over steps on dim=1
            gives shape :(batch_size, num_chars, vocab_size)
        """

        batch_size = enc_states.shape[0]
        # Zero initial (h, c), created on the encoder states' device/dtype.
        hs = (
            enc_states.new_zeros((self.n_layers, batch_size, self.decoder_hidden_size)),
            enc_states.new_zeros((self.n_layers, batch_size, self.decoder_hidden_size)))
        # Shape of the embedded X after permute: (num_chars, batch_size, embed_size)
        X = self.embedding(X).permute(1, 0, 2)

        outputs = []
        for x in X:  # iterate over the num_chars time steps
            # x : shape(batch_size, embed_size)
            out, hs = self.forward_step(
                x, enc_states, enc_len, hs)
            outputs.append(out)
        # Each step output is (batch_size, 1, vocab_size); concatenating on
        # dim=1 yields (batch_size, num_chars, vocab_size).
        outputs = torch.cat(outputs, dim=1)

        return outputs

    def greedy_search(self, inputs, enc_states, enc_len, maxlen=30):
        """Greedy decoding; batch size must be 1.

        Arguments
        ---------
        inputs : tensor
            batch_size is 1; used only for device info.
        enc_states : tensor
            encoder states output
            shape :(batch_size, num_steps, enc_num_hidden)
        enc_len : tensor
            encoder states output length
            shape :(batch_size)
        maxlen : int
            maximum number of characters to emit.

        Return
        ---------
        output_seq : list
            predicted character ids, without the terminating eos.
        """
        # Token id 0 doubles as sos (the seed input here) and eos (the
        # stop condition below).
        dec_x = torch.tensor([0], dtype=torch.long, device=inputs.device)  # (batch_size(1))

        # decoder initial hidden state: zero (h, c)
        hs = (
            enc_states.new_zeros((self.n_layers, 1, self.decoder_hidden_size)),
            enc_states.new_zeros((self.n_layers, 1, self.decoder_hidden_size)))

        # accumulates one predicted id per emitted step
        output_seq = []
        self._attention_weights = []
        for _ in range(maxlen):
            x = self.embedding(dec_x)
            Y, hs = self.forward_step(x, enc_states, enc_len, hs)

            # We use the token with the highest prediction likelihood as the input
            # of the decoder at the next time step
            dec_x = Y.argmax(dim=2).squeeze(dim=1)

            pred = dec_x.type(torch.int32).item()

            if pred == 0:  # 0 denotes eos
                break
            self._attention_weights.append(self.attention.attention_weights.detach( ).cpu( ))
            output_seq.append(pred)

        return output_seq

    def beam_search(self, inputs, enc_states, enc_len, maxlen=30, beam=4, nbest=3):
        """Beam-search decoding; batch size must be 1.

        Hypotheses are dicts with keys:
          'yseq'       : list of character ids, starting with sos (0)
          'score_all'  : accumulated score over the generated tokens
          'score_norm' : score_all normalized by generated length
          'h_prev'     : LSTM state to resume this hypothesis from

        NOTE(review): scores are accumulated by *adding* softmax
        probabilities; the commented-out log_softmax below suggests a
        log-probability sum may have been intended -- confirm which
        ranking is wanted.

        Returns the best finished hypothesis's ids, sos/eos stripped.
        """

        # Zero initial (h, c) for the single seed hypothesis.
        hs = (
            enc_states.new_zeros((self.n_layers, 1, self.decoder_hidden_size)),
            enc_states.new_zeros((self.n_layers, 1, self.decoder_hidden_size)))

        # Token id 0 doubles as sos (seed) and eos (termination).
        hyp = {'score_all': 0.0, 'score_norm': 0.0, 'yseq': [0], 'h_prev': hs}
        hyps = [hyp]
        ended_hyps = []

        for i in range(maxlen):
            hyps_best_kept = []
            for hyp in hyps:

                # Re-embed this hypothesis's latest token and advance one step.
                dec_x = torch.tensor([hyp["yseq"][i]], dtype=torch.long, device=inputs.device)
                x = self.embedding(dec_x)
                hs = hyp["h_prev"]
                local_scores, hs = self.forward_step(x, enc_states, enc_len, hs)
                # local_scores = torch.nn.functional.log_softmax(local_scores, dim=-1)
                local_scores = torch.nn.functional.softmax(local_scores, dim=-1)
                # We use the token with the highest k prediction likelihood as the input
                # of the decoder at the next time step
                local_best_scores, local_best_ids = torch.topk(
                    local_scores, beam, dim=2)

                # Expand this hypothesis with each of the top-`beam` tokens.
                for j in range(beam):
                    new_hyp = {'h_prev': hs, 'score_all': hyp['score_all'] + local_best_scores[0, 0, j],
                               'yseq': [0] * (1 + len(hyp['yseq']))}
                    # Length-normalize, excluding the leading sos token.
                    new_hyp['score_norm'] = new_hyp['score_all'] / (len(new_hyp["yseq"]) - 1)
                    new_hyp['yseq'][:len(hyp['yseq'])] = hyp['yseq']
                    new_hyp['yseq'][len(hyp['yseq'])] = int(
                        local_best_ids[0, 0, j])
                    # will be (2 x beam) hyps at most
                    hyps_best_kept.append(new_hyp)

                # Re-truncate to the top `beam` candidates after each expansion.
                hyps_best_kept = sorted(hyps_best_kept,
                                        key=lambda k: k['score_norm'],
                                        reverse=True)[:beam]
            hyps = hyps_best_kept
            # Force-terminate every surviving hypothesis at the last step.
            if i == maxlen - 1:
                for hyp in hyps:
                    hyp['yseq'].append(0)

            # Move ended hypotheses to the final list and drop them from the
            # active set (this may leave fewer than `beam` active hypotheses).
            remained_hyps = []
            for hyp in hyps:
                if hyp['yseq'][-1] == 0:
                    ended_hyps.append(hyp)
                else:
                    remained_hyps.append(hyp)
            hyps = remained_hyps
            if len(hyps) <= 0:
                break

        nbest_hyps = sorted(ended_hyps, key=lambda k: k['score_norm'], reverse=True)[
                     :min(len(ended_hyps), nbest)]

        # Strip the leading sos and trailing eos from the best hypothesis.
        return nbest_hyps[0]['yseq'][1:-1]

    @property
    def attention_weights(self):
        # Only populated by greedy_search(); accessing this before a greedy
        # decode raises AttributeError.
        return self._attention_weights
