import math
import torch
import torch.nn as nn
# import d2l

from src.net.multi_head_attention import *


class DecoderBlock(nn.Module):
    # The `i`-th block in the decoder: masked self-attention over the
    # (cached) decoder stream, encoder-decoder attention, position-wise FFN,
    # each followed by an add-&-norm.
    def __init__(self, key_size, query_size, value_size, num_hiddens,
                 norm_shape, ffn_num_input, ffn_num_hiddens, num_heads,
                 dropout, i, **kwargs):
        super(DecoderBlock, self).__init__(**kwargs)
        # Block index; the parent decoder uses it to pick this block's cached
        # state out of its per-layer state list.
        self.i = i
        self.self_attn = MultiHeadAttention(key_size, query_size,
                                            value_size, num_hiddens,
                                            num_heads, dropout)
        self.addnorm1 = AddNorm(norm_shape, dropout)
        self.multihead_attn = MultiHeadAttention(key_size, query_size,
                                                 value_size, num_hiddens,
                                                 num_heads, dropout)
        self.addnorm2 = AddNorm(norm_shape, dropout)
        self.ffn = PositionWiseFFN(ffn_num_input, ffn_num_hiddens,
                                   num_hiddens)
        self.addnorm3 = AddNorm(norm_shape, dropout)

    def forward(self, X, memory, previous):
        r"""Run one decoder block.

        ``previous`` concatenated with ``X`` forms the keys/values of the
        masked self-attention (this block's decoding cache).

        Args:
            X: layer input for the current step(s).
            memory: ``[enc_outputs, enc_valid_lens]`` from the encoder's
                last layer.
            previous: this block's cached state from earlier decoding steps;
                ``None`` during training (all steps processed at once).
        Shape:
            Inputs:
            - X: :math:`(N, S, F)` where N is the batch size, S is the input
              sequence length (S is 1 when decoding step by step), and F is
              the feature dimension.
            - previous: :math:`(N, T, F)` where T is the number of previously
              decoded steps accumulated so far.
            Outputs:
            - output: :math:`(N, S, F)`, same shape as ``X``.
        """
        enc_outputs, enc_valid_lens = memory[0], memory[1]
        # During training, all the tokens of any output sequence are processed
        # at the same time, so `previous` is `None` as initialized.
        # When decoding any output sequence token by token during prediction,
        # `previous` contains representations of the decoded output at
        # the `i`-th block up to the current time step
        if previous is None:
            key_values = X
        else:
            # print("previous[self.i]:", previous.shape)  # [1, t, 320]
            # print("X:", X.shape)  # ([1, 1, 320]
            key_values = torch.cat((previous, X), axis=1)

        if self.training:
            batch_size, num_steps, _ = X.shape
            # Shape of `dec_valid_lens`: (`batch_size`, `num_steps`), where
            # every row is [1, 2, ..., `num_steps`] — causal mask: step t may
            # only attend to positions <= t.
            dec_valid_lens = torch.arange(1, num_steps + 1,
                                          device=X.device).repeat(
                batch_size, 1)
        else:
            # At inference the cache already contains only past positions,
            # so no mask is needed.
            dec_valid_lens = None

        # Self-attention
        X2 = self.self_attn(X, key_values, key_values, dec_valid_lens)
        Y = self.addnorm1(X, X2)
        # Encoder-decoder attention. Shape of `enc_outputs`:
        # (`batch_size`, `num_steps`, `num_hiddens`)
        Y2 = self.multihead_attn(Y, enc_outputs, enc_outputs, enc_valid_lens)
        Z = self.addnorm2(Y, Y2)
        return self.addnorm3(Z, self.ffn(Z))


class TransformerDecoder(nn.Module):
    """Transformer decoder: embedding + positional encoding, a stack of
    ``DecoderBlock``s, and a final linear projection to vocabulary logits.

    Supports teacher-forced training (all steps at once) and step-by-step
    decoding via a per-layer cache (``previous``/``now`` lists).
    """

    def __init__(self, vocab_size, key_size, query_size, value_size,
                 num_hiddens, norm_shape, ffn_num_input, ffn_num_hiddens,
                 num_heads, num_layers, dropout, **kwargs):
        super(TransformerDecoder, self).__init__(**kwargs)
        self.num_hiddens = num_hiddens
        self.num_layers = num_layers
        self.embedding = nn.Embedding(vocab_size, num_hiddens)
        self.pos_encoding = PositionalEncoding(num_hiddens, dropout)
        self.blks = nn.Sequential()
        for i in range(num_layers):
            self.blks.add_module(
                "block" + str(i),
                DecoderBlock(key_size, query_size, value_size, num_hiddens,
                             norm_shape, ffn_num_input, ffn_num_hiddens,
                             num_heads, dropout, i))
        self.dense = nn.Linear(num_hiddens, vocab_size)

    def init_state(self, enc_outputs, enc_valid_lens, *args):
        # [encoder outputs, encoder valid lengths, per-layer decoder cache]
        return [enc_outputs, enc_valid_lens, [None] * self.num_layers]

    def forward(self, X, memory, previous=None, T_index=0):
        r"""
        Args:
            X: decoder input token ids.
            memory: list of memory, include encoder output and encoder valid lengths.
            previous: per-layer decoder cache. During training, it's None.
            T_index: start index for PositionalEncoding. During training, it's zero.
        Shape:
            Inputs:
                - X: :math:`(N, T)` where T is the target sequence length, N is the batch size.
                - memory: :list: [enc_memory, encode_memory_lengths, ...]
                - previous: :list: num_layers * (N, T, F)
            Outputs:
                - output: :math:`(N, T, V)` where T is the target sequence length, N is the
                  batch size, V is the vocabulary size.
                - now: :list: num_layers * (N, T, F) — each layer's *input* this call,
                  to be concatenated onto the cache for the next step.
        """
        if not previous:
            previous = [None] * self.num_layers
        # Scale embeddings by sqrt(d) before adding positional encodings.
        X = self.pos_encoding(self.embedding(X) * math.sqrt(self.num_hiddens), T_index)
        # X: (N, T, E)
        # [0]: decoder self-attention weights, [1]: encoder-decoder weights.
        self._attention_weights = [[None] * len(self.blks) for _ in range(2)]
        now = []

        for i, blk in enumerate(self.blks):
            # Cache this layer's input: concatenated across steps it becomes
            # the self-attention keys/values for future steps.
            now.append(X)
            X = blk(X, memory, previous[i])
            # Decoder self-attention weights
            self._attention_weights[0][
                i] = blk.self_attn.attention.attention_weights.detach()
            # Encoder-decoder attention weights
            self._attention_weights[1][
                i] = blk.multihead_attn.attention.attention_weights.detach()
        return self.dense(X), now

    def cat_previous_and_now(self, previous, now):
        """Append the current step's per-layer states to the cache.

        Returns a new list; ``previous`` itself is not mutated.
        """
        res = [None] * self.num_layers

        for i in range(self.num_layers):
            if previous[i] is None:
                res[i] = torch.clone(now[i])
            else:
                res[i] = torch.cat((previous[i], now[i]), dim=1)

        return res

    def greedy_search(self, memory, max_len=50) -> list:
        r"""Greedy decoding; batch_size must be 1.

        Args:
            memory: list of memory, include encoder output and previous encoder inputs.
            max_len: Max length to decode.
        Shape:
            Inputs:
                - memory: :list: [enc_memory, encode_memory_lengths]
            Outputs:
                - output_seq: :math:`(T)` where T is the target sequence length.
        """
        output_seq = []
        # Start token id 0 (doubles as bos/eos in this vocab): shape (1, 1).
        dec_x = torch.unsqueeze(
            torch.tensor([0], dtype=torch.long, device=memory[0].device),
            dim=0)  # tensor([[0]])
        previous = [None] * self.num_layers
        for t in range(max_len):
            Y, now = self(dec_x, memory, previous, t)
            previous = self.cat_previous_and_now(previous, now)
            dec_x = Y.argmax(dim=2)
            pred = dec_x.type(torch.int32).squeeze(dim=1).item()

            if pred == 0:  # 0 is the eos id
                break
            output_seq.append(pred)
        return output_seq

    def beam_search(self, memory, max_len=50, beam=4, nbest=3):
        """Beam-search decoding; batch_size must be 1.

        Scores are length-normalized sums of log-probabilities; returns the
        best hypothesis' token ids without the leading bos / trailing eos.
        """
        # NOTE(review): per-branch caches are rebuilt from each hypothesis'
        # own `previous`, but all `beam` children of one hypothesis share the
        # same cache list object. That is safe only as long as cached tensors
        # are never mutated in place — confirm when touching the cache logic.
        hyp = {'previous': [None] * self.num_layers, 'score_all': 0.0, 'score_norm': 0.0, 'yseq': [0]}
        hyps = [hyp]
        ended_hyps = []

        for t in range(max_len):
            hyps_best_kept = []
            for hyp in hyps:
                # dec_x init:[[0]]
                dec_x = torch.tensor([[hyp["yseq"][t]]], dtype=torch.long, device=memory[0].device)
                previous = hyp["previous"]

                local_scores, now = self(dec_x, memory, previous, t)

                # FIX: accumulate log-probabilities (sum of logs == log of the
                # sequence probability). The original summed raw softmax
                # probabilities, which is not a valid sequence score.
                # topk indices are unchanged since log is monotonic.
                local_scores = nn.functional.log_softmax(local_scores, dim=-1)
                local_best_scores, local_best_ids = torch.topk(
                    local_scores, beam, dim=2)

                previous = self.cat_previous_and_now(previous, now)
                for j in range(beam):
                    new_hyp = {'previous': previous, 'score_all': hyp['score_all'] + local_best_scores[0, 0, j],
                               'yseq': [0] * (1 + len(hyp['yseq']))}
                    # Normalize by generated length so short hypotheses are
                    # not unfairly favored.
                    new_hyp['score_norm'] = new_hyp['score_all'] / (len(new_hyp["yseq"]) - 1)
                    new_hyp['yseq'][:len(hyp['yseq'])] = hyp['yseq']
                    new_hyp['yseq'][len(hyp['yseq'])] = int(
                        local_best_ids[0, 0, j])
                    # will be (beam x beam) candidates at most before pruning
                    hyps_best_kept.append(new_hyp)

                hyps_best_kept = sorted(hyps_best_kept,
                                        key=lambda k: k['score_norm'],
                                        reverse=True)[:beam]
            hyps = hyps_best_kept
            # Force-terminate every surviving hypothesis at the length limit.
            if t == max_len - 1:
                for hyp in hyps:
                    hyp['yseq'].append(0)

            # Move ended hypotheses (eos == 0 emitted) to the final list and
            # keep decoding only the unfinished ones.
            remained_hyps = []
            for hyp in hyps:
                if hyp['yseq'][-1] == 0:
                    ended_hyps.append(hyp)
                else:
                    remained_hyps.append(hyp)
            hyps = remained_hyps
            if not hyps:
                break

        nbest_hyps = sorted(ended_hyps, key=lambda k: k['score_norm'], reverse=True)[
                     :min(len(ended_hyps), nbest)]

        # Strip leading bos (0) and trailing eos (0).
        return nbest_hyps[0]['yseq'][1:-1]

    @property
    def attention_weights(self):
        # Filled by forward(): [self-attn weights, enc-dec attn weights],
        # each a per-layer list of detached tensors.
        return self._attention_weights


class TransformerDecoder_torch(nn.Module):
    """Transformer decoder built on ``nn.TransformerDecoder``.

    Embeds target token ids, adds positional encodings, runs the stacked
    ``nn.TransformerDecoderLayer``s, and projects to vocabulary logits.
    """

    def __init__(self, vocab_size, num_hiddens,
                 num_heads, num_layers, dropout, **kwargs):
        super(TransformerDecoder_torch, self).__init__(**kwargs)
        self.num_hiddens = num_hiddens
        self.num_layers = num_layers
        self.embedding = nn.Embedding(vocab_size, num_hiddens)
        self.pos_encoding = PositionalEncoding(num_hiddens, dropout)
        decoder_layer = nn.TransformerDecoderLayer(d_model=num_hiddens, nhead=num_heads)
        decoder_norm = nn.LayerNorm(num_hiddens)
        self.decoder = nn.TransformerDecoder(decoder_layer, num_layers, decoder_norm)
        self.dense = nn.Linear(num_hiddens, vocab_size)

        self.d_model = num_hiddens
        self.nhead = num_heads

    def forward(self, tgt, memory, tgt_key_padding_mask, memory_key_padding_mask=None, tgt_mask=None):
        """
        Args:
            tgt: target token ids, shape (N, T).
            memory: encoder output, shape (N, S, E).
            tgt_key_padding_mask: padding mask for `tgt`, shape (N, T).
            memory_key_padding_mask: padding mask for `memory`, shape (N, S).
            tgt_mask: causal attention mask for the target, shape (T, T).
        Returns:
            Logits of shape (T, N, vocab_size) — sequence-first, matching
            ``nn.TransformerDecoder``'s default layout.
        """
        # BUG FIX: the original permuted `tgt` with a 3-axis permute *before*
        # embedding. `tgt` holds integer ids of shape (N, T) (nn.Embedding
        # requires ids), so permute(1, 0, 2) raised at runtime. Embed and add
        # positional encodings while batch-first, consistent with how
        # PositionalEncoding is used elsewhere in this file.
        tgt = self.pos_encoding(self.embedding(tgt) * math.sqrt(self.num_hiddens))
        # nn.TransformerDecoder (no batch_first) expects (T, N, E) / (S, N, E).
        tgt, memory = tgt.permute(1, 0, 2), memory.permute(1, 0, 2)
        output = self.decoder(tgt, memory, tgt_mask=tgt_mask,
                              tgt_key_padding_mask=tgt_key_padding_mask,
                              memory_key_padding_mask=memory_key_padding_mask)
        # output: (T, N, E) -> (T, N, vocab_size)
        return self.dense(output)
