import torch
import torch.nn as nn
from utils.rnn_factory import rnn_factory
from modules.global_attn import GlobalAttention
from modules.decoders.base_decoder import BaseDecoder
from utils.common import tuple_map


class RNNCellState:
    """Container for the decoder's recurrent state.

    Attributes:
        hidden: for LSTM this is the pair (hidden, cell); for GRU and
            vanilla RNN it is the 1-tuple (hidden,).
        input_feed: the decoder output of the previous step, fed back
            as part of the next step's input.
    """
    hidden: tuple = None
    input_feed: torch.Tensor = None

    def __init__(self, hidden=None, input_feed=None):
        self.input_feed = input_feed
        self.hidden = hidden


class RNNDecoderBase(BaseDecoder):
    """Abstract base class for the two RNN decoder variants.

    Args:
        rnn_type: one of ['GRU', 'LSTM', 'RNN'].
        bidirectional_encoder: whether the paired encoder is bidirectional.
        num_layers: number of stacked RNN layers.
        hidden_size: decoder hidden size; when the encoder is bidirectional
            this should be twice the encoder hidden size.
        attn_type: one of ['general', 'dot', 'mlp'], or "none"/None to
            disable attention.
        dropout: dropout probability applied to the decoder outputs.
        embedding: target-side embedding module.
    """

    def __init__(self,
                 rnn_type,
                 bidirectional_encoder,
                 num_layers,
                 hidden_size,
                 attn_type="dot",
                 dropout=0.0,
                 embedding=None, ):
        super().__init__(attentional=attn_type != "none" and attn_type is not None)

        self.bidirectional_encoder = bidirectional_encoder
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.embedding = embedding
        self.dropout = nn.Dropout(dropout)
        self.state = RNNCellState()

        # Subclasses decide the RNN input size (embedding only for the
        # standard decoder, embedding + hidden for input feeding).
        self.input_size = None
        self.set_input_size()

        self.rnn = rnn_factory(
            rnn_type,
            input_size=self.input_size,
            hidden_size=self.hidden_size,
            num_layers=num_layers,
            batch_first=True,
            dropout=dropout
        )

        if self.attentional:
            self.attn = GlobalAttention(
                dim=hidden_size,
                attn_type=attn_type,
            )

    def init_state(self, encoder_final):
        """Initialize decoder state with the last state of the encoder."""

        def _fix_enc_hidden(hidden):
            # Merge forward/backward directions of a bidirectional encoder:
            # [2*num_layers, batch, h] -> [num_layers, batch, 2*h].
            return torch.cat([hidden[0:hidden.size(0):2],
                              hidden[1:hidden.size(0):2]], 2)

        if self.bidirectional_encoder:
            encoder_final = tuple_map(_fix_enc_hidden, encoder_final)

        if isinstance(encoder_final, tuple):  # LSTM: (hidden, cell)
            self.state.hidden = encoder_final
        else:  # GRU / RNN: wrap for a uniform tuple representation
            self.state.hidden = (encoder_final,)

        batch_size = self.state.hidden[0].shape[1]  # [num_layers, batch, hidden_size]
        device = self.state.hidden[0].device
        # The input feed starts at zero and is refreshed after each forward().
        self.state.input_feed = torch.zeros((batch_size, self.hidden_size),
                                            device=device)

    def forward(self, tgt, memory_bank, memory_lengths):
        """Decode `tgt` against `memory_bank` and update the cached state.

        Returns:
            dec_outs: [batch, max_tgt_len, hidden_size]
            attn: [batch, max_tgt_len, max_src_len], or None when
                attention is disabled.
        """
        dec_state, dec_outs, attn = self._run_forward_pass(
            tgt, memory_bank, memory_lengths=memory_lengths)
        if not isinstance(dec_state, tuple):
            dec_state = (dec_state,)  # GRU / RNN return a bare tensor
        self.state.hidden = dec_state
        self.state.input_feed = dec_outs[:, -1, :]
        return dec_outs, attn

    def set_input_size(self):
        """Set ``self.input_size`` for the RNN; implemented by subclasses."""
        raise NotImplementedError


class StdRNNDecoder(RNNDecoderBase):
    """Standard RNN decoder: run the whole target sequence through the
    RNN, then apply global attention over the encoder memory bank.

    Args:
        rnn_type: one of ['GRU', 'LSTM', 'RNN'].
        bidirectional_encoder: whether the encoder is bidirectional.
        num_layers: number of stacked RNN layers.
        hidden_size: decoder hidden size; twice the encoder hidden size
            when the encoder is bidirectional.
        attn_type: one of ['general', 'dot', 'mlp'].
        dropout: dropout probability.
        embedding: target-side embedding module.

    Input:
        tgt: [batch, max_tgt_len]
        memory_bank: [batch, max_src_len, hidden_size]
        memory_lengths: [batch,]

    Output:
        dec_outs: [batch, max_tgt_len, hidden_size]
        attn: [batch, max_tgt_len, max_src_len]
    """

    def _run_forward_pass(self, tgt, memory_bank, memory_lengths=None):
        attn = None
        emb = self.embedding(tgt)  # [batch, tgt_len, embedding_dim]
        # GRU/RNN take the bare hidden tensor; LSTM takes the (h, c) tuple.
        if isinstance(self.rnn, (nn.GRU, nn.RNN)):
            rnn_output, dec_state = self.rnn(emb, self.state.hidden[0])
        else:
            rnn_output, dec_state = self.rnn(emb, self.state.hidden)

        # rnn_output: [batch, tgt_len, hidden_size]

        # Calculate the attention over the full output sequence at once.
        if not self.attentional:
            dec_outs = rnn_output
        else:
            dec_outs, attn = self.attn(
                tgt=rnn_output,
                memory_bank=memory_bank,
                memory_lengths=memory_lengths
            )
            # dec_outs: [batch, tgt_len, hidden_size]
            # attn: [batch, max_tgt_len, max_src_len]
        dec_outs = self.dropout(dec_outs)
        return dec_state, dec_outs, attn

    def set_input_size(self):
        """The RNN consumes the embedding alone."""
        self.input_size = self.embedding.embedding_dim


class InputFeedRNNDecoder(RNNDecoderBase):
    """Input-feeding RNN decoder: each step's input is the current target
    embedding concatenated with the previous step's attentional output.

    Args:
        rnn_type: one of ['GRU', 'LSTM', 'RNN'].
        bidirectional_encoder: whether the encoder is bidirectional.
        num_layers: number of stacked RNN layers.
        hidden_size: decoder hidden size; twice the encoder hidden size
            when the encoder is bidirectional.
        attn_type: one of ['general', 'dot', 'mlp'].
        dropout: dropout probability.
        embedding: target-side embedding module.

    Input:
        tgt: [batch, max_tgt_len]
        memory_bank: [batch, max_src_len, hidden_size]
        memory_lengths: [batch,]

    Output:
        dec_outs: [batch, max_tgt_len, hidden_size]
        attn: [batch, max_tgt_len, max_src_len]
    """

    def _run_forward_pass(self, tgt, memory_bank, memory_lengths=None):
        step_attns = []
        step_outputs = []

        emb = self.embedding(tgt)  # [batch, tgt_len, embedding_dim]
        hidden = self.state.hidden  # tuple: (h, c) for LSTM, (h,) otherwise
        if len(hidden) == 1:
            hidden = hidden[0]  # GRU / RNN expect the bare tensor
        feed = self.state.input_feed.unsqueeze(1)  # [batch, 1, hidden_size]

        # Decode one target position at a time so the previous attentional
        # output can be fed into the next step.
        for step in range(emb.size(1)):
            step_emb = emb[:, step].unsqueeze(1)  # [batch, 1, embedding_dim]
            rnn_in = torch.cat([step_emb, feed], dim=-1)
            rnn_out, hidden = self.rnn(rnn_in, hidden)
            # rnn_out: [batch, 1, hidden_size]
            if self.attentional:
                ctx, attn_t = self.attn(
                    tgt=rnn_out,
                    memory_bank=memory_bank,
                    memory_lengths=memory_lengths
                )
                # ctx: [batch, 1, hidden_size]; attn_t: [batch, 1, max_src_len]
                step_attns.append(attn_t.squeeze(1))
            else:
                ctx = rnn_out
            step_outputs.append(ctx.squeeze(1))
            feed = ctx  # becomes part of the next step's input

        # [batch, tgt_len, src_len] when attentional, else None
        attn_history = (torch.stack(step_attns, dim=1)
                        if self.attentional else None)
        # [batch, tgt_len, hidden_size]
        output_history = torch.stack(step_outputs, dim=1)
        return hidden, output_history, attn_history

    def set_input_size(self):
        """The RNN consumes the embedding plus the fed-back hidden output."""
        self.input_size = self.hidden_size + self.embedding.embedding_dim


if __name__ == '__main__':
    # Removed the stale commented-out smoke-test code that previously
    # lived here: it referenced an undefined variable (t_gru_ef) and was
    # no longer runnable. Write real tests in a test module instead.
    pass
