import torch.nn as nn
from modules.encoders.parallel_encoder import ParallelRnnEncoder
from typing import List, Union, Tuple
from torch import Tensor
from utils.common import tuple_map
from utils.rnn_factory import rnn_factory
from modules.global_attn import GlobalAttention
from layers.reducer import MeanReducer, MlpReducer
import torch
from utils.common import tuple_map


class ParallelRNNCellState:
    """Mutable container for the decoder's recurrent state.

    Attributes:
        hidden: for LSTM a ``(hidden, cell)`` tuple; for GRU/RNN a 1-tuple
            ``(hidden,)``. Tensors are layer-major: [num_layers, batch, hidden].
        input_feed: the previous decoder step's output, fed back as part of
            the next step's input ("input feeding"); shape [batch, feed_size].
    """

    def __init__(self, hidden: Tuple[Tensor, ...] = None, input_feed: Tensor = None):
        self.hidden = hidden
        self.input_feed = input_feed

    def batch_select(self, batch_indices: Tensor) -> None:
        """Keep only the selected batch entries (e.g. beam-search pruning).

        Hidden tensors carry the batch on dim 1, input_feed on dim 0.
        """
        def select_batch(t: Tensor) -> Tensor:
            return t.index_select(1, batch_indices)

        self.hidden = tuple_map(select_batch, self.hidden)
        self.input_feed = self.input_feed.index_select(0, batch_indices)


class ParallelRNNDecoder(nn.Module):
    """Input-feeding RNN decoder that attends over multiple parallel encoders.

    Args:
        rnn_type : one of "GRU", "LSTM", "RNN"
        num_encoders : number of parallel encoders
        bidirectional_encoders : whether the encoders are bidirectional
        num_layers : number of decoder layers
        hidden_size : decoder hidden size; when the encoders are bidirectional
            this should be twice the encoder hidden size
        reduce : pooling method ("mlp" or "mean") used to merge the parallel
            encoder final states into one decoder initial state
        dropout : dropout probability
        embedding : target-side embedding layer

    Init State:
        states : num_encoders * [layers * directions, batch, encoder_hidden]

    Inputs:
        tgt : [batch, tgt_len]
        memory_banks : num_encoders * [batch, src_len, directions * encoder_hidden]
        memory_lengths : num_encoders * [batch, src_len,]

    Outputs:
        dec_outs : [batch, tgt_len, num_encoders * hidden]
        attn : num_encoders * [batch, tgt_len, src_len]
    """

    def __init__(self,
                 rnn_type,
                 num_encoders,
                 bidirectional_encoders: bool,
                 num_layers,
                 hidden_size,
                 reduce="mlp",
                 dropout=0.0,
                 embedding=None,
                 ):
        super().__init__()

        # Encoder-related configuration.
        self.num_encoders = num_encoders
        self.bidirectional_encoders = bidirectional_encoders
        self.encoders_directions = 2 if bidirectional_encoders else 1
        self.encoders_type = rnn_type
        self.hidden_size = hidden_size

        # Reducers pool the parallel encoder final states into one tensor.
        # LSTM needs two of them (hidden and cell); GRU/RNN need one.
        if reduce == "mlp":
            def _make_reducer():
                return MlpReducer(num_encoders, self.hidden_size, dropout=dropout)
        elif reduce == "mean":
            def _make_reducer():
                return MeanReducer(dropout=dropout)
        else:
            raise ValueError(f"Unsupported reduce method: {reduce}")
        if self.encoders_type == "LSTM":
            self.h_reducer = _make_reducer()
            self.c_reducer = _make_reducer()
        else:
            self.reducer = _make_reducer()

        # Decoder hyper-parameters.
        self.state = ParallelRNNCellState()
        self.num_layers = num_layers
        self.embedding = embedding
        # NOTE(review): self.dropout is registered but never applied in the
        # forward pass below — confirm whether output dropout was intended.
        self.dropout = nn.Dropout(dropout)
        # Input feeding: each step consumes the token embedding concatenated
        # with the previous step's concatenated context vectors.
        self.input_size = embedding.embedding_dim + (num_encoders * self.hidden_size)
        self.rnn = rnn_factory(
            rnn_type,
            input_size=self.input_size,
            hidden_size=self.hidden_size,
            num_layers=self.num_layers,
            batch_first=True,
            dropout=dropout
        )

        # One attention module per encoder memory bank.
        self.attn_list = nn.ModuleList([GlobalAttention(
            dim=hidden_size,
            attn_type="dot",
        ) for _ in range(num_encoders)])

    def init_state(self, states):
        """Initialize the decoder state from the encoders' final states.

        Args:
            states: num_encoders elements; each is a (h, c) tuple for LSTM
                encoders or a bare hidden tensor for GRU/RNN encoders, shaped
                [layers * directions, batch, encoder_hidden].
        """
        def _fix_enc_hidden(hidden):
            # Merge forward/backward directions:
            # [directions * layers, batch, h] -> [layers, batch, directions * h]
            return torch.cat([hidden[0:hidden.size(0):2], hidden[1:hidden.size(0):2]], 2)

        # Normalize every encoder state to a tuple of tensors.
        encoders_hidden: List[Tuple[Tensor]] = []
        for each_state in states:
            if self.bidirectional_encoders:
                each_state = tuple_map(_fix_enc_hidden, each_state)
            if isinstance(each_state, tuple):  # LSTM: (hidden, cell)
                encoders_hidden.append(each_state)
            else:  # GRU / RNN: bare hidden tensor
                encoders_hidden.append((each_state,))

        # Pool the parallel states into a single initial decoder state.
        if self.encoders_type in ['RNN', "GRU"]:
            hidden_states = self.reducer([each[0] for each in encoders_hidden])
            self.state.hidden = (hidden_states,)
        else:  # LSTM: reduce hidden and cell states independently.
            hidden_states = self.h_reducer([each[0] for each in encoders_hidden])
            cell_states = self.c_reducer([each[1] for each in encoders_hidden])
            self.state.hidden = (hidden_states, cell_states)

        # Zero-initialize the input feed: [batch, num_encoders * hidden].
        batch_size = self.state.hidden[0].shape[1]
        h_size = (batch_size, self.hidden_size * self.num_encoders)
        device = self.state.hidden[0].device
        self.state.input_feed = torch.zeros(h_size, device=device)

    def forward(self, tgt, memory_banks: List[Tensor], memory_lengths: List[Tensor]):
        """Decode tgt, updating self.state for the next call.

        Returns:
            dec_outs: [batch, tgt_len, num_encoders * hidden]
            attn: num_encoders * [batch, tgt_len, src_len]
        """
        dec_state, dec_outs, attn = self._run_forward_pass(tgt, memory_banks, memory_lengths=memory_lengths)
        if not isinstance(dec_state, tuple):
            dec_state = (dec_state,)  # GRU/RNN return a bare hidden tensor
        self.state.hidden = dec_state
        # Carry the last step's context vectors into the next decoding call.
        self.state.input_feed = dec_outs[:, -1, :]
        return dec_outs, attn

    def _run_forward_pass(self, tgt, memory_banks: List[Tensor], memory_lengths: List[Tensor]):
        """Step through tgt one token at a time with input feeding.

        Returns (dec_state, dec_outs, attn); see forward() for shapes.
        """
        attn_history = []    # tgt_len * num_encoders * [batch, 1, src_len]
        output_history = []  # tgt_len * [batch, num_encoders * hidden]

        emb = self.embedding(tgt)
        dec_state = self.state.hidden
        if len(dec_state) == 1:
            # GRU/RNN: nn.GRU/nn.RNN expect the tensor itself, not a 1-tuple.
            dec_state = dec_state[0]
        input_feed = self.state.input_feed.unsqueeze(1)  # [batch, 1, feed_size]

        for emb_t in emb.split(1, dim=1):
            decoder_input = torch.cat([emb_t, input_feed], -1)
            rnn_output, dec_state = self.rnn(decoder_input, dec_state)

            # Query every encoder's memory bank with the same decoder output.
            attn_outs = [
                attn(
                    tgt=rnn_output,
                    memory_bank=memory_banks[idx],
                    memory_lengths=memory_lengths[idx]
                ) for idx, attn in enumerate(self.attn_list)
            ]

            context_vector = torch.cat([each[0] for each in attn_outs], -1)
            step_attns = [each[1] for each in attn_outs]

            attn_history.append(step_attns)
            output_history.append(context_vector.squeeze(1))
            input_feed = context_vector

        # Transpose the time-major history to encoder-major, then concatenate
        # over time: num_encoders * [batch, tgt_len, src_len].
        attn_history = [torch.cat(per_encoder, dim=1) for per_encoder in zip(*attn_history)]

        output_history = torch.stack(output_history, dim=1)
        return dec_state, output_history, attn_history


if __name__ == '__main__':
    # Smoke test: two identical bidirectional GRU encoders feed the decoder.
    # (torch is already imported at module level; no local re-import needed.)
    from modules.encoders.rnn_encoder import RNNEncoder

    t_embedding = torch.nn.Embedding(12, 7, 0)
    # Right-padded batch of 4 sequences; the same batch is fed to both encoders.
    t_src = torch.tensor([
        [1, 2, 3, 4, 5],
        [2, 3, 4, 5, 0],
        [3, 4, 5, 0, 0],
        [4, 5, 0, 0, 0]
    ])
    t_len = torch.tensor([5, 4, 3, 2])
    t_src_list = [t_src, t_src]
    t_src_len = [t_len, t_len]

    # Two encoders with identical configuration (independent random init).
    rnn_encoders = [
        RNNEncoder(
            rnn_type="GRU",
            bidirectional=True,
            num_layers=2,
            hidden_size=32,
            dropout=0.0,
            embedding=t_embedding
        ) for _ in range(2)
    ]
    pe = ParallelRnnEncoder(
        encoders_list=rnn_encoders
    )
    d1, d2, d3 = pe(t_src_list, t_src_len)

    ttgt = torch.tensor([
        [1, 2, 3, 4, 5],
        [2, 3, 4, 5, 0],
        [3, 4, 5, 0, 0],
        [4, 5, 0, 0, 0]
    ])
    pd = ParallelRNNDecoder(
        rnn_type="GRU",
        num_encoders=2,
        bidirectional_encoders=True,
        num_layers=2,
        hidden_size=64,  # 2 * encoder hidden, since the encoders are bidirectional
        reduce='mean',
        dropout=0.2,
        embedding=t_embedding
    )
    pd.init_state(states=d1)
    pd.forward(tgt=ttgt, memory_banks=d2, memory_lengths=d3)
