from typing import List, Tuple

import torch
import torch.nn as nn
from ginnm import tuple_map
from torch import Tensor

from mec.modules.global_attn import GlobalAttention
from mec.modules.reducer import MeanReducer, MlpReducer
from mec.utils.rnn import rnn_factory


class ParallelRNNCellState:
    """
    Mutable container for the decoder's recurrent state.

    Attributes:
        hidden: for LSTM a (hidden, cell) pair, for GRU/RNN a 1-tuple
            (hidden,); each tensor is laid out [num_layers, batch, hidden].
        input_feed: the previous step's decoder output, fed back as part of
            the next step's input ([batch, num_encoders * hidden]).
    """
    hidden: Tuple[Tensor] = None
    input_feed: Tensor = None

    def __init__(self, hidden=None, input_feed=None):
        self.hidden = hidden
        self.input_feed = input_feed

    def batch_select(self, batch_indices):
        """Restrict/reorder the state in place to the given batch rows."""
        # hidden tensors put the batch on dim 1 ([layers, batch, hidden])
        self.hidden = tuple_map(
            lambda t: t.index_select(1, batch_indices), self.hidden
        )
        # input_feed puts the batch on dim 0
        self.input_feed = self.input_feed.index_select(0, batch_indices)

class ParallelRNNDecoder(nn.Module):
    """
    Input-feeding RNN decoder that attends over several parallel encoders.

    Args:
        rnn_type : one of "GRU", "LSTM", "RNN"
        num_encoders : number of parallel encoders
        bidirectional_encoders : whether the encoders are bidirectional
        num_layers : number of decoder layers
        hidden_size : decoder hidden size; when the encoders are
            bidirectional this should be twice the encoder hidden size
        init_reduce : reduction method over the parallel encoders' final
            states ("mlp" or "mean")
        context_reduce : reduction method over the per-encoder context
            vectors (only "cat" is supported)
        dropout : dropout probability
        embedding : target-side embedding layer

    Init State:
        states : num_encoders * [layers * directions, batch, encoder_hidden]

    Inputs:
        tgt : [batch, tgt_len]
        memory_banks : num_encoders * [batch, src_len, directions * encoder_hidden]
        memory_lengths : num_encoders * [batch, src_len,]

    Outputs:
        dec_outs : [batch, tgt_len, num_encoders * hidden]
        attn : num_encoders * [batch, tgt_len, src_len]
    """

    def __init__(self,
                 rnn_type,
                 num_encoders,
                 bidirectional_encoders: bool,
                 num_layers,
                 hidden_size,
                 init_reduce="mlp",
                 context_reduce="cat",
                 dropout=0.0,
                 embedding=None,
                 ):
        super().__init__()

        assert init_reduce in ["mlp", "mean"]
        assert context_reduce in ["cat"]

        # Encoder-related configuration.
        self.num_encoders = num_encoders
        self.bidirectional_encoders = bidirectional_encoders
        self.encoders_directions = 2 if bidirectional_encoders else 1
        self.encoders_type = rnn_type
        self.hidden_size = hidden_size

        # Reducers that merge the parallel encoders' final states into the
        # decoder's initial state; LSTM needs separate reducers for the
        # hidden and the cell state.
        if init_reduce == "mlp":
            if self.encoders_type == "LSTM":
                self.h_reducer = MlpReducer(num_encoders, self.hidden_size, dropout=dropout)
                self.c_reducer = MlpReducer(num_encoders, self.hidden_size, dropout=dropout)
            else:
                self.reducer = MlpReducer(num_encoders, self.hidden_size, dropout=dropout)
        elif init_reduce == "mean":
            if self.encoders_type == "LSTM":
                self.h_reducer = MeanReducer(dropout=dropout)
                self.c_reducer = MeanReducer(dropout=dropout)
            else:
                self.reducer = MeanReducer(dropout=dropout)
        else:
            # Defensive: only reachable when the assert above is stripped (-O).
            raise ValueError("Not support reduce method")

        # Decoder hyper-parameters.
        self.state = ParallelRNNCellState()
        self.num_layers = num_layers
        self.embedding = embedding
        self.dropout = nn.Dropout(dropout)
        # Input feeding: each step consumes the target embedding concatenated
        # with the previous step's concatenated context vectors.
        self.input_size = embedding.embedding_dim + (num_encoders * self.hidden_size)
        self.rnn = rnn_factory(
            rnn_type,
            input_size=self.input_size,
            hidden_size=self.hidden_size,
            num_layers=self.num_layers,
            batch_first=True,
            dropout=dropout
        )

        # One attention module per encoder.
        self.attn_list = nn.ModuleList([GlobalAttention(
            key_dim=hidden_size * 2 if bidirectional_encoders else hidden_size,
            query_dim=hidden_size
        ) for _ in range(num_encoders)])
        self.context_reduce = context_reduce

    def init_state(self, states):
        """Build the decoder's initial state from the encoders' final states.

        ``states`` holds one final state per encoder: LSTM encoders yield a
        (hidden, cell) tuple, GRU/RNN encoders yield a bare tensor.
        """
        # Normalize every encoder state to a tuple so both cases below can
        # index into it uniformly.
        encoders_hidden: List[Tuple[Tensor]] = []
        for each_state in states:
            if isinstance(each_state, tuple):  # LSTM
                encoders_hidden.append(each_state)
            else:  # GRU / RNN
                encoders_hidden.append((each_state,))

        if self.encoders_type in ['RNN', "GRU"]:  # RNN GRU
            hidden_states = [each[0] for each in encoders_hidden]
            hidden_states = self.reducer(hidden_states)
            self.state.hidden = (hidden_states,)
        else:  # LSTM: reduce hidden and cell states independently
            hidden_states = [each[0] for each in encoders_hidden]
            cell_states = [each[1] for each in encoders_hidden]
            hidden_states = self.h_reducer(hidden_states)
            cell_states = self.c_reducer(cell_states)
            self.state.hidden = (hidden_states, cell_states)

        batch_size = self.state.hidden[0].shape[1]  # state.hidden is [num_layers, batch, hidden]
        h_size = (batch_size, self.hidden_size * self.num_encoders)
        device = self.state.hidden[0].device
        # The very first input feed is all zeros.
        self.state.input_feed = torch.zeros(h_size, device=device)

    def forward(self, tgt, memory_banks: List[Tensor], memory_lengths: List[Tensor]):
        """Decode ``tgt`` and update ``self.state`` for the next call.

        Returns:
            dec_outs : [batch, tgt_len, num_encoders * hidden]
            attn : num_encoders * [batch, tgt_len, src_len]
        """
        dec_state, dec_outs, attn = self._run_forward_pass(tgt, memory_banks, memory_lengths=memory_lengths)
        if not isinstance(dec_state, tuple):
            dec_state = (dec_state,)  # GRU / RNN: re-wrap as a 1-tuple
        self.state.hidden = dec_state
        # The last time step's output seeds the next forward pass's input feed.
        self.state.input_feed = dec_outs[:, -1, :]
        return dec_outs, attn

    def _run_forward_pass(self, tgt, memory_banks: List[Tensor], memory_lengths: List[Tensor]):
        """Step through ``tgt`` one token at a time with input feeding.

        Returns the final RNN state, the stacked per-step context vectors,
        and one attention-history tensor per encoder.
        """
        attn_history = []
        output_history = []

        emb = self.embedding(tgt)
        dec_state = self.state.hidden
        if len(dec_state) == 1:
            dec_state = dec_state[0]  # GRU / RNN: unwrap the bare tensor
        input_feed = self.state.input_feed.unsqueeze(1)  # [batch, 1, hidden_size]

        for emb_t in emb.split(1, dim=1):
            # Input feeding: concatenate the previous context with the token.
            decoder_input = torch.cat([emb_t, input_feed], -1)
            rnn_output, dec_state = self.rnn(decoder_input, dec_state)

            # Attend over every encoder's memory bank with the same query.
            attn_outs = [
                attn(
                    tgt=rnn_output,
                    memory_bank=memory_banks[idx],
                    memory_lengths=memory_lengths[idx]
                ) for idx, attn in enumerate(self.attn_list)
            ]

            # Each attn returns (context, weights); concatenate the contexts.
            context_vector = torch.cat([each[0] for each in attn_outs], -1)
            attn = [each[1] for each in attn_outs]
            attn_history.append(attn)
            output_history.append(context_vector.squeeze(1))
            input_feed = context_vector

        # attn_history: tgt_len * [batch, 1, src_len] per step; transpose the
        # nesting so each encoder gets its own list of per-step weights.
        attn_history = [[each[i] for each in attn_history] for i in range(self.num_encoders)]

        for idx, _ in enumerate(attn_history):
            attn_history[idx] = torch.cat(attn_history[idx], dim=1)

        output_history = torch.stack(output_history, dim=1)
        return dec_state, output_history, attn_history
