from typing import List

import torch
import torch.nn as nn
from ginnm import tuple_map

from mec.modules.rnn_encoder import RNNEncoder


class ParallelRNNEncoder(nn.Module):
    """Run several RNN encoders side by side over parallel input sequences.

    Args:
        encoders_list: n ``RNNEncoder`` instances. All encoders must share
            the same ``hidden_size`` so downstream consumers can treat the
            per-encoder outputs uniformly.

    Raises:
        ValueError: if the encoders do not all have the same hidden size.

    Input:
        src_list: n tensors of shape [batch, max_src_len]
        lengths_list: n tensors of shape [batch,], or None

    Output:
        states:
            LSTM: n tuples of
                ([num_layers * directions, batch_size, hidden_size],
                 [num_layers * directions, batch_size, hidden_size])
            GRU or RNN: n tensors of
                [num_layers * directions, batch_size, hidden_size]
        memory_banks: n tensors of
            [batch_size, max_src_len, num_directions * hidden_size]
        lengths: the ``lengths_list`` argument, returned unchanged
    """

    def __init__(self, encoders_list: List[RNNEncoder]):
        super(ParallelRNNEncoder, self).__init__()

        encoders_hidden_size = [each.hidden_size for each in encoders_list]
        # Explicit raise instead of `assert`: asserts are stripped under
        # `python -O`, and this is a real input-validation check.
        if len(set(encoders_hidden_size)) != 1:
            raise ValueError(
                "All encoders must share the same hidden_size, got: "
                f"{encoders_hidden_size}"
            )

        self.total_hidden = sum(encoders_hidden_size)
        self.hidden_size = encoders_hidden_size[0]
        self.encoders = nn.ModuleList(encoders_list)
        self.num_encoders = len(encoders_hidden_size)

    @staticmethod
    def _fix_enc_hidden(hidden, num_layers):
        """For a bidirectional encoder, keep only the second direction's
        final state of each layer (odd indices 1, 3, 5, ... along dim 0,
        i.e. the backward direction in PyTorch's layer/direction layout)."""
        final_idx = torch.arange(num_layers, device=hidden.device) * 2 + 1
        return torch.index_select(hidden, 0, final_idx)

    def forward(self, src_list, lengths_list=None):
        memory_banks = []
        states = []
        for idx, encoder in enumerate(self.encoders):
            # Bug fix: the previous code indexed lengths_list unconditionally,
            # raising TypeError whenever the default None was used.
            lengths = lengths_list[idx] if lengths_list is not None else None
            state, memory_bank, _ = encoder(src_list[idx], lengths)

            if encoder.directions == 2:
                # state is a tuple (h, c) for LSTM or a single tensor for
                # GRU/RNN; tuple_map applies the fix to each element either way.
                state = tuple_map(
                    lambda h: self._fix_enc_hidden(h, encoder.num_layers),
                    state,
                )
            memory_banks.append(memory_bank)
            states.append(state)

        return states, memory_banks, lengths_list
