from torch import nn
import torch
from vocab import Vocab


class DecoderCellState:
    """Container for one decoder step's LSTM state pair.

    Attributes:
        hidden_state: [num_layers, batch_size, hidden_size] tensor, or None
            before the first step (the LSTM then uses its default zero state).
        cell_state: same shape as hidden_state, or None.
    """

    def __init__(self, hidden_state=None, cell_state=None):
        self.hidden_state = hidden_state
        self.cell_state = cell_state

    @staticmethod
    def select(x, indices, dim=0):
        """Gather the given indices of x along dim (thin index_select wrapper)."""
        return x.index_select(dim, indices)

    def batch_select(self, indices):
        """Return a new state containing only the selected batch entries.

        dim=1 is the batch axis of [num_layers, batch, hidden] LSTM state
        tensors. Requires both states to be set (not None).
        """
        return DecoderCellState(
            hidden_state=self.select(self.hidden_state, indices, dim=1),
            cell_state=self.select(self.cell_state, indices, dim=1),
        )


class BiLstmCell(nn.Module):
    """Single-step LSTM decoder cell.

    NOTE(review): despite the name, the wrapped nn.LSTM is built with
    bidirectional=False — this cell is uni-directional.

    Args:
        vocab: vocabulary; only its length and pad id are recorded.
        embedding_size: size of the embedded input token.
        hidden_size: LSTM hidden size.
        num_layers: number of stacked LSTM layers.
        dropout: inter-layer dropout probability handed to nn.LSTM.

    Input (forward):
        pre_out_embedding: [batch_size, embedding_size] embedding of the
            previous step's output (this step's input).
        pre_state: DecoderCellState whose hidden_state / cell_state are each
            [num_layers, batch_size, hidden_size], or both None for the first
            step (the LSTM then starts from its default zero state).
        encoder_outputs, src_lengths: accepted for interface compatibility
            with attention-style cells, but unused by this implementation.

    Output:
        output: [batch_size, hidden_size] top-layer output for this step.
        decoder_cell_state: DecoderCellState with the updated state pair.
    """

    def __init__(self, vocab: Vocab, embedding_size, hidden_size, num_layers, dropout):
        super(BiLstmCell, self).__init__()
        self.vocab = vocab
        self.vocab_size = len(vocab)
        self.pad_id = self.vocab.pad_id
        # The cell's only input per step is the previous output's embedding.
        self.cell_input_size = embedding_size
        self.cell = nn.LSTM(self.cell_input_size,
                            hidden_size=hidden_size,
                            num_layers=num_layers,
                            batch_first=True,
                            dropout=dropout,
                            bidirectional=False)

    def forward(self, pre_out_embedding, pre_state: DecoderCellState, encoder_outputs, src_lengths):
        # Give the input a length-1 time axis: [batch, 1, embedding_size].
        step_input = pre_out_embedding.unsqueeze(1)

        # Only seed the LSTM with a state when one was carried over; on the
        # very first step both halves are None and nn.LSTM zero-initializes.
        no_state = pre_state.hidden_state is None and pre_state.cell_state is None
        if no_state:
            step_output, (new_hidden, new_cell) = self.cell(step_input)
        else:
            carried = (pre_state.hidden_state, pre_state.cell_state)
            step_output, (new_hidden, new_cell) = self.cell(step_input, carried)

        # Remove the time axis again: [batch, hidden_size].
        step_output = step_output.squeeze(1)
        next_state = DecoderCellState(
            hidden_state=new_hidden,
            cell_state=new_cell,
        )
        return step_output, next_state


class LstmDecoder(nn.Module):
    """Teacher-forced LSTM decoder: one softmax vocab distribution per target step.

    Args:
        vocab: vocabulary (supplies size and pad id).
        embedding_size: target-token embedding size.
        hidden_size: LSTM hidden size.
        num_layers: number of LSTM layers.
        dropout: dropout probability passed through to the cell.

    Input (forward):
        src_batch : [batch_size, max_src_len] (passed through, unused by the plain cell)
        src_lengths : [batch_size] (passed through, unused by the plain cell)
        tgt_batch : [batch_size, max_tgt_len] teacher-forcing target ids
        final_encoder_hidden_state / final_encoder_cell_state : initial decoder
            LSTM state. NOTE(review): nn.LSTM expects these shaped
            [num_layers, batch_size, hidden_size] — confirm the encoder
            delivers that, not [batch_size, hidden_size].
        encoder_outputs : [batch_size, max_src_len, hidden_size] (passed through, unused)

    Output:
        dist : [batch_size, max_tgt_len, vocab_size] per-step softmax
            distributions (probabilities, not logits).
    """

    def __init__(self,
                 vocab: Vocab,
                 embedding_size,
                 hidden_size,
                 num_layers,
                 dropout):
        super(LstmDecoder, self).__init__()
        self.vocab = vocab
        self.vocab_size = len(vocab)
        self.pad_id = self.vocab.pad_id
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size

        # Target-side embedding; pad positions embed to the zero vector.
        self.embedding = nn.Embedding(len(vocab), self.embedding_size, padding_idx=vocab.pad_id)
        # Projects the cell output onto vocabulary scores (no bias).
        self.out_layer = nn.Linear(hidden_size, self.vocab_size, bias=False)
        self.cell = BiLstmCell(vocab=vocab,
                               embedding_size=embedding_size,
                               hidden_size=hidden_size,
                               num_layers=num_layers,
                               dropout=dropout)

    @staticmethod
    def generate_init_cell_state(final_encoder_hidden_state, final_encoder_cell_state):
        """Wrap the encoder's final state as the decoder's initial cell state."""
        return DecoderCellState(hidden_state=final_encoder_hidden_state, cell_state=final_encoder_cell_state)

    def one_step(self, cur_embed, prev_cell_state, encoder_outputs, src_batch, src_lengths):
        """Decode a single step.

        Returns (new_cell_state, gen_dist) where gen_dist is the
        [batch_size, vocab_size] softmax distribution for this step.
        src_batch is accepted for interface compatibility but unused here.
        """
        cell_output, new_cell_state = self.cell(cur_embed, prev_cell_state, encoder_outputs, src_lengths)
        scores = self.out_layer(cell_output)  # [batch, vocab_size]
        gen_dist = torch.softmax(scores, dim=1)
        return new_cell_state, gen_dist

    def forward(self,
                src_batch,
                src_lengths,
                tgt_batch,
                final_encoder_hidden_state,
                final_encoder_cell_state,
                encoder_outputs):
        embedded = self.embedding(tgt_batch)  # [batch, max_tgt_len, embedding_size]
        state = self.generate_init_cell_state(final_encoder_hidden_state, final_encoder_cell_state)

        # Teacher forcing: feed each gold target embedding in sequence,
        # threading the cell state from step to step.
        step_dists = []
        for t in range(embedded.size(1)):
            state, step_dist = self.one_step(embedded[:, t, :],
                                             state,
                                             encoder_outputs,
                                             src_batch,
                                             src_lengths)
            step_dists.append(step_dist)

        # [batch, max_tgt_len, vocab_size]
        return torch.stack(step_dists, 1)
