import torch


__all__ = ["DecoderLSTM", "DecoderAttentionLSTM", "PointerGeneratorDecoder"]


class DecoderLSTM(torch.nn.Module):
    """Single-step LSTM decoder mapping one token id to vocabulary logits.

    Wraps a shared embedding, a (possibly multi-layer) LSTM, and a linear
    projection onto the embedding's vocabulary size.
    """

    def __init__(self, embedding, hid_dim, n_layers, dropout):
        super().__init__()
        self.hid_dim = hid_dim
        self.n_layers = n_layers
        self.embedding = embedding
        self.rnn = torch.nn.LSTM(
            embedding.embedding_dim,
            hid_dim,
            n_layers,
            dropout=dropout,
            batch_first=True,
        )
        self.fc_out = torch.nn.Linear(hid_dim, embedding.num_embeddings)
        self.dropout = torch.nn.Dropout(dropout)

    def forward(self, input, hidden, cell, *args, **kwargs):
        """Decode a single step.

        input: [batch] token ids. hidden/cell: LSTM state tensors of shape
        [n layers, batch, hid dim] (standard torch layout, even with
        batch_first=True). Extra positional/keyword args are accepted and
        ignored so this decoder is call-compatible with the attention
        variants below.

        Returns (logits [batch, vocab], hidden, cell).
        """
        step = input.unsqueeze(1)                      # [batch, 1]
        step = self.dropout(self.embedding(step))      # [batch, 1, emb dim]
        rnn_out, (hidden, cell) = self.rnn(step, (hidden, cell))
        logits = self.fc_out(rnn_out.squeeze(1))       # [batch, vocab]
        return logits, hidden, cell


class DecoderAttentionLSTM(torch.nn.Module):
    """Single-step LSTM decoder with attention over encoder outputs.

    Each call advances the LSTM by one token, attends over the encoder
    outputs using the top-layer hidden state, and projects the
    [hidden; context] concatenation to output logits.
    """

    def __init__(
        self,
        embedding,
        output_dim,
        enc_hid_dim,
        dec_hid_dim,
        n_layers,
        dropout,
        attention,
    ):
        super().__init__()
        self.output_dim = output_dim
        self.hid_dim = dec_hid_dim
        self.n_layers = n_layers
        self.embedding = embedding
        self.rnn = torch.nn.LSTM(
            embedding.embedding_dim,
            dec_hid_dim,
            n_layers,
            dropout=dropout,
            batch_first=True,
        )
        self.fc_out = torch.nn.Linear(enc_hid_dim + dec_hid_dim, output_dim)
        self.dropout = torch.nn.Dropout(dropout)
        self.attention = attention

    def forward(self, input, hidden, cell, encoder_outputs, encoder_mask):
        """Decode one step with attention.

        input: [batch] token ids. hidden/cell: [n layers, batch, dec hid dim]
        (torch LSTM state layout — layer-first even with batch_first=True).
        encoder_outputs: [batch, src len, enc hid dim]; encoder_mask is
        forwarded untouched to ``self.attention``.

        Returns (logits [batch, output dim], hidden, cell).
        """
        token = input.unsqueeze(1)                       # [batch, 1]
        embedded = self.dropout(self.embedding(token))   # [batch, 1, emb dim]

        # Advance the recurrent state by a single step.
        _, (hidden, cell) = self.rnn(embedded, (hidden, cell))
        top_hidden = hidden[-1]                          # [batch, dec hid dim]

        # Attention distribution over source positions: [batch, src len].
        attn = self.attention(top_hidden, encoder_outputs, encoder_mask)

        # Context vector: attention-weighted sum of encoder outputs.
        # bmm([batch, enc hid, src len], [batch, src len, 1]) -> [batch, enc hid]
        context = torch.bmm(
            encoder_outputs.permute(0, 2, 1),
            attn.unsqueeze(2),
        ).squeeze(2)

        fused = torch.cat((top_hidden, context), dim=1)
        return self.fc_out(fused), hidden, cell


class PointerGeneratorDecoder(torch.nn.Module):
    """Attention LSTM decoder with a pointer-generator head.

    Behaves like the attention decoder but additionally returns the
    attention distribution and a scalar generation probability per batch
    element, computed from the same [hidden; context] features.
    """

    def __init__(
        self,
        embedding,
        output_dim,
        enc_hid_dim,
        dec_hid_dim,
        n_layers,
        dropout,
        attention,
    ):
        super().__init__()
        emb_dim = embedding.embedding_dim
        self.output_dim = output_dim
        self.hid_dim = dec_hid_dim
        self.n_layers = n_layers
        self.embedding = embedding
        self.rnn = torch.nn.LSTM(
            emb_dim,
            dec_hid_dim,
            n_layers,
            dropout=dropout,
            batch_first=True,
        )
        self.fc_out = torch.nn.Linear(dec_hid_dim + enc_hid_dim, output_dim)
        self.dropout = torch.nn.Dropout(dropout)
        self.attention = attention
        # Maps [hidden; context] to a single logit; sigmoid turns it into
        # the generation probability p_gen in (0, 1).
        self.pointer = torch.nn.Linear(dec_hid_dim + enc_hid_dim, 1)
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, input, hidden, cell, encoder_outputs, encoder_mask):
        """Decode one step with attention and pointer-generator output.

        input: [batch] token ids. hidden/cell: [n layers, batch, dec hid dim]
        (torch LSTM state layout — layer-first even with batch_first=True).
        encoder_outputs: [batch, src len, enc hid dim]; encoder_mask is
        forwarded untouched to ``self.attention``.

        Returns (logits [batch, output dim], hidden, cell,
        attention weights [batch, src len], p_gen [batch, 1]).
        """
        token = input.unsqueeze(1)                       # [batch, 1]
        embedded = self.dropout(self.embedding(token))   # [batch, 1, emb dim]

        # Advance the recurrent state by a single step.
        _, (hidden, cell) = self.rnn(embedded, (hidden, cell))
        top_hidden = hidden[-1]                          # [batch, dec hid dim]

        # Attention distribution over source positions: [batch, src len].
        attn = self.attention(top_hidden, encoder_outputs, encoder_mask)

        # Context vector: attention-weighted sum of encoder outputs.
        # bmm([batch, enc hid, src len], [batch, src len, 1]) -> [batch, enc hid]
        context = torch.bmm(
            encoder_outputs.permute(0, 2, 1),
            attn.unsqueeze(2),
        ).squeeze(2)

        fused = torch.cat((top_hidden, context), dim=1)
        logits = self.fc_out(fused)
        p_gen = self.sigmoid(self.pointer(fused))        # [batch, 1]
        return logits, hidden, cell, attn, p_gen
