import torch
import torch.nn as nn
import torch.nn.functional as F

class DecoderLayer(nn.Module):
    r"""A single Transformer decoder layer (post-norm variant).

    Consists of three sublayers, each followed by dropout, a residual
    connection, and LayerNorm:

    1. masked self-attention over ``tgt``,
    2. encoder-decoder (cross) attention against ``memory``,
    3. a position-wise feed-forward network.

    Args:
        d_model: embedding dimension of the model.
        nhead: number of attention heads.
        dim_feedforward: hidden size of the feed-forward sublayer.
        dropout: dropout probability applied to each sublayer's output.
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1):
        super(DecoderLayer, self).__init__()
        self.self_attn = MultiHeadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = MultiHeadAttention(d_model, nhead, dropout=dropout)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        # The feed-forward sublayer owns its own linear projections; a
        # previously-unused stray `nn.Linear` attribute has been removed.
        self.feedforward = PositionwiseFeedForward(d_model, dim_feedforward, dropout)

    def forward(self, tgt, memory, tgt_mask=None, memory_mask=None,
                tgt_key_padding_mask=None, memory_key_padding_mask=None):
        r"""Pass the input through the decoder layer.

        Args:
            tgt: Tensor, shape [tgt_len, batch, embed_dim]
            memory: Tensor, shape [src_len, batch, embed_dim]
            tgt_mask: Tensor, shape [tgt_len, tgt_len], optional,
                The mask for the tgt sequence. Optional masked sequence encoding
                (to predict next token).
            memory_mask: Tensor, shape [src_len, src_len], optional,
                The mask for the memory sequence.
            tgt_key_padding_mask: Tensor, shape [batch, tgt_len], optional,
                A byte tensor where positions are set to `True` for padded
                positions in `tgt`.
            memory_key_padding_mask: Tensor, shape [batch, src_len], optional,
                A byte tensor where positions are set to `True` for padded
                positions in `memory`.

        Returns:
            (output, attn): output has shape [tgt_len, batch, embed_dim];
                attn is the cross-attention weights from the
                encoder-decoder attention sublayer.
        """
        # --- Sublayer 1: masked self-attention (query = key = value = tgt) ---
        # `[:2]` tolerates attention modules that return extra values.
        sa_out, _ = self.self_attn(tgt, tgt, value=tgt, attn_mask=tgt_mask,
                                   key_padding_mask=tgt_key_padding_mask)[:2]
        x = self.norm1(tgt + self.dropout1(sa_out))

        # --- Sublayer 2: encoder-decoder (cross) attention ---
        ca_out, attn = self.multihead_attn(x, memory, memory,
                                           attn_mask=memory_mask,
                                           key_padding_mask=memory_key_padding_mask)
        # Residual connects to `x` (the self-attention sublayer output),
        # not to the original `tgt` — the previous code incorrectly reused
        # `tgt` here, skipping the first sublayer in the residual path.
        x = self.norm2(x + self.dropout2(ca_out))

        # --- Sublayer 3: position-wise feed-forward ---
        ff_out = self.feedforward(x)
        output = self.norm3(x + self.dropout3(ff_out))

        return output, attn

class Decoder(nn.Module):
    r"""A stack of ``decoder_layers`` identical :class:`DecoderLayer` modules,
    followed by a final LayerNorm over the last layer's output.

    Args:
        decoder_layers: number of stacked decoder layers.
        d_model: embedding dimension of the model.
        nhead: number of attention heads per layer.
        dim_feedforward: hidden size of each layer's feed-forward sublayer.
        dropout: dropout probability used inside each layer.
    """

    def __init__(self, decoder_layers, d_model, nhead, dim_feedforward=2048, dropout=0.1):
        super(Decoder, self).__init__()
        self.layers = nn.ModuleList(
            DecoderLayer(d_model, nhead, dim_feedforward, dropout)
            for _ in range(decoder_layers)
        )
        self.norm = nn.LayerNorm(d_model)

    def forward(self, tgt, memory, tgt_mask=None, memory_mask=None,
                tgt_key_padding_mask=None, memory_key_padding_mask=None):
        r"""Run the target sequence through every decoder layer in order.

        Args:
            tgt: Tensor, shape [tgt_len, batch, embed_dim]
            memory: Tensor, shape [src_len, batch, embed_dim]
            tgt_mask: Tensor, shape [tgt_len, tgt_len], optional,
                The mask for the tgt sequence.
            memory_mask: Tensor, shape [src_len, src_len], optional,
                The mask for the memory sequence.
            tgt_key_padding_mask: Tensor, shape [batch, tgt_len], optional,
                A byte tensor where positions are set to `True` for padded
                positions in `tgt`.
            memory_key_padding_mask: Tensor, shape [batch, src_len], optional,
                A byte tensor where positions are set to `True` for padded
                positions in `memory`.

        Returns:
            output: Tensor, shape [tgt_len, batch, embed_dim]
        """
        x = tgt
        for decoder_layer in self.layers:
            # Each layer also returns its cross-attention weights; only the
            # hidden states are threaded through the stack.
            x, _ = decoder_layer(x, memory,
                                 tgt_mask=tgt_mask,
                                 memory_mask=memory_mask,
                                 tgt_key_padding_mask=tgt_key_padding_mask,
                                 memory_key_padding_mask=memory_key_padding_mask)
        return self.norm(x)

