import torch
from torch import nn
import mul_head_attention as mda
import feed_forward as ff
import residual_connection as rc
class DecoderBlock(nn.Module):
    """One Transformer decoder layer.

    Applies, in order: masked self-attention, encoder-decoder
    cross-attention, and a position-wise feed-forward network,
    each wrapped by one of three residual-connection modules.
    """

    def __init__(self, dim_model, ffn_hidden, head_num, dropout_p):
        super(DecoderBlock, self).__init__()
        # Sub-layers, in the order forward() applies them.
        self.attention1 = mda.MultiHeadAttention(dim_model, head_num, dropout_p)
        self.cross_attention = mda.MultiHeadAttention(dim_model, head_num, dropout_p)
        self.ffn = ff.PositionwiseFeedForward(dim_model, ffn_hidden, dropout_p)
        # One residual wrapper per sub-layer (3 total).
        self.residual_connections = nn.ModuleList(
            rc.ResidualConnection(dim_model, dropout_p) for _ in range(3)
        )

    def forward(self, x, enc_output, t_mask, s_mask):
        """Run one decoder layer.

        x          -- decoder-side input.
        enc_output -- encoder output, used as key/value in cross-attention.
        t_mask     -- mask applied in the self-attention sub-layer.
        s_mask     -- mask applied in the cross-attention sub-layer.
        """
        def self_attn(h):
            return self.attention1(h, h, h, t_mask)

        def cross_attn(h):
            return self.cross_attention(h, enc_output, enc_output, s_mask)

        residuals = self.residual_connections
        x = residuals[0](x, self_attn)
        x = residuals[1](x, cross_attn)
        return residuals[2](x, self.ffn)

import layer_norm as ln
class Decoder(nn.Module):
    """Transformer decoder: a stack of DecoderBlock layers followed by a
    final layer normalization."""

    def __init__(self, dim_model, 
                 ffn_hidden, head_num, block_num, dropout_p):
        super(Decoder, self).__init__()
        # block_num identical decoder layers.
        self.layers = nn.ModuleList(
            [
                DecoderBlock(dim_model, ffn_hidden, head_num, dropout_p)
                for _ in range(block_num)
            ]
        )
        self.norm = ln.LayerNorm(dim_model)
    
    def forward(self, x, enc_output, s_mask, t_mask):
        """Run the full decoder stack and normalize the result.

        x          -- decoder-side input.
        enc_output -- encoder output fed to every layer's cross-attention.
        s_mask     -- source mask (cross-attention).
        t_mask     -- target mask (self-attention).

        BUG FIX: DecoderBlock.forward is declared (x, enc_output, t_mask,
        s_mask); the original positional call passed (s_mask, t_mask),
        silently swapping the self- and cross-attention masks in every
        layer. Passing by keyword routes each mask to its intended
        sub-layer while keeping this method's signature unchanged.
        """
        for layer in self.layers:
            x = layer(x, enc_output, t_mask=t_mask, s_mask=s_mask)
        return self.norm(x)

