import torch
from torch import nn
from .attention import create_NaiveAttention
from .linear import create_NaiveFFN

class DecoderBlock(nn.Module):
    """One transformer-style decoder block: self-attention followed by a
    feed-forward network, each wrapped in a residual connection with a
    LayerNorm applied to the sublayer output before the add.
    """

    def __init__(self, attn, ffn):
        '''
        @input attn:    attention layer creation factory; the created module
                        must expose `output_dim` and `precision` attributes
        @input ffn:     ffn layer creation factory; same attribute contract
        '''
        super(DecoderBlock, self).__init__()
        self.attn = attn()
        self.ffn = ffn()

        # LayerNorms sized/typed from the sublayers they normalize.
        self.ln1 = nn.LayerNorm(self.attn.output_dim, dtype=self.attn.precision)
        self.ln2 = nn.LayerNorm(self.ffn.output_dim, dtype=self.ffn.precision)

    def forward(self, x):
        # Sublayer 1: residual from the block input x.
        attn = x + self.ln1(self.attn(x))
        out = self.ffn(attn)
        # BUG FIX: the residual must come from the FFN's input (`attn`),
        # mirroring the attention sublayer above. The original
        # `out + self.ln2(out)` double-counted the FFN output instead of
        # carrying the residual stream forward.
        out = attn + self.ln2(out)
        return out
    
class Decoder(nn.Module):
    """A stack of `num_decoders` DecoderBlock modules applied in order."""

    def __init__(self, num_decoders, attn, ffn):
        '''
        @input num_decoders:    number of stacked decoder blocks
        @input attn:            attention layer creation factory (per block)
        @input ffn:             ffn layer creation factory (per block)
        '''
        super(Decoder, self).__init__()
        # nn.Sequential registers every block as a submodule in order.
        self.blocks = nn.Sequential(
            *(DecoderBlock(attn, ffn) for _ in range(num_decoders))
        )

    def forward(self, x):
        # Sequential feeds each block's output into the next block.
        return self.blocks(x)
    


if __name__ == '__main__':
    # BUG FIX: `torch.cuda.is_available` without parentheses is a function
    # object, which is always truthy — the script would always pick 'cuda'
    # and crash on CPU-only machines. Call the function.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Factories produce per-block sublayers; dims chosen so the residual
    # stream stays at width 8 throughout.
    attn = create_NaiveAttention(
        input_dim=8,
        hidden_dim=4,
        output_dim=8,
        num_heads=2
    )
    ffn = create_NaiveFFN(
        input_dim=8,
        hidden_dim=32,
        output_dim=8
    )

    decoder = Decoder(
        num_decoders=4,
        attn=attn,
        ffn=ffn
    ).to(device)

    # Smoke test: (batch=4, seq=16, dim=8) random input; output shape
    # should match the input shape.
    x = torch.randn([4, 16, 8], device=device)
    result = decoder(x)
    print(result.shape)