# %%
import torch
import math
import torch.nn as nn
# %%
# block / layer
class SimpleDecoderLayer(nn.Module):
    """One post-norm Transformer decoder layer (causal self-attention + FFN).

    Each sub-layer (multi-head causal self-attention, position-wise
    feed-forward) is wrapped with a residual connection followed by
    LayerNorm: ``LN(x + sublayer(x))``.

    Args:
        hidden_dim: model width; must be divisible by ``head_num``.
        head_num: number of attention heads.
        attention_dropout_rate: dropout applied to the attention weights.
        ffn_dropout_rate: dropout applied to the FFN output
            (new parameter; default 0.1 matches the previous hard-coded value).
    """

    def __init__(self, hidden_dim: int, head_num: int,
                 attention_dropout_rate: float = 0.1,
                 ffn_dropout_rate: float = 0.1):
        super().__init__()
        # Fail fast instead of silently truncating head_dim.
        if hidden_dim % head_num != 0:
            raise ValueError(
                f"hidden_dim ({hidden_dim}) must be divisible by "
                f"head_num ({head_num})"
            )
        self.hidden_dim = hidden_dim
        self.head_num = head_num
        self.head_dim = hidden_dim // head_num

        # --- multi-head attention sub-layer ---
        self.q = nn.Linear(hidden_dim, hidden_dim)
        self.k = nn.Linear(hidden_dim, hidden_dim)
        self.v = nn.Linear(hidden_dim, hidden_dim)
        self.out = nn.Linear(hidden_dim, hidden_dim)
        self.drop_att = nn.Dropout(attention_dropout_rate)
        self.att_ln = nn.LayerNorm(hidden_dim, eps=1e-7)

        # --- position-wise feed-forward sub-layer (4x expansion) ---
        self.up_proj = nn.Linear(hidden_dim, hidden_dim * 4)
        self.down_proj = nn.Linear(hidden_dim * 4, hidden_dim)
        self.act_fn = nn.GELU()
        self.drop_ffn = nn.Dropout(ffn_dropout_rate)
        self.ffn_ln = nn.LayerNorm(hidden_dim, eps=1e-7)

    def attention_layer(self, q, k, v, attention_mask=None):
        """Scaled dot-product attention with a causal (lower-triangular) mask.

        Args:
            q, k, v: tensors of shape (batch, head_num, seq, head_dim).
            attention_mask: optional 1/0 mask broadcastable to the score
                shape (batch, head_num, seq, seq); 1 = attend. It is
                additionally lower-triangularized, so causality is always
                enforced even with a user-supplied padding mask.

        Returns:
            Tensor of shape (batch, seq, hidden_dim).
        """
        # Raw scores (batch, head, seq_q, seq_k), scaled by sqrt(d_k).
        attention_weight = q @ k.transpose(-2, -1) / math.sqrt(self.head_dim)

        # Both branches of the original code did the same masking; merged.
        if attention_mask is None:
            attention_mask = torch.ones_like(attention_weight)
        attention_mask = attention_mask.tril()
        attention_weight = attention_weight.masked_fill(
            attention_mask == 0, float("-inf")
        )

        attention_weight = torch.softmax(attention_weight, dim=-1)
        attention_weight = self.drop_att(attention_weight)

        # (batch, head, seq, head_dim) -> (batch, seq, head, head_dim)
        mid_out = torch.matmul(attention_weight, v)
        mid_out = mid_out.transpose(1, 2).contiguous()
        batch, seq, _, _ = mid_out.size()
        # Merge heads back into hidden_dim.
        mid_out = mid_out.view(batch, seq, -1)
        return self.out(mid_out)

    def mha(self, x, attention_mask=None):
        """Multi-head self-attention sub-layer: LN(x + attention(x))."""
        batch, seq, _ = x.size()
        # Project and split into heads: (batch, head, seq, head_dim).
        q = self.q(x).view(batch, seq, self.head_num, self.head_dim).transpose(1, 2)
        k = self.k(x).view(batch, seq, self.head_num, self.head_dim).transpose(1, 2)
        v = self.v(x).view(batch, seq, self.head_num, self.head_dim).transpose(1, 2)

        output = self.attention_layer(q, k, v, attention_mask)
        # FIX: residual connection was missing here while the FFN
        # sub-layer had one — standard post-norm is LN(x + sublayer(x)).
        return self.att_ln(x + output)

    def ffn(self, x):
        """Feed-forward sub-layer: LN(x + down(GELU(up(x))))."""
        down = self.down_proj(self.act_fn(self.up_proj(x)))
        down = self.drop_ffn(down)
        return self.ffn_ln(x + down)

    def forward(self, x, attention_mask=None):
        """Apply attention then FFN; x: (batch, seq, hidden_dim) -> same shape."""
        x = self.mha(x, attention_mask)
        return self.ffn(x)

class Decoder(nn.Module):
    """Stack of SimpleDecoderLayer blocks with token embedding and LM head.

    The defaults reproduce the original hard-coded toy configuration
    (vocab of 12, hidden size 64, 8 heads, 5 layers), so ``Decoder()``
    behaves exactly as before.

    Args:
        vocab_size: size of the token vocabulary (embedding rows / output classes).
        hidden_dim: model width passed to each decoder layer.
        head_num: attention heads per layer.
        layer_num: number of stacked decoder layers.
    """

    def __init__(self, vocab_size: int = 12, hidden_dim: int = 64,
                 head_num: int = 8, layer_num: int = 5):
        super().__init__()
        self.layer_list = nn.ModuleList(
            SimpleDecoderLayer(hidden_dim, head_num) for _ in range(layer_num)
        )
        self.emb = nn.Embedding(vocab_size, hidden_dim)
        self.out = nn.Linear(hidden_dim, vocab_size)

    def forward(self, x, mask=None):
        """Run the decoder stack.

        Args:
            x: (batch, seq) integer token ids.
            mask: optional attention mask forwarded to every layer.

        Returns:
            (batch, seq, vocab_size) probabilities (softmax over the last dim).
            NOTE: for training with CrossEntropyLoss, prefer raw logits.
        """
        h = self.emb(x)
        for layer in self.layer_list:
            h = layer(h, mask)
        # FIX: removed stray debug print(X.shape) that ran on every forward.
        logits = self.out(h)
        return torch.softmax(logits, dim=-1)
# %%
# Toy input: batch of 3 sequences, each 4 token ids drawn from [0, 12).
x = torch.randint(low=0, high=12, size=(3,4))
# %%
x  # inspect sampled token ids (notebook cell output)
# %%
x.shape  # torch.Size([3, 4])
# %%
net = Decoder()
# %%
# Per-sequence padding mask (1 = attend, 0 = masked), expanded from
# (3, 4) to (batch=3, head_num=8, seq=4, seq=4) to match the attention
# score shape. The layer additionally applies .tril() for causality.
mask = torch.tensor(
    [
        [1, 1, 1, 0],
        [1, 1, 0, 0],
        [1, 0, 0, 0]
    ]
).unsqueeze(1).unsqueeze(2).repeat(1, 8, 4, 1)
# %%
mask  # inspect the expanded mask (notebook cell output)
# %%
mask.shape  # torch.Size([3, 8, 4, 4])
# %%
net(x, mask)  # forward pass; returns (3, 4, 12) per-token probabilities
# %%
