# %%
import torch
import math
import torch.nn as nn
# %%
class GroupQueryAttention(nn.Module):
    """Grouped-Query Attention (GQA).

    Queries use `nums_head` attention heads while keys and values use a
    smaller number of heads (`nums_key_values_head`); each key/value head is
    shared by a group of `nums_head // nums_key_values_head` query heads.

    Args:
        hidden_dim: model width; must be divisible by `nums_head`.
        nums_head: number of query heads.
        nums_key_values_head: number of key/value heads; must divide `nums_head`.
    """

    def __init__(self, hidden_dim, nums_head, nums_key_values_head):
        super().__init__()
        assert hidden_dim % nums_head == 0
        assert nums_head % nums_key_values_head == 0
        self.hidden_dim = hidden_dim
        self.nums_head = nums_head
        self.nums_key_values_head = nums_key_values_head
        self.head_dim = hidden_dim // nums_head

        # Q projects to the full head count; K/V project to the reduced count.
        self.q = nn.Linear(hidden_dim, nums_head * self.head_dim)
        self.k = nn.Linear(hidden_dim, nums_key_values_head * self.head_dim)
        self.v = nn.Linear(hidden_dim, nums_key_values_head * self.head_dim)

        self.o = nn.Linear(hidden_dim, hidden_dim)

    def forward(self, x, attention_mask=None):
        """Apply grouped-query attention.

        Args:
            x: input of shape (batch, seq, hidden_dim).
            attention_mask: optional mask broadcastable to
                (batch, nums_head, seq, seq); positions where the mask is 0
                are excluded from attention. `None` means no masking.

        Returns:
            Tensor of shape (batch, seq, hidden_dim).
        """
        batch_size, seq, _ = x.size()

        # Project and reshape to (batch, heads, seq, head_dim).
        q = self.q(x).view(batch_size, seq, self.nums_head, self.head_dim).transpose(1, 2)
        k = self.k(x).view(batch_size, seq, self.nums_key_values_head, self.head_dim).transpose(1, 2)
        # BUG FIX: original code did `v = k.view(...)`, discarding the value
        # projection and using keys as values.
        v = self.v(x).view(batch_size, seq, self.nums_key_values_head, self.head_dim).transpose(1, 2)

        # Expand each K/V head across its query-head group.
        group_size = self.nums_head // self.nums_key_values_head
        k = k.repeat_interleave(group_size, dim=1)
        v = v.repeat_interleave(group_size, dim=1)

        # Scaled dot-product attention scores: (batch, heads, seq, seq).
        attention_score = (q @ k.transpose(-2, -1)) / math.sqrt(self.head_dim)

        # FIX: the mask parameter was previously accepted but never applied.
        if attention_mask is not None:
            attention_score = attention_score.masked_fill(attention_mask == 0, float("-inf"))

        attention_weight = torch.softmax(attention_score, dim=-1)

        output = (attention_weight @ v).transpose(1, 2).contiguous()

        # Merge heads back to hidden_dim and apply the output projection.
        return self.o(output.view(batch_size, seq, -1))
# %%
# Smoke test: batch of 3, sequence length 2, model width 128,
# with 8 query heads sharing 4 key/value heads.
x = torch.rand(3, 2, 128)
net = GroupQueryAttention(hidden_dim=128, nums_head=8, nums_key_values_head=4)
# %%
net(x)
# %%
net(x).shape
# %%
