import torch
from torch import nn
import Rope


class Attention(nn.Module):
    """Causal multi-head self-attention with grouped-query attention (GQA)
    and rotary position embeddings.

    Queries use ``n_q_heads`` heads while keys/values use only
    ``n_kv_heads`` heads; each group of ``n_q_heads // n_kv_heads``
    consecutive query heads shares one key/value head (Llama-style GQA).
    """

    def __init__(
        self,
        input_dim,
        n_q_heads,
        n_kv_heads,
    ):
        """
        Args:
            input_dim: model (embedding) dimension; must be divisible by
                ``n_q_heads``.
            n_q_heads: number of query heads.
            n_kv_heads: number of key/value heads; must divide ``n_q_heads``.

        Raises:
            ValueError: if the divisibility requirements are not met
                (previously the integer divisions truncated silently).
        """
        super().__init__()

        if n_q_heads % n_kv_heads != 0:
            raise ValueError("n_q_heads must be divisible by n_kv_heads")
        if input_dim % n_q_heads != 0:
            raise ValueError("input_dim must be divisible by n_q_heads")

        self._n_q_heads = n_q_heads
        self._n_kv_heads = n_kv_heads

        # How many consecutive query heads share one key/value head.
        self._group = n_q_heads // n_kv_heads

        self._head_size = input_dim // self._n_q_heads

        self._qw = nn.Linear(input_dim, self._head_size * self._n_q_heads)
        self._kw = nn.Linear(input_dim, self._head_size * self._n_kv_heads)
        self._vw = nn.Linear(input_dim, self._head_size * self._n_kv_heads)
        self._ow = nn.Linear(input_dim, input_dim)

    def forward(self, x, freq_cis):
        """Apply causal grouped-query self-attention.

        Args:
            x: input of shape ``(batch, seq, input_dim)``.
            freq_cis: precomputed rotary frequencies; the first ``seq``
                positions are passed to ``Rope.apply_rotary_emb``.

        Returns:
            Tensor of shape ``(batch, seq, input_dim)``.
        """
        _bn, _seq, _ = x.shape
        _dk = self._head_size**0.5

        _q, _k, _v = self._qw(x), self._kw(x), self._vw(x)

        _q = _q.reshape(_bn, _seq, self._n_q_heads, self._head_size)
        _k = _k.reshape(_bn, _seq, self._n_kv_heads, self._head_size)
        _v = _v.reshape(_bn, _seq, self._n_kv_heads, self._head_size)

        # Rotary embeddings are applied while heads are still in dim 2.
        _q = Rope.apply_rotary_emb(_q, freq_cis[:_seq])
        _k = Rope.apply_rotary_emb(_k, freq_cis[:_seq])

        # -> (batch, heads, seq, head_size)
        _q = _q.permute(0, 2, 1, 3)
        _k = _k.permute(0, 2, 1, 3)
        _v = _v.permute(0, 2, 1, 3)

        # BUG FIX: expand kv heads with repeat_interleave so query head i
        # shares kv head i // group (consecutive grouping, as in the Llama
        # reference `repeat_kv`). The previous
        # `[:, None].repeat(...).reshape(...)` *tiled* the heads as
        # [kv0, kv1, ..., kv0, kv1, ...], pairing query head i with kv head
        # i % n_kv_heads — the wrong head for the standard GQA layout.
        _k = torch.repeat_interleave(_k, self._group, dim=1)
        _v = torch.repeat_interleave(_v, self._group, dim=1)

        # Additive causal mask: -inf strictly above the diagonal blocks
        # attention to future positions. Built directly on x's device and
        # dtype — the old float32-on-CPU mask forced a transfer and silently
        # upcast the scores under mixed precision.
        _causal = torch.triu(
            torch.full((_seq, _seq), -torch.inf, device=x.device, dtype=x.dtype),
            diagonal=1,
        )

        _score = _q @ _k.permute(0, 1, 3, 2) / _dk
        _score = torch.softmax(_score + _causal, dim=-1)

        _o = _score @ _v

        # -> (batch, seq, heads, head_size) -> (batch, seq, input_dim)
        _o = _o.permute(0, 2, 1, 3)
        _o = _o.reshape(_bn, _seq, -1)

        return self._ow(_o)
