import torch
import triton
import triton.language as tl


# grid: (bsz, num_q_heads, Tr)
@triton.jit
def _attn_kernel(
    q_ptr,
    k_ptr,
    v_ptr,
    o_ptr,
    num_groups,
    scaling,
    stride_qb,
    stride_qh,
    stride_ql,
    stride_qd,
    stride_kb,
    stride_kh,
    stride_kl,
    stride_kd,
    stride_vb,
    stride_vh,
    stride_vl,
    stride_vd,
    stride_ob,
    stride_oh,
    stride_ol,
    stride_od,
    bsz,
    num_q_heads,
    q_len,
    kv_len,
    head_dim: tl.constexpr,
    pad_kv_len: tl.constexpr,
    Br: tl.constexpr,
    Bc: tl.constexpr,
):
    """Flash-attention forward pass (non-causal, online softmax).

    Each program computes a (Br, head_dim) tile of the output for one
    (batch, query-head) pair, streaming K/V in Bc-wide column blocks and
    maintaining the running row-max / denominator recurrence so the full
    (q_len, kv_len) score matrix is never materialized.

    num_groups = num_q_heads // num_kv_heads maps each query head onto its
    shared KV head (grouped-query attention).
    """
    bsz_id = tl.program_id(0)
    head_id = tl.program_id(1)
    seq_start = tl.program_id(2) * Br
    # Defensive guard: tolerate launchers that round grid dims upward.
    if (bsz_id >= bsz) | (head_id >= num_q_heads):
        return
    kv_head_id = head_id // num_groups

    q_ptr += bsz_id * stride_qb + head_id * stride_qh + seq_start * stride_ql
    k_ptr += bsz_id * stride_kb + kv_head_id * stride_kh
    v_ptr += bsz_id * stride_vb + kv_head_id * stride_vh
    o_ptr += bsz_id * stride_ob + head_id * stride_oh + seq_start * stride_ol

    i_offs = tl.arange(0, Br)
    j_offs = tl.arange(0, Bc)
    head_dim_offs = tl.arange(0, head_dim)

    # This tile covers rows seq_start + i_offs. q_ptr/o_ptr were already
    # advanced by seq_start, so the bounds check must include it: the old
    # `i_offs < q_len` mask read AND wrote out of bounds on the last tile
    # whenever q_len was not a multiple of Br.
    row_mask = (seq_start + i_offs) < q_len

    qi = tl.load(
        q_ptr + i_offs[:, None] * stride_ql + head_dim_offs[None, :] * stride_qd,
        mask=row_mask[:, None],
        other=0.0,
    )
    q_dtype = qi.dtype
    # Accumulate in fp32 regardless of input dtype for numerical stability.
    oi = tl.zeros((Br, head_dim), dtype=tl.float32)
    li = tl.zeros((Br,), dtype=tl.float32)  # running softmax denominator
    mi = tl.zeros((Br,), dtype=tl.float32) - float("inf")  # running row max

    for j in range(0, pad_kv_len, Bc):
        j = tl.multiple_of(j, Bc)
        col_mask = (j + j_offs) < kv_len
        # other=0.0 keeps masked lanes defined; Triton leaves masked loads
        # unspecified otherwise, and 0 * garbage in the V dot could be NaN.
        kj_T = tl.load(
            k_ptr
            + (j + j_offs[None, :]) * stride_kl
            + head_dim_offs[:, None] * stride_kd,
            mask=col_mask[None, :],
            other=0.0,
        )
        vj = tl.load(
            v_ptr
            + (j + j_offs[:, None]) * stride_vl
            + head_dim_offs[None, :] * stride_vd,
            mask=col_mask[:, None],
            other=0.0,
        )

        s = tl.dot(qi, kj_T)
        mask = row_mask[:, None] & col_mask[None, :]
        s = tl.where(mask, s * scaling, -float("inf"))
        # Online-softmax update: rescale previous partial sums by
        # exp(old_max - new_max) before folding in this block.
        m_ij = tl.max(s, axis=1)
        mi_new = tl.maximum(mi, m_ij)
        p = tl.exp(s - mi_new[:, None])
        li = tl.exp(mi - mi_new) * li + tl.sum(p, axis=1)
        # p is cast back to the input dtype so tl.dot uses the fast path;
        # the dot itself still accumulates into fp32.
        oi = oi * tl.exp(mi - mi_new)[:, None] + tl.dot(p.to(q_dtype), vj)
        mi = mi_new

    oi = (oi / li[:, None]).to(q_dtype)
    tl.store(
        o_ptr + i_offs[:, None] * stride_ol + head_dim_offs[None, :] * stride_od,
        oi,
        mask=row_mask[:, None],  # same seq_start-aware bound as the load
    )


def apply_attn(
    query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
) -> torch.Tensor:
    """Scaled dot-product attention via the Triton flash kernel.

    Supports grouped-query attention: num_q_heads must be a positive
    multiple of num_kv_heads. head_dim is assumed to be a power of two
    >= 16 (tl.arange / tl.dot constraints) -- TODO confirm with callers.

    Args:
        query: (bsz, num_q_heads, q_len, head_dim)
        key:   (bsz, num_kv_heads, kv_len, head_dim)
        value: (bsz, num_kv_heads, kv_len, head_dim)

    Returns:
        (bsz, num_q_heads, q_len, head_dim) tensor, same dtype as query.

    Raises:
        ValueError: if num_q_heads is not a positive multiple of num_kv_heads.
    """
    query = query.contiguous()
    key = key.contiguous()
    value = value.contiguous()

    bsz, num_q_heads, q_len, head_dim = query.shape
    _, num_kv_heads, kv_len, _ = key.shape
    # Floor division would silently produce wrong attention for ragged
    # head counts; fail loudly instead.
    if num_kv_heads <= 0 or num_q_heads % num_kv_heads != 0:
        raise ValueError(
            f"num_q_heads ({num_q_heads}) must be a positive multiple of "
            f"num_kv_heads ({num_kv_heads})"
        )
    num_groups = num_q_heads // num_kv_heads
    O = torch.zeros_like(query)
    BLOCK_SIZE = 32
    scaling = head_dim**-0.5

    # Br must be a power of two (tl.arange) and >= 16 (tl.dot minimum tile).
    Br = min(BLOCK_SIZE, max(triton.next_power_of_2(q_len), 16))
    Bc = BLOCK_SIZE
    # Round kv_len up to a multiple of Bc only: rounding to the next power
    # of two could nearly double the number of fully-masked K/V iterations.
    pad_kv_len = triton.cdiv(kv_len, Bc) * Bc
    # Grid dimensions need not be powers of two -- launch exactly the
    # programs required (the kernel still bounds-checks defensively).
    grid = (bsz, num_q_heads, triton.cdiv(q_len, Br))

    _attn_kernel[grid](
        q_ptr=query,
        k_ptr=key,
        v_ptr=value,
        o_ptr=O,
        num_groups=num_groups,
        scaling=scaling,
        stride_qb=query.stride(0),
        stride_qh=query.stride(1),
        stride_ql=query.stride(2),
        stride_qd=query.stride(3),
        stride_kb=key.stride(0),
        stride_kh=key.stride(1),
        stride_kl=key.stride(2),
        stride_kd=key.stride(3),
        stride_vb=value.stride(0),
        stride_vh=value.stride(1),
        stride_vl=value.stride(2),
        stride_vd=value.stride(3),
        stride_ob=O.stride(0),
        stride_oh=O.stride(1),
        stride_ol=O.stride(2),
        stride_od=O.stride(3),
        bsz=bsz,
        num_q_heads=num_q_heads,
        q_len=q_len,
        kv_len=kv_len,
        head_dim=head_dim,
        pad_kv_len=pad_kv_len,
        Br=Br,
        Bc=Bc,
    )

    return O
