import torch
from mha import MultiHeadAttention as MHA
from mqa import MultiQueryAttention as MQA
from gqa import GroupQueryAttention as GQA
from mla_cd import MultiHeadLatentAttention as MLA_CD
from mla_cc import MultiHeadLatentAttention as MLA_CC
from mla_absorb import MultiHeadLatentAttention as MLA_ABSORB

# --- Shared demo configuration for all attention variants ---
bsz = 4  # batch size
seq_len = 14  # sequence length (tokens per example)
hidden_size = 1024  # model / embedding dimension
num_heads = 16  # number of query heads
head_dim = 64  # per-head dimension (used by MHA / MQA / GQA)
num_kv_heads = 8  # number of key/value heads for GQA (query heads share them in groups)
q_lora_rank = 128  # rank of the low-rank query projection (MLA variants)
qk_rope_head_dim = 64  # per-head dim of the RoPE (rotary) part of Q/K (MLA variants)
qk_nope_head_dim = 32  # per-head dim of the non-RoPE part of Q/K (MLA variants)
kv_lora_rank = 128  # rank of the compressed KV latent (MLA variants)
v_head_dim = 128  # per-head value dimension (MLA variants)
use_cache = True  # enable KV caching in the modules that accept it
device = "cuda" if torch.cuda.is_available() else "cpu"

# Random input batch of shape (bsz, seq_len, hidden_size), values in [0, 1).
hidden_states = torch.rand(bsz, seq_len, hidden_size, device=device)
# MHA and MQA take identical constructor arguments, so build them from one
# shared keyword-argument dict.
dense_attn_kwargs = dict(
    hidden_size=hidden_size,
    num_heads=num_heads,
    head_dim=head_dim,
    use_cache=use_cache,
)
mha = MHA(**dense_attn_kwargs).to(device)
mqa = MQA(**dense_attn_kwargs).to(device)
# Grouped-query attention: num_heads query heads grouped over num_kv_heads
# key/value heads.
# NOTE(review): the original omitted use_cache here even though the script
# sets use_cache = True and passes it to every other attention variant —
# that looked accidental, so it is passed now for consistency. Confirm
# GQA's constructor accepts a use_cache keyword.
gqa = GQA(
    hidden_size=hidden_size,
    num_heads=num_heads,
    num_kv_heads=num_kv_heads,
    head_dim=head_dim,
    use_cache=use_cache,
).to(device)
# The three MLA variants share all low-rank / RoPE hyper-parameters; only
# v_head_dim differs (the _absorb variant is constructed without it).
# NOTE(review): the _cd/_cc/_absorb suffixes presumably denote different
# caching / weight-absorption strategies — verify against the mla_* modules.
mla_kwargs = dict(
    hidden_size=hidden_size,
    num_heads=num_heads,
    q_lora_rank=q_lora_rank,
    qk_rope_head_dim=qk_rope_head_dim,
    qk_nope_head_dim=qk_nope_head_dim,
    kv_lora_rank=kv_lora_rank,
    use_cache=use_cache,
)
mla_cd = MLA_CD(v_head_dim=v_head_dim, **mla_kwargs).to(device)
mla_cc = MLA_CC(v_head_dim=v_head_dim, **mla_kwargs).to(device)
mla_absorb = MLA_ABSORB(**mla_kwargs).to(device)

# Run one forward pass per variant and report the output shape. The label
# format mirrors f-string "{expr=}" debug output (label, '=', then repr).
for label, module in (
    ("mha", mha),
    ("mqa", mqa),
    ("gqa", gqa),
    ("mla_cd", mla_cd),
    ("mla_cc", mla_cc),
    ("mla_absorb", mla_absorb),
):
    print(f"{label}(hidden_states).shape={module(hidden_states).shape!r}")
