import pytest
import torch
import torch_npu
from lightning_attn import lightning_attention

# Parametrization grids for the test below; kept small so the full
# cross-product (batch x heads x head_size x seq_len x dtype) stays fast.
NUM_HEADS = [1, 2]
HEAD_SIZES = [16]
BATCH_SIZES = [1, 2]
SEQ_LENGTHS = [16]
DTYPES = [torch.float16, torch.bfloat16]
# Target device string for Ascend NPU (provided by the torch_npu backend).
DEVICE = "npu"

def reference_lightning_attention(q, k, v, ed, block_size, kv_history):
    """Sequential reference implementation of the lightning attention core.

    Unlike the production implementation, this processes each timestep one
    at a time instead of using parallelized triton kernels, so it is slow
    but straightforward to audit.

    Args:
        q, k, v: ``(B, H, S, D)`` query/key/value tensors (here E == D,
            since ``v`` shares the head size).
        ed: per-head decay exponents; 1-D of shape ``(H,)`` or an already
            broadcastable higher-rank tensor.
        block_size: unused by this sequential version; kept only for
            signature parity with ``lightning_attention``.
        kv_history: optional ``(B, H, D, E)`` initial KV state; cloned so
            the caller's tensor is never mutated.

    Returns:
        ``(output, final_kv_cache)`` where ``output`` is ``(B, H, S, E)``
        in the input dtype and ``final_kv_cache`` is the terminal KV state
        duplicated along a new dim 2 — shape ``(B, H, 2, D, E)`` — cast to
        float32 to match the layout ``lightning_attention`` returns.
    """
    B, H, S, D = q.shape
    E = v.shape[-1]
    dtype = q.dtype
    output = torch.zeros((B, H, S, E), dtype=dtype, device=q.device)

    # Start from a zero cache unless a history is supplied; clone to avoid
    # mutating the caller's tensor.
    if kv_history is None:
        kv_cache = torch.zeros((B, H, D, E), dtype=dtype, device=q.device)
    else:
        kv_cache = kv_history.clone()

    # Per-head decay factor exp(-ed), broadcast to (1, H, 1, 1).
    if ed.dim() == 1:
        decay = torch.exp(-ed).view(1, -1, 1, 1)
    else:
        decay = torch.exp(-ed)

    for b in range(B):
        for step in range(S):
            q_bs = q[b, :, step]
            k_bs = k[b, :, step]
            v_bs = v[b, :, step]

            for h in range(H):
                # Rank-1 update of the KV state for this head ...
                kv_outer = torch.outer(k_bs[h], v_bs[h])

                # ... decayed accumulation into the running cache ...
                kv_cache[b, h] = decay[0, h, 0, 0] * kv_cache[b, h] + kv_outer

                # ... and the attention output for this timestep.
                output[b, h, step] = torch.matmul(q_bs[h], kv_cache[b, h])

    # The production kernel returns the cache duplicated along dim 2 in fp32.
    kv_reshaped = kv_cache.unsqueeze(2).to(torch.float32)
    final_kv_cache = torch.cat([kv_reshaped, kv_reshaped], dim=2)

    return output, final_kv_cache


@pytest.mark.parametrize("batch_size", BATCH_SIZES)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", HEAD_SIZES)
@pytest.mark.parametrize("seq_len", SEQ_LENGTHS)
@pytest.mark.parametrize("dtype", DTYPES)
@torch.inference_mode()
def test_lightning_attention_reference(
    batch_size: int,
    num_heads: int,
    head_size: int,
    seq_len: int,
    dtype: torch.dtype,
):
    """Compare the NPU lightning_attention kernel against the sequential
    reference implementation above, on both outputs and the final KV cache."""
    torch.set_default_device(DEVICE)
    torch.manual_seed(42)
    # NOTE(review): hard-coded NPU card index — presumably matches the CI
    # host layout; confirm or make configurable via an env var.
    torch.npu.set_device(6)

    # Small-magnitude inputs keep fp16/bf16 accumulation error bounded.
    base = 0.01
    q = base * torch.randn(
        batch_size, num_heads, seq_len, head_size, dtype=dtype)
    k = base * torch.randn(
        batch_size, num_heads, seq_len, head_size, dtype=dtype)
    v = base * torch.randn(
        batch_size, num_heads, seq_len, head_size, dtype=dtype)

    # Distinct per-head decay exponents: 0.1, 0.2, ...
    ed = 0.1 * torch.arange(1, num_heads + 1, dtype=dtype, device=DEVICE)

    kv_history = base * torch.randn(batch_size,
                                    num_heads,
                                    head_size,
                                    head_size,
                                    dtype=dtype,
                                    device=DEVICE)

    # Clone so the reference and the kernel each start from identical,
    # independent KV state (both may mutate their copy internally).
    kv_history_clone = kv_history.clone()

    ref_output, ref_kv_cache = reference_lightning_attention(
        q, k, v, ed, 256, kv_history)

    actual_output, actual_kv_cache = lightning_attention(
        q, k, v, ed, 256, kv_history_clone)

    # Fail fast on layout mismatches before the numeric comparison.
    assert ref_output.shape == (batch_size, num_heads, seq_len, head_size)
    assert ref_kv_cache.shape == actual_kv_cache.shape

    # Loose tolerances: low-precision dtypes plus a sequential-vs-blocked
    # accumulation order difference.
    atol, rtol = 1.5e-1, 1.5e-1
    torch.testing.assert_close(ref_output, actual_output, rtol=rtol, atol=atol)
    torch.testing.assert_close(ref_kv_cache,
                               actual_kv_cache,
                               rtol=rtol,
                               atol=atol)