import os
import random

import numpy as np
import pytest
import torch
import torch_npu

import test_common
from lightning_attn import lightning_attention

# Smaller debug configuration, kept for quick local iteration:
# NUM_HEADS = [1, 2]
# HEAD_SIZES = [16]
# BATCH_SIZES = [1, 2]
# SEQ_LENGTHS = [16]
# DTYPES = [torch.float16, torch.bfloat16]
# Candidate sweep values. NOTE(review): these lists are only referenced by
# the commented-out per-dimension @pytest.mark.parametrize variant further
# below; the active test draws its configurations from TEST_CASES instead.
NUM_HEADS = [4, 8, 12, 16]
HEAD_SIZES = [32, 64, 128]
BATCH_SIZES = [1, 4, 8, 16]
SEQ_LENGTHS = [128, 256, 512, 1024]
DTYPES = [torch.float16, torch.bfloat16]
DEVICE = "npu"  # tensors default to the Ascend NPU backend

# Earlier reference implementation, superseded by the active version below
# (which additionally reproduces the kernel's per-block kv_cache layout).
# Kept commented out for comparison.
# def reference_lightning_attention(q, k, v, ed, block_size, kv_history):
#     """Reference implementation of lightning attention core algorithm

# #     The difference from the main implementation is that this processes
# #     each step sequentially, instead of using parallelized triton kernels
# #     """
# #     B, H, S, D = q.shape
# #     E = v.shape[-1]
# #     dtype = q.dtype
# #     output = torch.zeros((B, H, S, E), dtype=dtype, device=q.device)

# #     if kv_history is None:
# #         kv_cache = torch.zeros((B, H, D, E), dtype=dtype, device=q.device)
# #     else:
# #         kv_cache = kv_history.clone()

# #     if ed.dim() == 1:
# #         decay = torch.exp(-ed).view(1, -1, 1, 1)
# #     else:
# #         decay = torch.exp(-ed)

# #     for b in range(B):
# #         for step in range(S):
# #             q_bs = q[b, :, step]
# #             k_bs = k[b, :, step]
# #             v_bs = v[b, :, step]

# #             for h in range(H):
# #                 kv_outer = torch.outer(k_bs[h], v_bs[h])

# #                 kv_cache[b, h] = decay[0, h, 0, 0] * kv_cache[b, h] + kv_outer

# #                 output[b, h, step] = torch.matmul(q_bs[h], kv_cache[b, h])

# #     kv_reshaped = kv_cache.unsqueeze(2).to(torch.float32)
# #     final_kv_cache = torch.cat([kv_reshaped, kv_reshaped], dim=2)

# #     return output, final_kv_cache


def reference_lightning_attention(q, k, v, ed, block_size, kv_history):
    """Reference implementation of the lightning attention core algorithm.

    The difference from the main implementation is that this processes
    each step sequentially, instead of using parallelized triton kernels,
    so it is slow but easy to verify against.

    Args:
        q: query tensor of shape (B, H, S, D).
        k: key tensor of shape (B, H, S, D).
        v: value tensor of shape (B, H, S, E).
        ed: per-head decay exponents; a 1-D tensor of length H, or a tensor
            already shaped to broadcast as (1, H, 1, 1).
        block_size: sequence block length used when laying out the per-block
            kv contributions in the returned cache.
        kv_history: optional (B, H, D, E) initial kv state, or None for a
            zero initial state.

    Returns:
        output: (B, H, S, E) attention output in the input dtype.
        final_kv_cache: (B, H, NUM_BLOCK + 1, D, E) float32 tensor holding
            each block's incremental kv contribution followed by the fully
            updated kv state as the last slice along dim 2.
    """
    B, H, S, D = q.shape
    E = v.shape[-1]
    dtype = q.dtype
    device = q.device
    output = torch.zeros((B, H, S, E), dtype=dtype, device=device)

    # Materialize the initial kv state once; it is needed both as the
    # starting point of the recurrence and for the decayed-history term.
    if kv_history is None:
        initial_kv = torch.zeros((B, H, D, E), dtype=dtype, device=device)
    else:
        initial_kv = kv_history.clone()
    kv_cache = initial_kv.clone()

    # Normalize decay to shape (1, H, 1, 1); decay[0, h, 0, 0] is head h's
    # per-step retention factor.
    if ed.dim() == 1:
        decay = torch.exp(-ed).view(1, -1, 1, 1)
    else:
        decay = torch.exp(-ed)

    # Sequential recurrence: kv <- decay * kv + outer(k_t, v_t),
    # output_t = q_t @ kv.
    for b in range(B):
        for step in range(S):
            q_bs = q[b, :, step]
            k_bs = k[b, :, step]
            v_bs = v[b, :, step]

            for h in range(H):
                kv_outer = torch.outer(k_bs[h], v_bs[h])

                kv_cache[b, h] = decay[0, h, 0, 0] * kv_cache[b, h] + kv_outer

                output[b, h, step] = torch.matmul(q_bs[h], kv_cache[b, h])

    # Per-block incremental kv contributions, for kv_cache shape matching
    # with the kernel's block layout.
    NUM_BLOCK = (S + block_size - 1) // block_size
    kv_blocks = [torch.zeros((B, H, D, E), dtype=dtype, device=device)
                 for _ in range(NUM_BLOCK)]

    # weights[h, step] = decay_h ** (S - 1 - step): how much of step's outer
    # product survives until the end of the sequence.
    weights = torch.zeros((H, S), dtype=dtype, device=device)
    for h in range(H):
        dec_h = decay[0, h, 0, 0]
        for step in range(S):
            weights[h, step] = dec_h ** (S - 1 - step)

    # Accumulate each step's decayed outer product into its block bucket
    # (independent of the previous kv state).
    for b in range(B):
        for step in range(S):
            block_idx = min(step // block_size, NUM_BLOCK - 1)
            k_bs = k[b, :, step]
            v_bs = v[b, :, step]
            for h in range(H):
                kv_outer = torch.outer(k_bs[h], v_bs[h])
                kv_blocks[block_idx][b, h] += weights[h, step] * kv_outer

    # Full incremental = sum of all block contributions.
    full_incremental = sum(kv_blocks)

    # Decayed initial history: initial * decay_h ** S per head.
    power_decay = torch.zeros((B, H, 1, 1), dtype=dtype, device=device)
    for h in range(H):
        dec_h = decay[0, h, 0, 0]
        power_decay[:, h] = dec_h ** S
    # BUG FIX: the previous version multiplied `kv_history` directly, which
    # raised a TypeError when kv_history was None even though the None case
    # is explicitly supported above. Use the materialized initial state.
    decayed_history = initial_kv * power_decay

    # Updated kv state = decayed initial history + all incremental blocks.
    updated_kv_cache = decayed_history + full_incremental

    # Final layout: per-block incrementals followed by the updated state,
    # stacked along a new dim=2 and promoted to float32.
    kv_reshaped_blocks = [kb.unsqueeze(2).to(torch.float32) for kb in kv_blocks]
    updated_reshaped = updated_kv_cache.unsqueeze(2).to(torch.float32)
    final_kv_cache = torch.cat(kv_reshaped_blocks + [updated_reshaped], dim=2)

    return output, final_kv_cache

# Explicit (batch_size, num_heads, head_size, seq_len, dtype) configurations
# consumed by test_lightning_attention_reference.
TEST_CASES = [
    # Basic test set: common batch sizes with medium sequence lengths
    [1, 4, 64, 512, torch.float16],
    [1, 4, 64, 1024, torch.float16],
    [1, 8, 64, 512, torch.float16],
    [1, 8, 64, 1024, torch.float16],
    [1, 12, 64, 512, torch.float16],
    [1, 12, 64, 1024, torch.float16],
    [1, 16, 64, 512, torch.float16],
    [1, 16, 64, 1024, torch.float16],
    # =========
    # [1, 4, 128, 512, torch.float16],
    # [1, 4, 128, 1024, torch.float16],
    # [1, 8, 128, 512, torch.float16],
    # [1, 8, 128, 1024, torch.float16],
    # [1, 12, 128, 512, torch.float16],
    # [1, 12, 128, 1024, torch.float16],
    # [1, 16, 128, 512, torch.float16],
    # [1, 16, 128, 1024, torch.float16],
    # [1, 4, 32, 512, torch.float16],
    # [1, 4, 32, 1024, torch.float16],
    # [1, 8, 32, 512, torch.float16],
    # [1, 8, 32, 1024, torch.float16],
    # =========

    # Batch scaling tests
    [4, 4, 64, 512, torch.float16],
    [4, 4, 64, 1024, torch.float16],
    [4, 8, 64, 512, torch.float16],
    [4, 8, 64, 1024, torch.float16],
    [4, 12, 64, 512, torch.float16],
    [4, 12, 64, 1024, torch.float16],
    [4, 16, 64, 512, torch.float16],
    [4, 16, 64, 1024, torch.float16],
    # ===========
    # [4, 4, 128, 512, torch.float16],
    # [4, 4, 128, 1024, torch.float16],
    # [4, 8, 128, 512, torch.float16],
    # [4, 8, 128, 1024, torch.float16],
    # [4, 12, 128, 512, torch.float16],
    # [4, 12, 128, 1024, torch.float16],
    # [4, 16, 128, 512, torch.float16],
    # [4, 16, 128, 1024, torch.float16],
    # [4, 4, 32, 512, torch.float16],
    # [4, 4, 32, 1024, torch.float16],
    # [4, 8, 32, 512, torch.float16],
    # [4, 8, 32, 1024, torch.float16],
    # ===========

    # Large batch tests
    [8, 4, 64, 512, torch.float16],
    [8, 4, 64, 1024, torch.float16],
    [8, 8, 64, 512, torch.float16],
    [8, 8, 64, 1024, torch.float16],
    [8, 12, 64, 512, torch.float16],
    [8, 12, 64, 1024, torch.float16],
    [8, 16, 64, 512, torch.float16],
    [8, 16, 64, 1024, torch.float16],
    # ===========
    # [8, 4, 128, 512, torch.float16],
    # [8, 4, 128, 1024, torch.float16],
    # [8, 8, 128, 512, torch.float16],
    # [8, 8, 128, 1024, torch.float16],
    # [8, 12, 128, 512, torch.float16],
    # [8, 12, 128, 1024, torch.float16],
    # [8, 16, 128, 512, torch.float16],
    # [8, 16, 128, 1024, torch.float16],
    # [8, 4, 32, 512, torch.float16],
    # [8, 4, 32, 1024, torch.float16],
    # [8, 8, 32, 512, torch.float16],
    # [8, 8, 32, 1024, torch.float16],
    # ===========

    # Very large batch tests
    [16, 4, 64, 512, torch.float16],
    [16, 4, 64, 1024, torch.float16],
    [16, 8, 64, 512, torch.float16],
    [16, 8, 64, 1024, torch.float16],
    [16, 12, 64, 512, torch.float16],
    [16, 12, 64, 1024, torch.float16],
    [16, 16, 64, 512, torch.float16],
    [16, 16, 64, 1024, torch.float16],
    # ===========
    [16, 4, 128, 512, torch.float16],
    [16, 4, 128, 1024, torch.float16],
    [16, 8, 128, 512, torch.float16],
    [16, 8, 128, 1024, torch.float16],
    [16, 12, 128, 512, torch.float16],
    [16, 12, 128, 1024, torch.float16],
    #
    [16, 16, 128, 512, torch.float16],
    [16, 16, 128, 1024, torch.float16],
    [16, 4, 32, 512, torch.float16],
    [16, 4, 32, 1024, torch.float16],
    [16, 8, 32, 512, torch.float16],
    [16, 8, 32, 1024, torch.float16],

    # bfloat16 dtype test set
    [1, 4, 64, 512, torch.bfloat16],
    [1, 8, 64, 1024, torch.bfloat16],
    [1, 12, 128, 512, torch.bfloat16],
    [1, 16, 128, 1024, torch.bfloat16],
    [4, 4, 64, 512, torch.bfloat16],
    [4, 8, 64, 1024, torch.bfloat16],
    [4, 12, 128, 512, torch.bfloat16],
    [4, 16, 128, 1024, torch.bfloat16],
    [8, 4, 64, 512, torch.bfloat16],
    [8, 8, 64, 1024, torch.bfloat16],
    [8, 12, 128, 512, torch.bfloat16],
    [8, 16, 128, 1024, torch.bfloat16],
    [16, 4, 64, 512, torch.bfloat16],
    [16, 8, 64, 1024, torch.bfloat16],
    [16, 12, 128, 512, torch.bfloat16],
    [16, 16, 128, 1024, torch.bfloat16],
    [1, 4, 32, 512, torch.bfloat16],
    [4, 8, 32, 1024, torch.bfloat16],
    [8, 12, 32, 512, torch.bfloat16],
    [16, 16, 32, 1024, torch.bfloat16],

    # # Special long-sequence tests
    # [1, 4, 64, 2048, torch.float16],
    # [1, 8, 64, 2048, torch.float16],
    # [1, 12, 64, 2048, torch.float16],
    # [1, 16, 64, 2048, torch.float16],
    # [4, 4, 64, 2048, torch.float16],
    # [4, 8, 64, 2048, torch.float16],
    # [1, 4, 128, 2048, torch.float16],
    # [1, 8, 128, 2048, torch.float16],
    # [1, 4, 64, 2048, torch.bfloat16],
    # [4, 8, 64, 2048, torch.bfloat16]
]

@pytest.mark.parametrize("test_case", TEST_CASES)
@torch.inference_mode()
def test_lightning_attention_reference(test_case):
    """Compare the lightning_attention kernel against the sequential
    reference implementation for one test configuration.

    Args:
        test_case: (batch_size, num_heads, head_size, seq_len, dtype) tuple
            drawn from TEST_CASES.
    """
    (batch_size, num_heads, head_size, seq_len, dtype) = test_case
    torch.set_default_device(DEVICE)
    # Seed every RNG in use so failures are reproducible across runs.
    torch.manual_seed(42)
    np.random.seed(42)
    random.seed(42)
    # The NPU index was previously hard-coded to 6, which breaks on machines
    # without that device; keep 6 as the default but allow an override.
    torch.npu.set_device(int(os.environ.get("TEST_NPU_DEVICE", "6")))

    # Small-magnitude inputs keep fp16/bf16 accumulation error manageable.
    base = 0.01
    q = base * torch.randn(
        batch_size, num_heads, seq_len, head_size, dtype=dtype)
    k = base * torch.randn(
        batch_size, num_heads, seq_len, head_size, dtype=dtype)
    v = base * torch.randn(
        batch_size, num_heads, seq_len, head_size, dtype=dtype)

    # Per-head decay exponents 0.1, 0.2, ... — higher heads decay faster.
    ed = torch.zeros(num_heads, dtype=dtype, device=DEVICE)
    for h in range(num_heads):
        ed[h] = 0.1 * (h + 1)

    kv_history = base * torch.randn(batch_size,
                                    num_heads,
                                    head_size,
                                    head_size,
                                    dtype=dtype,
                                    device=DEVICE)

    # NOTE(review): the clone presumably isolates the kernel's in-place
    # updates from the reference's input — confirm against lightning_attn.
    kv_history_clone = kv_history.clone()

    ref_output, ref_kv_cache = reference_lightning_attention(
        q, k, v, ed, 256, kv_history)

    actual_output, actual_kv_cache = lightning_attention(
        q, k, v, ed, 256, kv_history_clone)

    # Map the torch dtype to the signature string expected by test_common.
    sigtype_dict = {
        torch.float16: "float16",
        torch.bfloat16: "bfloat16",
    }
    sigtype = sigtype_dict[dtype]
    test_common.validate_cmp(sigtype, actual_output, ref_output)
    test_common.validate_cmp(sigtype, actual_kv_cache, ref_kv_cache)