import random
import torch
import torch_npu
import numpy as np
import triton


def reference_lightning_attention(q, k, v, ed, kv_history, block_size):
    """Reference implementation of the lightning-attention core algorithm.

    Unlike the production implementation (parallelized Triton kernels), this
    processes every token step sequentially, which makes it easy to verify.

    Args:
        q: Query tensor of shape (B, H, S, D).
        k: Key tensor of shape (B, H, S, D).
        v: Value tensor of shape (B, H, S, E).
        ed: Per-head decay exponents; shape (H,), or any shape already
            broadcastable to (1, H, 1, 1). The decay factor is exp(-ed).
        kv_history: Optional previous KV state of shape (B, H, D, E), or
            None to start from a zero state. Never mutated in place.
        block_size: Number of tokens per block; a KV snapshot is recorded
            at the start of each block.

    Returns:
        Tuple of (output, kv_cache, kv), all cast back to q.dtype:
          output:   (B, H, S, E) attention output per token.
          kv_cache: (B, H, D, E) final KV state after all S steps.
          kv:       (B, H, num_block, D, E) KV state at the start of each
                    block (block 0 holds the incoming history, or zeros).
    """
    cal_type = torch.float32  # accumulate in fp32 for numerical stability
    input_type = q.dtype

    # S -> number of tokens
    B, H, S, D = q.shape
    E = v.shape[-1]
    output = torch.zeros((B, H, S, E), dtype=cal_type, device=q.device)

    # clone() so the caller's history tensor is never mutated in place.
    # kv_cache.shape == (B, H, D, E)
    if kv_history is None:
        kv_cache = torch.zeros((B, H, D, E), dtype=cal_type, device=q.device)
    else:
        kv_cache = kv_history.to(cal_type).clone()

    # Ceiling division; equivalent to triton.cdiv(S, block_size) but keeps
    # this sequential reference free of the triton dependency.
    num_block = (S + block_size - 1) // block_size
    kv = torch.zeros((B, H, num_block, D, E), dtype=cal_type, device=q.device)

    # Convert decay exponents to broadcastable matrix form:
    # ed.shape == (H,) -> (1, H, 1, 1)
    if ed.dim() == 1:
        decay = torch.exp(-ed.to(cal_type)).view(1, -1, 1, 1)
    else:
        decay = torch.exp(-ed.to(cal_type))

    for b in range(B):
        for h in range(H):
            # Block 0 starts from the incoming state. Read from kv_cache
            # rather than kv_history so the kv_history=None path works too
            # (the original indexed kv_history here, crashing on None).
            kv[b, h, 0] = kv_cache[b, h]

            for step in range(S):
                # Per-token slices promoted to fp32.
                # q_bhs/k_bhs: (D,), v_bhs: (E,)
                q_bhs = q[b, h, step].to(cal_type)
                k_bhs = k[b, h, step].to(cal_type)
                v_bhs = v[b, h, step].to(cal_type)

                # Rank-1 KV contribution of this token: (D,) x (E,) -> (D, E)
                kv_outer = torch.outer(k_bhs, v_bhs)

                # Decay the running state, then add the new contribution.
                # Same update order as the Triton kernel. Processing tokens
                # one-by-one implicitly applies the causal mask.
                kv_cache[b, h] = decay[0, h, 0, 0] * kv_cache[b, h] + kv_outer

                # Snapshot the state at the start of each subsequent block.
                if (step + 1) % block_size == 0 and step + 1 < S:
                    kv[b, h, (step + 1) // block_size] = kv_cache[b, h]

                # Attention output for this token: (D,) @ (D, E) -> (E,)
                output[b, h, step] = torch.matmul(q_bhs, kv_cache[b, h])

    # Cast accumulators back to the caller's dtype.
    final_output = output.to(input_type)
    final_kv_cache = kv_cache.to(input_type)
    final_kv = kv.to(input_type)
    return final_output, final_kv_cache, final_kv
