import random
import torch
import torch_npu
import numpy as np
import triton
import math

def reference_block_sparse_attention(q, k, v, attn_mask=None, scale=None, block_attn_mask=None, block_size=64, do=None):
    """Eager reference attention used to validate block-sparse kernels.

    Args:
        q, k, v: (batch, heads, seq, head_dim) tensors. k/v may carry fewer
            heads than q (grouped-query attention); they are repeat-interleaved
            to match q's head count.
        attn_mask: optional dense 0/1 mask (1 = keep). Mutually exclusive with
            block_attn_mask.
        scale: softmax scale; defaults to 1/sqrt(head_dim).
        block_attn_mask: optional 0/1 mask at (seq // block_size) block
            granularity; the diagonal block is additionally made causal.
        block_size: side length of one attention block.
        do: optional upstream gradient (dense path only); when given, the
            reference dv is computed and printed for debugging.

    Returns:
        Attention output (batch, heads, seq, head_dim), cast back to q's dtype.
    """
    dtype = q.dtype
    # Grouped-query attention: expand k/v along the head dim to match q.
    q_k_ratio = q.shape[1] // k.shape[1]
    if q_k_ratio > 1:
        k = torch.repeat_interleave(k, q_k_ratio, dim=1)
        v = torch.repeat_interleave(v, q_k_ratio, dim=1)
    if scale is None:
        scale = 1.0 / math.sqrt(float(q.size(-1)))

    if block_attn_mask is not None:
        assert attn_mask is None
        outs = []
        # Process one query block at a time; keys are causally limited to
        # everything up to the end of the current block.
        for s in range(0, q.size(2), block_size):
            e = min(s + block_size, q.size(2))
            q_block = q[:, :, s:e]
            attn = torch.einsum('bhmd,bhnd->bhmn', q_block.to(torch.float32), k[:, :, :e].to(torch.float32)) * scale
            # Take this query block's row of the block-level mask and expand
            # it to element granularity via a Kronecker product.
            mask = block_attn_mask[..., s // block_size, : (s // block_size + 1)]
            mask = torch.kron(mask, torch.ones(block_size, block_size, device=mask.device))
            # Make the diagonal block strictly causal: mask only col > row so a
            # query still attends to its own position. (Using '<=' here would
            # leave the first row of the first block with no valid key and turn
            # its softmax into NaN.) The index tensor must live on mask.device
            # or masked_fill_ fails on NPU/CUDA tensors.
            idx = torch.arange(block_size, device=mask.device)
            mask[..., :, s:].masked_fill_(idx[:, None] < idx[None, :], 0)
            attn = attn.masked_fill((1 - mask).bool(), float('-inf'))
            attn = attn.softmax(-1)
            out = torch.einsum('bhmn,bhnd->bhmd', attn, v[:, :, :e].to(torch.float32))
            outs.append(out.to(dtype))
        torch_output = torch.cat(outs, dim=2)
    else:
        # Dense path: full seq x seq scores with an optional element mask.
        attn = torch.einsum('bhmd,bhnd->bhmn', q.to(torch.float32), k.to(torch.float32)) * scale
        if attn_mask is not None:
            attn = attn.masked_fill((1 - attn_mask).bool(), float('-inf'))

        attn = attn.softmax(-1)
        if do is not None:
            # Debug aid: reference dv for checking a backward kernel.
            dv = torch.einsum('bhqk,bhqd->bhkd', attn, do.to(torch.float32))
            print(f'> torch_attn computed dv: {dv=}')
        torch_output = torch.einsum('bhmn,bhnd->bhmd', attn, v.to(torch.float32)).to(dtype)
    return torch_output