import random
import torch
import torch_npu
import numpy as np
import triton
import math

def reference_unified_attention(
    q, k, v, kv_lens, block_table,
    scale, sliding_window, soft_cap
):
    """Pure-PyTorch reference for paged (block-table) causal attention.

    Args:
        q: queries, shape (num_seqs, q_len, num_query_heads, head_size).
        k, v: paged key/value caches, shape
            (num_blocks, block_size, num_kv_heads, head_size).
        kv_lens: per-sequence KV lengths, indexable with length num_seqs.
        block_table: (num_seqs, max_blocks_per_seq) logical-to-physical
            block index map.
        scale: softmax scale applied to the raw attention scores.
        sliding_window: optional window size; keys older than the window
            are masked out in addition to the causal mask.
        soft_cap: optional logit soft cap, applied as
            soft_cap * tanh(score / soft_cap) before masking.

    Returns:
        Tensor of shape (num_seqs * q_len, num_query_heads, head_size),
        cast back to q.dtype.
    """
    dtype = q.dtype
    num_seqs, q_len, num_query_heads, head_size = q.shape
    _, block_size, num_kv_heads, head_size = k.shape

    outputs = []
    for i in range(num_seqs):
        kv_len = kv_lens[i]
        # Gather this sequence's KV blocks and flatten them into a
        # contiguous (kv_len, num_kv_heads, head_size) view; the tail of
        # the last block is padding and is sliced off.
        num_kv_blocks = (kv_len + block_size - 1) // block_size
        block_indices = block_table[i, :num_kv_blocks]
        k_i = k[block_indices]
        v_i = v[block_indices]

        k_j = k_i.view(-1, num_kv_heads, head_size)[:kv_len]
        v_j = v_i.view(-1, num_kv_heads, head_size)[:kv_len]
        q_i = q[i].view(-1, num_query_heads, head_size)

        # Grouped-query attention: replicate each KV head so head counts match.
        if q_i.shape[1] != k_j.shape[1]:
            k_j = torch.repeat_interleave(k_j, q_i.shape[1] // k_j.shape[1], dim=1)
            v_j = torch.repeat_interleave(v_j, q_i.shape[1] // v_j.shape[1], dim=1)

        # Compute scores in float32 for numerical stability.
        attn = torch.einsum("mhd,nhd->hmn",
                            q_i.to(torch.float32), k_j.to(torch.float32)) * scale

        # Causal (upper-triangular) mask: query row m may attend to keys up
        # to position kv_len - q_len + m. Build the mask on the same device
        # as the scores so masked_fill_ does not fail off-CPU.
        empty_mask = torch.ones(q_len, kv_len, device=attn.device)
        mask = torch.triu(empty_mask, diagonal=kv_len - q_len + 1).bool()

        # Apply the logit soft cap before masking.
        if soft_cap is not None and soft_cap > 0:
            attn = soft_cap * torch.tanh(attn / soft_cap)

        # Sliding-window mask: additionally hide keys outside the window.
        if sliding_window is not None:
            sliding_window_mask = torch.triu(
                empty_mask,
                diagonal=kv_len - (q_len + sliding_window) + 1,
            ).bool().logical_not()
            mask |= sliding_window_mask

        attn.masked_fill_(mask, float("-inf"))
        attn = torch.softmax(attn, dim=-1)
        output_i = torch.einsum('hmn,nhd->mhd', attn, v_j.to(torch.float32)).to(dtype)
        outputs.append(output_i)

    return torch.cat(outputs, dim=0)