import sys 
import os
import random
import torch
import numpy as np
import math

sys.path.append(os.path.join(os.path.dirname(__file__), "../src"))
from block_sparse_attn_triton import block_sparse_attention, get_sparse_attn_mask
from block_sparse_attn_torch import reference_block_sparse_attention
from gen_test_input import gen_input, TEST_CASES, TEST_CASE_HEADER

sys.path.append(os.path.join(os.path.dirname(__file__), "../../../precision"))
from compare import check_operator_accuracy

sys.path.append(os.path.join(os.path.dirname(__file__), "../../../profiler"))
from triton_profiler import triton_profiler_wrapper, times_list_header

sys.path.append(os.path.join(os.path.dirname(__file__), "../../../utils"))
# from utils import write_csv
from templates import prof_template

# Runtime device configuration.
DEVICE = "npu"
torch.set_default_device(DEVICE)
# Device index was hard-coded to 6; read it from the environment so the
# script is portable across hosts, defaulting to the original value.
torch.npu.set_device(int(os.environ.get("NPU_DEVICE_ID", "6")))
# Seed all RNG sources for reproducible test inputs.
seed = 42
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)

def prof_block_sparse_attention(
    test_case,
):
    """Profile the block-sparse attention kernel for a single test case.

    Args:
        test_case: Tuple of parameters forwarded to ``gen_input`` to build
            the q/k/v tensors and the sparsity configuration.

    Returns:
        The list of measured times produced by ``triton_profiler_wrapper``
        (also printed to stdout).
    """
    q, k, v, homo_head, block_size, local_blocks, vert_strides = gen_input(*test_case)
    # Standard scaled-dot-product attention scale: 1/sqrt(head_dim).
    scale = 1.0 / math.sqrt(float(q.shape[3]))

    # Build the sparse mask once, outside the timed callable, so that mask
    # construction cost is excluded from the measured kernel time.
    mask_csr, block_sparse_pattern, mask_dense = get_sparse_attn_mask(
        q, k.shape[2], block_size=block_size,
        local_blocks=local_blocks,
        vert_strides=vert_strides,
        homo_head=homo_head,
        return_dense=True
    )

    def call():
        # Return value is intentionally discarded; only timing matters here.
        block_sparse_attention(
            q, k, v, block_size, local_blocks,
            vert_strides, homo_head, scale, block_sparse_pattern
        )

    times_list = triton_profiler_wrapper(call)
    print(times_list)
    return times_list

if __name__ == "__main__":
    # CSV header: test-case columns followed by the timing columns.
    header = TEST_CASE_HEADER + times_list_header
    prof_template(
        "BlockSparseAttn",
        TEST_CASES,
        prof_block_sparse_attention,
        header,
    )