import sys 
import os
import random
import torch
import numpy as np
import math

sys.path.append(os.path.join(os.path.dirname(__file__), "../src"))
from unified_attn_triton import unified_attention
from unified_attn_torch import reference_unified_attention
from gen_test_input import gen_input, TEST_CASES, TEST_CASE_HEADER

sys.path.append(os.path.join(os.path.dirname(__file__), "../../../precision"))
from compare import check_operator_accuracy

sys.path.append(os.path.join(os.path.dirname(__file__), "../../../profiler"))
from triton_profiler import triton_profiler_wrapper, times_list_header

sys.path.append(os.path.join(os.path.dirname(__file__), "../../../utils"))
# from utils import write_csv
from templates import prof_template

# Execution device: route all default tensor allocation to the NPU backend.
DEVICE = "npu"
torch.set_default_device(DEVICE)
# NOTE(review): device index 1 is hard-coded — confirm this matches the target host.
torch.npu.set_device(1)
# Fixed random seed so profiling inputs are reproducible across runs.
seed = 42
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)

def prof_unified_attention(
    test_case,
):
    """Profile one unified-attention test case and return its timing results.

    Args:
        test_case: tuple of (q_len, kv_lens, num_heads, head_size, num_blocks,
            block_size, sliding_window, soft_cap, dtype) forwarded verbatim
            to ``gen_input``.

    Returns:
        The timing list produced by ``triton_profiler_wrapper``.
    """
    # gen_input builds the concrete tensors; it also returns the (possibly
    # normalized) kv_lens / sliding_window / soft_cap actually used, so the
    # raw tuple fields are not unpacked here.
    q, k, v, block_table, kv_lens, sliding_window, soft_cap = gen_input(*test_case)
    # Standard 1/sqrt(head_size) attention scaling; head size is q's last dim.
    scale = 1.0 / math.sqrt(float(q.shape[3]))

    def call():
        # Result is discarded — only the kernel launch time is measured.
        # .flatten(0, 1) is kept so the profiled work matches the original.
        unified_attention(
            q, k, v, kv_lens, block_table,
            scale, sliding_window, soft_cap,
        ).flatten(0, 1)

    times_list = triton_profiler_wrapper(call)
    print(times_list)
    return times_list

if __name__ == "__main__":
    # CSV columns: the test-case descriptor fields followed by the timing fields.
    full_header = TEST_CASE_HEADER + times_list_header
    prof_template("UnifiedAttn", TEST_CASES, prof_unified_attention, full_header)