import sys 
import os
import random
import pytest
import torch
import torch_npu
import numpy as np
sys.path.append(os.path.join(os.path.dirname(__file__), "../src"))
from lightning_attn_triton import lightning_attention
from lightning_attn_torch import reference_lightning_attention
from gen_test_input import gen_input, TEST_CASES
sys.path.append(os.path.join(os.path.dirname(__file__), "../../../precision"))
from compare import check_operator_accuracy
sys.path.append(os.path.join(os.path.dirname(__file__), "../../../profiler"))
from ascend_profiler import ascend_profiler_wrapper

# Fix all RNG seeds (torch / numpy / random) so profiling runs are reproducible.
seed = 42
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# Target NPU device index; override with the NPU_DEVICE env var (defaults to
# the original hard-coded device 6).
torch.npu.set_device(int(os.environ.get("NPU_DEVICE", "6")))




def prof_lightning_attention(
    test_case
):
    """Profile the Triton ``lightning_attention`` kernel for one test case.

    Args:
        test_case: tuple of case parameters forwarded to ``gen_input``;
            also used to name the profiling output directory.

    Side effects:
        Writes Ascend profiler traces under ``./prof_result/<case>/``
        via ``ascend_profiler_wrapper``. Returns ``None``.
    """
    q, k, v, ed, kv_history, block_size = gen_input(*test_case)
    # Clone the history so the kernel under test gets pristine state even if
    # it mutates the cache in place.
    kv_history_clone = kv_history.clone()

    def call():
        # Only the Triton implementation is profiled; the torch reference
        # implementation is intentionally excluded to keep the trace clean.
        # The outputs are discarded — this call exists purely for timing.
        lightning_attention(q, k, v, ed, kv_history_clone, block_size)

    # One result directory per case, e.g. ./prof_result/2x8x1024x64/
    prof_dir = "./prof_result/"
    case_str = "x".join(map(str, test_case))
    result_path = prof_dir + case_str + "/"
    os.makedirs(result_path, exist_ok=True)
    ascend_profiler_wrapper(call, result_path)




if __name__ == "__main__":

    print("=" * 50, "\n")

    # Profile every configured test case; results are written to disk by
    # prof_lightning_attention (it returns None, so nothing is collected).
    for i, test_case in enumerate(TEST_CASES):
        print(f"case {i} " + "=" * 50)
        prof_lightning_attention(test_case)

    print("=" * 50, "\n")
