import triton
import triton.language as tl
import test_common
import pytest
import torch
import torch_npu
import numpy as np
import random
import os
from triton.runtime.driver import driver

DEVICE = "npu"
SEED = 42

def get_soc_info():
    """Return the device-property dict for the current NPU.

    The returned mapping is expected to expose at least "num_aicore"
    and "num_vectorcore" (consumed by silu()).
    """
    device = driver.active.get_current_device()
    # NOTE(review): the original also called get_current_target() and
    # discarded the result; the dead call is dropped here.
    return driver.active.utils.get_device_properties(device)

# [dtype, (rows, cols)] sweep: 3 dtypes x 5 row counts x 6 column counts,
# followed by a repeat of the first ten float16 cases.
TEST_CASES = [
    [dtype_name, (n_rows, n_cols)]
    for dtype_name in ("float16", "float32", "bfloat16")
    for n_rows in (1, 128, 256, 384, 512)
    for n_cols in (1024, 2048, 4096, 5120, 6656, 8192)
]
TEST_CASES += [list(case) for case in TEST_CASES[:10]]

# Map dtype-name strings (as used in TEST_CASES) to the torch dtype objects.
dtype_map = {
    name: getattr(torch, name)
    for name in (
        "float32", "float16", "bfloat16", "float64",
        "int8", "int16", "int32", "int64", "uint8", "bool",
    )
}

def silu_torch(x):
    """Reference SiLU: x * sigmoid(x), computed in fp32, cast back to input dtype."""
    out_dtype = x.dtype
    xf = x.to(torch.float32)
    sig = 1 / (1 + torch.exp(-xf))
    return (xf * sig).to(out_dtype)


@triton.jit
def silu_triton(
    x_ptr,  # pointer to the input matrix
    x_stride,  # elements between consecutive input rows
    y_ptr,  # pointer to the output matrix
    y_stride,  # elements between consecutive output rows
    n_rows,
    n_cols,
    BLOCK_SIZE: tl.constexpr,  # columns processed per inner-loop iteration
):
    """SiLU (x * sigmoid(x)) over a 2-D tensor.

    Each program starts at row program_id(0) and strides by the total
    program count, so any grid size covers all n_rows. Columns are
    processed in BLOCK_SIZE chunks with a mask guarding the tail chunk.
    Math is done in fp32 and cast back to the output element type on store.
    """
    row_start = tl.program_id(0)
    row_steps = tl.num_programs(0)
    # Grid-stride loop over rows: program p handles rows p, p+P, p+2P, ...
    for row_idx in tl.range(row_start, n_rows, row_steps):
        x_ptr_base = x_ptr + row_idx * x_stride
        y_ptr_base = y_ptr + row_idx * y_stride
        for col_idx in tl.range(0, n_cols, BLOCK_SIZE):
            cols = col_idx + tl.arange(0, BLOCK_SIZE)
            # Mask keeps the final partial chunk in-bounds; masked lanes load 0.
            x = tl.load(x_ptr_base + cols, mask=cols < n_cols, other=0.0)
            x_fp32 = x.to(tl.float32)
            sigmoid_x = 1 / (1 + tl.exp(-x_fp32))
            ret = x_fp32 * sigmoid_x
            tl.store(y_ptr_base + cols, ret.to(y_ptr.dtype.element_ty), mask=cols < n_cols)

@torch.inference_mode()
def silu(x):
    """SiLU (x * sigmoid(x)) of a 2-D NPU tensor via the Triton kernel.

    Args:
        x: 2-D tensor on the NPU.

    Returns:
        A new tensor with the same shape, dtype, and device as ``x``.
    """
    # empty_like already allocates on x's device; the original's extra
    # .to(device) was a no-op. Unused locals (dtype, cube_core) removed.
    y = torch.empty_like(x)
    prop = get_soc_info()
    vector_core = prop["num_vectorcore"]
    n_rows, n_cols = x.shape
    # 131072-byte block budget. NOTE(review): the original comment said the
    # UB (unified buffer) is 256KB, so this uses half of it — confirm.
    MAX_FUSED_SIZE = 131072 // x.element_size()
    BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(n_cols))
    num_warps = min(max(BLOCK_SIZE // 256, 1), 8)
    # One program per row, capped at the vector-core count; the kernel's
    # row loop covers any remaining rows.
    grid = (min(vector_core, n_rows), 1, 1)
    silu_triton[grid](
        x, x.stride(0),
        y, y.stride(0),
        n_rows, n_cols,
        BLOCK_SIZE=BLOCK_SIZE,
        num_warps=num_warps, num_ctas=1
    )
    return y
    
@pytest.mark.parametrize("test_case", TEST_CASES)
@torch.inference_mode()
def test_silu(test_case):
    """Profile the Triton SiLU kernel for one (dtype, shape) test case.

    NOTE(review): this is a profiling harness rather than a correctness
    test — the numeric comparison against the torch reference is
    commented out at the bottom, so the test only fails if something
    raises during the profiled runs.
    """
    (sigtype, shape) = test_case
    dtype = dtype_map[sigtype]  # NOTE(review): looked up but never used below
    torch.set_default_device(DEVICE)
    # Seed every RNG source for reproducible input data.
    torch.manual_seed(SEED)
    np.random.seed(SEED)
    random.seed(SEED)
    torch.npu.set_device(1)  # NOTE(review): hard-coded NPU index 1 — confirm
    x = test_common.generate_tensor(shape, sigtype).npu()
    # y_triton = silu(x)
    stream = torch.npu.current_stream()
    experimental_config = torch_npu.profiler._ExperimentalConfig(
        aic_metrics=torch_npu.profiler.AiCMetrics.PipeUtilization,
        profiler_level=torch_npu.profiler.ProfilerLevel.Level1,
        l2_cache=True ,
        data_simplification=False
    )
    # Per-case output directory, e.g. ./prof_v2/float16x128x1024x42/
    test_str = "x".join(map(str, (sigtype, *shape, SEED)))
    triton_path = "./prof_v2/" + test_str + "/"
    # triton_path = "./prof/"
    os.makedirs(triton_path, exist_ok=True)
    LOOP = 20
    with torch_npu.profiler.profile(
        activities=[
            torch_npu.profiler.ProfilerActivity.NPU
        ],
        # Schedule: skip 1 step, warm up 1, record LOOP active steps, once.
        schedule=torch_npu.profiler.schedule(wait=0, warmup=1, active=LOOP, repeat=1, skip_first=1),
        on_trace_ready=torch_npu.profiler.tensorboard_trace_handler(triton_path),
        record_shapes=False,
        profile_memory=False,
        with_stack=False,
        with_flops=False,
        with_modules=False,
        experimental_config=experimental_config) as prof:

        stream.synchronize()
        prof.step()
        # LOOP + 2 launches so that skip_first + warmup + LOOP active
        # steps are all fed by the schedule above.
        for i in range(LOOP + 2):
            y_triton = silu(x)
            prof.step()
            if i == 0:
                # Sync once after the first launch so JIT compilation
                # does not bleed into the profiled steps.
                stream.synchronize()
        stream.synchronize()
    # y_torch = silu_torch(x)
    # test_common.validate_cmp(sigtype, y_triton, y_torch)
