import triton
import triton.language as tl
import test_common
import pytest
import torch
import torch_npu
import numpy as np
import random
import os
import math
from triton.runtime.driver import driver

# Target accelerator for all tensors created in this module.
DEVICE = "npu"
# Fixed RNG seed so repeated profiling runs see identical input data.
SEED = 42

def get_soc_info():
    """Return the device-property dict of the currently active NPU.

    The returned mapping is expected to contain at least "num_aicore" and
    "num_vectorcore" (see `softmax`).  The previous version also fetched
    the current target into an unused local; that dead call is removed.
    """
    device = driver.active.get_current_device()
    return driver.active.utils.get_device_properties(device)

# (dtype name, (n_rows, n_cols)) cases to profile, built as the cross
# product of dtypes x row counts x column counts (90 unique cases, in the
# same order as the original hand-written list).  The original list also
# accidentally repeated its first ten float16 cases at the end, which only
# re-ran identical profiles — those duplicates are dropped.
_TEST_DTYPES = ("float16", "float32", "bfloat16")
_TEST_ROWS = (1, 128, 256, 384, 512)
_TEST_COLS = (1024, 2048, 4096, 5120, 6656, 8192)
TEST_CASES = [
    [dtype, (rows, cols)]
    for dtype in _TEST_DTYPES
    for rows in _TEST_ROWS
    for cols in _TEST_COLS
]

# Map signature-type strings (as used in TEST_CASES) to torch dtypes.
# Each name matches the attribute of the same name on the torch module.
_DTYPE_NAMES = (
    "float32", "float16", "bfloat16", "float64",
    "int8", "int16", "int32", "int64", "uint8", "bool",
)
dtype_map = {name: getattr(torch, name) for name in _DTYPE_NAMES}

def softmax_torch(x):
    """Reference softmax along the last dim, computed in float32.

    Subtracts the per-row maximum before exponentiating for numerical
    stability, then casts the result back to the input dtype.
    """
    out_dtype = x.dtype
    z = x.to(torch.float32)
    z = z - z.max(dim=-1, keepdim=True)[0]
    exp_z = torch.exp(z)
    return (exp_z / exp_z.sum(dim=-1, keepdim=True)).to(out_dtype)


@triton.jit
def softmax_triton(
    x_ptr,
    x_stride,
    y_ptr,
    y_stride,
    n_rows,
    n_cols,
    BLOCK_ROWS: tl.constexpr,
    BLOCK_COLS: tl.constexpr,
):
    """Row-wise numerically-stable softmax over a 2-D tensor.

    Each program strides over row blocks along axis 0 (step = number of
    programs).  A full row may not fit in one tile, so each row block is
    scanned three times over its columns:
      1. find the per-row maximum,
      2. accumulate sum(exp(x - max)),
      3. compute exp(x - max) / sum and store.

    Fix vs. the previous version: boolean masks were combined with the
    Python `and` operator, which is not an elementwise tensor operation;
    the bitwise `&` operator is the correct way to combine masks in a
    Triton kernel.  The loop-invariant row mask is also hoisted out of
    the column loops.
    """
    row_start = tl.program_id(0)
    row_steps = tl.num_programs(0)
    rows_loop = tl.cdiv(n_rows, BLOCK_ROWS)
    cols_loop = tl.cdiv(n_cols, BLOCK_COLS)
    for mBlockIdx in tl.range(row_start, rows_loop, row_steps):
        row_idx = mBlockIdx * BLOCK_ROWS
        offs_m = row_idx + tl.arange(0, BLOCK_ROWS)
        mask_m = offs_m < n_rows  # invariant across the column loops

        # Pass 1: running maximum of each row (numerical stability).
        row_max = tl.full((BLOCK_ROWS, ), -float('inf'), dtype=tl.float32)
        for nBlockIdx in range(cols_loop):
            offs_n = nBlockIdx * BLOCK_COLS + tl.arange(0, BLOCK_COLS)
            mask_n = offs_n < n_cols
            x = tl.load(
                x_ptr + offs_m[:, None] * x_stride + offs_n[None, :],
                mask=mask_m[:, None] & mask_n[None, :],
                other=-float('inf')
            ).to(tl.float32)
            row_max = tl.maximum(row_max, tl.max(x, axis=1))

        # Pass 2: accumulate exp(x - row_max).  Masked column lanes load
        # -inf, so exp(-inf - row_max) contributes exactly 0 to the sum.
        exp_sum = tl.full((BLOCK_ROWS, BLOCK_COLS), 0.0, dtype=tl.float32)
        for nBlockIdx in range(cols_loop):
            offs_n = nBlockIdx * BLOCK_COLS + tl.arange(0, BLOCK_COLS)
            mask_n = offs_n < n_cols
            x = tl.load(
                x_ptr + offs_m[:, None] * x_stride + offs_n[None, :],
                mask=mask_m[:, None] & mask_n[None, :],
                other=-float('inf')
            ).to(tl.float32)
            exp_sum += tl.exp(x - row_max[:, None])
        sum_exp = tl.sum(exp_sum, axis=1)

        # Pass 3: normalize and write out in the destination dtype.
        for nBlockIdx in range(cols_loop):
            offs_n = nBlockIdx * BLOCK_COLS + tl.arange(0, BLOCK_COLS)
            mask_n = offs_n < n_cols
            x = tl.load(
                x_ptr + offs_m[:, None] * x_stride + offs_n[None, :],
                mask=mask_m[:, None] & mask_n[None, :],
                other=-float('inf')
            ).to(tl.float32)
            softmax_val = tl.exp(x - row_max[:, None]) / sum_exp[:, None]
            tl.store(
                y_ptr + offs_m[:, None] * y_stride + offs_n[None, :],
                softmax_val.to(y_ptr.dtype.element_ty),
                mask=mask_m[:, None] & mask_n[None, :]
            )


@torch.inference_mode()
def softmax(x):
    """Row-wise softmax of a 2-D tensor via the `softmax_triton` kernel.

    Launches one program per vector core; each program strides over row
    blocks inside the kernel.  Tile sizes are chosen heuristically from
    the input shape (the "V2" strategy that won earlier tuning
    experiments; the discarded V1/V3/V4/V5 variants have been removed).

    Args:
        x: 2-D input tensor of shape (n_rows, n_cols).

    Returns:
        Tensor of the same shape/dtype/device holding softmax(x, dim=-1).
    """
    y = torch.empty_like(x)  # empty_like already matches x's device
    prop = get_soc_info()
    num_vectorcore = prop["num_vectorcore"]
    n_rows, n_cols = x.shape

    # Per-tile capacity budget.  NOTE(review): the original comment called
    # this the "256KB UB size", but 16384 is used directly as a byte budget
    # (elements = 16384 // element_size) — confirm the intended unit.
    UB_SIZE = 16384
    max_block_elems = UB_SIZE // x.element_size()

    # Heuristic tile sizes ("V2"): a single wide tile per row when there is
    # only one row; otherwise trade rows per tile against column width —
    # fewer rows per tile as rows/columns grow, columns fill the remaining
    # budget.  All sizes are rounded up to powers of two for the kernel.
    if n_rows == 1:
        BLOCK_ROWS = 1
        BLOCK_COLS = triton.next_power_of_2(min(n_cols, max_block_elems))
    elif n_rows < 256:
        # Narrow inputs: read more rows per tile; wide inputs: fewer rows.
        BLOCK_ROWS = triton.next_power_of_2(min(4 if n_cols < 6656 else 2, n_rows))
        BLOCK_COLS = triton.next_power_of_2(min(max_block_elems // BLOCK_ROWS, n_cols))
    else:
        BLOCK_ROWS = triton.next_power_of_2(min(8 if n_cols < 4096 else 4, n_rows))
        BLOCK_COLS = triton.next_power_of_2(min(max_block_elems // BLOCK_ROWS, n_cols))

    # One program per vector core; the kernel loops over remaining row
    # blocks with stride num_programs, so the grid need not cover n_rows.
    grid = (num_vectorcore, 1, 1)
    softmax_triton[grid](
        x, x.stride(0),
        y, y.stride(0),
        n_rows, n_cols,
        BLOCK_ROWS=BLOCK_ROWS,
        BLOCK_COLS=BLOCK_COLS,
    )
    return y
    
@pytest.mark.parametrize("test_case", TEST_CASES)
@torch.inference_mode()
def test_softmax(test_case):
    """Profile the Triton softmax for one (dtype, shape) case on the NPU.

    Runs `softmax` LOOP times under the torch_npu profiler and writes the
    trace under ./prof_final_work/<case>/.

    NOTE(review): the correctness comparison against `softmax_torch` at
    the bottom is commented out, so this currently asserts nothing — it
    is a profiling harness, not a functional test.  Re-enable the
    `validate_cmp` lines to restore verification.
    """
    (sigtype, shape) = test_case
    dtype = dtype_map[sigtype]  # NOTE(review): currently unused
    torch.set_default_device(DEVICE)
    # NOTE(review): NPU card index 2 is hard-coded — confirm it exists on
    # the profiling host.
    torch.npu.set_device(2)
    # Seed every RNG source so repeated runs profile identical data.
    torch.manual_seed(SEED)
    np.random.seed(SEED)
    random.seed(SEED)
    x = test_common.generate_tensor(shape, sigtype).npu()
    # y_triton = softmax(x)
    stream = torch.npu.current_stream()
    experimental_config = torch_npu.profiler._ExperimentalConfig(
        aic_metrics=torch_npu.profiler.AiCMetrics.PipeUtilization,
        profiler_level=torch_npu.profiler.ProfilerLevel.Level1,
        l2_cache=True ,
        data_simplification=False
    )
    # Trace directory name encodes the case, e.g. "float16x128x1024x42".
    test_str = "x".join(map(str, (sigtype, *shape, SEED)))
    triton_path = "./prof_final_work/" + test_str + "/"
    # triton_path = "./prof/"
    os.makedirs(triton_path, exist_ok=True)
    LOOP = 100  # number of actively-profiled iterations
    with torch_npu.profiler.profile(
        activities=[
            torch_npu.profiler.ProfilerActivity.NPU
        ],
        # skip_first=1 + warmup=1 account for the two extra loop
        # iterations below (LOOP + 2 total).
        schedule=torch_npu.profiler.schedule(wait=0, warmup=1, active=LOOP, repeat=1, skip_first=1),
        on_trace_ready=torch_npu.profiler.tensorboard_trace_handler(triton_path),
        record_shapes=False,
        profile_memory=False,
        with_stack=False,
        with_flops=False,
        with_modules=False,
        experimental_config=experimental_config) as prof:

        stream.synchronize()
        prof.step()
        for i in range(LOOP + 2):
            y_triton = softmax(x)
            prof.step()
            # Synchronize once after the first launch so compilation /
            # warm-up cost does not bleed into the profiled steps.
            if i == 0:
                stream.synchronize()
        stream.synchronize()
    # y_torch = softmax_torch(x)
    # test_common.validate_cmp(sigtype, y_triton, y_torch)
