import torch
import torch_npu
import triton
import triton.language as tl
import test_common
import pytest
import numpy as np
import random

def naive_softmax(x):
    """Eager-mode reference softmax over dim 1 of a 2-D tensor.

    Subtracts the per-row max before exponentiating for numerical
    stability, then normalizes each row to sum to 1.
    """
    # keepdim=True keeps the reduced axis so the result broadcasts
    # back over the columns without manual [:, None] indexing
    shifted = x - x.max(dim=1, keepdim=True).values
    exp_shifted = torch.exp(shifted)
    return exp_shifted / exp_shifted.sum(dim=1, keepdim=True)


@triton.jit
def softmax_kernel(output_ptr, input_ptr, input_row_stride, output_row_stride, n_rows, n_cols, BLOCK_SIZE: tl.constexpr):
    """Numerically stable row-wise softmax.

    Each program starts at its program id and strides over the rows by
    the total number of launched programs, so a fixed-size grid covers
    any number of rows. BLOCK_SIZE must be a power of two >= n_cols so
    one block spans an entire row.
    """
    first_row = tl.program_id(0)
    # total number of programs launched on axis 0; used as the loop stride
    num_progs = tl.num_programs(0)
    for row in tl.range(first_row, n_rows, num_progs):
        lane = tl.arange(0, BLOCK_SIZE)
        in_bounds = lane < n_cols
        row_in_ptr = input_ptr + row * input_row_stride
        # out-of-range lanes read -inf so they vanish in max() and exp()
        vals = tl.load(row_in_ptr + lane, mask=in_bounds, other=-float('inf'))
        stable = vals - tl.max(vals, axis=0)
        exps = tl.exp(stable)
        probs = exps / tl.sum(exps, axis=0)
        row_out_ptr = output_ptr + row * output_row_stride
        tl.store(row_out_ptr + lane, probs, mask=in_bounds)

def softmax(x):
    """Run the Triton softmax kernel over a 2-D tensor, row by row.

    Launches at most 20 persistent programs (capped by the row count);
    the kernel itself loops over the remaining rows with a grid stride.
    """
    n_rows, n_cols = x.shape
    out = torch.empty_like(x)
    # tl.arange needs a power-of-two span wide enough for one full row
    BLOCK_SIZE = triton.next_power_of_2(n_cols)
    # never launch more programs than there are rows to process
    grid = (min(20, n_rows), 1, 1)
    softmax_kernel[grid](
        out,
        x,
        x.stride(0),
        out.stride(0),
        n_rows,
        n_cols,
        BLOCK_SIZE
    )
    return out

# (torch dtype, signature-type string) pairs consumed by
# test_common.validate_cmp via the pytest parametrization below.
types = [
    (torch.float32, 'float32'),
    (torch.float16, 'float16'),
    (torch.bfloat16, 'bfloat16'),
]

# (rows, cols) test shapes. A negative value encodes a total byte
# count instead of an element count; the test converts it to an
# element count using the dtype's element size.
shapes = [
    (1823, 781),
    (1823, 2),
    (1823, 4),
    (1823, -32),
    (1823, -100),
    (1823, -256),
]

@pytest.mark.parametrize('dtype, sigtype', types)
@pytest.mark.parametrize('shape', shapes)
def test_softmax(dtype, sigtype, shape):
    """Compare the Triton softmax against the eager reference.

    A negative dimension in `shape` encodes a total byte budget rather
    than an element count (see the comment above `shapes`); it is
    converted to a positive element count by dividing by the dtype's
    element size.
    """
    torch.manual_seed(42)
    np.random.seed(42)
    random.seed(42)
    torch.npu.set_device(7)

    # BUG FIX: the original referenced M and N without ever unpacking
    # `shape` (NameError) and passed the raw, possibly negative shape
    # to generate_tensor. Unpack and apply the byte -> element
    # conversion the commented-out code intended.
    M, N = shape
    elem_size = torch.tensor(0, dtype=dtype).element_size()
    M = (-M) // elem_size if M < 0 else M
    N = (-N) // elem_size if N < 0 else N

    x = test_common.generate_tensor((M, N), dtype)
    y_triton = softmax(x)
    y_torch = naive_softmax(x)
    test_common.validate_cmp(sigtype, y_triton, y_torch)