import torch
import torch_npu
import triton
import triton.language as tl

from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math

def naive_softmax(x):
    """Reference row-wise softmax of ``x`` in plain PyTorch.

    The row maximum is subtracted before exponentiating; softmax is
    invariant under this shift and it prevents overflow in ``exp``.

    Memory traffic for an M x N input:
    reads 5MN + 2M elements, writes 3MN + 2M elements in total.
    """
    # keepdim=True keeps the reduced axis so the subtraction and the
    # division broadcast without explicit [:, None] indexing.
    shifted = x - x.max(dim=1, keepdim=True)[0]   # read MN, write MN (+ the M-element max pass)
    exp_shifted = torch.exp(shifted)              # read MN, write MN
    # read MN (+ M-element sum pass), write MN
    return exp_shifted / exp_shifted.sum(dim=1, keepdim=True)

@triton.jit
def softmax_kernel(in_ptr0, out_ptr2, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    """Each program instance computes the softmax of one row.

    in_ptr0:  pointer to the input, addressed as contiguous rows of RBLOCK
              elements (row stride is RBLOCK — see the load/store offsets).
    out_ptr2: pointer to the output, same layout as the input.
    XBLOCK:   rows per program; the host launcher uses XBLOCK=1.
    RBLOCK:   row length. Loads and stores below are UNMASKED, so RBLOCK
              must equal the actual number of columns exactly, and (per
              tl.arange requirements) should be a power of two.
    """
    xoffset = tl.program_id(0) * XBLOCK  # index of the row handled by this program
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)  # unused Inductor boilerplate (all lanes valid)
    rindex = tl.arange(0, RBLOCK)[:]  # column offsets within the row
    roffset = 0  # unused Inductor boilerplate
    rmask = tl.full([RBLOCK], True, tl.int1)  # unused Inductor boilerplate
    r1 = rindex
    x0 = xindex
    # Load the whole row: element (x0, r1) lives at r1 + RBLOCK*x0.
    tmp0 = tl.load(in_ptr0 + (r1 + (RBLOCK*x0)), None)
    tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
    # Row maximum, subtracted for numerical stability (softmax is shift-invariant).
    tmp3 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0))
    tmp4 = tmp0 - tmp3
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
    # Row sum of the exponentials (the softmax denominator).
    tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
    tmp9 = tmp5 / tmp8
    tl.store(out_ptr2 + (r1 + (RBLOCK*x0)), tmp9, None)

def softmax_new(inputs, dim, half_to_float=False):
    """Softmax over the last dimension using the custom Triton kernel.

    Args:
        inputs: input tensor; assumed contiguous with rows laid out along
            the last dimension (the kernel addresses element (row, col) at
            ``col + N*row``).
        dim: dimension to reduce. Only the last dimension is supported;
            any other value raises instead of silently computing garbage.
        half_to_float: if True, produce float32 output regardless of the
            input dtype (mirrors aten softmax's ``half_to_float`` flag).

    Returns:
        Tensor of the same shape as ``inputs`` with softmax applied.

    Raises:
        ValueError: if ``dim`` is not the last dimension of ``inputs``.
    """
    print("custom triton kernel: SOFTMAX")
    dim = dim % inputs.ndim
    # The kernel indexes rows as contiguous runs of RBLOCK elements, so it
    # can only reduce over the last dimension of a contiguous tensor.
    if dim != inputs.ndim - 1:
        raise ValueError("softmax_new only supports softmax over the last dimension")
    N = inputs.shape[dim]
    # One kernel program per row: collapse all leading dimensions.
    M = inputs.numel() // N if N else 0
    grid = (M, 1, 1)
    dtype = torch.float32 if half_to_float else inputs.dtype
    outs = torch.empty_like(inputs, dtype=dtype)
    # BUG FIX: RBLOCK was hard-coded to 2048. The kernel loads exactly RBLOCK
    # elements per row with NO mask and uses RBLOCK as the row stride, so
    # RBLOCK must equal the row length N or the kernel reads/writes the
    # wrong memory. NOTE(review): tl.arange requires a power-of-two extent,
    # so N must be a power of two — confirm acceptable for expected inputs.
    softmax_kernel[grid](inputs, outs, XBLOCK=1, RBLOCK=N)
    return outs

if __name__ == '__main__':
    # Smoke test: compare the Triton kernel against the PyTorch reference
    # on an NPU tensor whose row length matches the kernel's requirements.
    shape = (1024, 2048)
    x = torch.randn(shape, device='npu', dtype=torch.float32)
    output_torch = naive_softmax(x)
    output_triton = softmax_new(x, -1)

    print("x: ", x)
    print("torch: ", output_torch)
    print("triton: ", output_triton)
    # Report the largest elementwise deviation so correctness is visible
    # at a glance instead of eyeballing two printed tensors.
    print("max abs diff: ", (output_torch - output_triton).abs().max())
