#!/usr/bin/env python3


import triton
import triton.language as tl

from compile_utils import *
from targets import *

# from triton.compiler.compiler import AttrsDescriptor
#
# from torch._inductor.runtime import triton_helpers, triton_heuristics
# from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
# from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, DeviceProperties
#
# from torch._inductor.runtime import triton_helpers
# from torch_npu._inductor import npu_triton_heuristics
# from torch_npu._inductor import npu_triton_helpers
# from torch_npu._inductor.runtime import NPUDeviceProperties
# from torch_npu._inductor.npu_triton_helpers import libdevice, math as tl_math
# import torch
# import torch_npu
#
#
# @npu_triton_heuristics.persistent_reduction_npu_index(
#     size_hints=[128, 16, 50],
#     reduction_hint=ReductionHint.DEFAULT,
#     filename=__file__,
#     triton_meta={'signature': {'in_ptr0': '*fp32', 'out_ptr0': '*fp32', 'y0_numel': 'i32', 'x1_numel': 'i32', 'r2_numel': 'i32'}, 'device': NPUDeviceProperties(
#         type='npu', index=0, multi_processor_count=48, cc='Ascend910B1', major=None, regs_per_multiprocessor=None, max_threads_per_multi_processor=None, warp_size=None), 'constants': {}, 'mix_mode': 'aiv'},
#     inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_unk_fused_sum_1', 'mutated_arg_names': [], 'backend_hash': 'd9c4a0eeab0929a9b7d5dd9e93b1b108c9976371c18f8eb154340142212fed50', 'split_axis': [0], 'tiling_axis': [0, 1, 2], 'axis_names': ['y0', 'x1', 'r2'], 'low_dims': {
#         1}, 'numof_reduction_axis': 1, 'split_axis_dtype': torch.float32, 'dual_reduction': False, 'traced_graph_hash': 'TRACED_GRAPH_HASH', 'traced_graph_dir': 'TRACED_GRAPH_DIR', 'store_cubin': False, 'force_disable_caches': False, 'profile_bandwidth_with_do_bench_using_profiling': False}
# )


@triton.jit
def triton_unk_fused_sum_1(in_ptr0, out_ptr0, y0_numel, x1_numel, r2_numel, Y0BLOCK: tl.constexpr, Y0BLOCK_SUB: tl.constexpr, X1BLOCK_SUB: tl.constexpr):
    """Sum-reduce the r2 axis of an fp32 tensor laid out as [y0, r2, x1].

    Input element (y0, r2, x1) is read from ``in_ptr0`` at linear offset
    ``x1 + 16*r2 + 800*y0``; the r2-sums are written to ``out_ptr0`` at
    offset ``x1 + 16*y0`` (strides are hard-coded by the generator).

    Each program instance owns a Y0BLOCK-wide slab of the y0 axis and
    walks it in Y0BLOCK_SUB chunks, with x1 walked in X1BLOCK_SUB chunks.
    The whole r2 axis is covered by a single R2BLOCK_SUB-wide vector (no
    r2 loop), so the kernel assumes r2_numel <= 64; the tail is masked.
    """
    R2BLOCK_SUB: tl.constexpr = 64
    y0_offset = tl.program_id(0) * Y0BLOCK
    base_y0 = tl.arange(0, Y0BLOCK_SUB)
    # Ceil-division loop counts for the two tiled axes.
    loops_y0 = (Y0BLOCK + Y0BLOCK_SUB - 1) // Y0BLOCK_SUB
    base_x1 = tl.arange(0, X1BLOCK_SUB)
    loops_x1 = (x1_numel + X1BLOCK_SUB - 1) // X1BLOCK_SUB
    base_r2 = tl.arange(0, R2BLOCK_SUB)
    for loop_y0 in range(loops_y0):
        # Broadcast the three 1-D ranges onto distinct axes of a 3-D
        # block: y0 -> axis 0, r2 -> axis 1, x1 -> axis 2.
        y0 = y0_offset + (loop_y0 * Y0BLOCK_SUB) + base_y0[:, None, None]
        # Clip to both this program's slab end and the global y0 extent.
        y0_mask = y0 < min(Y0BLOCK+y0_offset, y0_numel)
        for loop_x1 in range(loops_x1):
            x1 = (loop_x1 * X1BLOCK_SUB) + base_x1[None, None, :]
            x1_mask = x1 < x1_numel
            r2 = base_r2[None, :, None]
            r2_mask = r2 < r2_numel
            tmp0 = tl.load(in_ptr0 + (x1 + 16*r2 + 800*y0),
                           r2_mask & x1_mask & y0_mask, other=0.0)
            tmp1 = tl.reshape(tmp0, [Y0BLOCK_SUB, R2BLOCK_SUB, X1BLOCK_SUB])
            # Re-zero masked lanes before reducing (defensive: the load
            # already substituted 0.0 via `other=`).
            tmp3 = tl.where(r2_mask & x1_mask & y0_mask, tmp1, 0)
            # Reduce over axis 1 (r2); keep a unit dim so the result
            # broadcasts against the 3-D store index/mask.
            tmp4 = tl.sum(tmp3, 1).reshape(Y0BLOCK_SUB, 1, X1BLOCK_SUB)
            tl.store(out_ptr0 + (x1 + 16*y0), tmp4, x1_mask & y0_mask)


# Compile the kernel through Triton's AST-only path for the configured target.
_SIGNATURE = {
    'in_ptr0': '*fp32',
    'out_ptr0': '*fp32',
    'y0_numel': 'i32',
    'x1_numel': 'i32',
    'r2_numel': 'i32',
    'Y0BLOCK': 'constexpr',
    'Y0BLOCK_SUB': 'constexpr',
    'X1BLOCK_SUB': 'constexpr',
}
# Tile sizes baked in at compile time (the r2 tile is fixed inside the kernel).
_CONSTEXPRS = {'Y0BLOCK': 128, 'Y0BLOCK_SUB': 16, 'X1BLOCK_SUB': 16}
_src = triton.compiler.ASTSource(
    fn=triton_unk_fused_sum_1,
    signature=_SIGNATURE,
    constexprs=_CONSTEXPRS,
)
k = compile_ast_only(_src, target=TARGET)
