# AOT ID: ['1_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import torch_npu
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import (
    split_scan_grid,
    grid_combo_kernels,
    start_graph,
    end_graph,
    cooperative_reduction_grid,
)
from torch_npu._inductor.npu_triton_heuristics import grid
import torch_npu
from torch_npu._inductor import get_current_raw_stream as get_raw_stream
from torch_npu._inductor import get_current_raw_stream as get_raw_stream

# Short aliases into torch's op namespaces, used by the generated kernels/call code.
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
# Dynamo guard helpers: shape/stride assertions and raw strided-tensor allocators.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
# Compiles the Triton kernel source strings below (possibly off the main thread).
async_compile = AsyncCompile()
# Symmetric-memory allocator for peer-to-peer strided tensors (distributed c10d).
empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p


# kernel path: /tmp/torchinductor_root/gp/cgpumwfcqzphbtyxa6lycbvzhalyi5eam4r67re2zxecxn3w52na.py
# Topologically Sorted Source Nodes: [eq, zeros_like, feature_dense], Original ATen: [aten.eq, aten.zeros_like, aten.where]
# Source node to ATen node mapping:
#   eq => eq
#   feature_dense => where
#   zeros_like => full_default
# Graph fragment:
#   %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg34_1, -1), kwargs = {})
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([128, 50], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: npu:0, pin_memory: False})
#   %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %arg34_1), kwargs = {})
# SchedulerNodes: [SchedulerNode(name='op34')]

# Pointwise i64 kernel: out[i] = 0 if in[i] == -1 else in[i].
# Fuses aten.eq(-1) + zeros_like + where (see graph fragment above); presumably
# sanitizes -1 sentinel indices to 0 ahead of an embedding lookup — confirm
# against the caller. NOTE(review): the kernel body is a generated source string
# compiled by AsyncCompile at import time; it must remain byte-identical.
triton_unk_fused_eq_where_zeros_like_0 = async_compile.triton('triton_unk_fused_eq_where_zeros_like_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, DeviceProperties

from torch._inductor.runtime import triton_helpers
from torch_npu._inductor import npu_triton_heuristics
from torch_npu._inductor import npu_triton_helpers
from torch_npu._inductor.runtime import NPUDeviceProperties
from torch_npu._inductor.npu_triton_helpers import libdevice, math as tl_math
import torch
import torch_npu

@npu_triton_heuristics.pointwise_npu_index(
    size_hints=[6400], 
    filename=__file__,
    triton_meta={'signature': {'in_ptr0': '*i64', 'out_ptr0': '*i64', 'x0_numel': 'i32'}, 'device': NPUDeviceProperties(type='npu', index=0, multi_processor_count=48, cc='Ascend910B1', major=None, regs_per_multiprocessor=None, max_threads_per_multi_processor=None, warp_size=None), 'constants': {}, 'mix_mode': 'aiv'},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_unk_fused_eq_where_zeros_like_0', 'mutated_arg_names': [], 'backend_hash': 'd9c4a0eeab0929a9b7d5dd9e93b1b108c9976371c18f8eb154340142212fed50', 'split_axis': [0], 'tiling_axis': [0], 'axis_names': ['x0'], 'low_dims': {0}, 'numof_reduction_axis': 0, 'split_axis_dtype': torch.int64, 'dual_reduction': False, 'traced_graph_hash': 'TRACED_GRAPH_HASH', 'traced_graph_dir': 'TRACED_GRAPH_DIR', 'store_cubin': False, 'force_disable_caches': False, 'profile_bandwidth_with_do_bench_using_profiling': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_unk_fused_eq_where_zeros_like_0(in_ptr0, out_ptr0, x0_numel, X0BLOCK: tl.constexpr, X0BLOCK_SUB: tl.constexpr):
    x0_offset = tl.program_id(0) * X0BLOCK
    base_x0= tl.arange(0, X0BLOCK_SUB)
    loops_x0 = (X0BLOCK + X0BLOCK_SUB - 1) // X0BLOCK_SUB
    for loop_x0 in range(loops_x0):
        x0 = x0_offset + (loop_x0 * X0BLOCK_SUB) + base_x0
        x0_mask = x0 < min(X0BLOCK+x0_offset, x0_numel)
        tmp0 = tl.load(in_ptr0 + (x0), x0_mask)
        tmp1 = tl.full([1], -1, tl.int64)
        tmp2 = tmp0 == tmp1
        tmp3 = tl.full([1], 0, tl.int64)
        tmp4 = tl.where(tmp2, tmp3, tmp0)
        tl.store(out_ptr0 + (x0), tmp4, x0_mask)
''', device_str='npu')


# kernel path: /tmp/torchinductor_root/ko/cko5qk4flkixc6ts2do525qbtwt3dkpf3vn7mnaydwgn75zwxc4a.py
# Topologically Sorted Source Nodes: [sum_1], Original ATen: [aten.sum]
# Source node to ATen node mapping:
#   sum_1 => sum_1
# Graph fragment:
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%embedding_17, [1]), kwargs = {})
# SchedulerNodes: [SchedulerNode(name='op37')]

# Persistent-reduction kernel: sums fp32 input over the r2 axis (axis 1 of the
# original [y0, r2, x1]-strided layout: element (y0, r2, x1) lives at
# x1 + 16*r2 + 800*y0) and writes a [y0, x1] result at x1 + 16*y0.
# Implements aten.sum(dim=[1]) on embedding_17 per the graph fragment above.
# NOTE(review): generated source string compiled by AsyncCompile — keep byte-identical.
triton_unk_fused_sum_1 = async_compile.triton('triton_unk_fused_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, DeviceProperties

from torch._inductor.runtime import triton_helpers
from torch_npu._inductor import npu_triton_heuristics
from torch_npu._inductor import npu_triton_helpers
from torch_npu._inductor.runtime import NPUDeviceProperties
from torch_npu._inductor.npu_triton_helpers import libdevice, math as tl_math
import torch
import torch_npu

@npu_triton_heuristics.persistent_reduction_npu_index(
    size_hints=[128, 16, 50],
    reduction_hint=ReductionHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {'in_ptr0': '*fp32', 'out_ptr0': '*fp32', 'y0_numel': 'i32', 'x1_numel': 'i32', 'r2_numel': 'i32'}, 'device': NPUDeviceProperties(type='npu', index=0, multi_processor_count=48, cc='Ascend910B1', major=None, regs_per_multiprocessor=None, max_threads_per_multi_processor=None, warp_size=None), 'constants': {}, 'mix_mode': 'aiv'},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_unk_fused_sum_1', 'mutated_arg_names': [], 'backend_hash': 'd9c4a0eeab0929a9b7d5dd9e93b1b108c9976371c18f8eb154340142212fed50', 'split_axis': [0], 'tiling_axis': [0, 1, 2], 'axis_names': ['y0', 'x1', 'r2'], 'low_dims': {1}, 'numof_reduction_axis': 1, 'split_axis_dtype': torch.float32, 'dual_reduction': False, 'traced_graph_hash': 'TRACED_GRAPH_HASH', 'traced_graph_dir': 'TRACED_GRAPH_DIR', 'store_cubin': False, 'force_disable_caches': False, 'profile_bandwidth_with_do_bench_using_profiling': False}
)
@triton.jit
def triton_unk_fused_sum_1(in_ptr0, out_ptr0, y0_numel, x1_numel, r2_numel, Y0BLOCK: tl.constexpr, Y0BLOCK_SUB: tl.constexpr, X1BLOCK_SUB: tl.constexpr):
    R2BLOCK_SUB: tl.constexpr = 64
    y0_offset = tl.program_id(0) * Y0BLOCK
    base_y0= tl.arange(0, Y0BLOCK_SUB)
    loops_y0 = (Y0BLOCK + Y0BLOCK_SUB - 1) // Y0BLOCK_SUB
    base_x1= tl.arange(0, X1BLOCK_SUB)
    loops_x1 = (x1_numel + X1BLOCK_SUB - 1) // X1BLOCK_SUB
    base_r2= tl.arange(0, R2BLOCK_SUB)
    loops_r2 = (r2_numel + R2BLOCK_SUB - 1) // R2BLOCK_SUB
    for loop_y0 in range(loops_y0):
        y0 = y0_offset + (loop_y0 * Y0BLOCK_SUB) + base_y0[:,None,None]
        y0_mask = y0 < min(Y0BLOCK+y0_offset, y0_numel)
        for loop_x1 in range(loops_x1):
            x1 = (loop_x1 * X1BLOCK_SUB) + base_x1[None,None,:]
            x1_mask = x1 < x1_numel
            r2 = base_r2[None,:,None]
            r2_mask = r2 < r2_numel
            tmp0 = tl.load(in_ptr0 + (x1 + 16*r2 + 800*y0), r2_mask & x1_mask & y0_mask, other=0.0)
            tmp1 = tl.reshape(tmp0, [Y0BLOCK_SUB, R2BLOCK_SUB, X1BLOCK_SUB])
            tmp3 = tl.where(r2_mask & x1_mask & y0_mask, tmp1, 0)
            tmp4 = tl.sum(tmp3, 1).reshape(Y0BLOCK_SUB, 1, X1BLOCK_SUB)
            tl.store(out_ptr0 + (x1 + 16*y0 ), tmp4, x1_mask & y0_mask)
''', device_str='npu')


# kernel path: /tmp/torchinductor_root/u5/cu53fmpo6l5y7htkf4zvl4m4xyhc3pwoa5civiaole47bneuq6ej.py
# Topologically Sorted Source Nodes: [eq_4, zeros_like_4, feature_sparse], Original ATen: [aten.eq, aten.zeros_like, aten.where]
# Source node to ATen node mapping:
#   eq_4 => eq_4
#   feature_sparse => where_4
#   zeros_like_4 => full_default_4
# Graph fragment:
#   %eq_4 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg42_1, -1), kwargs = {})
#   %full_default_4 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([128, 38], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: npu:0, pin_memory: False})
#   %where_4 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_4, %full_default_4, %arg42_1), kwargs = {})
# SchedulerNodes: [SchedulerNode(name='op50')]

# Pointwise i64 kernel: out[i] = 0 if in[i] == -1 else in[i].
# Same eq/zeros_like/where fusion as kernel 0, here applied to arg42_1
# ([128, 38] per the graph fragment above) — masks -1 sentinel values to 0.
# NOTE(review): generated source string compiled by AsyncCompile — keep byte-identical.
triton_unk_fused_eq_where_zeros_like_2 = async_compile.triton('triton_unk_fused_eq_where_zeros_like_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, DeviceProperties

from torch._inductor.runtime import triton_helpers
from torch_npu._inductor import npu_triton_heuristics
from torch_npu._inductor import npu_triton_helpers
from torch_npu._inductor.runtime import NPUDeviceProperties
from torch_npu._inductor.npu_triton_helpers import libdevice, math as tl_math
import torch
import torch_npu

@npu_triton_heuristics.pointwise_npu_index(
    size_hints=[4864], 
    filename=__file__,
    triton_meta={'signature': {'in_ptr0': '*i64', 'out_ptr0': '*i64', 'x0_numel': 'i32'}, 'device': NPUDeviceProperties(type='npu', index=0, multi_processor_count=48, cc='Ascend910B1', major=None, regs_per_multiprocessor=None, max_threads_per_multi_processor=None, warp_size=None), 'constants': {}, 'mix_mode': 'aiv'},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_unk_fused_eq_where_zeros_like_2', 'mutated_arg_names': [], 'backend_hash': 'd9c4a0eeab0929a9b7d5dd9e93b1b108c9976371c18f8eb154340142212fed50', 'split_axis': [0], 'tiling_axis': [0], 'axis_names': ['x0'], 'low_dims': {0}, 'numof_reduction_axis': 0, 'split_axis_dtype': torch.int64, 'dual_reduction': False, 'traced_graph_hash': 'TRACED_GRAPH_HASH', 'traced_graph_dir': 'TRACED_GRAPH_DIR', 'store_cubin': False, 'force_disable_caches': False, 'profile_bandwidth_with_do_bench_using_profiling': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_unk_fused_eq_where_zeros_like_2(in_ptr0, out_ptr0, x0_numel, X0BLOCK: tl.constexpr, X0BLOCK_SUB: tl.constexpr):
    x0_offset = tl.program_id(0) * X0BLOCK
    base_x0= tl.arange(0, X0BLOCK_SUB)
    loops_x0 = (X0BLOCK + X0BLOCK_SUB - 1) // X0BLOCK_SUB
    for loop_x0 in range(loops_x0):
        x0 = x0_offset + (loop_x0 * X0BLOCK_SUB) + base_x0
        x0_mask = x0 < min(X0BLOCK+x0_offset, x0_numel)
        tmp0 = tl.load(in_ptr0 + (x0), x0_mask)
        tmp1 = tl.full([1], -1, tl.int64)
        tmp2 = tmp0 == tmp1
        tmp3 = tl.full([1], 0, tl.int64)
        tmp4 = tl.where(tmp2, tmp3, tmp0)
        tl.store(out_ptr0 + (x0), tmp4, x0_mask)
''', device_str='npu')


# kernel path: /tmp/torchinductor_root/y2/cy26nbluarayug3slmqrbzi2ahqalitza4e4e3sm6y3qtja645au.py
# Topologically Sorted Source Nodes: [sparse_lookup_embedding, sum_5], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
#   sparse_lookup_embedding => mul
#   sum_5 => sum_5
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%embedding_21, %unsqueeze), kwargs = {})
#   %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# SchedulerNodes: [SchedulerNode(name='op53')]

# Masked-sum reduction: loads fp32 embeddings (element (y0, r2, x1) at
# x1 + 16*r2 + 608*y0) and i64 indices (r2 + 38*y0), builds a 0/1 fp32 mask
# from (index >= 0), multiplies, then sums over r2 and stores [y0, x1] at
# x1 + 16*y0. Fuses aten.mul (embedding * unsqueezed mask) + aten.sum(dim=[1])
# per the graph fragment above.
# NOTE(review): generated source string compiled by AsyncCompile — keep byte-identical.
triton_unk_fused_mul_sum_3 = async_compile.triton('triton_unk_fused_mul_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, DeviceProperties

from torch._inductor.runtime import triton_helpers
from torch_npu._inductor import npu_triton_heuristics
from torch_npu._inductor import npu_triton_helpers
from torch_npu._inductor.runtime import NPUDeviceProperties
from torch_npu._inductor.npu_triton_helpers import libdevice, math as tl_math
import torch
import torch_npu

@npu_triton_heuristics.persistent_reduction_npu_index(
    size_hints=[128, 16, 38],
    reduction_hint=ReductionHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {'in_ptr0': '*fp32', 'in_ptr1': '*i64', 'out_ptr0': '*fp32', 'y0_numel': 'i32', 'x1_numel': 'i32', 'r2_numel': 'i32'}, 'device': NPUDeviceProperties(type='npu', index=0, multi_processor_count=48, cc='Ascend910B1', major=None, regs_per_multiprocessor=None, max_threads_per_multi_processor=None, warp_size=None), 'constants': {}, 'mix_mode': 'aiv'},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_unk_fused_mul_sum_3', 'mutated_arg_names': [], 'backend_hash': 'd9c4a0eeab0929a9b7d5dd9e93b1b108c9976371c18f8eb154340142212fed50', 'split_axis': [0], 'tiling_axis': [0, 1, 2], 'axis_names': ['y0', 'x1', 'r2'], 'low_dims': {1, 2}, 'numof_reduction_axis': 1, 'split_axis_dtype': torch.float32, 'dual_reduction': False, 'traced_graph_hash': 'TRACED_GRAPH_HASH', 'traced_graph_dir': 'TRACED_GRAPH_DIR', 'store_cubin': False, 'force_disable_caches': False, 'profile_bandwidth_with_do_bench_using_profiling': False}
)
@triton.jit
def triton_unk_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, y0_numel, x1_numel, r2_numel, Y0BLOCK: tl.constexpr, Y0BLOCK_SUB: tl.constexpr, X1BLOCK_SUB: tl.constexpr):
    R2BLOCK_SUB: tl.constexpr = 64
    y0_offset = tl.program_id(0) * Y0BLOCK
    base_y0= tl.arange(0, Y0BLOCK_SUB)
    loops_y0 = (Y0BLOCK + Y0BLOCK_SUB - 1) // Y0BLOCK_SUB
    base_x1= tl.arange(0, X1BLOCK_SUB)
    loops_x1 = (x1_numel + X1BLOCK_SUB - 1) // X1BLOCK_SUB
    base_r2= tl.arange(0, R2BLOCK_SUB)
    loops_r2 = (r2_numel + R2BLOCK_SUB - 1) // R2BLOCK_SUB
    for loop_y0 in range(loops_y0):
        y0 = y0_offset + (loop_y0 * Y0BLOCK_SUB) + base_y0[:,None,None]
        y0_mask = y0 < min(Y0BLOCK+y0_offset, y0_numel)
        for loop_x1 in range(loops_x1):
            x1 = (loop_x1 * X1BLOCK_SUB) + base_x1[None,None,:]
            x1_mask = x1 < x1_numel
            r2 = base_r2[None,:,None]
            r2_mask = r2 < r2_numel
            tmp0 = tl.load(in_ptr0 + (x1 + 16*r2 + 608*y0), r2_mask & x1_mask & y0_mask, other=0.0)
            tmp1 = tl.load(in_ptr1 + (r2 + 38*y0), r2_mask & y0_mask, other=0.0)
            tmp2 = tl.full([1, 1, 1], 0, tl.int64)
            tmp3 = tmp1 >= tmp2
            tmp4 = tmp3.to(tl.float32)
            tmp5 = tmp0 * tmp4
            tmp6 = tl.reshape(tmp5, [Y0BLOCK_SUB, R2BLOCK_SUB, X1BLOCK_SUB])
            tmp8 = tl.where(r2_mask & x1_mask & y0_mask, tmp6, 0)
            tmp9 = tl.sum(tmp8, 1).reshape(Y0BLOCK_SUB, 1, X1BLOCK_SUB)
            tl.store(out_ptr0 + (x1 + 16*y0 ), tmp9, x1_mask & y0_mask)
''', device_str='npu')


# kernel path: /tmp/torchinductor_root/gm/cgmum4m3cpugtduxmu4uy57fg65ydrzspyeerhncciiaio62oi66.py
# Topologically Sorted Source Nodes: [input_features], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   input_features => cat
# Graph fragment:
#   %cat : [num_users=23] = call_function[target=torch.ops.aten.cat.default](args = ([%squeeze, %squeeze_1, %squeeze_2, %squeeze_3, %squeeze_4, %squeeze_5, %squeeze_6, %squeeze_7, %squeeze_8, %squeeze_9, %squeeze_10, %squeeze_11, %squeeze_12, %squeeze_13, %squeeze_14, %squeeze_15, %squeeze_16, %sum_1, %sum_2, %sum_3, %sum_4, %sum_5, %sum_6], 1), kwargs = {})
# SchedulerNodes: [SchedulerNode(name='op58')]

# Concatenation kernel: writes out[y0, x1] (stride 368 per row) by selecting
# among 23 input tensors, each 16 columns wide, based on which 16-wide slot
# x1 falls in (thresholds 16, 32, ..., 368). Implements the aten.cat of 17
# squeezes + 6 sums into the 368-wide `input_features` (graph fragment above).
# NOTE(review): all 23 loads execute unconditionally under the same
# x1_mask & y0_mask (some with out-of-slot offsets); the tl.where chain then
# selects the correct branch — standard inductor cat codegen.
# NOTE(review): generated source string compiled by AsyncCompile — keep byte-identical.
triton_unk_fused_cat_4 = async_compile.triton('triton_unk_fused_cat_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, DeviceProperties

from torch._inductor.runtime import triton_helpers
from torch_npu._inductor import npu_triton_heuristics
from torch_npu._inductor import npu_triton_helpers
from torch_npu._inductor.runtime import NPUDeviceProperties
from torch_npu._inductor.npu_triton_helpers import libdevice, math as tl_math
import torch
import torch_npu

@npu_triton_heuristics.pointwise_npu_index(
    size_hints=[128, 368], tile_hint=TileHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {'in_ptr0': '*fp32', 'in_ptr1': '*fp32', 'in_ptr2': '*fp32', 'in_ptr3': '*fp32', 'in_ptr4': '*fp32', 'in_ptr5': '*fp32', 'in_ptr6': '*fp32', 'in_ptr7': '*fp32', 'in_ptr8': '*fp32', 'in_ptr9': '*fp32', 'in_ptr10': '*fp32', 'in_ptr11': '*fp32', 'in_ptr12': '*fp32', 'in_ptr13': '*fp32', 'in_ptr14': '*fp32', 'in_ptr15': '*fp32', 'in_ptr16': '*fp32', 'in_ptr17': '*fp32', 'in_ptr18': '*fp32', 'in_ptr19': '*fp32', 'in_ptr20': '*fp32', 'in_ptr21': '*fp32', 'in_ptr22': '*fp32', 'out_ptr0': '*fp32', 'y0_numel': 'i32', 'x1_numel': 'i32'}, 'device': NPUDeviceProperties(type='npu', index=0, multi_processor_count=48, cc='Ascend910B1', major=None, regs_per_multiprocessor=None, max_threads_per_multi_processor=None, warp_size=None), 'constants': {}, 'mix_mode': 'aiv'},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_unk_fused_cat_4', 'mutated_arg_names': [], 'backend_hash': 'd9c4a0eeab0929a9b7d5dd9e93b1b108c9976371c18f8eb154340142212fed50', 'split_axis': [0], 'tiling_axis': [0, 1], 'axis_names': ['y0', 'x1'], 'low_dims': {1}, 'numof_reduction_axis': 0, 'split_axis_dtype': torch.float32, 'dual_reduction': False, 'traced_graph_hash': 'TRACED_GRAPH_HASH', 'traced_graph_dir': 'TRACED_GRAPH_DIR', 'store_cubin': False, 'force_disable_caches': False, 'profile_bandwidth_with_do_bench_using_profiling': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_unk_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, out_ptr0, y0_numel, x1_numel, Y0BLOCK: tl.constexpr, Y0BLOCK_SUB: tl.constexpr, X1BLOCK_SUB: tl.constexpr):
    y0_offset = tl.program_id(0) * Y0BLOCK
    base_y0= tl.arange(0, Y0BLOCK_SUB)
    loops_y0 = (Y0BLOCK + Y0BLOCK_SUB - 1) // Y0BLOCK_SUB
    base_x1= tl.arange(0, X1BLOCK_SUB)
    loops_x1 = (x1_numel + X1BLOCK_SUB - 1) // X1BLOCK_SUB
    for loop_y0 in range(loops_y0):
        y0 = y0_offset + (loop_y0 * Y0BLOCK_SUB) + base_y0[:,None]
        y0_mask = y0 < min(Y0BLOCK+y0_offset, y0_numel)
        for loop_x1 in range(loops_x1):
            x1 = (loop_x1 * X1BLOCK_SUB) + base_x1[None,:]
            x1_mask = x1 < x1_numel
            tmp3 = tl.load(in_ptr0 + (16*y0 + (x1)), x1_mask & y0_mask)
            tmp6 = tl.load(in_ptr1 + (16*y0 + ((-16) + x1)), x1_mask & y0_mask)
            tmp9 = tl.load(in_ptr2 + (16*y0 + ((-32) + x1)), x1_mask & y0_mask)
            tmp12 = tl.load(in_ptr3 + (16*y0 + ((-48) + x1)), x1_mask & y0_mask)
            tmp15 = tl.load(in_ptr4 + (16*y0 + ((-64) + x1)), x1_mask & y0_mask)
            tmp18 = tl.load(in_ptr5 + (16*y0 + ((-80) + x1)), x1_mask & y0_mask)
            tmp21 = tl.load(in_ptr6 + (16*y0 + ((-96) + x1)), x1_mask & y0_mask)
            tmp24 = tl.load(in_ptr7 + (16*y0 + ((-112) + x1)), x1_mask & y0_mask)
            tmp27 = tl.load(in_ptr8 + (16*y0 + ((-128) + x1)), x1_mask & y0_mask)
            tmp30 = tl.load(in_ptr9 + (16*y0 + ((-144) + x1)), x1_mask & y0_mask)
            tmp33 = tl.load(in_ptr10 + (16*y0 + ((-160) + x1)), x1_mask & y0_mask)
            tmp36 = tl.load(in_ptr11 + (16*y0 + ((-176) + x1)), x1_mask & y0_mask)
            tmp39 = tl.load(in_ptr12 + (16*y0 + ((-192) + x1)), x1_mask & y0_mask)
            tmp42 = tl.load(in_ptr13 + (16*y0 + ((-208) + x1)), x1_mask & y0_mask)
            tmp45 = tl.load(in_ptr14 + (16*y0 + ((-224) + x1)), x1_mask & y0_mask)
            tmp48 = tl.load(in_ptr15 + (16*y0 + ((-240) + x1)), x1_mask & y0_mask)
            tmp51 = tl.load(in_ptr16 + (16*y0 + ((-256) + x1)), x1_mask & y0_mask)
            tmp54 = tl.load(in_ptr17 + (16*y0 + ((-272) + x1)), x1_mask & y0_mask)
            tmp57 = tl.load(in_ptr18 + (16*y0 + ((-288) + x1)), x1_mask & y0_mask)
            tmp60 = tl.load(in_ptr19 + (16*y0 + ((-304) + x1)), x1_mask & y0_mask)
            tmp63 = tl.load(in_ptr20 + (16*y0 + ((-320) + x1)), x1_mask & y0_mask)
            tmp66 = tl.load(in_ptr21 + (16*y0 + ((-336) + x1)), x1_mask & y0_mask)
            tmp69 = tl.load(in_ptr22 + (16*y0 + ((-352) + x1)), x1_mask & y0_mask)
            tmp0 = x1
            tmp1 = tl.full([1, 1], 16, tl.int32)
            tmp2 = tmp0 < tmp1
            tmp4 = tl.full([1, 1], 32, tl.int32)
            tmp5 = tmp0 < tmp4
            tmp7 = tl.full([1, 1], 48, tl.int32)
            tmp8 = tmp0 < tmp7
            tmp10 = tl.full([1, 1], 64, tl.int32)
            tmp11 = tmp0 < tmp10
            tmp13 = tl.full([1, 1], 80, tl.int32)
            tmp14 = tmp0 < tmp13
            tmp16 = tl.full([1, 1], 96, tl.int32)
            tmp17 = tmp0 < tmp16
            tmp19 = tl.full([1, 1], 112, tl.int32)
            tmp20 = tmp0 < tmp19
            tmp22 = tl.full([1, 1], 128, tl.int32)
            tmp23 = tmp0 < tmp22
            tmp25 = tl.full([1, 1], 144, tl.int32)
            tmp26 = tmp0 < tmp25
            tmp28 = tl.full([1, 1], 160, tl.int32)
            tmp29 = tmp0 < tmp28
            tmp31 = tl.full([1, 1], 176, tl.int32)
            tmp32 = tmp0 < tmp31
            tmp34 = tl.full([1, 1], 192, tl.int32)
            tmp35 = tmp0 < tmp34
            tmp37 = tl.full([1, 1], 208, tl.int32)
            tmp38 = tmp0 < tmp37
            tmp40 = tl.full([1, 1], 224, tl.int32)
            tmp41 = tmp0 < tmp40
            tmp43 = tl.full([1, 1], 240, tl.int32)
            tmp44 = tmp0 < tmp43
            tmp46 = tl.full([1, 1], 256, tl.int32)
            tmp47 = tmp0 < tmp46
            tmp49 = tl.full([1, 1], 272, tl.int32)
            tmp50 = tmp0 < tmp49
            tmp52 = tl.full([1, 1], 288, tl.int32)
            tmp53 = tmp0 < tmp52
            tmp55 = tl.full([1, 1], 304, tl.int32)
            tmp56 = tmp0 < tmp55
            tmp58 = tl.full([1, 1], 320, tl.int32)
            tmp59 = tmp0 < tmp58
            tmp61 = tl.full([1, 1], 336, tl.int32)
            tmp62 = tmp0 < tmp61
            tmp64 = tl.full([1, 1], 352, tl.int32)
            tmp65 = tmp0 < tmp64
            tmp67 = tl.full([1, 1], 368, tl.int32)
            tmp68 = tmp0 < tmp67
            tmp70 = tl.where(tmp65, tmp66, tmp69)
            tmp71 = tl.where(tmp62, tmp63, tmp70)
            tmp72 = tl.where(tmp59, tmp60, tmp71)
            tmp73 = tl.where(tmp56, tmp57, tmp72)
            tmp74 = tl.where(tmp53, tmp54, tmp73)
            tmp75 = tl.where(tmp50, tmp51, tmp74)
            tmp76 = tl.where(tmp47, tmp48, tmp75)
            tmp77 = tl.where(tmp44, tmp45, tmp76)
            tmp78 = tl.where(tmp41, tmp42, tmp77)
            tmp79 = tl.where(tmp38, tmp39, tmp78)
            tmp80 = tl.where(tmp35, tmp36, tmp79)
            tmp81 = tl.where(tmp32, tmp33, tmp80)
            tmp82 = tl.where(tmp29, tmp30, tmp81)
            tmp83 = tl.where(tmp26, tmp27, tmp82)
            tmp84 = tl.where(tmp23, tmp24, tmp83)
            tmp85 = tl.where(tmp20, tmp21, tmp84)
            tmp86 = tl.where(tmp17, tmp18, tmp85)
            tmp87 = tl.where(tmp14, tmp15, tmp86)
            tmp88 = tl.where(tmp11, tmp12, tmp87)
            tmp89 = tl.where(tmp8, tmp9, tmp88)
            tmp90 = tl.where(tmp5, tmp6, tmp89)
            tmp91 = tl.where(tmp2, tmp3, tmp90)
            tl.store(out_ptr0 + (x1 + 368*y0), tmp91, x1_mask & y0_mask)
''', device_str='npu')


# kernel path: /tmp/torchinductor_root/3p/c3pcudckeijoyammtmc2t5swov37x4dauhi6e4upfkb3pe3jbkbp.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   cat => cat_1
# Graph fragment:
#   %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_2, %slice_4, %slice_6, %slice_8, %slice_10, %slice_12, %slice_14, %slice_16, %slice_18, %slice_20, %slice_22, %slice_24, %slice_26, %slice_28, %slice_30, %slice_32, %slice_34, %slice_36, %slice_38, %slice_40, %slice_42, %slice_44, %slice_46], -1), kwargs = {})
# SchedulerNodes: [SchedulerNode(name='op59')]

# Concatenation kernel over 23 contiguous 16-wide slices of a SINGLE 368-wide
# input (aten.cat of slice_2..slice_46 per the graph fragment above). Note that
# each branch's offset arithmetic cancels — e.g. 16 + 368*y0 + ((-16) + x1)
# == 368*y0 + x1 — so every branch loads in_ptr0 + 368*y0 + x1 and the kernel
# is effectively a row-wise copy; the branch structure is retained from the
# generic inductor cat codegen.
# NOTE(review): generated source string compiled by AsyncCompile — keep byte-identical.
triton_unk_fused_cat_5 = async_compile.triton('triton_unk_fused_cat_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, DeviceProperties

from torch._inductor.runtime import triton_helpers
from torch_npu._inductor import npu_triton_heuristics
from torch_npu._inductor import npu_triton_helpers
from torch_npu._inductor.runtime import NPUDeviceProperties
from torch_npu._inductor.npu_triton_helpers import libdevice, math as tl_math
import torch
import torch_npu

@npu_triton_heuristics.pointwise_npu_index(
    size_hints=[128, 368], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {'in_ptr0': '*fp32', 'out_ptr0': '*fp32', 'y0_numel': 'i32', 'x1_numel': 'i32'}, 'device': NPUDeviceProperties(type='npu', index=0, multi_processor_count=48, cc='Ascend910B1', major=None, regs_per_multiprocessor=None, max_threads_per_multi_processor=None, warp_size=None), 'constants': {}, 'mix_mode': 'aiv'},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_unk_fused_cat_5', 'mutated_arg_names': [], 'backend_hash': 'd9c4a0eeab0929a9b7d5dd9e93b1b108c9976371c18f8eb154340142212fed50', 'split_axis': [0], 'tiling_axis': [0, 1], 'axis_names': ['y0', 'x1'], 'low_dims': {1}, 'numof_reduction_axis': 0, 'split_axis_dtype': torch.float32, 'dual_reduction': False, 'traced_graph_hash': 'TRACED_GRAPH_HASH', 'traced_graph_dir': 'TRACED_GRAPH_DIR', 'store_cubin': False, 'force_disable_caches': False, 'profile_bandwidth_with_do_bench_using_profiling': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_unk_fused_cat_5(in_ptr0, out_ptr0, y0_numel, x1_numel, Y0BLOCK: tl.constexpr, Y0BLOCK_SUB: tl.constexpr, X1BLOCK_SUB: tl.constexpr):
    y0_offset = tl.program_id(0) * Y0BLOCK
    base_y0= tl.arange(0, Y0BLOCK_SUB)
    loops_y0 = (Y0BLOCK + Y0BLOCK_SUB - 1) // Y0BLOCK_SUB
    base_x1= tl.arange(0, X1BLOCK_SUB)
    loops_x1 = (x1_numel + X1BLOCK_SUB - 1) // X1BLOCK_SUB
    for loop_y0 in range(loops_y0):
        y0 = y0_offset + (loop_y0 * Y0BLOCK_SUB) + base_y0[:,None]
        y0_mask = y0 < min(Y0BLOCK+y0_offset, y0_numel)
        for loop_x1 in range(loops_x1):
            x1 = (loop_x1 * X1BLOCK_SUB) + base_x1[None,:]
            x1_mask = x1 < x1_numel
            tmp3 = tl.load(in_ptr0 + (368*y0 + (x1)), x1_mask & y0_mask)
            tmp6 = tl.load(in_ptr0 + (16 + 368*y0 + ((-16) + x1)), x1_mask & y0_mask)
            tmp9 = tl.load(in_ptr0 + (32 + 368*y0 + ((-32) + x1)), x1_mask & y0_mask)
            tmp12 = tl.load(in_ptr0 + (48 + 368*y0 + ((-48) + x1)), x1_mask & y0_mask)
            tmp15 = tl.load(in_ptr0 + (64 + 368*y0 + ((-64) + x1)), x1_mask & y0_mask)
            tmp18 = tl.load(in_ptr0 + (80 + 368*y0 + ((-80) + x1)), x1_mask & y0_mask)
            tmp21 = tl.load(in_ptr0 + (96 + 368*y0 + ((-96) + x1)), x1_mask & y0_mask)
            tmp24 = tl.load(in_ptr0 + (112 + 368*y0 + ((-112) + x1)), x1_mask & y0_mask)
            tmp27 = tl.load(in_ptr0 + (128 + 368*y0 + ((-128) + x1)), x1_mask & y0_mask)
            tmp30 = tl.load(in_ptr0 + (144 + 368*y0 + ((-144) + x1)), x1_mask & y0_mask)
            tmp33 = tl.load(in_ptr0 + (160 + 368*y0 + ((-160) + x1)), x1_mask & y0_mask)
            tmp36 = tl.load(in_ptr0 + (176 + 368*y0 + ((-176) + x1)), x1_mask & y0_mask)
            tmp39 = tl.load(in_ptr0 + (192 + 368*y0 + ((-192) + x1)), x1_mask & y0_mask)
            tmp42 = tl.load(in_ptr0 + (208 + 368*y0 + ((-208) + x1)), x1_mask & y0_mask)
            tmp45 = tl.load(in_ptr0 + (224 + 368*y0 + ((-224) + x1)), x1_mask & y0_mask)
            tmp48 = tl.load(in_ptr0 + (240 + 368*y0 + ((-240) + x1)), x1_mask & y0_mask)
            tmp51 = tl.load(in_ptr0 + (256 + 368*y0 + ((-256) + x1)), x1_mask & y0_mask)
            tmp54 = tl.load(in_ptr0 + (272 + 368*y0 + ((-272) + x1)), x1_mask & y0_mask)
            tmp57 = tl.load(in_ptr0 + (288 + 368*y0 + ((-288) + x1)), x1_mask & y0_mask)
            tmp60 = tl.load(in_ptr0 + (304 + 368*y0 + ((-304) + x1)), x1_mask & y0_mask)
            tmp63 = tl.load(in_ptr0 + (320 + 368*y0 + ((-320) + x1)), x1_mask & y0_mask)
            tmp66 = tl.load(in_ptr0 + (336 + 368*y0 + ((-336) + x1)), x1_mask & y0_mask)
            tmp69 = tl.load(in_ptr0 + (352 + 368*y0 + ((-352) + x1)), x1_mask & y0_mask)
            tmp0 = x1
            tmp1 = tl.full([1, 1], 16, tl.int32)
            tmp2 = tmp0 < tmp1
            tmp4 = tl.full([1, 1], 32, tl.int32)
            tmp5 = tmp0 < tmp4
            tmp7 = tl.full([1, 1], 48, tl.int32)
            tmp8 = tmp0 < tmp7
            tmp10 = tl.full([1, 1], 64, tl.int32)
            tmp11 = tmp0 < tmp10
            tmp13 = tl.full([1, 1], 80, tl.int32)
            tmp14 = tmp0 < tmp13
            tmp16 = tl.full([1, 1], 96, tl.int32)
            tmp17 = tmp0 < tmp16
            tmp19 = tl.full([1, 1], 112, tl.int32)
            tmp20 = tmp0 < tmp19
            tmp22 = tl.full([1, 1], 128, tl.int32)
            tmp23 = tmp0 < tmp22
            tmp25 = tl.full([1, 1], 144, tl.int32)
            tmp26 = tmp0 < tmp25
            tmp28 = tl.full([1, 1], 160, tl.int32)
            tmp29 = tmp0 < tmp28
            tmp31 = tl.full([1, 1], 176, tl.int32)
            tmp32 = tmp0 < tmp31
            tmp34 = tl.full([1, 1], 192, tl.int32)
            tmp35 = tmp0 < tmp34
            tmp37 = tl.full([1, 1], 208, tl.int32)
            tmp38 = tmp0 < tmp37
            tmp40 = tl.full([1, 1], 224, tl.int32)
            tmp41 = tmp0 < tmp40
            tmp43 = tl.full([1, 1], 240, tl.int32)
            tmp44 = tmp0 < tmp43
            tmp46 = tl.full([1, 1], 256, tl.int32)
            tmp47 = tmp0 < tmp46
            tmp49 = tl.full([1, 1], 272, tl.int32)
            tmp50 = tmp0 < tmp49
            tmp52 = tl.full([1, 1], 288, tl.int32)
            tmp53 = tmp0 < tmp52
            tmp55 = tl.full([1, 1], 304, tl.int32)
            tmp56 = tmp0 < tmp55
            tmp58 = tl.full([1, 1], 320, tl.int32)
            tmp59 = tmp0 < tmp58
            tmp61 = tl.full([1, 1], 336, tl.int32)
            tmp62 = tmp0 < tmp61
            tmp64 = tl.full([1, 1], 352, tl.int32)
            tmp65 = tmp0 < tmp64
            tmp67 = tl.full([1, 1], 368, tl.int32)
            tmp68 = tmp0 < tmp67
            tmp70 = tl.where(tmp65, tmp66, tmp69)
            tmp71 = tl.where(tmp62, tmp63, tmp70)
            tmp72 = tl.where(tmp59, tmp60, tmp71)
            tmp73 = tl.where(tmp56, tmp57, tmp72)
            tmp74 = tl.where(tmp53, tmp54, tmp73)
            tmp75 = tl.where(tmp50, tmp51, tmp74)
            tmp76 = tl.where(tmp47, tmp48, tmp75)
            tmp77 = tl.where(tmp44, tmp45, tmp76)
            tmp78 = tl.where(tmp41, tmp42, tmp77)
            tmp79 = tl.where(tmp38, tmp39, tmp78)
            tmp80 = tl.where(tmp35, tmp36, tmp79)
            tmp81 = tl.where(tmp32, tmp33, tmp80)
            tmp82 = tl.where(tmp29, tmp30, tmp81)
            tmp83 = tl.where(tmp26, tmp27, tmp82)
            tmp84 = tl.where(tmp23, tmp24, tmp83)
            tmp85 = tl.where(tmp20, tmp21, tmp84)
            tmp86 = tl.where(tmp17, tmp18, tmp85)
            tmp87 = tl.where(tmp14, tmp15, tmp86)
            tmp88 = tl.where(tmp11, tmp12, tmp87)
            tmp89 = tl.where(tmp8, tmp9, tmp88)
            tmp90 = tl.where(tmp5, tmp6, tmp89)
            tmp91 = tl.where(tmp2, tmp3, tmp90)
            tl.store(out_ptr0 + (x1 + 368*y0), tmp91, x1_mask & y0_mask)
''', device_str='npu')


# kernel path: /tmp/torchinductor_root/6q/c6qjr5sgtgr5sws2ppfwr62bf6ksshkeq2ful7jeyd4eluh3pj72.py
# Topologically Sorted Source Nodes: [fc, fc_1], Original ATen: [aten.addmm, aten.relu]
# Source node to ATen node mapping:
#   fc => add_tensor_3
#   fc_1 => relu
# Graph fragment:
#   %add_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %arg47_1), kwargs = {})
#   %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_3,), kwargs = {})
# SchedulerNodes: [SchedulerNode(name='op62')]

# Fused addmm epilogue: in-place bias-add + ReLU over a (y0, x1) fp32 matmul
# result with row stride 256 (launched from `call` with y0_numel=128,
# x1_numel=256).  in_out_ptr0 holds the mm output and is overwritten with
# relu(in_out_ptr0 + in_ptr0), where in_ptr0 is a (x1_numel,) bias broadcast
# along y0.
# NOTE(review): the Triton source below is a runtime string compiled by
# async_compile and is deliberately left byte-identical — its text is part of
# program behavior (kernel identity/caching), so it must not be edited.
triton_unk_fused_addmm_relu_6 = async_compile.triton('triton_unk_fused_addmm_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, DeviceProperties

from torch._inductor.runtime import triton_helpers
from torch_npu._inductor import npu_triton_heuristics
from torch_npu._inductor import npu_triton_helpers
from torch_npu._inductor.runtime import NPUDeviceProperties
from torch_npu._inductor.npu_triton_helpers import libdevice, math as tl_math
import torch
import torch_npu

@npu_triton_heuristics.pointwise_npu_index(
    size_hints=[128, 256], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {'in_out_ptr0': '*fp32', 'in_ptr0': '*fp32', 'y0_numel': 'i32', 'x1_numel': 'i32'}, 'device': NPUDeviceProperties(type='npu', index=0, multi_processor_count=48, cc='Ascend910B1', major=None, regs_per_multiprocessor=None, max_threads_per_multi_processor=None, warp_size=None), 'constants': {}, 'mix_mode': 'aiv'},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_unk_fused_addmm_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'backend_hash': 'd9c4a0eeab0929a9b7d5dd9e93b1b108c9976371c18f8eb154340142212fed50', 'split_axis': [0], 'tiling_axis': [0, 1], 'axis_names': ['y0', 'x1'], 'low_dims': {1}, 'numof_reduction_axis': 0, 'split_axis_dtype': torch.float32, 'dual_reduction': False, 'traced_graph_hash': 'TRACED_GRAPH_HASH', 'traced_graph_dir': 'TRACED_GRAPH_DIR', 'store_cubin': False, 'force_disable_caches': False, 'profile_bandwidth_with_do_bench_using_profiling': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_unk_fused_addmm_relu_6(in_out_ptr0, in_ptr0, y0_numel, x1_numel, Y0BLOCK: tl.constexpr, Y0BLOCK_SUB: tl.constexpr, X1BLOCK_SUB: tl.constexpr):
    y0_offset = tl.program_id(0) * Y0BLOCK
    base_y0= tl.arange(0, Y0BLOCK_SUB)
    loops_y0 = (Y0BLOCK + Y0BLOCK_SUB - 1) // Y0BLOCK_SUB
    base_x1= tl.arange(0, X1BLOCK_SUB)
    loops_x1 = (x1_numel + X1BLOCK_SUB - 1) // X1BLOCK_SUB
    for loop_y0 in range(loops_y0):
        y0 = y0_offset + (loop_y0 * Y0BLOCK_SUB) + base_y0[:,None]
        y0_mask = y0 < min(Y0BLOCK+y0_offset, y0_numel)
        for loop_x1 in range(loops_x1):
            x1 = (loop_x1 * X1BLOCK_SUB) + base_x1[None,:]
            x1_mask = x1 < x1_numel
            tmp0 = tl.load(in_out_ptr0 + (x1 + 256*y0), x1_mask & y0_mask)
            tmp1 = tl.load(in_ptr0 + (x1), x1_mask)
            tmp2 = tmp0 + tmp1
            tmp3 = tl.full([1, 1], 0, tl.int32)
            tmp4 = triton_helpers.maximum(tmp3, tmp2)
            tl.store(in_out_ptr0 + (x1 + 256*y0), tmp4, x1_mask & y0_mask)
''', device_str='npu')


# kernel path: /tmp/torchinductor_root/og/cogdmdvqf3s5m6kvpjridziqs63bz7gezu7cpmudqv3phfb4sysq.py
# Topologically Sorted Source Nodes: [fc_3, fc_4], Original ATen: [aten.addmm, aten.relu]
# Source node to ATen node mapping:
#   fc_3 => add_tensor_2
#   fc_4 => relu_1
# Graph fragment:
#   %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %arg49_1), kwargs = {})
#   %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {})
# SchedulerNodes: [SchedulerNode(name='op65')]

# Fused addmm epilogue (second MLP layer): identical structure to
# triton_unk_fused_addmm_relu_6 but with row stride 128 (launched from `call`
# with y0_numel=128, x1_numel=128).  Overwrites in_out_ptr0 with
# relu(in_out_ptr0 + in_ptr0), in_ptr0 being a (x1_numel,) bias broadcast
# along y0.
# NOTE(review): the Triton source below is a runtime string compiled by
# async_compile and is deliberately left byte-identical — its text is part of
# program behavior (kernel identity/caching), so it must not be edited.
triton_unk_fused_addmm_relu_7 = async_compile.triton('triton_unk_fused_addmm_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, DeviceProperties

from torch._inductor.runtime import triton_helpers
from torch_npu._inductor import npu_triton_heuristics
from torch_npu._inductor import npu_triton_helpers
from torch_npu._inductor.runtime import NPUDeviceProperties
from torch_npu._inductor.npu_triton_helpers import libdevice, math as tl_math
import torch
import torch_npu

@npu_triton_heuristics.pointwise_npu_index(
    size_hints=[128, 128], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {'in_out_ptr0': '*fp32', 'in_ptr0': '*fp32', 'y0_numel': 'i32', 'x1_numel': 'i32'}, 'device': NPUDeviceProperties(type='npu', index=0, multi_processor_count=48, cc='Ascend910B1', major=None, regs_per_multiprocessor=None, max_threads_per_multi_processor=None, warp_size=None), 'constants': {}, 'mix_mode': 'aiv'},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_unk_fused_addmm_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'backend_hash': 'd9c4a0eeab0929a9b7d5dd9e93b1b108c9976371c18f8eb154340142212fed50', 'split_axis': [0], 'tiling_axis': [0, 1], 'axis_names': ['y0', 'x1'], 'low_dims': {1}, 'numof_reduction_axis': 0, 'split_axis_dtype': torch.float32, 'dual_reduction': False, 'traced_graph_hash': 'TRACED_GRAPH_HASH', 'traced_graph_dir': 'TRACED_GRAPH_DIR', 'store_cubin': False, 'force_disable_caches': False, 'profile_bandwidth_with_do_bench_using_profiling': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_unk_fused_addmm_relu_7(in_out_ptr0, in_ptr0, y0_numel, x1_numel, Y0BLOCK: tl.constexpr, Y0BLOCK_SUB: tl.constexpr, X1BLOCK_SUB: tl.constexpr):
    y0_offset = tl.program_id(0) * Y0BLOCK
    base_y0= tl.arange(0, Y0BLOCK_SUB)
    loops_y0 = (Y0BLOCK + Y0BLOCK_SUB - 1) // Y0BLOCK_SUB
    base_x1= tl.arange(0, X1BLOCK_SUB)
    loops_x1 = (x1_numel + X1BLOCK_SUB - 1) // X1BLOCK_SUB
    for loop_y0 in range(loops_y0):
        y0 = y0_offset + (loop_y0 * Y0BLOCK_SUB) + base_y0[:,None]
        y0_mask = y0 < min(Y0BLOCK+y0_offset, y0_numel)
        for loop_x1 in range(loops_x1):
            x1 = (loop_x1 * X1BLOCK_SUB) + base_x1[None,:]
            x1_mask = x1 < x1_numel
            tmp0 = tl.load(in_out_ptr0 + (x1 + 128*y0), x1_mask & y0_mask)
            tmp1 = tl.load(in_ptr0 + (x1), x1_mask)
            tmp2 = tmp0 + tmp1
            tmp3 = tl.full([1, 1], 0, tl.int32)
            tmp4 = triton_helpers.maximum(tmp3, tmp2)
            tl.store(in_out_ptr0 + (x1 + 128*y0), tmp4, x1_mask & y0_mask)
''', device_str='npu')


# kernel path: /tmp/torchinductor_root/bf/cbfon7ocgi267a6diaw5iiyclgc7l24vvayetb75jhkovhopyjnk.py
# Topologically Sorted Source Nodes: [task_outs], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   task_outs => cat_2
# Graph fragment:
#   %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%sigmoid, %mul_2], -1), kwargs = {})
# SchedulerNodes: [SchedulerNode(name='op76')]

# Fused output concat kernel: writes a (y0, 2) fp32 tensor where
#   column 0 (x1 < 1):  sigmoid(in_ptr0[y0] + in_ptr1[0])
#   column 1 (x1 < 2):  sigmoid(in_ptr0[y0] + in_ptr1[0]) *
#                       sigmoid(in_ptr2[y0] + in_ptr1[0])
# i.e. the cat of %sigmoid and %mul_2 from the graph fragment above; in_ptr1
# is a single shared scalar bias (arg56_1 in `call`).  in_ptr0/in_ptr2 are the
# two per-row logits (named ctr_logit / cvr_logit in the traced source nodes).
# NOTE(review): the Triton source below is a runtime string compiled by
# async_compile and is deliberately left byte-identical — its text is part of
# program behavior (kernel identity/caching), so it must not be edited.
triton_unk_fused_cat_8 = async_compile.triton('triton_unk_fused_cat_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, DeviceProperties

from torch._inductor.runtime import triton_helpers
from torch_npu._inductor import npu_triton_heuristics
from torch_npu._inductor import npu_triton_helpers
from torch_npu._inductor.runtime import NPUDeviceProperties
from torch_npu._inductor.npu_triton_helpers import libdevice, math as tl_math
import torch
import torch_npu

@npu_triton_heuristics.pointwise_npu_index(
    size_hints=[128, 2], tile_hint=TileHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {'in_ptr0': '*fp32', 'in_ptr1': '*fp32', 'in_ptr2': '*fp32', 'out_ptr0': '*fp32', 'y0_numel': 'i32', 'x1_numel': 'i32'}, 'device': NPUDeviceProperties(type='npu', index=0, multi_processor_count=48, cc='Ascend910B1', major=None, regs_per_multiprocessor=None, max_threads_per_multi_processor=None, warp_size=None), 'constants': {}, 'mix_mode': 'aiv'},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_unk_fused_cat_8', 'mutated_arg_names': [], 'backend_hash': 'd9c4a0eeab0929a9b7d5dd9e93b1b108c9976371c18f8eb154340142212fed50', 'split_axis': [0], 'tiling_axis': [0, 1], 'axis_names': ['y0', 'x1'], 'low_dims': {0, 1}, 'numof_reduction_axis': 0, 'split_axis_dtype': torch.float32, 'dual_reduction': False, 'traced_graph_hash': 'TRACED_GRAPH_HASH', 'traced_graph_dir': 'TRACED_GRAPH_DIR', 'store_cubin': False, 'force_disable_caches': False, 'profile_bandwidth_with_do_bench_using_profiling': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_unk_fused_cat_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, y0_numel, x1_numel, Y0BLOCK: tl.constexpr, Y0BLOCK_SUB: tl.constexpr, X1BLOCK_SUB: tl.constexpr):
    y0_offset = tl.program_id(0) * Y0BLOCK
    base_y0= tl.arange(0, Y0BLOCK_SUB)
    loops_y0 = (Y0BLOCK + Y0BLOCK_SUB - 1) // Y0BLOCK_SUB
    base_x1= tl.arange(0, X1BLOCK_SUB)
    loops_x1 = (x1_numel + X1BLOCK_SUB - 1) // X1BLOCK_SUB
    for loop_y0 in range(loops_y0):
        y0 = y0_offset + (loop_y0 * Y0BLOCK_SUB) + base_y0[:,None]
        y0_mask = y0 < min(Y0BLOCK+y0_offset, y0_numel)
        for loop_x1 in range(loops_x1):
            x1 = (loop_x1 * X1BLOCK_SUB) + base_x1[None,:]
            x1_mask = x1 < x1_numel
            tmp3 = tl.load(in_ptr0 + (y0), y0_mask)
            tmp4 = tl.load(in_ptr1 + tl.arange(0,1) +  (0))
            tmp5 = tl.broadcast_to(tmp4[None,:], [1, 1 ])
            tmp12 = tl.load(in_ptr0 + (y0), y0_mask)
            tmp13 = tl.load(in_ptr1 + tl.arange(0,1) +  (0))
            tmp14 = tl.broadcast_to(tmp13[None,:], [1, 1 ])
            tmp17 = tl.load(in_ptr2 + (y0), y0_mask)
            tmp0 = x1
            tmp1 = tl.full([1, 1], 1, tl.int32)
            tmp2 = tmp0 < tmp1
            tmp6 = tmp3 + tmp5
            tmp7 = tl.sigmoid(tmp6)
            tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
            tmp9 = tl.where(tmp2, tmp7, tmp8)
            tmp10 = tl.full([1, 1], 2, tl.int32)
            tmp11 = tmp0 < tmp10
            tmp15 = tmp12 + tmp14
            tmp16 = tl.sigmoid(tmp15)
            tmp18 = tmp17 + tmp14
            tmp19 = tl.sigmoid(tmp18)
            tmp20 = tmp16 * tmp19
            tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
            tmp22 = tl.where(tmp11, tmp20, tmp21)
            tmp23 = tl.where(tmp2, tmp9, tmp22)
            tl.store(out_ptr0 + (x1 + 2*y0), tmp23, x1_mask & y0_mask)
''', device_str='npu')


# Block until every kernel registered above has finished compiling (resolving
# the kernel names in this module's globals), then drop the compiler handle so
# it cannot be used after module init.
async_compile.wait(globals())
del async_compile


def call(args):
    """Execute the AOT-compiled inference graph.

    args: flat list of 57 NPU tensors (embedding tables, index tensors, MLP
    weights/biases and a shared output bias) in the fixed order produced by
    Inductor; see benchmark_compiled_module for the expected shapes/dtypes.
    The list is cleared in place, releasing the caller's references.

    Returns a 2-tuple of (128,) fp32 views into one (128, 2) output buffer:
    element 0 is sigmoid of the first head's logit, element 1 the product of
    the two heads' sigmoids (ctr/cvr in the traced source-node names).
    """
    arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1, arg7_1, arg8_1, arg9_1, arg10_1, arg11_1, arg12_1, arg13_1, arg14_1, arg15_1, arg16_1, arg17_1, arg18_1, arg19_1, arg20_1, arg21_1, arg22_1, arg23_1, arg24_1, arg25_1, arg26_1, arg27_1, arg28_1, arg29_1, arg30_1, arg31_1, arg32_1, arg33_1, arg34_1, arg35_1, arg36_1, arg37_1, arg38_1, arg39_1, arg40_1, arg41_1, arg42_1, arg43_1, arg44_1, arg45_1, arg46_1, arg47_1, arg48_1, arg49_1, arg50_1, arg51_1, arg52_1, arg53_1, arg54_1, arg55_1, arg56_1 = args
    args.clear()
    # --- 17 single-index embedding lookups (one per categorical feature) ---
    # Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.embedding]
    buf0 = torch.ops.aten.embedding.default(
        arg0_1, arg1_1, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf1 = buf0

    # Topologically Sorted Source Nodes: [embedding_1], Original ATen: [aten.embedding]
    buf2 = torch.ops.aten.embedding.default(
        arg2_1, arg3_1, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf3 = buf2

    # Topologically Sorted Source Nodes: [embedding_2], Original ATen: [aten.embedding]
    buf4 = torch.ops.aten.embedding.default(
        arg4_1, arg5_1, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf5 = buf4

    # Topologically Sorted Source Nodes: [embedding_3], Original ATen: [aten.embedding]
    buf6 = torch.ops.aten.embedding.default(
        arg6_1, arg7_1, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf7 = buf6

    # Topologically Sorted Source Nodes: [embedding_4], Original ATen: [aten.embedding]
    buf8 = torch.ops.aten.embedding.default(
        arg8_1, arg9_1, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf9 = buf8

    # Topologically Sorted Source Nodes: [embedding_5], Original ATen: [aten.embedding]
    buf10 = torch.ops.aten.embedding.default(
        arg10_1, arg11_1, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf11 = buf10

    # Topologically Sorted Source Nodes: [embedding_6], Original ATen: [aten.embedding]
    buf12 = torch.ops.aten.embedding.default(
        arg12_1, arg13_1, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf13 = buf12

    # Topologically Sorted Source Nodes: [embedding_7], Original ATen: [aten.embedding]
    buf14 = torch.ops.aten.embedding.default(
        arg14_1, arg15_1, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf15 = buf14

    # Topologically Sorted Source Nodes: [embedding_8], Original ATen: [aten.embedding]
    buf16 = torch.ops.aten.embedding.default(
        arg16_1, arg17_1, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf17 = buf16

    # Topologically Sorted Source Nodes: [embedding_9], Original ATen: [aten.embedding]
    buf18 = torch.ops.aten.embedding.default(
        arg18_1, arg19_1, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf19 = buf18

    # Topologically Sorted Source Nodes: [embedding_10], Original ATen: [aten.embedding]
    buf20 = torch.ops.aten.embedding.default(
        arg20_1, arg21_1, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf21 = buf20

    # Topologically Sorted Source Nodes: [embedding_11], Original ATen: [aten.embedding]
    buf22 = torch.ops.aten.embedding.default(
        arg22_1, arg23_1, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf23 = buf22

    # Topologically Sorted Source Nodes: [embedding_12], Original ATen: [aten.embedding]
    buf24 = torch.ops.aten.embedding.default(
        arg24_1, arg25_1, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf25 = buf24

    # Topologically Sorted Source Nodes: [embedding_13], Original ATen: [aten.embedding]
    buf26 = torch.ops.aten.embedding.default(
        arg26_1, arg27_1, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf27 = buf26

    # Topologically Sorted Source Nodes: [embedding_14], Original ATen: [aten.embedding]
    buf28 = torch.ops.aten.embedding.default(
        arg28_1, arg29_1, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf29 = buf28

    # Topologically Sorted Source Nodes: [embedding_15], Original ATen: [aten.embedding]
    buf30 = torch.ops.aten.embedding.default(
        arg30_1, arg31_1, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf31 = buf30

    # Topologically Sorted Source Nodes: [embedding_16], Original ATen: [aten.embedding]
    buf32 = torch.ops.aten.embedding.default(
        arg32_1, arg33_1, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf33 = buf32

    # --- 4 sequence features (128, 50): map -1 sentinel indices to 0 via the
    # eq/zeros_like/where kernel, embed, then sum over the 50 slots ---
    buf34 = empty_strided((128, 50), (50, 1), device='npu', dtype=torch.int64)
    # Topologically Sorted Source Nodes: [eq, zeros_like, feature_dense], Original ATen: [aten.eq, aten.zeros_like, aten.where]
    stream0 = get_raw_stream(0)
    triton_unk_fused_eq_where_zeros_like_0.run(
        arg34_1, buf34, 6400, grid=grid(6400), stream=stream0)

    # Topologically Sorted Source Nodes: [eq, zeros_like, feature_dense, embedding_17], Original ATen: [aten.eq, aten.zeros_like, aten.where, aten.embedding]
    buf35 = torch.ops.aten.embedding.default(
        arg35_1, buf34, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf36 = buf35

    buf37 = empty_strided((128, 16), (16, 1),
                          device='npu', dtype=torch.float32)
    # Topologically Sorted Source Nodes: [sum_1], Original ATen: [aten.sum]
    stream0 = get_raw_stream(0)
    triton_unk_fused_sum_1.run(
        buf36, buf37, 128, 16, 50, grid=grid(128, 16), stream=stream0)

    buf38 = empty_strided((128, 50), (50, 1), device='npu', dtype=torch.int64)
    # Topologically Sorted Source Nodes: [eq_1, zeros_like_1, feature_dense_1], Original ATen: [aten.eq, aten.zeros_like, aten.where]
    stream0 = get_raw_stream(0)
    triton_unk_fused_eq_where_zeros_like_0.run(
        arg36_1, buf38, 6400, grid=grid(6400), stream=stream0)

    # Topologically Sorted Source Nodes: [eq_1, zeros_like_1, feature_dense_1, embedding_18], Original ATen: [aten.eq, aten.zeros_like, aten.where, aten.embedding]
    buf39 = torch.ops.aten.embedding.default(
        arg37_1, buf38, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf40 = buf39

    buf41 = empty_strided((128, 16), (16, 1),
                          device='npu', dtype=torch.float32)
    # Topologically Sorted Source Nodes: [sum_2], Original ATen: [aten.sum]
    stream0 = get_raw_stream(0)
    triton_unk_fused_sum_1.run(
        buf40, buf41, 128, 16, 50, grid=grid(128, 16), stream=stream0)

    buf42 = empty_strided((128, 50), (50, 1), device='npu', dtype=torch.int64)
    # Topologically Sorted Source Nodes: [eq_2, zeros_like_2, feature_dense_2], Original ATen: [aten.eq, aten.zeros_like, aten.where]
    stream0 = get_raw_stream(0)
    triton_unk_fused_eq_where_zeros_like_0.run(
        arg38_1, buf42, 6400, grid=grid(6400), stream=stream0)

    # Topologically Sorted Source Nodes: [eq_2, zeros_like_2, feature_dense_2, embedding_19], Original ATen: [aten.eq, aten.zeros_like, aten.where, aten.embedding]
    buf43 = torch.ops.aten.embedding.default(
        arg39_1, buf42, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf44 = buf43

    buf45 = empty_strided((128, 16), (16, 1),
                          device='npu', dtype=torch.float32)
    # Topologically Sorted Source Nodes: [sum_3], Original ATen: [aten.sum]
    stream0 = get_raw_stream(0)
    triton_unk_fused_sum_1.run(
        buf44, buf45, 128, 16, 50, grid=grid(128, 16), stream=stream0)

    buf46 = empty_strided((128, 50), (50, 1), device='npu', dtype=torch.int64)
    # Topologically Sorted Source Nodes: [eq_3, zeros_like_3, feature_dense_3], Original ATen: [aten.eq, aten.zeros_like, aten.where]
    stream0 = get_raw_stream(0)
    triton_unk_fused_eq_where_zeros_like_0.run(
        arg40_1, buf46, 6400, grid=grid(6400), stream=stream0)

    # Topologically Sorted Source Nodes: [eq_3, zeros_like_3, feature_dense_3, embedding_20], Original ATen: [aten.eq, aten.zeros_like, aten.where, aten.embedding]
    buf47 = torch.ops.aten.embedding.default(
        arg41_1, buf46, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf48 = buf47

    buf49 = empty_strided((128, 16), (16, 1),
                          device='npu', dtype=torch.float32)
    # Topologically Sorted Source Nodes: [sum_4], Original ATen: [aten.sum]
    stream0 = get_raw_stream(0)
    triton_unk_fused_sum_1.run(
        buf48, buf49, 128, 16, 50, grid=grid(128, 16), stream=stream0)

    # --- 2 sparse features (128, 38): same sentinel masking, then an
    # index-weighted (mul) sum over the 38 slots ---
    buf50 = empty_strided((128, 38), (38, 1), device='npu', dtype=torch.int64)
    # Topologically Sorted Source Nodes: [eq_4, zeros_like_4, feature_sparse], Original ATen: [aten.eq, aten.zeros_like, aten.where]
    stream0 = get_raw_stream(0)
    triton_unk_fused_eq_where_zeros_like_2.run(
        arg42_1, buf50, 4864, grid=grid(4864), stream=stream0)
    # Topologically Sorted Source Nodes: [eq_4, zeros_like_4, feature_sparse, embedding_21], Original ATen: [aten.eq, aten.zeros_like, aten.where, aten.embedding]
    buf51 = torch.ops.aten.embedding.default(
        arg43_1, buf50, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf52 = buf51

    buf53 = empty_strided((128, 16), (16, 1),
                          device='npu', dtype=torch.float32)
    # Topologically Sorted Source Nodes: [sparse_lookup_embedding, sum_5], Original ATen: [aten.mul, aten.sum]
    stream0 = get_raw_stream(0)
    triton_unk_fused_mul_sum_3.run(
        buf52, arg42_1, buf53, 128, 16, 38, grid=grid(128, 16), stream=stream0)

    buf54 = empty_strided((128, 38), (38, 1), device='npu', dtype=torch.int64)
    # Topologically Sorted Source Nodes: [eq_5, zeros_like_5, feature_sparse_1], Original ATen: [aten.eq, aten.zeros_like, aten.where]
    stream0 = get_raw_stream(0)
    triton_unk_fused_eq_where_zeros_like_2.run(
        arg44_1, buf54, 4864, grid=grid(4864), stream=stream0)
    # Topologically Sorted Source Nodes: [eq_5, zeros_like_5, feature_sparse_1, embedding_22], Original ATen: [aten.eq, aten.zeros_like, aten.where, aten.embedding]
    buf55 = torch.ops.aten.embedding.default(
        arg45_1, buf54, padding_idx=-1, scale_grad_by_freq=False, sparse=False)

    buf56 = buf55

    buf57 = empty_strided((128, 16), (16, 1),
                          device='npu', dtype=torch.float32)
    # Topologically Sorted Source Nodes: [sparse_lookup_embedding_1, sum_6], Original ATen: [aten.mul, aten.sum]
    stream0 = get_raw_stream(0)
    triton_unk_fused_mul_sum_3.run(
        buf56, arg44_1, buf57, 128, 16, 38, grid=grid(128, 16), stream=stream0)

    # --- concatenate all 23 per-feature 16-dim vectors into (128, 368) ---
    buf58 = empty_strided((128, 368), (368, 1),
                          device='npu', dtype=torch.float32)
    # Topologically Sorted Source Nodes: [input_features], Original ATen: [aten.cat]
    stream0 = get_raw_stream(0)
    triton_unk_fused_cat_4.run(buf1, buf3, buf5, buf7, buf9, buf11, buf13, buf15, buf17, buf19, buf21, buf23, buf25, buf27,
                               buf29, buf31, buf33, buf37, buf41, buf45, buf49, buf53, buf57, buf58, 128, 368, grid=grid(128, 368), stream=stream0)

    buf59 = empty_strided((128, 368), (368, 1),
                          device='npu', dtype=torch.float32)
    # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
    stream0 = get_raw_stream(0)
    triton_unk_fused_cat_5.run(
        buf58, buf59, 128, 368, grid=grid(128, 368), stream=stream0)

    # --- first MLP tower: 368 -> 256 -> 128 -> 1 (mm + in-place bias+ReLU) ---
    # Topologically Sorted Source Nodes: [fc], Original ATen: [aten.addmm]
    buf60 = torch.ops.aten.mm.default(
        buf59, reinterpret_tensor(arg46_1, (368, 256), (1, 368), 0))

    buf61 = buf60

    buf62 = buf61   # reuse
    # Topologically Sorted Source Nodes: [fc, fc_1], Original ATen: [aten.addmm, aten.relu]
    stream0 = get_raw_stream(0)
    triton_unk_fused_addmm_relu_6.run(
        buf62, arg47_1, 128, 256, grid=grid(128, 256), stream=stream0)

    # Topologically Sorted Source Nodes: [fc, fc_1, fc_3], Original ATen: [aten.addmm, aten.relu]
    buf63 = torch.ops.aten.mm.default(
        buf62, reinterpret_tensor(arg48_1, (256, 128), (1, 256), 0))

    buf64 = buf63

    buf65 = buf64   # reuse
    # Topologically Sorted Source Nodes: [fc_3, fc_4], Original ATen: [aten.addmm, aten.relu]
    stream0 = get_raw_stream(0)
    triton_unk_fused_addmm_relu_7.run(
        buf65, arg49_1, 128, 128, grid=grid(128, 128), stream=stream0)

    # Topologically Sorted Source Nodes: [fc_3, fc_4, ctr_logit], Original ATen: [aten.addmm, aten.relu, aten.mm]
    buf66 = torch.ops.aten.mm.default(
        buf65, reinterpret_tensor(arg54_1, (128, 1), (1, 128), 0))

    buf67 = buf66

    # --- second MLP tower, identical shape, from the same shared input ---
    # Topologically Sorted Source Nodes: [fc_6], Original ATen: [aten.addmm]
    buf68 = torch.ops.aten.mm.default(
        buf59, reinterpret_tensor(arg50_1, (368, 256), (1, 368), 0))

    buf69 = buf68

    buf70 = buf69   # reuse
    # Topologically Sorted Source Nodes: [fc_6, fc_7], Original ATen: [aten.addmm, aten.relu]
    stream0 = get_raw_stream(0)
    triton_unk_fused_addmm_relu_6.run(
        buf70, arg51_1, 128, 256, grid=grid(128, 256), stream=stream0)

    # Topologically Sorted Source Nodes: [fc_6, fc_7, fc_9], Original ATen: [aten.addmm, aten.relu]
    buf71 = torch.ops.aten.mm.default(
        buf70, reinterpret_tensor(arg52_1, (256, 128), (1, 256), 0))

    buf72 = buf71

    buf73 = buf72   # reuse
    # Topologically Sorted Source Nodes: [fc_9, fc_10], Original ATen: [aten.addmm, aten.relu]
    stream0 = get_raw_stream(0)
    triton_unk_fused_addmm_relu_7.run(
        buf73, arg53_1, 128, 128, grid=grid(128, 128), stream=stream0)

    # Topologically Sorted Source Nodes: [fc_9, fc_10, cvr_logit], Original ATen: [aten.addmm, aten.relu, aten.mm]
    buf74 = torch.ops.aten.mm.default(
        buf73, reinterpret_tensor(arg55_1, (128, 1), (1, 128), 0))

    buf75 = buf74

    # --- fuse sigmoid / sigmoid*sigmoid heads into the (128, 2) output ---
    buf76 = empty_strided((128, 2), (2, 1), device='npu', dtype=torch.float32)
    # Topologically Sorted Source Nodes: [task_outs], Original ATen: [aten.cat]
    stream0 = get_raw_stream(0)
    triton_unk_fused_cat_8.run(
        buf67, arg56_1, buf75, buf76, 128, 2, grid=grid(128, 2), stream=stream0)

    # Two strided column views of buf76 (stride 2, offsets 0 and 1) — no copy.
    return (reinterpret_tensor(buf76, (128, ), (2, ), 0), reinterpret_tensor(buf76, (128, ), (2, ), 1), )


def benchmark_compiled_module(times=10, repeat=10):
    """Benchmark `call` with randomly-initialised NPU inputs.

    Builds one tensor per graph input with the exact shape/stride/dtype the
    traced graph expects (embedding tables sized to the original vocabularies,
    int64 index tensors, MLP weights/biases), then times `call` via
    torch._inductor's print_performance helper.

    times/repeat: forwarded to print_performance (inner iterations and outer
    repeats of the timing loop).
    NOTE(review): index tensors are drawn by rand_strided over int64 and are
    not clamped to each table's vocabulary here — presumably print_performance
    / rand_strided keeps them in range; verify before reusing this harness.
    """
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((289094, 16), (16, 1),
                          device='npu:0', dtype=torch.float32)
    arg1_1 = rand_strided((128, ), (1, ), device='npu:0', dtype=torch.int64)
    arg2_1 = rand_strided(
        (98, 16), (16, 1), device='npu:0', dtype=torch.float32)
    arg3_1 = rand_strided((128, ), (1, ), device='npu:0', dtype=torch.int64)
    arg4_1 = rand_strided(
        (14, 16), (16, 1), device='npu:0', dtype=torch.float32)
    arg5_1 = rand_strided((128, ), (1, ), device='npu:0', dtype=torch.int64)
    arg6_1 = rand_strided(
        (3, 16), (16, 1), device='npu:0', dtype=torch.float32)
    arg7_1 = rand_strided((128, ), (1, ), device='npu:0', dtype=torch.int64)
    arg8_1 = rand_strided(
        (8, 16), (16, 1), device='npu:0', dtype=torch.float32)
    arg9_1 = rand_strided((128, ), (1, ), device='npu:0', dtype=torch.int64)
    arg10_1 = rand_strided(
        (4, 16), (16, 1), device='npu:0', dtype=torch.float32)
    arg11_1 = rand_strided((128, ), (1, ), device='npu:0', dtype=torch.int64)
    arg12_1 = rand_strided(
        (4, 16), (16, 1), device='npu:0', dtype=torch.float32)
    arg13_1 = rand_strided((128, ), (1, ), device='npu:0', dtype=torch.int64)
    arg14_1 = rand_strided(
        (3, 16), (16, 1), device='npu:0', dtype=torch.float32)
    arg15_1 = rand_strided((128, ), (1, ), device='npu:0', dtype=torch.int64)
    arg16_1 = rand_strided(
        (5, 16), (16, 1), device='npu:0', dtype=torch.float32)
    arg17_1 = rand_strided((128, ), (1, ), device='npu:0', dtype=torch.int64)
    arg18_1 = rand_strided((1820039, 16), (16, 1),
                           device='npu:0', dtype=torch.float32)
    arg19_1 = rand_strided((128, ), (1, ), device='npu:0', dtype=torch.int64)
    arg20_1 = rand_strided((8202, 16), (16, 1),
                           device='npu:0', dtype=torch.float32)
    arg21_1 = rand_strided((128, ), (1, ), device='npu:0', dtype=torch.int64)
    arg22_1 = rand_strided((507539, 16), (16, 1),
                           device='npu:0', dtype=torch.float32)
    arg23_1 = rand_strided((128, ), (1, ), device='npu:0', dtype=torch.int64)
    arg24_1 = rand_strided((179390, 16), (16, 1),
                           device='npu:0', dtype=torch.float32)
    arg25_1 = rand_strided((128, ), (1, ), device='npu:0', dtype=torch.int64)
    arg26_1 = rand_strided((7335, 16), (16, 1),
                           device='npu:0', dtype=torch.float32)
    arg27_1 = rand_strided((128, ), (1, ), device='npu:0', dtype=torch.int64)
    arg28_1 = rand_strided((293512, 16), (16, 1),
                           device='npu:0', dtype=torch.float32)
    arg29_1 = rand_strided((128, ), (1, ), device='npu:0', dtype=torch.int64)
    arg30_1 = rand_strided((115538, 16), (16, 1),
                           device='npu:0', dtype=torch.float32)
    arg31_1 = rand_strided((128, ), (1, ), device='npu:0', dtype=torch.int64)
    arg32_1 = rand_strided(
        (4, 16), (16, 1), device='npu:0', dtype=torch.float32)
    arg33_1 = rand_strided((128, ), (1, ), device='npu:0', dtype=torch.int64)
    arg34_1 = rand_strided(
        (128, 50), (50, 1), device='npu:0', dtype=torch.int64)
    arg35_1 = rand_strided((11734, 16), (16, 1),
                           device='npu:0', dtype=torch.float32)
    arg36_1 = rand_strided(
        (128, 50), (50, 1), device='npu:0', dtype=torch.int64)
    arg37_1 = rand_strided((1353406, 16), (16, 1),
                           device='npu:0', dtype=torch.float32)
    arg38_1 = rand_strided(
        (128, 50), (50, 1), device='npu:0', dtype=torch.int64)
    arg39_1 = rand_strided((315511, 16), (16, 1),
                           device='npu:0', dtype=torch.float32)
    arg40_1 = rand_strided(
        (128, 50), (50, 1), device='npu:0', dtype=torch.int64)
    arg41_1 = rand_strided((97182, 16), (16, 1),
                           device='npu:0', dtype=torch.float32)
    arg42_1 = rand_strided(
        (128, 38), (38, 1), device='npu:0', dtype=torch.int64)
    arg43_1 = rand_strided((98166, 16), (16, 1),
                           device='npu:0', dtype=torch.float32)
    arg44_1 = rand_strided(
        (128, 38), (38, 1), device='npu:0', dtype=torch.int64)
    arg45_1 = rand_strided((81913, 16), (16, 1),
                           device='npu:0', dtype=torch.float32)
    arg46_1 = rand_strided((256, 368), (368, 1),
                           device='npu:0', dtype=torch.float32)
    arg47_1 = rand_strided((256, ), (1, ), device='npu:0', dtype=torch.float32)
    arg48_1 = rand_strided((128, 256), (256, 1),
                           device='npu:0', dtype=torch.float32)
    arg49_1 = rand_strided((128, ), (1, ), device='npu:0', dtype=torch.float32)
    arg50_1 = rand_strided((256, 368), (368, 1),
                           device='npu:0', dtype=torch.float32)
    arg51_1 = rand_strided((256, ), (1, ), device='npu:0', dtype=torch.float32)
    arg52_1 = rand_strided((128, 256), (256, 1),
                           device='npu:0', dtype=torch.float32)
    arg53_1 = rand_strided((128, ), (1, ), device='npu:0', dtype=torch.float32)
    arg54_1 = rand_strided(
        (1, 128), (128, 1), device='npu:0', dtype=torch.float32)
    arg55_1 = rand_strided(
        (1, 128), (128, 1), device='npu:0', dtype=torch.float32)
    arg56_1 = rand_strided((1, ), (1, ), device='npu:0', dtype=torch.float32)

    # Fresh list per invocation: `call` clears its argument list in place.
    def fn(): return call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1, arg7_1, arg8_1, arg9_1, arg10_1, arg11_1, arg12_1, arg13_1, arg14_1, arg15_1, arg16_1, arg17_1, arg18_1, arg19_1, arg20_1, arg21_1, arg22_1, arg23_1, arg24_1, arg25_1, arg26_1, arg27_1,
                           arg28_1, arg29_1, arg30_1, arg31_1, arg32_1, arg33_1, arg34_1, arg35_1, arg36_1, arg37_1, arg38_1, arg39_1, arg40_1, arg41_1, arg42_1, arg43_1, arg44_1, arg45_1, arg46_1, arg47_1, arg48_1, arg49_1, arg50_1, arg51_1, arg52_1, arg53_1, arg54_1, arg55_1, arg56_1])
    return print_performance(fn, times=times, repeat=repeat)


# Script entry point: hand the benchmark harness to Inductor's standard
# wrapper-benchmark driver ('None' is the compiled-module name slot).
if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
