# Operator registration JSON (msopgen-style project file) for the
# MaxReductionOverADimensionCustom AscendC op: one required float ND input
# "x", one required float ND output "y", and a required int64 attribute
# "dim" selecting the axis to reduce (default 1).
project_json_src = '''
[
  {
    "op": "MaxReductionOverADimensionCustom",
    "language": "cpp",
    "input_desc": [
      {"name": "x", "format": ["ND"], "type": ["float"], "param_type": "required"}
    ],
    "output_desc": [
      {"name": "y", "format": ["ND"], "type": ["float"], "param_type": "required"}
    ],
    "attr": [
      {"name": "dim", "type": ["int64_t"], "default_value": 1, "param_type": "required"}
    ]
  }
]
'''

# Host tiling header: declares the tiling-data struct shared between the host
# TilingFunc and the device kernel. The input is viewed as a 3-D tensor
# [outer, reduceDim, inner] collapsed around the reduction axis.
host_tiling_src = """
#pragma once
#include "register/tilingdata_base.h"

namespace optiling {
BEGIN_TILING_DATA_DEF(MaxReductionOverADimensionCustomTilingData)
  TILING_DATA_FIELD_DEF(uint32_t, outer);
  TILING_DATA_FIELD_DEF(uint32_t, reduceDim);
  TILING_DATA_FIELD_DEF(uint32_t, inner);
END_TILING_DATA_DEF;

REGISTER_TILING_DATA_CLASS(MaxReductionOverADimensionCustom,
                           MaxReductionOverADimensionCustomTilingData)
} // namespace optiling
"""
# Host-side operator source: tiling computation, shape inference, and op
# definition/registration for MaxReductionOverADimensionCustom.
# Fixes vs. the original: TilingFunc and InferShape now null-check the
# context accessors and reject an out-of-range `dim` (which previously
# indexed past the shape), instead of dereferencing blindly.
host_operator_src = """
#include "max_reduction_over_a_dimension_custom_tiling.h"
#include "register/op_def_registry.h"

namespace optiling {
const uint32_t BLOCK_DIM = 32;

// Tiling: view x as [outer, reduceDim, inner] around the reduction axis and
// publish those extents (plus the tiling key) to the device kernel.
static ge::graphStatus TilingFunc(gert::TilingContext* context) {
  MaxReductionOverADimensionCustomTilingData tiling;
  const gert::StorageShape* x_storage = context->GetInputShape(0);
  if (x_storage == nullptr) {
    return ge::GRAPH_FAILED;
  }
  const gert::Shape xshape = x_storage->GetStorageShape();

  auto attrs = context->GetAttrs();
  if (attrs == nullptr) {
    return ge::GRAPH_FAILED;
  }
  const int64_t* dim_ptr = attrs->GetAttrPointer<int64_t>(0);
  if (dim_ptr == nullptr) {
    return ge::GRAPH_FAILED;
  }
  int64_t dim = *dim_ptr;
  const int32_t ndims = xshape.GetDimNum();

  if (dim < 0) dim += ndims;
  // Reject an out-of-range axis instead of calling GetDim past the shape.
  if (dim < 0 || dim >= ndims) {
    return ge::GRAPH_FAILED;
  }

  uint32_t outer = 1, inner = 1;
  for (int i = 0; i < dim; ++i) outer *= xshape.GetDim(i);
  for (int i = dim + 1; i < ndims; ++i) inner *= xshape.GetDim(i);

  tiling.set_outer(outer);
  tiling.set_reduceDim(xshape.GetDim(dim));
  tiling.set_inner(inner);

  context->SetBlockDim(BLOCK_DIM);
  tiling.SaveToBuffer(context->GetRawTilingData()->GetData(),
                      context->GetRawTilingData()->GetCapacity());
  context->GetRawTilingData()->SetDataSize(tiling.GetDataSize());
  context->SetTilingKey(1);
  return ge::GRAPH_SUCCESS;
}
}  // namespace optiling

namespace ge {
// Output shape is x's shape with axis `dim` removed (rank-0 for 1-D input).
static ge::graphStatus InferShape(gert::InferShapeContext* context) {
  const gert::Shape* x_shape = context->GetInputShape(0);
  gert::Shape* y_shape = context->GetOutputShape(0);
  if (x_shape == nullptr || y_shape == nullptr) {
    return GRAPH_FAILED;
  }

  const int64_t* dim_ptr = context->GetAttrs()->GetAttrPointer<int64_t>(0);
  if (dim_ptr == nullptr) {
    return GRAPH_FAILED;
  }
  int64_t dim = *dim_ptr;
  int ndims = x_shape->GetDimNum();
  if (dim < 0) dim += ndims;
  // Same axis validation as TilingFunc: fail fast on a bad `dim`.
  if (dim < 0 || dim >= ndims) {
    return GRAPH_FAILED;
  }

  if (ndims == 1) {
    y_shape->SetDimNum(0);
    return GRAPH_SUCCESS;
  }

  y_shape->SetDimNum(ndims - 1);
  int y_idx = 0;
  for (int i = 0; i < ndims; ++i) {
    if (i != dim) y_shape->SetDim(y_idx++, x_shape->GetDim(i));
  }
  return GRAPH_SUCCESS;
}
}  // namespace ge

namespace ops {
// Op definition: float ND input/output, required int attr "dim", AiCore
// tiling and infer-shape hooks, built for ascend910b.
class MaxReductionOverADimensionCustom : public OpDef {
 public:
  explicit MaxReductionOverADimensionCustom(const char* name) : OpDef(name) {
    this->Input("x")
        .ParamType(REQUIRED)
        .DataType({ge::DT_FLOAT})
        .Format({ge::FORMAT_ND});
    this->Output("y")
        .ParamType(REQUIRED)
        .DataType({ge::DT_FLOAT})
        .Format({ge::FORMAT_ND});
    this->Attr("dim").AttrType(REQUIRED).Int(1);
    this->SetInferShape(ge::InferShape);
    this->AICore()
        .SetTiling(optiling::TilingFunc)
        .AddConfig("ascend910b");
  }
};
OP_ADD(MaxReductionOverADimensionCustom);
} // namespace ops
"""
# Device kernel source. Fix vs. the original: the host sets BlockDim = 32 but
# the kernel ignored GetBlockIdx()/GetBlockNum(), so every core redundantly
# computed (and wrote) the entire output. Process() now strides the flat
# outer*inner output index across cores; results are identical, work is split.
kernel_src = """
#include "kernel_operator.h"
using namespace AscendC;

class KernelMaxReductionOverADimensionCustom {
 public:
  // Load tiling extents and bind the global input/output buffers.
  __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling);
    outer = tiling_data.outer;
    reduceDim = tiling_data.reduceDim;
    inner = tiling_data.inner;
    xGm.SetGlobalBuffer((__gm__ float*)x, outer * reduceDim * inner);
    yGm.SetGlobalBuffer((__gm__ float*)y, outer * inner);
  }

  // Each core handles a strided subset of the outer*inner output elements;
  // for each element it scans reduceDim values and keeps the maximum.
  __aicore__ inline void Process() {
    const uint32_t total = outer * inner;
    for (uint32_t idx = GetBlockIdx(); idx < total; idx += GetBlockNum()) {
      const uint32_t o = idx / inner;
      const uint32_t in = idx % inner;
      float maxVal = -3.4e38f;  // ~ -FLT_MAX sentinel
      for (uint32_t r = 0; r < reduceDim; ++r) {
        float v = xGm.GetValue(o * reduceDim * inner + r * inner + in);
        if (v > maxVal) maxVal = v;
      }
      yGm.SetValue(o * inner + in, maxVal);
    }
  }

 private:
  GlobalTensor<float> xGm;
  GlobalTensor<float> yGm;
  uint32_t outer, reduceDim, inner;
};

extern "C" __global__ __aicore__ __attribute__((visibility("default")))
void max_reduction_over_a_dimension_custom(GM_ADDR x, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
  KernelMaxReductionOverADimensionCustom op;
  op.Init(x, y, tiling);
  if (TILING_KEY_IS(1)) op.Process();
}
"""
# PyTorch binding source. Fix vs. the original: the impl now validates `dim`
# with TORCH_CHECK before building the output sizes — an out-of-range dim
# previously produced a silently wrong (un-reduced) output shape and was
# forwarded to the NPU command as-is.
python_bind_src = """
// python/python_bind.cc
#include <torch/library.h>
#include "pytorch_npu_helper.hpp"
#include <torch/extension.h>
#include <vector>
#include <cstdint>

// Output sizes are x's sizes with axis `dim` removed (empty for 1-D input).
static inline std::vector<int64_t> infer_out_sizes(const at::Tensor& x, int64_t dim) {
    const int64_t ndims = x.dim();
    int64_t d = dim;
    if (d < 0) d += ndims;
    if (ndims == 1) return {};
    std::vector<int64_t> out;
    out.reserve(ndims - 1);
    for (int64_t i = 0; i < ndims; ++i) {
        if (i != d) out.push_back(x.size(i));
    }
    return out;
}

static at::Tensor max_reduction_over_a_dimension_custom_impl_npu(const at::Tensor& x, int64_t dim) {
    const int64_t ndims = x.dim();
    TORCH_CHECK(ndims >= 1,
                "max_reduction_over_a_dimension_custom: input must have at least 1 dimension");
    const int64_t d = (dim < 0) ? dim + ndims : dim;
    TORCH_CHECK(d >= 0 && d < ndims,
                "max_reduction_over_a_dimension_custom: dim ", dim,
                " out of range for ", ndims, "-d tensor");
    const auto out_sizes = infer_out_sizes(x, dim);
    at::Tensor result = at::empty(out_sizes, x.options());
    EXEC_NPU_CMD(aclnnMaxReductionOverADimensionCustom, x, d, result);
    return result;
}

// Define the schema (namespace::op).
TORCH_LIBRARY(myops, m) {
    m.def("max_reduction_over_a_dimension_custom(Tensor x, int dim) -> Tensor");
}

// Register the NPU (PrivateUse1) implementation.
TORCH_LIBRARY_IMPL(myops, PrivateUse1, m) {
    m.impl("max_reduction_over_a_dimension_custom", &max_reduction_over_a_dimension_custom_impl_npu);
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("max_reduction_over_a_dimension_custom",
          &max_reduction_over_a_dimension_custom_impl_npu,
          "Max reduction over a dimension (AscendC/NPU)");
}
"""
# Python wrapper module: a thin nn.Module that forwards to the compiled
# custom op (exposed by the pybind module as custom_ops_lib).
model_src = '''
import torch
import torch_npu
import custom_ops_lib

class ModelNew(torch.nn.Module):
    def __init__(self, dim: int):
        super().__init__()
        self.dim = dim

    def forward(self, x: torch.Tensor):
        return custom_ops_lib.max_reduction_over_a_dimension_custom(x, self.dim)
'''
