template <typename T>
__global__ void ppl_cukernel_concat_opt(
    int64_t num_elems,
    const T* inputs,
    int64_t concat_size,
    int64_t top_axis_width,
    DivModFast num_elems_inner_fast,
    int axis_offset,
    T* output)
{
    // Scatter-copies one input tensor into its slice of a concatenated output.
    // Grid-stride loop: correct for any launch configuration, including a
    // single block (useful for debugging) and element counts beyond 2^31.
    const int64_t stride = (int64_t)blockDim.x * gridDim.x;
    int64_t idx          = (int64_t)blockIdx.x * blockDim.x + threadIdx.x;
    for (; idx < num_elems; idx += stride) {
        int outer, inner;
        // Split the flat input index into (outer, inner) around the concat
        // axis using the precomputed fast divide/modulo.
        num_elems_inner_fast.divmod(idx, outer, inner);
        // Shift the outer coordinate by this input's offset along the concat
        // axis, then re-flatten against the output's (wider) axis extent.
        const int64_t dst_idx =
            inner + ((int64_t)outer * top_axis_width + axis_offset) * concat_size;
        output[dst_idx] = inputs[idx];
    }
}
template <typename T>
void test_kernel_launch(
    T* input, T* output,
    int in_batch, int in_height, int in_channels, int in_width,
    int out_batch, int out_height, int out_channels, int out_width,
    int in_elems, int out_elems,
    cudaStream_t stream)
{
    // Launches ppl_cukernel_concat_opt to copy a single NCHW input tensor into
    // the (wider) output tensor, concatenating along the channel axis (axis=1)
    // with the input placed at channel offset 0. `input` and `output` must be
    // device pointers; the launch is asynchronous on `stream`.

    // Inner extent below the concat axis (H * W). Widen before multiplying so
    // large spatial sizes cannot overflow 32-bit arithmetic.
    int64_t concat_size = (int64_t)in_width * in_height;
    // Total extent of the output along the concat axis (output channels).
    int64_t top_axis_width = out_channels;

    // This (single) input starts at channel 0 of the output.
    int axis_offset = 0;

    // Total input element count. Bug fix: the product was previously computed
    // entirely in `int`, overflowing before the widening assignment.
    int64_t num_elems = (int64_t)in_batch * in_height * in_width * in_channels;

    // Fast divide/modulo by the per-batch inner element count (C * H * W),
    // used by the kernel to split flat indices into (batch, inner).
    DivModFast num_elems_inner_fast = DivModFast(in_channels * in_width * in_height);

    int block_size = 256;
    // Ceil-divide in 64-bit, then clamp to the gridDim.x hardware limit
    // (2^31 - 1). Clamping is safe because the kernel uses a grid-stride loop.
    int64_t blocks_needed = (num_elems + block_size - 1) / block_size;
    int grid_size = (blocks_needed > 2147483647LL) ? 2147483647 : (int)blocks_needed;

    ppl_cukernel_concat_opt<<<grid_size, block_size, 0, stream>>>(
        num_elems,
        input,
        concat_size,
        top_axis_width,
        num_elems_inner_fast,
        axis_offset,
        output);
}
// End of concat kernel test launcher.