// Clamp a scalar of type T to [_min, _max], performing the comparison in
// float precision, and cast the result back to T.
// Note: the ternary form means a NaN input compares false against _min and
// therefore collapses to _min (same behavior as fmaxf on NaN-first operand).
template <typename T>
__device__ __inline__ T ppl_scalar_clip_opt(const T& in_val, float _min, float _max)
{
    const float as_float      = (float)in_val;
    const float lower_clamped = (as_float > _min) ? as_float : _min;
    const float fully_clamped = (lower_clamped < _max) ? lower_clamped : _max;
    return (T)fully_clamped;
}


// Elementwise clip over a flat (ndarray-layout) buffer.
//
// Each thread processes exactly one srcT element, reinterpreted as Iter
// scalars of type CalT (e.g. a packed vector type split into its lanes;
// srcT == CalT with Iter == 1 is the plain scalar case). Clipping is done
// per-CalT-lane in float precision via ppl_scalar_clip_opt.
//
// Launch contract: 1-D grid/block with at least ceil(num_elems / blockDim.x)
// total threads in x; surplus threads exit at the bounds check. No shared
// memory is used.
template <typename srcT, typename CalT, int Iter>
__global__ void ppl_cukernel_clip_ndarray(
    const uint64_t num_elems,
    const srcT* input,
    srcT* output,
    float _min,
    float _max)
{
    // Promote to 64-bit BEFORE the multiply: blockIdx.x and blockDim.x are
    // 32-bit, so the original unsigned product silently wrapped for grids
    // addressing more than 2^31 threads, even though the result was stored
    // into an int64_t.
    int64_t index = (int64_t)blockIdx.x * blockDim.x + threadIdx.x;
    // Explicit cast keeps the comparison within one signedness; index is
    // always non-negative here.
    if (index >= (int64_t)num_elems)
        return;
    srcT in_val = input[index];
    srcT out_val;
    CalT* out_val_ptr = reinterpret_cast<CalT*>(&out_val);
    CalT* in_val_ptr  = reinterpret_cast<CalT*>(&in_val);
    // Iter is a compile-time constant, so this loop fully unrolls.
    #pragma unroll
    for (int it = 0; it < Iter; it++) {
        out_val_ptr[it] = ppl_scalar_clip_opt<CalT>(in_val_ptr[it], _min, _max);
    }
    output[index] = out_val;
}
// Host-side launcher for the elementwise clip kernel.
//
// Clips out_elems elements of `input` into `output` on `stream`, clamping
// each value to [clip_min, clip_max] (defaults preserve the original
// hard-coded [0, 1] range, so existing call sites are unaffected).
//
// NOTE(review): the batch/height/channel/width and in_elems parameters are
// accepted but unused by this launcher — kept for signature compatibility;
// presumably other launchers in this family need them.
//
// Returns the launch status from cudaGetLastError() (cudaSuccess on a clean
// launch); callers that previously treated this as void can still ignore it.
// Execution errors inside the kernel surface at the next synchronizing call,
// not here.
template <typename T>
cudaError_t test_kernel_launch(
    T* input, T* output,
    int in_batch, int in_height, int in_channels, int in_width,
    int out_batch, int out_height, int out_channels, int out_width,
    int in_elems, int out_elems,
    cudaStream_t stream,
    float clip_min = 0.0f,
    float clip_max = 1.0f)
{
    uint64_t num_elems = (uint64_t)out_elems;
    // A 0-block grid is an invalid launch configuration; nothing to do anyway.
    if (num_elems == 0)
        return cudaSuccess;

    constexpr int block_size = 256;
    // Ceil-divide in 64-bit, then narrow; the result must fit the 2^31-1
    // gridDim.x limit, which holds for any element count reachable from the
    // int out_elems parameter.
    int grid_size = (int)((num_elems + block_size - 1) / block_size);

    ppl_cukernel_clip_ndarray<T, T, 1><<<grid_size, block_size, 0, stream>>>(
        num_elems, input, output, clip_min, clip_max);
    // Kernel launches do not report errors directly; retrieve any
    // launch-configuration error explicitly instead of dropping it.
    return cudaGetLastError();
}
