// SiLU activation (x * sigmoid(x)) over fp16 data packed two-per-element as half2.
// - count  : number of half2 elements (i.e. half the number of fp16 values)
// - input  : device pointer to count half2 values (read-only)
// - output : device pointer to count half2 values (may alias input for in-place use
//            since each element is read once then written once by the same thread)
// Math is done in fp32 via __expf (fast-math intrinsic: reduced ulp accuracy) and
// rounded back to fp16 with round-to-nearest-even.
// Requires SM60+ and CUDA 9+; on older targets the kernel body compiles empty
// and the launch is a silent no-op.
__global__ void ppl_cukernel_silu_fp16_pack_opt(const size_t count, const half2 *input, half2 *output) {
    #if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
        // Grid-stride loop with size_t arithmetic: the original
        // `int index = blockIdx.x * blockDim.x + threadIdx.x` both overflowed in
        // 32-bit for large tensors and compared signed vs size_t. The stride loop
        // also keeps the kernel correct for any grid size.
        const size_t stride = (size_t)gridDim.x * blockDim.x;
        for (size_t index = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
             index < count; index += stride) {
            float2 f_val = __half22float2(input[index]);

            half2 t_val;
            // Explicit 1.0f literals keep the whole expression in fp32.
            t_val.x = __float2half_rn(f_val.x / (1.0f + __expf(-f_val.x)));
            t_val.y = __float2half_rn(f_val.y / (1.0f + __expf(-f_val.y)));
            output[index] = t_val;
        }
    #endif
}
// Host-side launcher for the packed-fp16 SiLU kernel.
// Preconditions (not checked here):
// - T is effectively fp16 data reinterpretable as half2 (the pointers are cast),
//   input/output must be 4-byte aligned and hold out_elems fp16 values;
// - out_elems is even, otherwise the trailing odd fp16 value is not processed
//   (count = out_elems / 2 truncates).
// The batch/height/channel/width and in_elems parameters are unused by this
// element-wise kernel; they are kept for signature compatibility with callers.
// The launch is asynchronous on `stream`.
template <typename T>
void test_kernel_launch(
    T* input,  T* output,
    int in_batch, int in_height, int in_channels, int in_width,
    int out_batch, int out_height, int out_channels, int out_width,
    int in_elems, int out_elems, cudaStream_t stream) 
{
    // Assuming T is half2 for this specific kernel call
    const int BS = 256;
    size_t count = out_elems / 2; // Since we are using half2, the count is halved
    if (count == 0) {
        // Guard: with out_elems < 2 the grid size would be 0 and
        // <<<0, BS>>> is an invalid launch configuration error.
        return;
    }
    uint64_t GS = (count + BS - 1) / BS; // ceil-div

    // Explicit narrowing: grid.x is an unsigned int (limit 2^31-1 blocks on
    // SM30+); the grid-stride loop in the kernel keeps results correct even
    // if GS were clamped below the ideal block count.
    ppl_cukernel_silu_fp16_pack_opt<<<(unsigned int)GS, BS, 0, stream>>>(
        count, reinterpret_cast<const half2*>(input), reinterpret_cast<half2*>(output));
    // Surface launch-configuration errors immediately (read-and-clear);
    // execution errors still surface at the next synchronizing call.
    cudaGetLastError();
}
