#ifndef PREPROCESS_KERNEL_CUH
#define PREPROCESS_KERNEL_CUH
#include <iostream>
#include <cuda_runtime.h>

#define GPU_BLOCK_THREADS 512 // 512=32*16
#define INTER_RESIZE_COEF_BITS 11
#define INTER_RESIZE_COEF_SCALE (1 << INTER_RESIZE_COEF_BITS)
#define CAST_BITS (INTER_RESIZE_COEF_BITS << 1)

namespace CUDAKernel
{
    // Clamp `value` into the inclusive range [low, high].
    template <typename _T>
    static __inline__ __device__ _T clipf(_T value, _T low, _T high)
    {
        if (value < low)
            return low;
        if (value > high)
            return high;
        return value;
    }

    // Round a fixed-point value carrying CAST_BITS fractional bits to the
    // nearest integer: add half of one unit, then shift the fraction away.
    static __inline__ __device__ int resize_cast(int value)
    {
        const int half = 1 << (CAST_BITS - 1);
        return (value + half) >> CAST_BITS;
    }

    // Normalization mode applied to each output pixel (see Norm below).
    enum class NormType : int
    {
        None = 0,      // no normalization; raw values pass through
        MeanStd = 1,   // out = (x * alpha - mean) / std
        AlphaBeta = 2  // out = x * alpha + beta
    };

    // Channel-order handling for the output.
    enum class ChannelType : int
    {
        None = 0,   // keep the source channel order
        Invert = 1  // reverse the channel order (e.g. BGR <-> RGB) — presumably; confirm against the kernel implementation
    };

    // Normalization parameters passed to the preprocessing kernel.
    // Only the fields relevant to `type` are read:
    //   MeanStd   uses mean[3], std[3], alpha
    //   AlphaBeta uses alpha, beta
    //   None      uses no fields
    // Factory-method definitions live in the corresponding .cu file.
    struct Norm
    {
        float mean[3];                               // per-channel mean (MeanStd mode)
        float std[3];                                // per-channel std deviation (MeanStd mode)
        float alpha, beta;                           // scale / offset
        NormType type = NormType::None;              // which normalization formula to apply
        ChannelType channel_type = ChannelType::None; // whether to reorder channels

        // out = (x * alpha - mean) / std
        static Norm mean_std(const float mean[3], const float std[3], float alpha = 1 / 255.0f, ChannelType channel_type = ChannelType::None);

        // out = x * alpha + beta
        static Norm alpha_beta(float alpha, float beta = 0, ChannelType channel_type = ChannelType::None);

        // None
        static Norm None();
    };

    // Compute the 1-D grid size (number of blocks) needed so that
    // grid_dims(n).x * block_dims(n).x >= n, i.e. ceil(numJobs / threadsPerBlock).
    //
    // `inline` is required here: this function is *defined* in a header, and
    // without it every translation unit including this header would emit its
    // own external definition, causing multiple-definition link errors (ODR).
    inline dim3 grid_dims(int numJobs)
    {
        // Guard: numJobs <= 0 would otherwise divide by zero below.
        if (numJobs <= 0)
            return dim3(1);
        int numBlockThreads = numJobs < GPU_BLOCK_THREADS ? numJobs : GPU_BLOCK_THREADS;
        // (n - 1) / t + 1 is ceiling division of n by t.
        return dim3((numJobs - 1) / numBlockThreads + 1);
    }

    // Threads per block for a 1-D launch: GPU_BLOCK_THREADS, or fewer when
    // there are not enough jobs to fill a full block.
    //
    // `inline` is required because the definition lives in a header (see
    // grid_dims): without it, multiple inclusion causes duplicate-symbol
    // link errors.
    inline dim3 block_dims(int numJobs)
    {
        // Guard: a block dimension of 0 is an invalid launch configuration.
        if (numJobs <= 0)
            return dim3(1);
        return dim3(numJobs < GPU_BLOCK_THREADS ? numJobs : GPU_BLOCK_THREADS);
    }

    // Warp-affine + bilinear-resample a uint8 image plane into a float buffer
    // and normalize each value according to `norm`, asynchronously on `stream`.
    // Definition lives in the corresponding .cu file — parameter semantics
    // below are inferred from the names and should be confirmed there:
    //   src, src_line_size, src_width, src_height : source image; src_line_size
    //       is presumably the row stride in bytes — TODO confirm
    //   dst, dst_width, dst_height : destination float buffer
    //   matrix_2_3  : 2x3 affine matrix; NOTE(review): likely the dst->src
    //       inverse mapping used by warp kernels — confirm against implementation
    //   const_value : presumably the fill value for out-of-bounds samples — TODO confirm
    //   norm        : normalization parameters (see Norm)
    //   stream      : CUDA stream the kernel is enqueued on
    void warp_affine_bilinear_and_normalize_plane(
        uint8_t *src, int src_line_size, int src_width, int src_height,
        float *dst, int dst_width, int dst_height,
        float *matrix_2_3, uint8_t const_value, const Norm &norm,
        cudaStream_t stream);

};

#endif // PREPROCESS_KERNEL_CUH