#ifndef COMMON_CUDA_HELPER
#define COMMON_CUDA_HELPER

#include <algorithm>

#include <cuda.h>

// Grid-stride loop over [0, n): each thread starts at its global index and
// advances by the total number of launched threads, so any grid size covers
// all n elements.
// NOTE(review): the induction variable is a 32-bit int and the first term is
// blockIdx.x * blockDim.x — this can overflow for n >= 2^31 or very large
// launches; confirm callers keep n within int range.
#define CUDA_1D_KERNEL_LOOP(i, n)                              \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
       i += blockDim.x * gridDim.x)

// 2D grid-stride loop: i strides over [0, n) along the x dimension and j
// over [0, m) along the y dimension, each advancing by the total thread
// count of its dimension.
#define CUDA_2D_KERNEL_LOOP(i, n, j, m)                             \
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);   \
       i += blockDim.x * gridDim.x)                                 \
    for (size_t j = blockIdx.y * blockDim.y + threadIdx.y; j < (m); \
         j += blockDim.y * gridDim.y)

// 2D loop that strides by whole blocks (one (i, j) pair per block index);
// every thread within a block sees the same (i, j).
#define CUDA_2D_KERNEL_BLOCK_LOOP(i, n, j, m)          \
  for (size_t i = blockIdx.x; i < (n); i += gridDim.x) \
    for (size_t j = blockIdx.y; j < (m); j += gridDim.y)

#define THREADS_PER_BLOCK 512

/**
 * Computes the number of thread blocks needed to launch a kernel over N
 * elements: ceil(N / num_threads), capped at a fixed maximum grid size.
 *
 * Kernels launched with this count are expected to iterate with a
 * grid-stride loop (see CUDA_1D_KERNEL_LOOP), so elements beyond
 * max_block_num * num_threads are still processed.
 *
 * @param N            total number of elements to process.
 * @param num_threads  threads per block (defaults to THREADS_PER_BLOCK).
 * @return block count in [0, 4096]; 0 when N == 0 — callers must not
 *         launch with a zero-sized grid.
 */
inline int GET_BLOCKS(const int N, const int num_threads = THREADS_PER_BLOCK) {
  // Ceiling division: adding (num_threads - 1) before the integer divide
  // rounds up, so a partial tail of elements still gets its own block.
  int optimal_block_num = (N + num_threads - 1) / num_threads;
  // Upper bound on the grid size.
  int max_block_num = 4096;
  // std::min (not unqualified min): only <cuda.h> is included here, which
  // does not provide a host-side min(int, int).
  return std::min(optimal_block_num, max_block_num);
}

/**
 * Bilinearly samples `input` (a row-major height x width feature map) at the
 * fractional coordinate (y, x).
 *
 * Coordinates more than one pixel outside the map return 0; coordinates in
 * [-1, 0] are clamped onto the border. Mirrors the weight computation in
 * bilinear_interpolate_gradient.
 *
 * @param input  pointer to the 2D map in device memory.
 * @param height map height in rows.
 * @param width  map width in columns.
 * @param y, x   fractional sample coordinate.
 * @param index  caller's element index, for debugging only (unused).
 * @return the interpolated value, or 0 when (y, x) is out of bounds.
 */
template <typename T>
__device__ T bilinear_interpolate(const T* input, const int height,
                                  const int width, T y, T x,
                                  const int index /* index for debug only*/) {
  // Deal with cases where inverse elements fall outside the feature map.
  // Constants are kept in T so float instantiations avoid double promotion.
  if (y < static_cast<T>(-1) || y > height || x < static_cast<T>(-1) ||
      x > width)
    return 0;

  // Clamp coordinates in [-1, 0] onto the top/left border.
  if (y <= 0) y = 0;
  if (x <= 0) x = 0;

  // Integer corners surrounding (y, x).
  int y_low = (int)y;
  int x_low = (int)x;
  int y_high;
  int x_high;

  // On the last row/column collapse both corners onto the border, so the
  // interpolation degenerates to the edge value along that axis.
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (T)y_low;
  } else {
    y_high = y_low + 1;
  }

  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (T)x_low;
  } else {
    x_high = x_low + 1;
  }

  // Fractional offsets toward the high corners and their complements.
  T ly = y - y_low;
  T lx = x - x_low;
  T hy = static_cast<T>(1) - ly, hx = static_cast<T>(1) - lx;

  // Fetch the four neighbouring values and blend with bilinear weights.
  T v1 = input[y_low * width + x_low];
  T v2 = input[y_low * width + x_high];
  T v3 = input[y_high * width + x_low];
  T v4 = input[y_high * width + x_high];
  T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;

  T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);

  return val;
}

/**
 * Computes the four bilinear weights (w1..w4) and the corner indices for the
 * sample point (y, x), mirroring bilinear_interpolate's forward logic so a
 * backward pass can scatter a gradient to the same four neighbours.
 *
 * For out-of-bounds samples (more than one pixel outside the map) all
 * weights are set to 0 and all corner indices to -1.
 *
 * @param height, width  feature map dimensions.
 * @param y, x           fractional sample coordinate.
 * @param w1..w4         out: bilinear blend weights.
 * @param x_low, x_high, y_low, y_high  out: the four corner indices.
 * @param index          caller's element index, for debugging only (unused).
 */
template <typename T>
__device__ void bilinear_interpolate_gradient(
    const int height, const int width, T y, T x, T& w1, T& w2, T& w3, T& w4,
    int& x_low, int& x_high, int& y_low, int& y_high,
    const int index /* index for debug only*/) {
  // Deal with cases where inverse elements fall outside the feature map.
  // Constants are kept in T so float instantiations avoid double promotion.
  if (y < static_cast<T>(-1) || y > height || x < static_cast<T>(-1) ||
      x > width) {
    // Empty sample: zero weights, sentinel indices.
    w1 = w2 = w3 = w4 = static_cast<T>(0);
    x_low = x_high = y_low = y_high = -1;
    return;
  }

  // Clamp coordinates in [-1, 0] onto the top/left border.
  if (y <= 0) y = 0;
  if (x <= 0) x = 0;

  y_low = (int)y;
  x_low = (int)x;

  // On the last row/column collapse both corners onto the border, matching
  // the forward pass in bilinear_interpolate.
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (T)y_low;
  } else {
    y_high = y_low + 1;
  }

  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (T)x_low;
  } else {
    x_high = x_low + 1;
  }

  // Fractional offsets toward the high corners and their complements.
  T ly = y - y_low;
  T lx = x - x_low;
  T hy = static_cast<T>(1) - ly, hx = static_cast<T>(1) - lx;

  // reference in forward
  // T v1 = input[y_low * width + x_low];
  // T v2 = input[y_low * width + x_high];
  // T v3 = input[y_high * width + x_low];
  // T v4 = input[y_high * width + x_high];
  // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);

  w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;

  return;
}
#endif  // COMMON_CUDA_HELPER