#include <cuda.h>
#include <cuda_runtime.h>

#include <vector>

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#include <iostream>
#define MAX_GRID_SIZE 65535
#define WARP_SIZE 32

#define CHECK_CUDA_ERROR(val) check((val), #val, __FILE__, __LINE__)
// Abort the program with a file:line diagnostic when a CUDA runtime call
// returns anything other than cudaSuccess. `func` is the stringified call
// site captured by the CHECK_CUDA_ERROR macro.
void check(cudaError_t err, char const *const func, char const *const file,
           int const line) {
  if (err == cudaSuccess) {
    return;
  }
  std::cerr << "CUDA Runtime Error at: " << file << ":" << line << std::endl;
  std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
  std::exit(EXIT_FAILURE);
}

#define CHECK_LAST_CUDA_ERROR() checkLast(__FILE__, __LINE__)
// Report and abort on the most recent sticky CUDA error, e.g. a failed
// kernel launch. Used via the CHECK_LAST_CUDA_ERROR macro right after
// kernel launches, which do not return an error code themselves.
void checkLast(char const *const file, int const line) {
  cudaError_t const err{cudaGetLastError()};
  if (err == cudaSuccess) {
    return;
  }
  std::cerr << "CUDA Runtime Error at: " << file << ":" << line << std::endl;
  std::cerr << cudaGetErrorString(err) << std::endl;
  std::exit(EXIT_FAILURE);
}

/**
 * Maximum number of threads usable in a single block.
 */
#if defined(__HIP_PLATFORM_HCC__)
constexpr int MAX_BLOCK_SIZE = 256;
#else
constexpr int MAX_BLOCK_SIZE = 512;
#endif

/**
 * Index (0-based) of the most significant set bit of `val`,
 * or -1 when no bit is set (val <= 0).
 */
__device__ __forceinline__ int getMSB(int val) {
  return (val <= 0) ? -1 : 31 - __clz(val);
}

/**
 * @desc: Exchange data between threads of the same warp.
 *  The source lane is chosen by XOR-ing the caller's lane ID with a mask,
 *  which enables fast butterfly-style reductions.
 * @param
 *  [IN] value: the value this thread contributes to the exchange
 *  [IN] laneMask: XOR-ed with the current lane ID to select the source lane
 *  [IN] width: effective warp width (usually 32), for partial reductions
 *  [IN] mask: participation mask (only used by __shfl_xor_sync on CUDA 9.0+)
 */
template <typename T>
__device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask,
                                           int width = warpSize,
                                           unsigned int mask = 0xffffffff) {
#if CUDA_VERSION >= 9000
  return __shfl_xor_sync(mask, value, laneMask, width);
#else
  return __shfl_xor(value, laneMask, width);
#endif
}

/**
 * @desc: Choose the number of threads per block for `nElem` elements:
 *  the smallest size on a fixed ladder that covers nElem,
 *  capped at MAX_BLOCK_SIZE.
 * @param:
 *  [IN] nElem: element count
 */
int getNumThreads(int nElem) {
// HIP (Heterogeneous-computing Interface for Portability), a cross-platform
// GPU programming framework, uses a smaller size ladder.
#if defined(__HIP_PLATFORM_HCC__)
  const int threadSizes[5] = {16, 32, 64, 128, MAX_BLOCK_SIZE};
#else
  const int threadSizes[5] = {32, 64, 128, 256, MAX_BLOCK_SIZE};
#endif
  for (int size : threadSizes) {
    if (nElem <= size) {
      return size;
    }
  }
  return MAX_BLOCK_SIZE;
}

/**
 * @desc:
 *  Compute batch-norm statistics: per-channel mean and variance.
 *  Expected launch: grid.x == C (one block per channel plane); block is
 *  2-D with threadIdx.x striding over H*W and threadIdx.y over N.
 * @param:
 *  [IN] input: input data in N,C,H,W layout
 *  [OUT] save_mean: per-channel mean; caller allocates C * sizeof(T)
 *  [OUT] save_var: per-channel variance (not yet sqrt-ed / inverted)
 *  [IN] {N,C,H,W}: input shape
 *  [IN] eps: epsilon (unused here; kept for the commented-out invstd variant)
 */
template <typename T>
__global__ void bnCollectStatisticsKernel(T *input,      // input
                                          T *save_mean,  // avg
                                          T *save_var,   // var
                                          int N, int C, int H, int W,
                                          double eps) {
  // First WARP_SIZE ints hold the per-warp element counts; the remainder is
  // reinterpreted as per-warp (avg, var) pairs of T — see shared_avg_var.
  __shared__ int shared_n[2 * 2 * WARP_SIZE + WARP_SIZE];

  // Fuse dimensions into a 3D view: {high_dim, mid_dim, low_dim}
  int high_dim = N;     // size0
  int mid_dim = C;      // size1
  int low_dim = H * W;  // size2
  // Each block reduces one channel plane over all N and H*W positions.
  int plane = blockIdx.x;      // channel index; gridDim.x is expected to be C
  int M = high_dim * low_dim;  // size0 * size2 = number of reduced elements

  int tid = threadIdx.x + threadIdx.y * blockDim.x;

  /**
   * Compute the mean and variance over (batch, x/y/z).
   * Welford's online algorithm accumulates per-thread partials, then the
   * block is reduced with two rounds of warp shuffles.
   * https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
   */
  T *shared_avg_var = (T *)&shared_n[WARP_SIZE];

  /**
   * Step1. Each thread accumulates its own partial mean and variance.
   */
  T avg = 0;
  T var_n = 0;  // running sum of squared deviations (variance * n)
  int n = 0;
  for (int batch = threadIdx.y; batch < high_dim; batch += blockDim.y) {
    for (int x = threadIdx.x; x < low_dim; x += blockDim.x) {
      T v = input[batch * mid_dim * low_dim + plane * low_dim + x];
      T d1 = v - avg;
      n++;
      avg += d1 / n;
      var_n += d1 * (v - avg);
    }
  }

  /**
   * Step2. First warp-level reduction; getMSB(WARP_SIZE) = 5, so five XOR
   * shuffle rounds pairwise-merge the 32 lanes' partial statistics.
   **/
  for (int i = 0; i < getMSB(WARP_SIZE); ++i) {
    T o_avg = WARP_SHFL_XOR(avg, 1 << i, WARP_SIZE);
    int o_n = WARP_SHFL_XOR(n, 1 << i, WARP_SIZE);
    // factor = 1 / max(1, n + o_n) guards against dividing by zero
    // when both partials are empty.
    T factor = (T)(1.0 / fmax(double(1.0), double(n + o_n)));
    var_n += WARP_SHFL_XOR(var_n, 1 << i, WARP_SIZE) +
             (avg - o_avg) * (avg - o_avg) * n * o_n * factor;
    avg = (n * avg + o_n * o_avg) * factor;
    n += o_n;
  }

  /**
   * Step3. Lane 0 of each warp publishes its merged partial to shared memory.
   */
  __syncthreads();
  if (tid % WARP_SIZE == 0) {
    shared_n[tid / WARP_SIZE] = n;
    shared_avg_var[tid / WARP_SIZE * 2] = avg;
    shared_avg_var[tid / WARP_SIZE * 2 + 1] = var_n;
  }
  __syncthreads();

  /**
   * Step4. Second warp-level reduction: the first warp merges the per-warp
   * partials (lanes beyond the warp count contribute zero-weight partials).
   */
  if (tid < WARP_SIZE) {
    n = (tid < blockDim.x * blockDim.y / WARP_SIZE ? shared_n[tid] : 0);
    avg = (tid < blockDim.x * blockDim.y / WARP_SIZE ? shared_avg_var[2 * tid]
                                                     : 0);
    var_n =
        (tid < blockDim.x * blockDim.y / WARP_SIZE ? shared_avg_var[2 * tid + 1]
                                                   : 0);
  }
  for (int i = 0; i < getMSB(WARP_SIZE); ++i) {
    T o_avg = WARP_SHFL_XOR(avg, 1 << i, WARP_SIZE);
    int o_n = WARP_SHFL_XOR(n, 1 << i, WARP_SIZE);
    T factor = static_cast<T>(1.0 / fmax(double(1.0), double(n + o_n)));
    var_n += WARP_SHFL_XOR(var_n, 1 << i, WARP_SIZE) +
             (avg - o_avg) * (avg - o_avg) * n * o_n * factor;
    avg = (n * avg + o_n * o_avg) * factor;
    n += o_n;
  }

  /**
   * Step5. Save mean and variance (moving averages are updated by a
   * separate kernel).
   */
  if (tid == 0) {
    if (save_mean != NULL) {
      save_mean[plane] = avg;
    }
    if (save_var != NULL) {
      // To preserve accumulation precision, sqrt is deferred; another kernel
      // performs the sqrt and reciprocal.
      save_var[plane] = var_n / M;
      // If precision is not a concern, invert directly here:
      // save_var[plane] = static_cast<T>(1 / sqrt(var_n / M + eps));
    }
  }
}

/**
 * @desc: Fold the batch statistics into the running (EMA) statistics and
 *  invert the saved variance in place.
 * @param:
 *  [in] save_mean: per-channel batch mean
 *  [in/out] save_var: per-channel variance on entry; overwritten with
 *           1/sqrt(var + eps) on exit
 *  [out] running_mean: EMA-updated mean
 *  [out] running_var: EMA-updated variance (rescaled by N/(N-1))
 *  [in] momentum: EMA blending factor applied to the batch statistic
 *  [in] eps: added before the sqrt to avoid division by zero
 *  [in] N: elements per channel plane, i.e. N*H*W
 *  [in] size: number of channels; should match the launch width
 */
template <typename T>
__global__ void bnUpdateStatsAndInvertKernel(T *save_mean, T *save_var,
                                             T *running_mean, T *running_var,
                                             float momentum, float eps, int N,
                                             int size) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= size) {
    return;
  }

  // Blend the fresh batch statistics into the running ones.
  T batch_mean = save_mean[idx];
  T batch_var = save_var[idx];
  running_mean[idx] =
      batch_mean * momentum + (1 - momentum) * running_mean[idx];
  running_var[idx] =
      batch_var * static_cast<T>(N) * momentum / static_cast<T>(N - 1) +
      (1 - momentum) * running_var[idx];
  // bnCollectStatisticsKernel deliberately left the variance un-inverted
  // for precision; finish the 1/sqrt(var + eps) step here.
  save_var[idx] =
      static_cast<T>(1.0f / sqrtf(static_cast<float>(batch_var + eps)));
}

/**
 * @desc: Compute the normalization denominator of formula 3:
 *  invstd = 1 / sqrt(var + eps).
 * @param:
 *  [out] out_invstd: per-channel inverse standard deviation
 *  [in] running_var: per-channel variance
 *  [in] eps: guards against a zero denominator
 *  [in] size: number of channels
 */
template <typename T>
__global__ void bnCalcInvstdKernel(T *out_invstd, T *running_var, float eps,
                                   int size) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= size) {
    return;
  }
  float denom = sqrtf(static_cast<float>(running_var[idx] + eps));
  out_invstd[idx] = static_cast<T>(1.f / denom);
}

/**
 * @desc: Compute per-channel mean/var over each (N, H*W) plane, then finish
 *  the invstd step; optionally fold the batch statistics into the running
 *  (EMA) statistics.
 * @param:
 *  [IN] stream: CUDA stream all kernels are enqueued on
 *  [IN] {n, c, h, w}: input shape
 *  [IN] src_ptr: input data of size n * c * h * w
 *  [IN/OUT] running_mean_ptr / running_var_ptr: EMA stats, length c;
 *           may both be nullptr (then only the in-place invert runs)
 *  [OUT] save_mean_ptr: per-channel mean, length c
 *  [OUT] save_var_ptr: per-channel invstd 1/sqrt(var + eps), length c
 *  [IN] momentum: EMA blending factor
 *  [IN] eps: epsilon guarding the sqrt
 */
template <typename T>
int bnUpdateStats(cudaStream_t stream,
                  int n, int c, int h, int w,
                  T *src_ptr,
                  T *running_mean_ptr, T *running_var_ptr,
                  T *save_mean_ptr, T *save_var_ptr,
                  float momentum, float eps) {
  /**
   * Grid is split along C; each block reduces one (N, H*W) plane.
   */
  dim3 grid(c);
  int tf = getNumThreads(h * w);
  dim3 block(tf, std::max<int>(1, MAX_BLOCK_SIZE / tf));
  // The statistics kernel declares only static __shared__ storage, so no
  // dynamic shared-memory bytes are needed at launch.
  bnCollectStatisticsKernel<T><<<grid, block, 0, stream>>>(
      src_ptr, save_mean_ptr, save_var_ptr, n, c, h, w, eps);
  CHECK_LAST_CUDA_ERROR();

  // Both follow-up kernels are element-wise over the c channels; configure
  // the 1-D launch once instead of duplicating it per branch.
  grid = dim3(static_cast<unsigned int>(
      std::ceil(static_cast<float>(c) / 256)));
  block = dim3(256);

  if (running_mean_ptr && running_var_ptr) {
    int plane_size = n * h * w;
    // Update the running stats via EMA and replace the saved variance with
    // its inverse sqrt: save_var_ptr = 1/sqrt(var + eps).
    bnUpdateStatsAndInvertKernel<T><<<grid, block, 0, stream>>>(
        save_mean_ptr, save_var_ptr, running_mean_ptr, running_var_ptr,
        momentum, eps, plane_size, c);
    CHECK_LAST_CUDA_ERROR();
  } else {
    // No running stats to maintain: invert in place,
    // save_var_ptr = 1/sqrt(save_var_ptr + eps).
    bnCalcInvstdKernel<T>
        <<<grid, block, 0, stream>>>(save_var_ptr, save_var_ptr, eps, c);
    CHECK_LAST_CUDA_ERROR();
  }

  CHECK_CUDA_ERROR(cudaDeviceSynchronize());
  return 0;
}

/**
 * @desc: Apply the affine batch-norm transform (formulas 3 and 4):
 *  dst = gamma * (src - mean) * invstd + beta.
 *  Expected launch: grid.x == c (one x-slice per channel); grid.y/block
 *  tile the (N, H*W) plane.
 * @param:
 *  [in] src_ptr: input in N,C,H,W layout
 *  [out] dst_ptr: output, same layout and size as src_ptr
 *  [in] scale_ptr: per-channel gamma, length c
 *  [in] shift_ptr: per-channel beta, length c
 *  [in] save_mean_ptr: per-channel mean, length c
 *  [in] save_var_ptr: per-channel inverse std 1/sqrt(var+eps), length c
 *  [in] {n, c, h, w}: shape
 *
 * NOTE: the original declared (T *src_ptr, const T *dst_ptr) — the written
 * destination was const and the read-only source was mutable, which cannot
 * compile at instantiation (the caller passes a const source). Parameter
 * order is unchanged; only the const qualifiers were corrected.
 */
template <typename T>
__global__ void bnForwardKernel(const T *src_ptr, T *dst_ptr,
                                const T *scale_ptr, const T *shift_ptr,
                                const T *save_mean_ptr, const T *save_var_ptr,
                                const int n, const int c, const int h,
                                const int w) {
  int plane = blockIdx.x;

  if (plane >= c) {
    return;
  }

  T gamma = scale_ptr[plane];
  T beta = shift_ptr[plane];
  T mean = save_mean_ptr[plane];
  T invstd = save_var_ptr[plane];

  int bs = n;
  int fs = h * w;
  int bstep = blockDim.y * gridDim.y;
  for (int batch = threadIdx.y + blockIdx.y * blockDim.y; batch < bs;
       batch += bstep) {
    for (int feature = threadIdx.x; feature < fs; feature += blockDim.x) {
      // Same flat offset for source and destination (identical layouts).
      int offset = batch * c * h * w + plane * h * w + feature;
      dst_ptr[offset] =
          static_cast<T>(gamma * (src_ptr[offset] - mean) * invstd + beta);
    }
  }
}

/**
 * @desc: Host launcher for bnForwardKernel: normalizes src into dst using
 *  precomputed per-channel mean and inverse standard deviation.
 * @param:
 *  [in] stream: CUDA stream to launch on
 *  [out] dst_ptr: output, n*c*h*w elements
 *  [in] src_ptr: input, n*c*h*w elements
 *  [in] mean_ptr: per-channel mean, length c
 *  [in] var_ptr: per-channel inverse std 1/sqrt(var+eps), length c
 *  [in] scale_ptr / shift_ptr: per-channel gamma / beta, length c
 *  [in] {n, c, h, w}: shape
 * @return: 0 on success (launch failures abort via CHECK_LAST_CUDA_ERROR)
 */
template <typename T>
int bnForward(cudaStream_t stream, T *dst_ptr, const T *src_ptr,
              const T *mean_ptr, const T *var_ptr, const T *scale_ptr,
              const T *shift_ptr, int n, int c, int h, int w) {
  // block.x covers the H*W plane, block.y covers batches; keep the product
  // at a modest 64 threads and bound grid.y by the hardware limit.
  int tf = std::max<int>(getNumThreads(h * w / 4),
                         std::min<int>(getNumThreads(h * w), 64));
  int tb = std::max<int>(64 / tf, 1);
  dim3 grid(
      c, std::max<int>(1, std::min<int>((256 * 1024) / c, (n + tb - 1) / tb)));
  grid.y = std::min<int>(grid.y, MAX_GRID_SIZE);
  dim3 block(tf, tb);
  bnForwardKernel<T><<<grid, block, 0, stream>>>(src_ptr, dst_ptr,
                                                 scale_ptr, shift_ptr,
                                                 mean_ptr, var_ptr,
                                                 n, c, h, w);
  // Catch launch-configuration errors immediately (the other launchers in
  // this file already do this; bnForward was the one inconsistent site).
  CHECK_LAST_CUDA_ERROR();
  return 0;
}

/**
 * batch norm --> training:  bnUpdateStats, then bnForward with batch stats
 *            --> inference: bnCalcInvstdKernel, then bnForward with running
 *                           stats
 * @param:
 *  [out] dst: output tensor, n*c*h*w elements
 *  [out] save_mean / save_invstd: per-channel batch mean / inverse std,
 *        length c (training path only)
 *  [in] src: input tensor, n*c*h*w elements
 *  [in] weight / bias: per-channel gamma / beta, length c
 *  [in/out] running_mean / running_var: EMA statistics, length c;
 *           may be nullptr during training
 *  [in] in_shape: {N, C, H, W}; only rank-4 input is supported
 *  [in] training: true = use batch statistics, false = use running statistics
 *  [in] momentum / eps: EMA blending factor and sqrt guard
 * @return: 0 on success, non-zero on failure
 */
template <typename T>
int BatchNorm(cudaStream_t stream,
              T *dst,
              T *save_mean, T *save_invstd,
              T *src,
              const T *weight, T *bias,
              T *running_mean, T *running_var,
              std::vector<int> in_shape, bool training, double momentum,
              double eps) {
  int ret;

  if (in_shape.size() != 4) {
    std::cout << "[ERROR] (BatchNorm): only support input.dim = 4";
    return -1;
  }

  uint32_t n = in_shape[0];
  uint32_t c = in_shape[1];
  uint32_t h = in_shape[2];
  uint32_t w = in_shape[3];

  float local_momentum = static_cast<float>(momentum);
  float local_eps = static_cast<float>(eps);

  // %u matches the unsigned shape values (the original used %d).
  printf(
      "batch norm forward n = %u, c = %u, h = %u, w = %u,  training = %d , "
      "momentum=%f, eps=%f\n",
      n, c, h, w, (int)training, (float)momentum, (float)eps);

  if (training) {
    // Estimate batch mean/var; updates the EMA stats when they are provided
    // (running_mean / running_var may be nullptr).
    ret = bnUpdateStats<T>(stream,
                           n, c, h, w,
                           src,
                           running_mean, running_var,  // maybe nullptr
                           save_mean, save_invstd,     // save_var
                           local_momentum, local_eps);
    if (ret != 0) {
      std::cout << "[ERROR] (bnUpdateStats): ret:" << ret << std::endl;
      return ret;
    }
    // Normalize with the freshly computed batch statistics.
    // BUG FIX: the original call passed 12 positional arguments in the wrong
    // order for bnForward's 11-parameter signature (stream, dst, src, mean,
    // var, scale, shift, n, c, h, w) and could not compile when instantiated.
    ret = bnForward<T>(stream, dst, src, save_mean, save_invstd,
                       const_cast<T *>(weight), bias, n, c, h, w);
    if (ret != 0) {
      std::cout << "[ERROR] (bnForward): ret:" << ret << std::endl;
      return ret;
    }
  } else {
    // Inference: only the inverse std of the running variance is needed;
    // compute it into a small temporary buffer of c elements.
    dim3 grid(static_cast<unsigned int>(
        std::ceil(static_cast<float>(c) / 256)));
    dim3 block(256);
    T *invstd_ws = nullptr;
    // BUG FIX: the original called cudaMalloc(&src, n*c*h*w*sizeof(T)) —
    // clobbering the input pointer — while the kernel was launched on the
    // still-null `workspace`, and the allocation was never freed.
    CHECK_CUDA_ERROR(cudaMalloc(&invstd_ws, c * sizeof(T)));
    bnCalcInvstdKernel<T>
        <<<grid, block, 0, stream>>>(invstd_ws, running_var, local_eps, c);
    CHECK_LAST_CUDA_ERROR();
    ret = bnForward<T>(stream, dst, src, running_mean, invstd_ws,
                       const_cast<T *>(weight), bias, n, c, h, w);
    if (ret != 0) {
      std::cout << "[ERROR] (bnForward): ret:" << ret << std::endl;
    }
    // The launches above are stream-ordered; drain the stream before
    // releasing the temporary buffer they read from.
    CHECK_CUDA_ERROR(cudaStreamSynchronize(stream));
    CHECK_CUDA_ERROR(cudaFree(invstd_ws));
    if (ret != 0) {
      return ret;
    }
  }

  return 0;
}

// Driver: allocates host/device buffers and exercises the copy path.
// Fixes over the original: CUDA API results are checked, the statistics
// buffers are zero-initialized before being copied back (the original read
// uninitialized device memory), host mallocs are null-checked, and the
// stream is destroyed instead of leaked.
int main(void) {
  // Input shape.
  int N = 32;
  int C = 32;
  int H = 32;
  int W = 32;
  int in_elems = N * C * H * W;
  int var_elems = C;
  int mean_elems = C;

  // Host input filled with a constant so results are easy to inspect.
  float *src_host = (float *)malloc(in_elems * sizeof(float));
  if (src_host == NULL) {
    std::cerr << "host allocation failed" << std::endl;
    return EXIT_FAILURE;
  }
  for (int i = 0; i < in_elems; ++i) {
    src_host[i] = 1;
  }

  float *src_dev = nullptr;
  float *dst_dev = nullptr;
  float *mean_dev = nullptr;
  float *var_dev = nullptr;
  CHECK_CUDA_ERROR(cudaMalloc(&src_dev, in_elems * sizeof(float)));
  CHECK_CUDA_ERROR(cudaMalloc(&dst_dev, in_elems * sizeof(float)));
  CHECK_CUDA_ERROR(cudaMalloc(&mean_dev, mean_elems * sizeof(float)));
  CHECK_CUDA_ERROR(cudaMalloc(&var_dev, var_elems * sizeof(float)));
  // Zero the statistics buffers so the D2H copy below reads defined data
  // even though no kernel has populated them yet.
  CHECK_CUDA_ERROR(cudaMemset(mean_dev, 0, mean_elems * sizeof(float)));
  CHECK_CUDA_ERROR(cudaMemset(var_dev, 0, var_elems * sizeof(float)));

  // memcpy H2D
  CHECK_CUDA_ERROR(cudaMemcpy(src_dev, src_host, in_elems * sizeof(float),
                              cudaMemcpyHostToDevice));
  float eps = 1e-5;
  double momentum = 1e-3;
  bool training = true;
  // Silence unused warnings until the pipeline below is wired up.
  (void)eps;
  (void)momentum;
  (void)training;
  // create
  cudaStream_t stream;
  CHECK_CUDA_ERROR(cudaStreamCreate(&stream));

  // TODO(review): the BatchNorm pipeline is never invoked here, so mean_dev
  // still holds the zeros written above. Allocate weight/bias/running
  // buffers and call BatchNorm<float>(...) to exercise the kernels.

  // memcpy D2H
  float *mean_host = (float *)malloc(mean_elems * sizeof(float));
  if (mean_host != NULL) {
    CHECK_CUDA_ERROR(cudaMemcpy(mean_host, mean_dev,
                                mean_elems * sizeof(float),
                                cudaMemcpyDeviceToHost));
  }

  // free on host
  free(src_host);
  free(mean_host);
  // free on device, and release the stream
  CHECK_CUDA_ERROR(cudaStreamDestroy(stream));
  cudaFree(src_dev);
  cudaFree(dst_dev);
  cudaFree(mean_dev);
  cudaFree(var_dev);
  return 0;
}
