/*
 * @Author: xiangru.xiao xiangru.xiao@mthreads.com
 * @Date: 2023-05-10 16:38:07
 * @LastEditors: xiangru.xiao xiangru.xiao@mthreads.com
 * @LastEditTime: 2023-05-17 14:13:40
 * Copyright (c) 2023 by Moore Threads, All Rights Reserved. 
 */
#pragma once
// Tuning constants shared by the compute- and memory-throughput benchmarks below.
const size_t MAX_WORK_SIZE_BYTE = 1 << 30;  // cap on each device buffer: 1 GiB
const size_t BLOCK_SIZE = 256;  // threads per block for every kernel launch
const size_t ITERS_COM = 4;  // timed repetitions of computekernel
const size_t DO_OPS = 512;  // arithmetic ops per element (ma_op does 2 ops, called DO_OPS/2 times)
const size_t ITERS_MEM = 50;  // timed repetitions of memorykernel
const size_t DO_LOOPS = 1;  // elements copied per thread in memorykernel

// Multiply-add functor used by computekernel. Each invocation performs one
// multiply and one add (2 ops), so reaching DO_OPS ops per element requires
// DO_OPS / 2 invocations.
template <typename T>
struct ma_op {
  // static constexpr (instead of a per-instance `const int` member) makes the
  // trip count a genuine compile-time constant, so the caller's
  // `#pragma unroll` loop bound can actually be unrolled, and keeps the
  // functor stateless (no per-instance storage).
  static constexpr int UNROLL_NUMS = DO_OPS / 2;
  // NOTE(review): `_1` is unused -- the update depends only on `_2`. Confirm
  // whether `_2 = _1 * _2 + _2` (an FMA involving the kernel input) was the
  // intent; left as-is to preserve measured behavior.
  __device__ inline void operator()(T &_1, T &_2) {
    _2 = _2 * _2 + _2;
  }
};

// Compute-throughput benchmark kernel: each thread seeds an accumulator from
// its global index, applies OP UNROLL_NUMS times, and stores the result so
// the arithmetic cannot be optimized away.
// Launch contract: 1-D grid; `out` must hold gridDim.x * blockDim.x elements
// (the host launchers size the grid exactly, so no tail guard is needed).
template <typename T, typename OP>
__global__ void computekernel(T in, T *out) {
  uint idx = blockIdx.x * blockDim.x + threadIdx.x;
  T x = in;
  // Cast the seed through T (not float): the old `(float)idx` silently
  // truncated the index to float precision even for a double instantiation.
  T y = (T)idx;
  OP op;
#pragma unroll
  for (uint i = 0; i < op.UNROLL_NUMS; i++) {
    op(x, y);
  }
  out[idx] = y;
}

// Memory-bandwidth benchmark kernel: each thread copies DO_LOOPS elements
// from `in` to `out`, advancing by the total thread count between copies.
// Launch contract: 1-D grid; each buffer holds
// gridDim.x * blockDim.x * DO_LOOPS elements (host sizes the grid exactly).
template <typename T>
__global__ void memorykernel(T *in, T *out) {
  const uint stride = gridDim.x * blockDim.x;
  uint pos = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
  for (uint loop = 0; loop < DO_LOOPS; loop++, pos += stride) {
    out[pos] = in[pos];
  }
}

// Measures arithmetic throughput for element type T with functor OP<T>.
// Returns work_num * DO_OPS * ITERS_COM / duration / 1e6 -- assuming
// CUDAEvent writes elapsed milliseconds into `duration`, this is GOPS.
// TODO confirm CUDAEvent's time unit against its definition.
template <typename T, template <typename> class OP>
float runComputeKernel(CUDADeviceInfo* deviceinfo) {
  // Work on at most 1/4 of device memory, capped at MAX_WORK_SIZE_BYTE.
  const size_t WORK_SIZE_BYTE = std::min(MAX_WORK_SIZE_BYTE, deviceinfo->globalMemorySize() >> 2);
  void *d_out;
  CHECK_CUDA_ERROR(cudaMalloc(&d_out, WORK_SIZE_BYTE));
  size_t work_num = WORK_SIZE_BYTE / sizeof(T);
  size_t grid_size = work_num / BLOCK_SIZE;
  // Warm up once so clocks/caches settle before the timed region.
  computekernel<T, OP<T>><<<grid_size, BLOCK_SIZE>>>((T)6.0, (T *)d_out);
  // Launches do not return errors themselves: check for launch-config
  // failures explicitly, then for execution failures at the sync.
  CHECK_CUDA_ERROR(cudaGetLastError());
  CHECK_CUDA_ERROR(cudaDeviceSynchronize());
  // Timed region: CUDAEvent records into `duration` when `evt` leaves scope.
  float duration = 0;
  {
    CUDAEvent evt(&duration);
    for (size_t i = 0; i < ITERS_COM; i++) {  // size_t: avoid signed/unsigned compare
      computekernel<T, OP<T>><<<grid_size, BLOCK_SIZE>>>((T)6.0, (T *)d_out);
      CHECK_CUDA_ERROR(cudaGetLastError());
      CHECK_CUDA_ERROR(cudaDeviceSynchronize());
    }
  }
  CHECK_CUDA_ERROR(cudaFree(d_out));
  return float(work_num) * DO_OPS * ITERS_COM / duration / 1e6;
}

// Measures global-memory copy bandwidth for element type T.
// Returns WORK_SIZE_BYTE * 2 * ITERS_MEM / duration / 1e6 (the * 2 counts
// both the read of `d_in` and the write of `d_out`) -- assuming CUDAEvent
// writes elapsed milliseconds into `duration`; TODO confirm the unit.
// NOTE(review): `d_in` is read uninitialized -- fine for bandwidth timing,
// just don't interpret the copied values.
template <typename T>
float runMemoryKernel(CUDADeviceInfo* deviceinfo) {
  // Work on at most 1/4 of device memory, capped at MAX_WORK_SIZE_BYTE.
  const size_t WORK_SIZE_BYTE = std::min(MAX_WORK_SIZE_BYTE, deviceinfo->globalMemorySize() >> 2);
  void *d_in, *d_out;
  CHECK_CUDA_ERROR(cudaMalloc(&d_in, WORK_SIZE_BYTE));
  CHECK_CUDA_ERROR(cudaMalloc(&d_out, WORK_SIZE_BYTE));
  size_t work_num = WORK_SIZE_BYTE / sizeof(T);
  // Each thread copies DO_LOOPS elements, so the grid shrinks accordingly.
  size_t grid_size = work_num / BLOCK_SIZE / DO_LOOPS;
  // Warm up once so clocks/caches settle before the timed region.
  memorykernel<T><<<grid_size, BLOCK_SIZE>>>((T *)d_in, (T *)d_out);
  // Launches do not return errors themselves: check for launch-config
  // failures explicitly, then for execution failures at the sync.
  CHECK_CUDA_ERROR(cudaGetLastError());
  CHECK_CUDA_ERROR(cudaDeviceSynchronize());
  // Timed region: CUDAEvent records into `duration` when `evt` leaves scope.
  float duration = 0;
  {
    CUDAEvent evt(&duration);
    for (size_t i = 0; i < ITERS_MEM; i++) {  // size_t: avoid signed/unsigned compare
      memorykernel<T><<<grid_size, BLOCK_SIZE>>>((T *)d_in, (T *)d_out);
      CHECK_CUDA_ERROR(cudaGetLastError());
      CHECK_CUDA_ERROR(cudaDeviceSynchronize());
    }
  }
  CHECK_CUDA_ERROR(cudaFree(d_out));
  CHECK_CUDA_ERROR(cudaFree(d_in));
  return float(WORK_SIZE_BYTE) * 2 * ITERS_MEM / duration / 1e6;
}