
#include "shm_bw.h"
// Upper bound on the grid size; the kernel grid-strides, so capping the
// block count is safe for any N.
const size_t MAX_BLOCKS = 8192;
// Number of elements each thread processes per tile (also sizes the
// per-thread register buffer in the kernel).
const size_t THR_SIZE = 64;
// Shared-memory bandwidth benchmark kernel.
//
// Streams `x` through dynamic shared memory in tiles of
// blockDim.x * THR_SIZE elements per block, performs 1000 add-`y` passes
// entirely in shared memory, then writes the tile to `out`. The grid
// strides over N, so any grid size launches correctly.
//
// Launch requirements:
//   - dynamic shared memory: blockDim.x * THR_SIZE * sizeof(float) bytes
//   - 1-D grid and block
// Tail elements (when N is not a multiple of the per-pass element count)
// are guarded, so partial tiles never touch memory past x[N-1]/out[N-1].
__global__ void cuda_add_shm_ori(float *out, float *x, float y, size_t N) {
  extern __shared__ float sharedMem[];
  const size_t tile = size_t(blockDim.x) * THR_SIZE;  // elements per block tile
  const size_t num_in_grid = tile * gridDim.x;        // elements per grid pass
  float tmp[THR_SIZE];
  for (size_t n = 0; n < N; n += num_in_grid) {
    const size_t base = n + blockIdx.x * tile;  // this block's tile start in x/out
    // Stage the tile into shared memory; guard the tail so a partial tile
    // never reads out of bounds.
    for (size_t i = threadIdx.x; i < tile; i += blockDim.x) {
      if (base + i < N) sharedMem[i] = x[base + i];
    }
    __syncthreads();
    // 1000 add passes, all traffic in shared memory. Each thread only
    // touches its own slots (threadIdx.x + blockDim.x * i), so no barrier
    // is needed between the read and write loops.
#pragma unroll
    for (size_t j = 0; j < 1000; j++) {
#pragma unroll
      for (size_t i = 0; i < THR_SIZE; i += 1) {
        size_t idx = threadIdx.x + blockDim.x * i;
        tmp[i] = sharedMem[idx] + y;
      }
#pragma unroll
      for (size_t i = 0; i < THR_SIZE; i += 1) {
        size_t idx = threadIdx.x + blockDim.x * i;
        sharedMem[idx] = tmp[i];
      }
    }
    __syncthreads();
    // Write the tile back, again guarding the tail.
    for (size_t i = threadIdx.x; i < tile; i += blockDim.x) {
      if (base + i < N) out[base + i] = sharedMem[i];
    }
    __syncthreads();  // tile buffer is reused by the next outer iteration
  }
}

// Host launcher for cuda_add_shm_ori.
//
// out, x : device pointers to N floats
// y      : addend applied 1000 times per element by the kernel
// stream : CUDA stream for the asynchronous launch
// N      : element count; N == 0 is a no-op (a 0-block launch would be an
//          invalid configuration)
//
// Grid size is ceil(N / elements-per-block), capped at MAX_BLOCKS; the
// kernel grid-strides, so the cap is safe for any N.
void shm_bw_ori(float *out, float *x, float y, cudaStream_t stream, size_t N) {
  if (N == 0) return;  // avoid an invalid 0-block launch
  constexpr size_t threads_per_block = 128;
  constexpr size_t elems_per_block = threads_per_block * THR_SIZE;
  size_t num_blocks = (N + elems_per_block - 1) / elems_per_block;  // ceil-div
  if (num_blocks > MAX_BLOCKS) num_blocks = MAX_BLOCKS;
  // Dynamic shared memory: one float per element staged by a block
  // (was hard-coded `4`; use sizeof(float) so the size tracks the type).
  const size_t smem_bytes = elems_per_block * sizeof(float);
  cuda_add_shm_ori<<<num_blocks, threads_per_block, smem_bytes, stream>>>(
      out, x, y, N);
  // NOTE(review): callers may want cudaGetLastError() here to surface
  // launch-configuration errors; omitted to keep error-state semantics
  // identical to the original.
}
