
#include "gmem_bw.h"
// Upper bound on the number of thread blocks per kernel launch; the kernels
// use grid-stride loops, so a capped grid still covers arbitrarily large N.
const size_t MAX_BLOCKS = 8192;
// Computes out[i] = x[i] + y for i in [0, N).
// Uses a grid-stride loop, so any launch configuration covers all N
// elements; valid even with a single block/thread.
// `out` and `x` must be device pointers to at least N floats.
__global__ void cuda_add_ori(float *out, float *x, float y, size_t N) {
  // Cast an operand to size_t BEFORE multiplying: blockIdx.x * blockDim.x
  // and blockDim.x * gridDim.x are otherwise evaluated in unsigned int and
  // can wrap for grids addressing more than 2^32 elements.
  size_t idx = size_t(blockIdx.x) * blockDim.x + threadIdx.x;
  size_t num_threads_in_grid = size_t(blockDim.x) * gridDim.x;
  for (size_t i = idx; i < N; i += num_threads_in_grid) {
    out[i] = x[i] + y;
  }
}
// Computes out[i] = x[i] + y for i in [0, N), vectorized: the main
// grid-stride loop processes four floats per iteration via float4
// loads/stores, and a scalar pass cleans up the final N%4 elements.
// Precondition: `out` and `x` must be 16-byte aligned — required for the
// float4 reinterpret_casts below.
__global__ void cuda_add_opt1(float *out, float *x, float y, size_t N) {
  // Cast before multiplying to avoid unsigned-int overflow on huge grids.
  size_t idx = size_t(blockIdx.x) * blockDim.x + threadIdx.x;
  size_t num_threads_in_grid = size_t(blockDim.x) * gridDim.x;
  for (size_t i = idx; i < N / 4; i += num_threads_in_grid) {
    float4 x4 = reinterpret_cast<float4 *>(x)[i];
    float4 out4;
    out4.x = x4.x + y;
    out4.y = x4.y + y;
    out4.z = x4.z + y;
    out4.w = x4.w + y;
    reinterpret_cast<float4 *>(out)[i] = out4;
  }

  // Scalar tail, done by thread 0, which always exists. The previous
  // condition (idx == N/4) silently skipped these elements whenever N/4
  // exceeded the total thread count — which happens for large N because
  // the launch wrappers cap the grid at MAX_BLOCKS blocks.
  if (idx == 0) {
    for (size_t i = N - N % 4; i < N; ++i) {
      out[i] = x[i] + y;
    }
  }
}

// Launches cuda_add_ori on `stream` to compute out[i] = x[i] + y for
// i in [0, N). Grid size is one thread per element, capped at MAX_BLOCKS
// blocks; the kernel's grid-stride loop covers any remaining elements.
// Asynchronous with respect to the host: caller must synchronize `stream`
// before reading `out`.
void gmem_bw_ori(float *out, float *x, float y, cudaStream_t stream, size_t N) {
  // N == 0 would yield num_blocks == 0, and a zero-block launch fails
  // with cudaErrorInvalidConfiguration — nothing to do anyway.
  if (N == 0) return;
  constexpr size_t threads_per_block = 128;
  auto num_blocks = min(size_t((N + threads_per_block - 1) / threads_per_block), MAX_BLOCKS);
  cuda_add_ori<<<num_blocks, threads_per_block, 0, stream>>>(out, x, y, N);
}

// Launches cuda_add_opt1 on `stream` to compute out[i] = x[i] + y for
// i in [0, N). Sizes the grid at one thread per float4 (4 elements),
// capped at MAX_BLOCKS blocks; the kernel's grid-stride loop covers the
// rest. NOTE(review): cuda_add_opt1 reads/writes via float4, so `out` and
// `x` should be 16-byte aligned — confirm at the call sites.
// Asynchronous with respect to the host: caller must synchronize `stream`
// before reading `out`.
void gmem_bw_opt1(float *out, float *x, float y, cudaStream_t stream, size_t N) {
  // N == 0 would yield num_blocks == 0, and a zero-block launch fails
  // with cudaErrorInvalidConfiguration — nothing to do anyway.
  if (N == 0) return;
  constexpr size_t threads_per_block = 128;
  auto num_blocks = min(size_t((N + threads_per_block * 4 - 1) / (threads_per_block * 4)), MAX_BLOCKS);
  cuda_add_opt1<<<num_blocks, threads_per_block, 0, stream>>>(out, x, y, N);
}