#include <cstdint>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <random>
#include <string>

#include <cuda_runtime.h>

// Measured bandwidth on the test GPU:
// WIDTH_BLOCK_SIZE=32: BW=123GB/S
// WIDTH_BLOCK_SIZE=128: BW=320GB/S
const int WIDTH_BLOCK_SIZE = 128;
const int HEIGHT_BLOCK_SIZE = 8;
const int WARP_SIZE = 32;

// Two-level concatenation so __COUNTER__ is macro-expanded BEFORE token
// pasting. The original `perf_##name##__COUNTER__` pasted the literal
// token "__COUNTER__" into the identifier instead of a unique number,
// so two PERF() uses of the same name in one scope would collide.
#define PERF_CONCAT_INNER(a, b) a##b
#define PERF_CONCAT(a, b) PERF_CONCAT_INNER(a, b)
// Declares an RAII Perf timer with a unique variable name for this scope.
#define PERF(name) Perf PERF_CONCAT(perf_##name##_, __COUNTER__)(#name)

// RAII scope timer based on CUDA events: records a start event on
// construction and, on destruction, records/synchronizes an end event and
// prints the elapsed GPU time in milliseconds to stdout.
// NOTE: events are recorded on the default stream.
class Perf
{
public:
    explicit Perf(const std::string& name) : m_name(name) {
        cudaEventCreate(&m_start);
        cudaEventCreate(&m_end);
        cudaEventRecord(m_start);
        cudaEventSynchronize(m_start);
    }

    ~Perf() {
        cudaEventRecord(m_end);
        // Block until the end event completes so the elapsed time covers
        // all work submitted inside the timed scope.
        cudaEventSynchronize(m_end);
        float elapsed_time = 0.0f;
        cudaEventElapsedTime(&elapsed_time, m_start, m_end);
        std::cout << m_name << " elapse: " << elapsed_time << " ms" << std::endl;
        // Destroy the events — the original leaked them on every use.
        cudaEventDestroy(m_start);
        cudaEventDestroy(m_end);
    }

    // Non-copyable: a copy would destroy the same events twice.
    Perf(const Perf&) = delete;
    Perf& operator=(const Perf&) = delete;

private:
    std::string m_name;        // label printed with the timing result
    cudaEvent_t m_start, m_end;
}; // class Perf

// Computes a 256-bin histogram of `input` (width x height, 8-bit pixels)
// into `output`. Each thread loads 4 pixels as one uchar4, accumulates
// into a block-private shared-memory histogram, then the block flushes
// its partial histogram into global `output` with one atomic per bin.
//
// Launch expectations: blockDim = (WIDTH_BLOCK_SIZE, HEIGHT_BLOCK_SIZE),
// grid covering ceil((width/4) / blockDim.x) x ceil(height / blockDim.y).
// Preconditions: width is a multiple of 4; `input` is 4-byte aligned;
// `output` is zero-initialized before the launch.
__global__ void GPUHistogramNaive(uint8_t* input, int* output, int width, int height)
{
    int width_idx = blockIdx.x * blockDim.x + threadIdx.x;
    int height_idx = blockIdx.y * blockDim.y + threadIdx.y;

    // Block-private histogram, zeroed cooperatively by the first 256 threads.
    __shared__ int shared_histogram[256];
    int tidx = threadIdx.y * blockDim.x + threadIdx.x;
    if (tidx < 256) {
        shared_histogram[tidx] = 0;
    }
    __syncthreads();

    // Out-of-bounds threads must NOT return early: every thread in the
    // block has to reach the __syncthreads() barriers below, and a barrier
    // skipped by part of the block is undefined behavior. The original
    // early-return was only safe because 2048x2048 divides the tile size
    // exactly; guard the memory work instead.
    if (width_idx < width / 4 && height_idx < height) {
        int idx = width_idx + height_idx * (width / 4);
        uchar4 data = reinterpret_cast<uchar4*>(input)[idx];
        atomicAdd(&shared_histogram[data.x], 1);
        atomicAdd(&shared_histogram[data.y], 1);
        atomicAdd(&shared_histogram[data.z], 1);
        atomicAdd(&shared_histogram[data.w], 1);
    }
    __syncthreads();

    // Flush the block-local bins to the global histogram. No barrier is
    // needed after this (the original had a divergent __syncthreads()
    // inside this branch, which is also undefined behavior).
    if (tidx < 256) {
        atomicAdd(&output[tidx], shared_histogram[tidx]);
    }
}

// Reference (host) histogram: counts occurrences of each byte value in
// input[0, len) into output[0..255]. `output` must be zero-initialized
// by the caller (counts are accumulated, not assigned).
void CPUHistogram(const uint8_t* input, int* output, size_t len)
{
    // size_t index: the original `int i` compared signed vs unsigned and
    // would overflow for len > INT_MAX.
    for (size_t i = 0; i < len; ++i) {
        output[input[i]]++;
    }
}

// Driver: builds a random 2048x2048 8-bit image, computes the histogram on
// CPU and GPU, and verifies the two agree. Returns 0 on match, 1 otherwise.
int main(int argc, char* argv[])
{
    std::cout << "start to run histogram_naive.cu" << std::endl;

    std::srand(std::time(NULL));
    size_t img_w = 2048;
    size_t img_h = 2048;
    size_t img_size = img_w * img_h;

    // Host input: random 8-bit pixel values.
    uint8_t* input = (uint8_t*)malloc(img_size);
    for (size_t i = 0; i < img_size; ++i) {
        input[i] = std::rand() % 256;
    }

    // CPU reference result.
    int histogram[256] = {0};
    CPUHistogram(input, histogram, img_size);

    // GPU buffers.
    uint8_t* d_input;
    cudaMalloc((void**)&d_input, img_size * sizeof(uint8_t));
    cudaMemcpy(d_input, input, img_size * sizeof(uint8_t), cudaMemcpyHostToDevice);

    int* d_output;
    cudaMalloc((void**)&d_output, 256 * sizeof(int));
    // BUG FIX: cudaMalloc does not zero device memory, and the kernel
    // accumulates with atomicAdd — without this memset the output bins
    // start from garbage and verification fails nondeterministically.
    cudaMemset(d_output, 0, 256 * sizeof(int));
    int* h_output = (int*)malloc(256 * sizeof(int));

    // Each thread consumes 4 pixels via a uchar4 load, hence width/4 in x.
    dim3 gridDim;
    gridDim.x = (img_w / 4 + WIDTH_BLOCK_SIZE - 1) / WIDTH_BLOCK_SIZE;
    gridDim.y = (img_h + HEIGHT_BLOCK_SIZE - 1) / HEIGHT_BLOCK_SIZE;
    gridDim.z = 1;

    dim3 blockDim;
    blockDim.x = WIDTH_BLOCK_SIZE;
    blockDim.y = HEIGHT_BLOCK_SIZE;
    blockDim.z = 1;

    std::cout << "gridDim.x: " << gridDim.x << std::endl;
    std::cout << "gridDim.y: " << gridDim.y << std::endl;
    std::cout << "gridDim.z: " << gridDim.z << std::endl;

    {
        PERF(histogram_naive);
        GPUHistogramNaive<<<gridDim, blockDim>>>(d_input, d_output, img_w, img_h);
        // Kernel launches do not return errors directly — check explicitly.
        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess) {
            std::cout << "kernel launch failed: " << cudaGetErrorString(err) << std::endl;
        }
        cudaDeviceSynchronize();
        cudaMemcpy(h_output, d_output, 256 * sizeof(int), cudaMemcpyDeviceToHost);
    }

    // Verify GPU result against the CPU reference, bin by bin.
    int mismatches = 0;
    for (int i = 0; i < 256; ++i) {
        if (histogram[i] != h_output[i]) {
            std::cout << "error: " << i << " histogram: " << histogram[i] << ", h_output: " << h_output[i] << std::endl;
            ++mismatches;
        }
    }

    // Release host and device resources (all leaked in the original).
    free(h_output);
    free(input);
    cudaFree(d_output);
    cudaFree(d_input);

    // Nonzero exit status on mismatch so CI/scripts can detect failure.
    return mismatches == 0 ? 0 : 1;
}