#include <cmath>    // std::fabs (result comparison in main)
#include <cstdlib>  // std::rand, std::srand, malloc, free
#include <ctime>
#include <iostream>
#include <random>
#include <string>   // std::string (Perf label)

#include <cuda_runtime.h>

// Thread-block shape used for the 2-D kernel launches in main():
// WIDTH_BLOCK_SIZE threads along x (the contiguous image dimension, so a warp
// reads consecutive addresses) and HEIGHT_BLOCK_SIZE along y.
// Bandwidth figures noted by the author for this kernel:
// WIDTH_BLOCK_SIZE=32: BW=123GB/S
// WIDTH_BLOCK_SIZE=128: BW=320GB/S
const int WIDTH_BLOCK_SIZE = 128;
const int HEIGHT_BLOCK_SIZE = 8;

// PERF(name): declares a scoped Perf timer; its destructor prints the elapsed
// time when the enclosing scope ends. Two-level concatenation is required so
// that __COUNTER__ expands to its value before pasting — a direct
// perf_##name##__COUNTER__ pastes the literal token "__COUNTER__", which would
// collide if the same name were timed twice in one scope.
#define PERF_CONCAT_INNER(a, b) a##b
#define PERF_CONCAT(a, b) PERF_CONCAT_INNER(a, b)
#define PERF(name) Perf PERF_CONCAT(perf_##name##_, __COUNTER__)(#name)

// RAII timer around a pair of CUDA events: the constructor records a start
// event, the destructor records an end event, synchronizes on it, and prints
// the elapsed milliseconds to stdout. The measurement covers all work
// submitted to the default stream between construction and destruction.
class Perf
{
public:
    Perf(const std::string& name) : m_name(name) {
        cudaEventCreate(&m_start);
        cudaEventCreate(&m_end);
        cudaEventRecord(m_start);
        cudaEventSynchronize(m_start);
    }

    ~Perf() {
        cudaEventRecord(m_end);
        cudaEventSynchronize(m_end);
        float elapsed_time = 0.0f;
        cudaEventElapsedTime(&elapsed_time, m_start, m_end);
        std::cout << m_name << " elapse: " << elapsed_time << " ms" << std::endl;
        // Destroy the events: without this every timer leaks two CUDA events.
        cudaEventDestroy(m_start);
        cudaEventDestroy(m_end);
    }

    // Non-copyable: a copy would destroy the same events twice.
    Perf(const Perf&) = delete;
    Perf& operator=(const Perf&) = delete;

private:
    std::string m_name;          // label printed with the measurement
    cudaEvent_t m_start, m_end;  // start/end event pair bracketing the scope
}; // class Perf

// Geometry of a 2-D convolution. The conv routines in this file use stride 1
// and zero padding of kernel_size/2, so the output feature map has the same
// spatial size as the input.
struct  Conv2dAttributes {
    int in_w;        // input feature-map width
    int in_h;        // input feature-map height
    int kernel_size; // square kernel edge length (3 for a 3x3 kernel)
    int in_c;        // input channel count (read only by CPUConv2dN1CHW)
    int out_c;       // output channel count (read only by CPUConv2dN1CHW)
};

// Naive 3x3 "same" convolution of a single-channel feature map; one thread
// per output pixel. Expected launch: a 2-D grid covering attr.in_w x attr.in_h
// threads (x = width, the contiguous dimension). attr is passed BY VALUE: a
// reference parameter on a __global__ kernel would carry a host address that
// the device must not dereference. Taps falling outside the image are
// skipped, which is equivalent to zero padding.
__global__ void GPUConv2d3x3(const float* input, const float* kernel, float* output, // 308GB/s
    Conv2dAttributes attr)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;

    // Guard the grid tail: the grid may over-cover the feature map.
    if (col >= attr.in_w || row >= attr.in_h) {
        return;
    }

    const int half_k = 1; // 3 / 2: kernel radius
    float sum = 0.0f;
    // i walks the kernel along x, j along y; kernel layout is kernel[i*3 + j],
    // matching the CPU reference.
    #pragma unroll
    for (int i = 0; i < 3; ++i) {
        const int x = col - half_k + i;
        if (x < 0 || x >= attr.in_w) {
            continue;
        }
        #pragma unroll
        for (int j = 0; j < 3; ++j) {
            const int y = row - half_k + j;
            if (y < 0 || y >= attr.in_h) {
                continue;
            }
            sum += input[y * attr.in_w + x] * kernel[i * 3 + j];
        }
    }
    output[row * attr.in_w + col] = sum;
}

// 3x3 "same" convolution with the 9 kernel weights staged in shared memory so
// every tap reads shared memory instead of global. One thread per output
// pixel; expected launch: 2-D grid covering attr.in_w x attr.in_h threads.
// attr is passed BY VALUE — a reference parameter would carry a host address
// into device code.
//
// Fixes versus the previous version: the bounds guard no longer returns
// before __syncthreads() (a barrier must be reached by every thread of the
// block, so an early exit is UB whenever the grid overhangs the image), and
// the unused `feature[32][32]` tile — which was also written with threadIdx.x
// up to blockDim.x-1, past its declared row width — is removed.
__global__ void GPUConv2d3x3KernelSHM(const float* input, const float* kernel, float* output, // 308GB/s
    Conv2dAttributes attr)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;

    // Stage the 3x3 kernel; layout matches kernel[i * 3 + j] below.
    __shared__ float shm_kernel[3][3];
    if (threadIdx.x < 3 && threadIdx.y < 3) {
        shm_kernel[threadIdx.x][threadIdx.y] = kernel[threadIdx.x * 3 + threadIdx.y];
    }

    // All threads of the block reach the barrier, including those that will
    // be masked out by the bounds check just after it.
    __syncthreads();

    if (col >= attr.in_w || row >= attr.in_h) {
        return;
    }

    const int half_k = 1; // 3 / 2: kernel radius
    float sum = 0.0f;
    // Out-of-bounds taps are skipped, equivalent to zero padding.
    #pragma unroll
    for (int i = 0; i < 3; ++i) {
        const int x = col - half_k + i;
        if (x < 0 || x >= attr.in_w) {
            continue;
        }
        #pragma unroll
        for (int j = 0; j < 3; ++j) {
            const int y = row - half_k + j;
            if (y < 0 || y >= attr.in_h) {
                continue;
            }
            sum += input[y * attr.in_w + x] * shm_kernel[i][j];
        }
    }
    output[row * attr.in_w + col] = sum;
}

// CPU reference for a multi-channel 2-D convolution, batch = 1, NCHW layout.
// Stride is 1 with zero padding of kernel_size/2, so each of the out_c output
// planes has the same in_h x in_w size as the input planes.
// Weight layout: kernel[oc][ic][ky][kx]; output layout: output[oc][y][x].
void CPUConv2dN1CHW(const float* input, const float* kernel, float* output,
    Conv2dAttributes& attr)
{
    const int pad = attr.kernel_size / 2;
    const int plane = attr.in_h * attr.in_w;                 // elements per channel plane
    const int k_area = attr.kernel_size * attr.kernel_size;  // weights per (oc, ic) pair

    for (int oc = 0; oc < attr.out_c; ++oc) {
        float* dst = output + oc * plane;
        const float* w_oc = kernel + oc * attr.in_c * k_area;
        for (int y = 0; y < attr.in_h; ++y) {
            for (int x = 0; x < attr.in_w; ++x) {
                float acc = 0.0f;
                // Sum each input channel's windowed dot product at (x, y).
                for (int ic = 0; ic < attr.in_c; ++ic) {
                    const float* w_ic = w_oc + ic * k_area;
                    const float* src = input + ic * plane;
                    for (int ky = 0; ky < attr.kernel_size; ++ky) {
                        const int sy = y + ky - pad;
                        if (sy < 0 || sy >= attr.in_h) {
                            continue;  // row of taps falls in the zero padding
                        }
                        for (int kx = 0; kx < attr.kernel_size; ++kx) {
                            const int sx = x + kx - pad;
                            if (sx < 0 || sx >= attr.in_w) {
                                continue;  // tap falls in the zero padding
                            }
                            acc += src[sy * attr.in_w + sx] * w_ic[ky * attr.kernel_size + kx];
                        }
                    }
                }
                dst[y * attr.in_w + x] = acc;
            }
        }
    }
}


// CPU reference for a single-channel 2-D "same" convolution: stride 1, zero
// padding of kernel_size/2, so the output is in_h x in_w like the input.
// Despite the name it handles any odd attr.kernel_size. Kernel is indexed as
// kernel[i * kernel_size + j] with i along x and j along y, matching the GPU
// kernels above.
void CPUConv2d3x3(const float* input, const float* kernel, float* output,
    Conv2dAttributes& attr)
{
    const int pad = attr.kernel_size / 2;

    // One output element per (y, x); taps outside the feature map are
    // skipped, which is equivalent to multiplying against zero padding.
    for (int y = 0; y < attr.in_h; ++y) {
        for (int x = 0; x < attr.in_w; ++x) {
            float acc = 0.0f;
            for (int ki = 0; ki < attr.kernel_size; ++ki) {
                const int sx = x - pad + ki;
                if (sx < 0 || sx >= attr.in_w) {
                    continue;
                }
                for (int kj = 0; kj < attr.kernel_size; ++kj) {
                    const int sy = y - pad + kj;
                    if (sy < 0 || sy >= attr.in_h) {
                        continue;
                    }
                    acc += input[sy * attr.in_w + sx] * kernel[ki * attr.kernel_size + kj];
                }
            }
            output[y * attr.in_w + x] = acc;
        }
    }
}


// Benchmarks a 3x3 "same" convolution on a 1024x1024 single-channel feature
// map: fills random input and weights, computes a CPU reference, runs the
// shared-memory GPU kernel, and compares the results element-wise.
int main(int argc, char* argv[])
{
    std::cout << "start to run conv2d 3x3 benchmark" << std::endl;

    std::srand(static_cast<unsigned>(std::time(NULL)));

    // in_c/out_c stay zero-initialized; the single-channel paths used below
    // never read them.
    Conv2dAttributes attr{
        .in_w = 1024,
        .in_h = 1024,
        .kernel_size = 3,
    };

    const size_t eln = static_cast<size_t>(attr.in_w) * attr.in_h;
    const size_t k_eln = static_cast<size_t>(attr.kernel_size) * attr.kernel_size;

    // Host input, uniform random values in [0, 1].
    float* input = (float*)malloc(eln * sizeof(float));
    for (size_t i = 0; i < eln; ++i) {
        input[i] = std::rand() * 1.0f / RAND_MAX;
    }

    float* kernel = (float*)malloc(k_eln * sizeof(float));
    for (size_t i = 0; i < k_eln; ++i) {
        kernel[i] = std::rand() * 1.0f / RAND_MAX;
    }

    float* cpu_output = (float*)malloc(eln * sizeof(float));

    {
        PERF(cpu_conv2d3x3);
        CPUConv2d3x3(input, kernel, cpu_output, attr);
    }

    // Device buffers mirroring the host ones.
    float* d_input = nullptr;
    cudaMalloc((void**)&d_input, eln * sizeof(float));
    cudaMemcpy(d_input, input, eln * sizeof(float), cudaMemcpyHostToDevice);

    float* d_kernel = nullptr;
    cudaMalloc((void**)&d_kernel, k_eln * sizeof(float));
    cudaMemcpy(d_kernel, kernel, k_eln * sizeof(float), cudaMemcpyHostToDevice);

    float* d_output = nullptr;
    cudaMalloc((void**)&d_output, eln * sizeof(float));
    float* gpu_output = (float*)malloc(eln * sizeof(float));

    // One thread per output pixel; ceil-division keeps the grid covering the
    // image even if a dimension is not a multiple of the block size.
    dim3 blockDim(WIDTH_BLOCK_SIZE, HEIGHT_BLOCK_SIZE, 1);
    dim3 gridDim((attr.in_w + WIDTH_BLOCK_SIZE - 1) / WIDTH_BLOCK_SIZE,
                 (attr.in_h + HEIGHT_BLOCK_SIZE - 1) / HEIGHT_BLOCK_SIZE,
                 1);

    std::cout << "gridDim.x: " << gridDim.x << std::endl;
    std::cout << "gridDim.y: " << gridDim.y << std::endl;
    std::cout << "gridDim.z: " << gridDim.z << std::endl;

    {
        PERF(gpu_conv2d3x3);
        GPUConv2d3x3KernelSHM<<<gridDim, blockDim>>>(d_input, d_kernel, d_output, attr);
        // Launch-configuration errors only surface via cudaGetLastError();
        // a kernel launch itself returns nothing.
        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess) {
            std::cout << "kernel launch failed: " << cudaGetErrorString(err) << std::endl;
        }
        cudaDeviceSynchronize();
        cudaMemcpy(gpu_output, d_output, eln * sizeof(float), cudaMemcpyDeviceToHost);
    }

    // Element-wise comparison against the CPU reference. 10e-6 (= 1e-5) is an
    // absolute tolerance; CPU and GPU accumulate in different orders, so
    // bit-exact equality is not expected.
    size_t mismatches = 0;
    for (size_t i = 0; i < eln; ++i) {
        if (std::fabs(cpu_output[i] - gpu_output[i]) > 10e-6) {
            ++mismatches;
            std::cout << "error: " << i << " cpu_output: " << cpu_output[i]
                      << ", gpu_output: " << gpu_output[i] << std::endl;
        }
    }
    if (mismatches == 0) {
        std::cout << "all " << eln << " elements match" << std::endl;
    }

    // Release device and host memory (previously leaked).
    cudaFree(d_input);
    cudaFree(d_kernel);
    cudaFree(d_output);
    free(input);
    free(kernel);
    free(cpu_output);
    free(gpu_output);

    return 0;
}