#include <cmath>      // std::fabs (output comparison in main)
#include <cstdio>     // printf / fprintf
#include <cstdlib>    // malloc / free / rand
#include <ctime>
#include <iostream>
#include <random>
#include <string>     // std::string (Perf / PerfCPU)

#include <sys/time.h>

#include <cuda_runtime.h>
#include <cudnn.h>

// Thread-block shape shared by all GPU kernels below
// (WIDTH_BLOCK_SIZE * HEIGHT_BLOCK_SIZE = 256 threads per block).
// Measured bandwidth at two tile widths (original author's numbers):
// WIDTH_BLOCK_SIZE=32: BW=123GB/S
// WIDTH_BLOCK_SIZE=128: BW=320GB/S
const int WIDTH_BLOCK_SIZE = 32;
const int HEIGHT_BLOCK_SIZE = 8;

// Problem configuration: 3x2048x2048 input, 3 output channels,
// 3x3 kernel, zero padding, unit stride and dilation.
#define CONV_in_w  (2048) 
#define CONV_in_h  (2048)
#define CONV_in_c  (3)
#define CONV_out_c (3)

#define CONV_kernel_w  (3)
#define CONV_kernel_h  (3)
#define CONV_dilation_w (1) 
#define CONV_dilation_h (1)
#define CONV_stride_w  (1)
#define CONV_stride_h  (1)
#define CONV_pad_w  (0)
#define CONV_pad_h  (0)

// Output extent; k/2*2*dilation is the reach of an odd-sized dilated kernel.
// NOTE(review): this equals the conventional floor((in + 2p - eff_k)/s) + 1
// only for stride 1 (as configured here) — re-derive before changing
// CONV_stride_w/h.
#define CONV_out_w ((CONV_in_w + CONV_pad_w * 2 - CONV_kernel_w / 2 * 2 * CONV_dilation_w) / CONV_stride_w)
#define CONV_out_h ((CONV_in_h + CONV_pad_h * 2 - CONV_kernel_h / 2 * 2 * CONV_dilation_h) / CONV_stride_h)

// Runtime description of one 2D convolution (NCHW layout, batch size 1).
// main() copies an instance into device memory so the non-template kernels
// can read the same parameters the CPU reference uses.
struct  Conv2dAttributes {
    int in_w;         // input width
    int in_h;         // input height
    int kernel_w;     // filter width
    int kernel_h;     // filter height
    int in_c;         // input channels
    int out_c;        // output channels (= number of filters)
    int dilation_w;   // horizontal dilation
    int dilation_h;   // vertical dilation
    int stride_w;     // horizontal stride
    int stride_h;     // vertical stride
    int pad_w;        // zero-padding on each horizontal side
    int pad_h;        // zero-padding on each vertical side
    int out_w;        // output width (filled in by the caller)
    int out_h;        // output height (filled in by the caller)
    float pad_value;  // value substituted for out-of-bounds input samples
};

// Two-level concatenation so that __COUNTER__ is macro-expanded to its value
// before token pasting. Fix: the original pasted __COUNTER__ directly
// (name##__COUNTER__), which produces the literal identifier suffix
// "__COUNTER__" — no unique counter, and two timers with the same name in one
// scope would collide.
#define PERF_CONCAT_IMPL(a, b) a##b
#define PERF_CONCAT(a, b) PERF_CONCAT_IMPL(a, b)
// Scoped GPU timer (CUDA events): prints "<name> elapse: <ms> ms" at scope exit.
#define PERF(name) Perf PERF_CONCAT(perf_##name##_, __COUNTER__)(#name)
// Scoped CPU wall-clock timer: prints "<name> elapse: <ms> ms" at scope exit.
#define PERF_CPU(name) PerfCPU PERF_CONCAT(perf_CPU_##name##_, __COUNTER__)(#name)

// RAII wall-clock timer: records the construction time with gettimeofday()
// and, on destruction, prints "<name> elapse: <ms> ms" to stdout.
class PerfCPU
{
public:
    PerfCPU(const std::string& name) : m_name(name) {
        gettimeofday(&m_start, NULL);
    }

    ~PerfCPU() {
        gettimeofday(&m_end, NULL);
        const float ms = (m_end.tv_sec - m_start.tv_sec) * 1000.0 +
                         (m_end.tv_usec - m_start.tv_usec) / 1000.0;
        std::cout << m_name << " elapse: " << ms << " ms" << std::endl;
    }

private:
    std::string m_name;           // label printed with the measurement
    struct timeval m_start, m_end;  // wall-clock stamps at construction/destruction
}; // class PerfCPU

// RAII GPU timer based on CUDA events: records a start event at construction
// and, on destruction, records and synchronizes an end event, then prints the
// elapsed GPU time in ms. Events are recorded on the default stream.
class Perf
{
public:
    Perf(const std::string& name) {
        m_name = name;
        cudaEventCreate(&m_start);
        cudaEventCreate(&m_end);
        cudaEventRecord(m_start);
        cudaEventSynchronize(m_start);
    }

    ~Perf() {
        cudaEventRecord(m_end);
        cudaEventSynchronize(m_end);
        float elapsed_time = 0.0f;
        cudaEventElapsedTime(&elapsed_time, m_start, m_end);
        std::cout << m_name << " elapse: " << elapsed_time << " ms" << std::endl;
        // Fix: the original leaked both events; release them with the timer.
        cudaEventDestroy(m_start);
        cudaEventDestroy(m_end);
    }

private:
    std::string m_name;          // label printed with the measurement
    cudaEvent_t m_start, m_end;  // events bracketing the timed scope
}; // class Perf


// NCHW coordinate of a single tensor element.
struct TensorIndex
{
    int n;  // batch index
    int c;  // channel
    int h;  // row
    int w;  // column
};


// Map an output coordinate to the top-left input coordinate of its receptive
// field (apply stride, subtract padding). Only src->h and src->w are written;
// src->n and src->c are deliberately left untouched — callers read only h/w.
__device__ void MapDstIdxToSrcIdx(const TensorIndex* dst, const Conv2dAttributes* attr, TensorIndex* src)
{
    const int top  = dst->h * attr->stride_h - attr->pad_h;
    const int left = dst->w * attr->stride_w - attr->pad_w;
    src->h = top;
    src->w = left;
}

// Value contributed by out-of-bounds (padding) input samples in the template
// kernels. NOTE(review): a host-side const read from device code — works here
// presumably because the initializer is a compile-time constant; confirm if
// this is ever made non-const or runtime-initialized.
const float pad_value = 0.0;

// Conv2d for N=1, NCHW layout, with the current output channel's filter
// staged in static shared memory. Expected launch (same as the siblings):
// blockDim = (WIDTH_BLOCK_SIZE, HEIGHT_BLOCK_SIZE, 1), gridDim.z == out_c,
// no dynamic shared memory.
//
// Fixes vs. original:
//  - it declared `__shared__ float [CONV_in_c][CONV_in_h][CONV_in_w]`
//    (~48 MB), far beyond the per-block shared-memory limit — the kernel
//    could not compile/launch;
//  - the entire compute section was commented out, so no output was written;
//  - no __syncthreads() between the cooperative filter load and its use.
// The input-tile caching the original sketched is left as a TODO; the filter
// cache and the convolution itself are implemented below, mirroring the
// working GPUConv2dN1CHW_template kernel.
template <int in_w, int in_h, int kernel_w, int kernel_h, int in_c,
    int out_c, int dilation_w, int dilation_h, int stride_w, int stride_h,
    int pad_w, int pad_h, int out_w, int out_h>
__global__ void GPUConv2dN1CHW_cache_input(float* input, float* kernel, float* output)
{
    // Output coordinate produced by this thread.
    int idx_w = blockIdx.x * blockDim.x + threadIdx.x;
    int idx_h = blockIdx.y * blockDim.y + threadIdx.y;
    int idx_c = blockIdx.z * blockDim.z + threadIdx.z;

    // One complete filter (all input channels) for this block's output
    // channel; blockDim.z == 1, so idx_c is uniform across the block.
    const int per_kernel_eln = kernel_w * kernel_h * in_c;
    const float* kernel_co_ptr = kernel + idx_c * per_kernel_eln;

    // Cooperative load: block threads stripe over the filter elements.
    __shared__ float kernel_cache[in_c * kernel_h * kernel_w];
    int cache_idx = threadIdx.y * WIDTH_BLOCK_SIZE + threadIdx.x;
    for (int load_idx = cache_idx; load_idx < per_kernel_eln; load_idx += WIDTH_BLOCK_SIZE * HEIGHT_BLOCK_SIZE) {
        kernel_cache[load_idx] = kernel_co_ptr[load_idx];
    }
    // Barrier before any read of the cache; placed before the bounds check so
    // every thread of the block (including out-of-range ones) reaches it.
    __syncthreads();

    // TODO: also stage the input tile, (WIDTH_BLOCK_SIZE + halo) x
    // (HEIGHT_BLOCK_SIZE + halo) per channel, in shared memory.

    if (idx_w >= out_w || idx_h >= out_h || idx_c >= out_c) {
        return;
    }

    // Top-left corner of this output element's receptive field in the input.
    const int lt_h = idx_h * stride_h - pad_h;
    const int lt_w = idx_w * stride_w - pad_w;

    float sum = 0.0f;
    for (int ci = 0; ci < in_c; ++ci) {
        const float* input_c_ptr = input + ci * in_h * in_w;
        const float* kernel_c_ptr = kernel_cache + ci * kernel_h * kernel_w;
        for (int k_h = 0; k_h < kernel_h; ++k_h) {
            int index_y = lt_h + k_h * dilation_h;
            for (int k_w = 0; k_w < kernel_w; ++k_w) {
                int index_x = lt_w + k_w * dilation_w;
                if (index_x >= 0 && index_x < in_w && index_y >= 0 && index_y < in_h) {
                    sum += input_c_ptr[index_y * in_w + index_x] * kernel_c_ptr[k_h * kernel_w + k_w];
                } else {
                    sum += pad_value * kernel_c_ptr[k_h * kernel_w + k_w];
                }
            }
        }
    }

    output[idx_c * out_h * out_w + idx_h * out_w + idx_w] = sum;
}


// Reference convolution via cuDNN, used to validate the hand-written kernels.
// Takes host pointers: stages input/kernel on the device, runs one forward
// pass (pad 0, stride 1, dilation 1), prints the GPU time of the
// cudnnConvolutionForward call, and copies the result back into `output`.
//
// Fixes vs. original:
//  - mode was CUDNN_CONVOLUTION, which flips the filter and therefore can
//    never match the custom kernels (they compute a cross-correlation, i.e.
//    no flip); CUDNN_CROSS_CORRELATION makes the outputs comparable;
//  - device buffers and CUDA events were leaked, and the descriptors/handle
//    were leaked on the early-error path — all released now.
void CUDNNConv2d(float* input, float* kernel, float* output)
{
    const int N = 1;             // batch size
    const int inC = CONV_in_c;
    const int inH = CONV_in_h;
    const int inW = CONV_in_w;
    const int outC = CONV_out_c; // one filter per output channel
    const int kernelH = CONV_kernel_h;
    const int kernelW = CONV_kernel_w;
    // "valid" output extent (pad 0, stride 1).
    const int outH = inH - kernelH + 1;
    const int outW = inW - kernelW + 1;

    /* ---- CUDNN CONV BEGIN ----*/
    // Create the cuDNN handle and tensor/filter descriptors.
    cudnnHandle_t handle;
    cudnnCreate(&handle);
    cudnnTensorDescriptor_t input_desc;
    cudnnCreateTensorDescriptor(&input_desc);
    cudnnSetTensor4dDescriptor(input_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
                               N, inC, inH, inW);

    cudnnTensorDescriptor_t output_desc;
    cudnnCreateTensorDescriptor(&output_desc);
    cudnnSetTensor4dDescriptor(output_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
                               N, outC, outH, outW);

    cudnnFilterDescriptor_t kernel_desc;
    cudnnCreateFilterDescriptor(&kernel_desc);
    cudnnSetFilter4dDescriptor(kernel_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
                               outC, inC, kernelH, kernelW); // k=outC, c=inC, then h, w

    // Convolution descriptor: pad_h, pad_w, vertical stride (u), horizontal
    // stride (v), dilation_h, dilation_w. CUDNN_CROSS_CORRELATION: do not
    // flip the filter, matching the custom kernels above.
    cudnnConvolutionDescriptor_t conv_desc;
    cudnnCreateConvolutionDescriptor(&conv_desc);
    cudnnSetConvolution2dDescriptor(conv_desc, 0, 0, 1, 1, 1, 1,
                                    CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT);

    // Query workspace size for the fixed algorithm choice.
    size_t space_size = 0;
    cudnnConvolutionFwdAlgo_t alg_kind = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM;
    cudnnStatus_t Error =
        cudnnGetConvolutionForwardWorkspaceSize(handle, input_desc,
                                                kernel_desc, conv_desc, output_desc,
                                                alg_kind,
                                                &space_size);

    if (Error != CUDNN_STATUS_SUCCESS)
    {
        fprintf(stderr, "calc spacesize failed, err: %d!\n", Error);
        cudnnDestroyTensorDescriptor(input_desc);
        cudnnDestroyTensorDescriptor(output_desc);
        cudnnDestroyFilterDescriptor(kernel_desc);
        cudnnDestroyConvolutionDescriptor(conv_desc);
        cudnnDestroy(handle);
        return;
    }

    void *workspace = nullptr;
    cudaMalloc(&workspace, space_size);  // may legitimately be 0 bytes for IMPLICIT_GEMM

    // Stage host data on the device.
    auto alpha = 1.0f;  // output = alpha * conv(input, kernel) + beta * output
    auto beta = 0.0f;
    size_t input_size = N * inC * inH * inW * sizeof(float);
    size_t kernel_size = outC * inC * kernelH * kernelW * sizeof(float);
    size_t output_size = N * outC * outH * outW * sizeof(float);

    void *dev_input = nullptr;
    cudaMalloc(&dev_input, input_size);
    cudaMemcpy(dev_input, input, input_size, cudaMemcpyHostToDevice);
    void *dev_kernel = nullptr;
    cudaMalloc(&dev_kernel, kernel_size);
    cudaMemcpy(dev_kernel, kernel, kernel_size, cudaMemcpyHostToDevice);
    void *dev_output = nullptr;
    cudaMalloc(&dev_output, output_size);

    // Time only the forward call with CUDA events.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);

    Error = cudnnConvolutionForward(handle,
                                    &alpha, input_desc,
                                    dev_input,
                                    kernel_desc, dev_kernel,
                                    conv_desc,
                                    alg_kind,
                                    workspace,
                                    space_size,
                                    &beta,
                                    output_desc,
                                    dev_output);

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal, start, stop);

    cudaMemcpy(output, dev_output, output_size, cudaMemcpyDeviceToHost);
    printf("cudnn cost time: %f\n", msecTotal);

    if (Error != CUDNN_STATUS_SUCCESS)
    {
        fprintf(stderr, "cudnn forward failed!\n");
    }

    // Release everything (the original leaked the buffers and events).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dev_input);
    cudaFree(dev_kernel);
    cudaFree(dev_output);
    cudaFree(workspace);
    cudnnDestroyTensorDescriptor(input_desc);
    cudnnDestroyTensorDescriptor(output_desc);
    cudnnDestroyFilterDescriptor(kernel_desc);
    cudnnDestroyConvolutionDescriptor(conv_desc);
    cudnnDestroy(handle);
}



// Conv2d, N=1, NCHW, with all shape parameters fixed at compile time via the
// template arguments. Each thread produces one output element; the filter for
// this block's output channel is staged in dynamically-sized shared memory
// (the launch must pass kernel_w * kernel_h * in_c * sizeof(float) bytes).
// Expected launch: blockDim = (WIDTH_BLOCK_SIZE, HEIGHT_BLOCK_SIZE, 1),
// gridDim.z == out_c.
template <int in_w, int in_h, int kernel_w, int kernel_h, int in_c,
    int out_c, int dilation_w, int dilation_h, int stride_w, int stride_h,
    int pad_w, int pad_h, int out_w, int out_h>
__global__ void GPUConv2dN1CHW_template(float* input, float* kernel, float* output)
{
    // Output coordinate handled by this thread.
    const int col  = blockIdx.x * blockDim.x + threadIdx.x;
    const int row  = blockIdx.y * blockDim.y + threadIdx.y;
    const int chan = blockIdx.z * blockDim.z + threadIdx.z;

    // Stage this output channel's filter (all in_c planes) in shared memory;
    // blockDim.z == 1, so `chan` is uniform within the block.
    const int filter_elems = kernel_w * kernel_h * in_c;
    const float* filter_src = kernel + chan * filter_elems;

    extern __shared__ float kernel_cache[];
    const int block_threads = WIDTH_BLOCK_SIZE * HEIGHT_BLOCK_SIZE;
    for (int i = threadIdx.y * WIDTH_BLOCK_SIZE + threadIdx.x;
         i < filter_elems; i += block_threads) {
        kernel_cache[i] = filter_src[i];
    }
    // Barrier before any read of the cache; placed before the early return so
    // every thread in the block reaches it.
    __syncthreads();

    if (col >= out_w || row >= out_h || chan >= out_c) {
        return;
    }

    // Top-left corner of the receptive field in input coordinates.
    const int src_top  = row * stride_h - pad_h;
    const int src_left = col * stride_w - pad_w;

    // Accumulate over channels and the kernel window; out-of-bounds samples
    // contribute the global pad_value.
    float acc = 0.0f;
    for (int ci = 0; ci < in_c; ++ci) {
        const float* plane = input + ci * in_h * in_w;
        const float* taps = kernel_cache + ci * kernel_h * kernel_w;
        for (int ky = 0; ky < kernel_h; ++ky) {
            const int y = src_top + ky * dilation_h;
            for (int kx = 0; kx < kernel_w; ++kx) {
                const int x = src_left + kx * dilation_w;
                const bool inside = (x >= 0 && x < in_w && y >= 0 && y < in_h);
                const float sample = inside ? plane[y * in_w + x] : pad_value;
                acc += sample * taps[ky * kernel_w + kx];
            }
        }
    }

    output[chan * out_h * out_w + row * out_w + col] = acc;
}

// Conv2d (N=1, NCHW): one thread per output element, with the current output
// channel's filter staged in dynamically-sized shared memory. Launch
// expectations (from the call site in main): blockDim = (WIDTH_BLOCK_SIZE,
// HEIGHT_BLOCK_SIZE, 1), gridDim.z == attr->out_c, and a dynamic
// shared-memory size of kernel_w * kernel_h * in_c * sizeof(float) bytes.
__global__ void GPUConv2dN1CHW_KernelCache(const float* input, const float* kernel, float* output, const Conv2dAttributes* attr)
{
    // target index
    int idx_w = blockIdx.x * blockDim.x + threadIdx.x;
    int idx_h = blockIdx.y * blockDim.y + threadIdx.y;
    int idx_c = blockIdx.z * blockDim.z + threadIdx.z;

    // Elements in one complete filter (all input channels). blockDim.z == 1,
    // so idx_c is uniform across the block and all threads cooperate on the
    // same filter.
    const int per_kernel_eln = attr->kernel_w * attr->kernel_h * attr->in_c;
    const float* kernel_co_ptr = kernel + idx_c * per_kernel_eln;

    // Cooperative load: the block's threads stripe over the filter elements.
    extern __shared__ float kernel_cache[];
    int cache_idx = threadIdx.y * WIDTH_BLOCK_SIZE + threadIdx.x;
    // printf("out cache_idx: %d\n", cache_idx);
    for (;cache_idx < per_kernel_eln; cache_idx += WIDTH_BLOCK_SIZE * HEIGHT_BLOCK_SIZE) {
        kernel_cache[cache_idx] = kernel_co_ptr[cache_idx];
        // printf("cache_idx: %d, kernel_cache: %f\n", cache_idx, kernel_cache[cache_idx]);
    }
    // Barrier before any read of the cache; deliberately placed before the
    // bounds early-return so every thread in the block reaches it.
    __syncthreads();

    if (idx_w >= attr->out_w || idx_h >= attr->out_h || idx_c >= attr->out_c) {
        // printf("dst idx_h: %d, idx_w: %d, idx_c: %d, dst max: h: %d, w: %d, c: %d\n", idx_h, idx_w, idx_c, attr->out_h, attr->out_w, attr->out_c);
        return;
    }

    TensorIndex dst;
    dst.n = 1;
    dst.c = idx_c;
    dst.h = idx_h;
    dst.w = idx_w;

    // Top-left corner of this output element's receptive field in the input.
    TensorIndex lt;
    MapDstIdxToSrcIdx(&dst, attr, &lt);

    // Accumulate over input channels and the kernel window; out-of-bounds
    // samples contribute attr->pad_value.
    float sum = 0.0;
    for (int ci = 0; ci < attr->in_c; ++ci) {
        const float* input_c_ptr = input + ci * attr->in_h * attr->in_w;
        const float* kernel_c_ptr = kernel_cache + ci * attr->kernel_h * attr->kernel_w;
        for (int k_h = 0; k_h < attr->kernel_h; ++k_h) {
            int index_y = lt.h + k_h * attr->dilation_h;
            for (int k_w = 0; k_w < attr->kernel_w; ++k_w) {
                int index_x = lt.w + k_w * attr->dilation_w;
                if (index_x >= 0 && index_x < attr->in_w && index_y >= 0 && index_y < attr->in_h) {
                    sum += input_c_ptr[index_y * attr->in_w + index_x] * kernel_c_ptr[k_h * attr->kernel_w + k_w];
                } else {
                    sum += attr->pad_value * kernel_c_ptr[k_h * attr->kernel_w + k_w];
                }
            }
        }
    }

    float* output_c_ptr = output + dst.c * attr->out_h * attr->out_w;
    output_c_ptr[dst.h * attr->out_w + dst.w] = sum;

}


// Baseline conv2d kernel (N=1, NCHW): one thread per output element, filter
// and input both read straight from global memory. `attr` lives in device
// memory and supplies all shape/stride/pad/dilation parameters at runtime.
__global__ void GPUConv2dN1CHW_NAIVE(const float* input, const float* kernel, float* output, const Conv2dAttributes* attr)
{
    // Output coordinate assigned to this thread.
    const int ow = blockIdx.x * blockDim.x + threadIdx.x;
    const int oh = blockIdx.y * blockDim.y + threadIdx.y;
    const int oc = blockIdx.z * blockDim.z + threadIdx.z;

    // Threads past the output extent have nothing to do.
    if (ow >= attr->out_w || oh >= attr->out_h || oc >= attr->out_c) {
        return;
    }

    TensorIndex dst;
    dst.n = 1;
    dst.c = oc;
    dst.h = oh;
    dst.w = ow;

    // Top-left input coordinate of this output element's receptive field.
    TensorIndex lt;
    MapDstIdxToSrcIdx(&dst, attr, &lt);

    const int in_plane_eln = attr->in_h * attr->in_w;
    const int filter_plane_eln = attr->kernel_h * attr->kernel_w;
    const float* filter = kernel + oc * attr->in_c * filter_plane_eln;

    // Accumulate over input channels and the kernel window; out-of-bounds
    // samples contribute attr->pad_value.
    float acc = 0.0;
    for (int ci = 0; ci < attr->in_c; ++ci) {
        const float* in_plane = input + ci * in_plane_eln;
        const float* taps = filter + ci * filter_plane_eln;
        for (int ky = 0; ky < attr->kernel_h; ++ky) {
            const int y = lt.h + ky * attr->dilation_h;
            for (int kx = 0; kx < attr->kernel_w; ++kx) {
                const int x = lt.w + kx * attr->dilation_w;
                if (x >= 0 && x < attr->in_w && y >= 0 && y < attr->in_h) {
                    acc += in_plane[y * attr->in_w + x] * taps[ky * attr->kernel_w + kx];
                } else {
                    acc += attr->pad_value * taps[ky * attr->kernel_w + kx];
                }
            }
        }
    }

    output[oc * attr->out_h * attr->out_w + oh * attr->out_w + ow] = acc;
}

// CPU reference implementation of conv2d for batch size 1, NCHW layout.
// Six nested loops: output channel -> output row -> output column -> input
// channel -> kernel row -> kernel column. Out-of-bounds input samples
// contribute attr.pad_value. Output is written densely, channel by channel.
// NOTE(review): the loop bounds derive the output extent as
// in + pad*2 - (k/2)*2*dilation, which matches the caller's out_w/out_h for
// stride 1 and odd kernels — confirm before reusing with other strides.
//
// Cleanup vs. original: removed the dead `row`/`col` counters and the
// intermediate half_kernel_* locals; translated comments to English.
void CPUConv2dN1CHW(const float* input, const float* kernel, float* output,
    Conv2dAttributes& attr)
{
    // Total reach of an odd-sized dilated kernel beyond its anchor pixel.
    const int off_w = (attr.kernel_w / 2) * attr.dilation_w * 2;
    const int off_h = (attr.kernel_h / 2) * attr.dilation_h * 2;

    for (int co = 0; co < attr.out_c; ++co) {
        float* out_c_ptr = output + co * attr.out_h * attr.out_w;
        const float* kernel_co_ptr = kernel + co * attr.in_c * attr.kernel_h * attr.kernel_w;
        size_t count = 0;  // linear write index within this output channel
        for (int h = -attr.pad_h; h < attr.in_h + attr.pad_h - off_h; h += attr.stride_h) {
            for (int w = -attr.pad_w; w < attr.in_w + attr.pad_w - off_w; w += attr.stride_w) {
                float sum = 0.0;
                for (int ci = 0; ci < attr.in_c; ++ci) {
                    const float* kernel_ci_ptr = kernel_co_ptr + ci * attr.kernel_w * attr.kernel_h;
                    const float* in_c_ptr = input + ci * attr.in_h * attr.in_w;
                    // Evaluate one filter plane, then sum across channels.
                    for (int k_h = 0; k_h < attr.kernel_h; ++k_h) {
                        for (int k_w = 0; k_w < attr.kernel_w; ++k_w) {
                            int index_x = w + k_w * attr.dilation_w;
                            int index_y = h + k_h * attr.dilation_h;
                            if (index_x >= 0 && index_x < attr.in_w && index_y >= 0 && index_y < attr.in_h) {
                                sum += in_c_ptr[index_y * attr.in_w + index_x] * kernel_ci_ptr[k_h * attr.kernel_w + k_w];
                            } else {
                                sum += attr.pad_value * kernel_ci_ptr[k_h * attr.kernel_w + k_w];
                            }
                        }
                    }
                }

                out_c_ptr[count] = sum;
                count++;
            }
        }
    }
}

// Benchmark driver: builds a random input and filter set, runs the CPU
// reference, the three GPU kernels, and cuDNN, then spot-checks the GPU
// output against cuDNN's.
int main(int argc, char* argv[])
{
    std::srand(std::time(NULL));

    // Convolution configuration; mirrors the CONV_* compile-time constants so
    // the non-template kernels can read the same values through a struct.
    Conv2dAttributes attr{
        .in_w = CONV_in_w,
        .in_h = CONV_in_h,
        .kernel_w = CONV_kernel_w,
        .kernel_h = CONV_kernel_h,
        .in_c = CONV_in_c,
        .out_c = CONV_out_c,
        .dilation_w = CONV_dilation_w,
        .dilation_h = CONV_dilation_h,
        .stride_w = CONV_stride_w,
        .stride_h = CONV_stride_h,
        .pad_w = CONV_pad_w,
        .pad_h = CONV_pad_h,
        .pad_value = 0
    };

    Conv2dAttributes* gpu_attr;
    cudaMalloc(&gpu_attr, sizeof(Conv2dAttributes));

    // Random input feature map, values in [0, 1].
    size_t in_eln = attr.in_w * attr.in_h * attr.in_c;
    float* input = (float*)malloc(in_eln * sizeof(float));
    for (size_t i = 0; i < in_eln; ++i) {
        input[i] = std::rand() * 1.0 / RAND_MAX;
    }

    // Output shape: effective kernel extent is k/2*2*dilation + 1 (odd kernels).
    size_t kernel_w_r = attr.kernel_w / 2 * 2 * attr.dilation_w + 1;
    size_t kernel_h_r = attr.kernel_h / 2 * 2 * attr.dilation_h + 1;

    attr.out_w = (attr.in_w + attr.pad_w * 2 - kernel_w_r + 1) / attr.stride_w;
    attr.out_h = (attr.in_h + attr.pad_h * 2 - kernel_h_r + 1) / attr.stride_h;

    std::cout << "attr.out_w: " << attr.out_w << ", attr.out_h: " << attr.out_h << ", attr.out_c: " << attr.out_c << std::endl;

    // Fix: the original multiplied the element count by sizeof(float), making
    // kernel_eln a byte count; every later use treated it as an element
    // count, so the filter was over-allocated, over-initialized, and
    // over-copied by a factor of 4.
    int kernel_eln = attr.kernel_w * attr.kernel_h * attr.in_c * attr.out_c;
    float* kernel = (float*)malloc(kernel_eln * sizeof(float));

    for (int i = 0; i < kernel_eln; ++i) {
        kernel[i] = std::rand() * 1.0 / RAND_MAX;
    }

    int out_eln = attr.out_w * attr.out_h * attr.out_c;
    float* cpu_output = (float*)malloc(out_eln * sizeof(float));
    {
        PERF_CPU(CPUConv2dN1CHW);
        CPUConv2dN1CHW(input, kernel, cpu_output, attr);
    }

    // Device buffers for input, filter, and output.
    float* d_input;
    cudaMalloc((void**)&d_input, in_eln * sizeof(float));
    cudaMemcpy(d_input, input, in_eln * sizeof(float), cudaMemcpyHostToDevice);

    float* d_kernel;
    cudaMalloc((void**)&d_kernel, kernel_eln * sizeof(float));
    cudaMemcpy(d_kernel, kernel, kernel_eln * sizeof(float), cudaMemcpyHostToDevice);

    float* d_output;
    cudaMalloc((void**)&d_output, out_eln * sizeof(float));
    float* gpu_output = (float*)malloc(out_eln * sizeof(float));

    // One thread per output element; gridDim.z indexes the output channel.
    dim3 gridDim;
    gridDim.x = (attr.out_w + WIDTH_BLOCK_SIZE - 1) / WIDTH_BLOCK_SIZE;
    gridDim.y = (attr.out_h + HEIGHT_BLOCK_SIZE - 1) / HEIGHT_BLOCK_SIZE;
    gridDim.z = attr.out_c;

    dim3 blockDim;
    blockDim.x = WIDTH_BLOCK_SIZE;
    blockDim.y = HEIGHT_BLOCK_SIZE;
    blockDim.z = 1;

    cudaMemcpy(gpu_attr, &attr, sizeof(Conv2dAttributes), cudaMemcpyHostToDevice);

    // Dynamic shared memory size: one complete filter (all input channels).
    int per_kernel_eln = attr.kernel_w * attr.kernel_h * attr.in_c;
    std::cout << "per_kernel_eln: " << per_kernel_eln << std::endl;

    std::cout << "CONV_out_w: " << CONV_out_w << ", CONV_out_h: " << CONV_out_h << std::endl;
    {
        PERF(gpu_conv2d3x3);
        GPUConv2dN1CHW_NAIVE<<<gridDim, blockDim>>>(d_input, d_kernel, d_output, gpu_attr);

        GPUConv2dN1CHW_KernelCache<<<gridDim, blockDim, per_kernel_eln * sizeof(float)>>>(d_input, d_kernel, d_output, gpu_attr);

        GPUConv2dN1CHW_template<CONV_in_w, CONV_in_h, CONV_kernel_w, CONV_kernel_h, CONV_in_c, CONV_out_c,
            CONV_dilation_w, CONV_dilation_h, CONV_stride_w, CONV_stride_h, CONV_pad_w, CONV_pad_h, 
            CONV_out_w, CONV_out_h><<<gridDim, blockDim, per_kernel_eln * sizeof(float)>>>(d_input, d_kernel, d_output);

        // GPUConv2dN1CHW_cache_input is left disabled; see its definition.

        cudaDeviceSynchronize();
        cudaMemcpy(gpu_output, d_output, out_eln * sizeof(float), cudaMemcpyDeviceToHost);
    }

    float* cudnn_output = new float[out_eln];
    CUDNNConv2d(input, kernel, cudnn_output);

    // Spot-check the first `count` elements against cuDNN with an absolute
    // tolerance; stop at the first mismatch.
    const int count = 32;
    for (int i = 0; i < out_eln && (i < count); ++i) {
      if (std::fabs(cudnn_output[i] - gpu_output[i]) > 10e-3) {
        std::cout << "error: " << i << " cudnn_output: " << cudnn_output[i] << ", gpu_output: " << gpu_output[i] << std::endl;
        break;
      }
    }

    cudaFree(d_input);
    cudaFree(d_kernel);
    cudaFree(d_output);
    cudaFree(gpu_attr);  // fix: the original never freed the attribute buffer

    free(input);
    free(kernel);
    free(cpu_output);
    free(gpu_output);

    delete [] cudnn_output;

    return 0;
}