//
// Created by 梅朝阳 on 2021/6/26.
//
#include <cstdio>
#include "common.cuh"
#include "error/handel_error.cuh"


// 2D "same"-size convolution with implicit zero padding: each thread
// computes exactly one output pixel.
//
// Expects a 1D launch with at least width*height total threads.
//   img:    device pointer, row-major width x height input image
//   kernel: device pointer, kernel_size x kernel_size weights
//           (kernel_size should be odd so the window is centered)
//   result: device pointer, row-major width x height output (overwritten)
__global__ void conv(float *img, float *kernel, float *result,
                     int width, int height, int kernel_size){
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the grid tail: the grid rarely divides width*height evenly.
    if(id >= width * height){
        return;
    }
    int row = id / width;
    int col = id % width;
    int half = kernel_size / 2;

    // Accumulate into a register and store once, so the output does not
    // depend on result[]'s previous (possibly uninitialized) contents.
    float sum = 0.0f;
    for(int i = 0; i < kernel_size; ++i){
        for(int j = 0; j < kernel_size; ++j){
            int cur_row = row - half + i;
            int cur_col = col - half + j;
            // Zero padding: skip taps outside the image. Fixed off-by-one:
            // the right-edge test was `cur_col > width`, which allowed an
            // out-of-bounds read at cur_col == width.
            if(cur_row < 0 || cur_row >= height || cur_col < 0 || cur_col >= width){
                continue;
            }
            sum += img[cur_row * width + cur_col] * kernel[i * kernel_size + j];
        }
    }
    result[id] = sum;
}


int main(){
    // Initialize a test image with a repeating diagonal gradient pattern.
    const int width = 1920;
    const int height = 1080;
    float *img = new float[width * height];
    for(int row = 0; row < height; ++row){
        for(int col = 0; col < width; ++col){
            img[row * width + col] = (col + row) % 256;
        }
    }
    // Initialize the convolution kernel: every row is {-1, 0, 1}
    // (a simple horizontal-gradient filter).
    const int kernel_size = 3;
    float *kernel = new float[kernel_size * kernel_size];
    for(int i = 0; i < kernel_size * kernel_size; ++i){
        kernel[i] = i % kernel_size - 1;
    }

    // Allocate device memory and copy the inputs host -> device.
    float *img_gpu = nullptr;
    float *kernel_gpu = nullptr;
    float *result_gpu = nullptr;

    HANDLE_ERROR(cudaMalloc((void**)&img_gpu, width * height * sizeof(float)));
    HANDLE_ERROR(cudaMalloc((void**)&kernel_gpu, kernel_size * kernel_size * sizeof(float)));
    HANDLE_ERROR(cudaMalloc((void**)&result_gpu, width * height * sizeof(float)));

    HANDLE_ERROR(cudaMemcpy(img_gpu, img, width * height * sizeof(float), cudaMemcpyHostToDevice));
    HANDLE_ERROR(cudaMemcpy(kernel_gpu, kernel, kernel_size * kernel_size * sizeof(float), cudaMemcpyHostToDevice));
    // cudaMalloc does NOT zero memory; the conv kernel accumulates into
    // result, so it must start from zero or the output is garbage.
    HANDLE_ERROR(cudaMemset(result_gpu, 0, width * height * sizeof(float)));

    // Launch configuration: one thread per output pixel, ceil-div blocks.
    int thread_num = getThreadNum();
    int block_num = (width * height + thread_num - 1) / thread_num;

    conv<<<block_num, thread_num>>>(img_gpu, kernel_gpu, result_gpu, width, height, kernel_size);
    // Kernel launches don't return a status; fetch launch errors explicitly.
    HANDLE_ERROR(cudaGetLastError());

    float *result = new float[width * height];
    // Blocking cudaMemcpy also synchronizes with the kernel above.
    HANDLE_ERROR(cudaMemcpy(result, result_gpu, width * height * sizeof(float), cudaMemcpyDeviceToHost));

    // Visualization: print the top-left 10x10 corner of each array.
    printf("image:\n");
    for(int row = 0; row < 10; ++row){
        for(int col = 0; col < 10; ++col){
            printf("%2.0f ", img[col + row * width]);
        }
        printf("\n");
    }
    printf("kernel:\n");
    for(int row = 0; row < kernel_size; ++row){
        for(int col = 0; col < kernel_size; ++col){
            printf("%2.0f ", kernel[col + row * kernel_size]);
        }
        printf("\n");
    }
    printf("result:\n");
    for(int row = 0; row < 10; ++row){
        for(int col = 0; col < 10; ++col){
            printf("%2.0f ", result[col + row * width]);
        }
        printf("\n");
    }

    // Release device and host memory (the original leaked all of these).
    HANDLE_ERROR(cudaFree(img_gpu));
    HANDLE_ERROR(cudaFree(kernel_gpu));
    HANDLE_ERROR(cudaFree(result_gpu));
    delete[] img;
    delete[] kernel;
    delete[] result;
    return 0;
}

