#include <cmath>
#include <iostream>

#include <cuda_runtime.h>

#include "../common/common.h"
#define DEVICENUM 0

/**
 * Element-wise vector addition: array_res[i] = array_a[i] + array_b[i].
 *
 * Expects a 1-D grid of 1-D blocks whose total thread count covers the
 * arrays exactly. NOTE(review): there is no bounds guard, so the launch
 * configuration must not over-provision threads — consider passing an
 * element count and adding `if (i < n)` if grid sizes may round up.
 */
__global__ void summaryGPU(float *array_a, float *array_b, float *array_res)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    array_res[i] = array_a[i] + array_b[i];
}

/**
 * Demonstrates asynchronous H2D copy + kernel + D2H copy on one non-default
 * stream, using pinned host memory, then verifies the GPU sum on the CPU.
 */
int main(int argc, char **argv)
{
    // Select the GPU and size the workload (2^15 float elements).
    CHECK(cudaSetDevice(DEVICENUM));
    int nElem = 1 << 15;
    size_t nBytes = sizeof(float) * nElem;

    // Non-default stream: the legacy default stream (0) synchronizes with
    // everything, which would defeat asynchronous issue.
    cudaStream_t stream0;
    CHECK(cudaStreamCreate(&stream0));

    // Pinned (page-locked) host memory — required for truly asynchronous
    // cudaMemcpyAsync and gives higher transfer bandwidth than malloc'd
    // pageable memory. cudaMallocHost replaces malloc here; the memory
    // still lives on the host.
    float *host_array_a = nullptr;
    float *host_array_b = nullptr;
    float *host_array_res = nullptr;
    CHECK(cudaMallocHost((float **)&host_array_a, nBytes));
    CHECK(cudaMallocHost((float **)&host_array_b, nBytes));
    CHECK(cudaMallocHost((float **)&host_array_res, nBytes));
    initialDataRandom(host_array_a, nElem);
    initialDataRandom(host_array_b, nElem);

    // Device buffers for the two inputs and the result.
    float *device_array_a = nullptr;
    float *device_array_b = nullptr;
    float *device_array_res = nullptr;
    CHECK(cudaMalloc((float **)&device_array_a, nBytes));
    CHECK(cudaMalloc((float **)&device_array_b, nBytes));
    CHECK(cudaMalloc((float **)&device_array_res, nBytes));
    CHECK(cudaMemset(device_array_res, 0, nBytes));

    // Asynchronous host-to-device transfers, ordered on stream0.
    CHECK(cudaMemcpyAsync(device_array_a, host_array_a, nBytes, cudaMemcpyHostToDevice, stream0));
    CHECK(cudaMemcpyAsync(device_array_b, host_array_b, nBytes, cudaMemcpyHostToDevice, stream0));

    // Launch the element-wise addition kernel on the same stream so it is
    // ordered after the two input copies. block_dims/grid_dims come from
    // common.h; the grid is assumed to cover all nElem elements exactly
    // (the kernel has no bounds guard).
    dim3 block_size = block_dims(nElem);
    dim3 grid_size = grid_dims(nElem);
    summaryGPU<<<grid_size, block_size, 0, stream0>>>(device_array_a, device_array_b, device_array_res);
    CHECK(cudaGetLastError()); // catch launch-configuration errors immediately

    // Stream-ordered copy of the result back to the host, then block until
    // all work queued on stream0 (copies + kernel) has completed.
    CHECK(cudaMemcpyAsync(host_array_res, device_array_res, nBytes, cudaMemcpyDeviceToHost, stream0));
    CHECK(cudaStreamSynchronize(stream0));

    // Verify the GPU result against a CPU reference with a small tolerance
    // (bit-exact float equality across host/device is not guaranteed).
    bool match = true;
    for (int i = 0; i < nElem; ++i)
    {
        float expected = host_array_a[i] + host_array_b[i];
        if (std::fabs(host_array_res[i] - expected) > 1e-5f)
        {
            match = false;
            std::cout << "Mismatch at index " << i << ": got " << host_array_res[i]
                      << ", expected " << expected << std::endl;
            break;
        }
    }
    std::cout << (match ? "Arrays match." : "Arrays do NOT match.") << std::endl;

    // Release device memory.
    CHECK(cudaFree(device_array_a));
    CHECK(cudaFree(device_array_b));
    CHECK(cudaFree(device_array_res));

    // Release pinned host memory.
    CHECK(cudaFreeHost(host_array_a));
    CHECK(cudaFreeHost(host_array_b));
    CHECK(cudaFreeHost(host_array_res));

    // Destroy the stream.
    CHECK(cudaStreamDestroy(stream0));
    return 0;
}