#include <cstdlib>
#include <cstring>
#include <iostream>

#include <cuda_runtime.h>

#include "../common/common.h"
#define DEVICENUM 0

// Element-wise float addition on the host: array_res[i] = array_a[i] + array_b[i]
// for every i in [0, size). Serves as the CPU reference result that the GPU
// output is validated against. A non-positive size is a no-op.
__host__ void summaryCPU(float *array_a, float *array_b, float *array_res, int size)
{
    const float *lhs = array_a;
    const float *rhs = array_b;
    float *out = array_res;
    for (int remaining = size; remaining > 0; --remaining)
        *out++ = *lhs++ + *rhs++;
}

// Kernel: element-wise float addition, one element per thread.
// Expects a 1-D grid of 1-D blocks whose total thread count covers the
// array length exactly.
// NOTE(review): there is no `index < n` bounds guard, so any launch whose
// grid over-covers the data reads/writes out of bounds — confirm that the
// helpers block_dims()/grid_dims() in ../common/common.h always produce an
// exact cover for the element counts used here.
__global__ void summaryGPU(float *array_a, float *array_b, float *array_res)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    const float lhs = array_a[gid];
    const float rhs = array_b[gid];
    array_res[gid] = lhs + rhs;
}

// Demo driver: adds two 32K-element float arrays on the CPU and on the GPU
// (reading the inputs through zero-copy mapped pinned memory), times both,
// and validates the GPU result against the CPU reference.
int main(int argc, char **argv)
{
    CHECK(cudaSetDevice(DEVICENUM));

    int nElem = 1 << 15;
    size_t nBytes = sizeof(float) * nElem;

    double time_cpu, time_gpu;

    // Allocate zero-copy (mapped, pinned) host memory with cudaHostAlloc.
    // Under unified virtual addressing these pointers are dereferenceable
    // from both host and device code.
    float *host_array_a = nullptr;
    float *host_array_b = nullptr;
    CHECK(cudaHostAlloc((float **)&host_array_a, nBytes, cudaHostAllocMapped));
    CHECK(cudaHostAlloc((float **)&host_array_b, nBytes, cudaHostAllocMapped));

    // Fix: populate the inputs with deterministic data. The original code
    // summed uninitialized memory, so the CPU/GPU comparison was on garbage.
    for (int i = 0; i < nElem; i++)
    {
        host_array_a[i] = (float)(i % 128);
        host_array_b[i] = (float)(i % 64);
    }

    // Host-side reference result, plus its wall-clock time.
    float *host_array_res = (float *)malloc(nBytes);
    memset(host_array_res, 0, nBytes);
    time_cpu = get_time();
    summaryCPU(host_array_a, host_array_b, host_array_res, nElem);
    std::cout << "CPU Sum Array time:" << get_time() - time_cpu << "ms" << std::endl;

    // Device buffer for the GPU result.
    float *device_array_res = nullptr;
    CHECK(cudaMalloc((float **)&device_array_res, nBytes));
    CHECK(cudaMemset(device_array_res, 0, nBytes));

    // Launch configuration comes from the project helpers in ../common/common.h.
    dim3 block_size = block_dims(nElem);
    dim3 grid_size = grid_dims(nElem);

    // Warm-up launch so the timed run does not pay one-time init/JIT costs,
    // then re-zero the result buffer before the measured launch.
    summaryGPU<<<grid_size, block_size>>>(host_array_a, host_array_b, device_array_res);
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());
    CHECK(cudaMemset(device_array_res, 0, nBytes));

    // Timed launch. The device supports unified virtual addressing, so the
    // mapped host pointers can be passed straight to the kernel without an
    // explicit cudaHostGetDevicePointer() mapping step.
    time_gpu = get_time();
    summaryGPU<<<grid_size, block_size>>>(host_array_a, host_array_b, device_array_res);
    CHECK(cudaGetLastError());      // catch launch-configuration errors
    CHECK(cudaDeviceSynchronize()); // wait for completion; catch async errors
    std::cout << "GPU Sum Array time:" << get_time() - time_gpu << "ms" << std::endl;

    // Copy the GPU result back and compare it against the CPU reference.
    float *res_gpu_to_cpu = (float *)malloc(nBytes);
    memset(res_gpu_to_cpu, 0, nBytes);
    CHECK(cudaMemcpy(res_gpu_to_cpu, device_array_res, nBytes, cudaMemcpyDeviceToHost));
    checkResult(host_array_res, res_gpu_to_cpu, nElem);

    // Release device memory, zero-copy memory, and plain host allocations.
    CHECK(cudaFree(device_array_res));
    CHECK(cudaFreeHost(host_array_a));
    CHECK(cudaFreeHost(host_array_b));
    free(host_array_res);
    free(res_gpu_to_cpu);

    CHECK(cudaDeviceReset());
    return 0;
}