#include <iostream>
#include <cuda_runtime.h>
#include "../common/common.h"
#define DEVICENUM 0
#define N_SEGMENT 3

// Host-side reference implementation of element-wise vector addition:
// array_res[i] = array_a[i] + array_b[i] for i in [0, size).
// Used to validate the GPU result. A size of 0 performs no writes.
__host__ void summaryCPU(float *array_a, float *array_b, float *array_res, int size)
{
    float *out_end = array_res + size;
    while (array_res != out_end)
        *array_res++ = *array_a++ + *array_b++;
}

// Device kernel for element-wise vector addition:
// array_res[i] = array_a[i] + array_b[i] for i in [0, N).
// Expects a 1D grid of 1D blocks; the bounds check makes any grid that spans
// at least N threads safe — surplus tail threads simply exit.
// The inputs are read-only, so they are marked const __restrict__ to let the
// compiler route the loads through the read-only data cache (callers passing
// plain float* are unaffected).
__global__ void summaryGPU(const float *__restrict__ array_a, const float *__restrict__ array_b,
                           float *__restrict__ array_res, int N)
{
    int index = threadIdx.x + blockDim.x * blockIdx.x;
    if (index < N)
        array_res[index] = array_a[index] + array_b[index];
}

// Demo: overlap H2D copy, kernel execution, and D2H copy by splitting the
// arrays into N_SEGMENT chunks, each processed in its own CUDA stream.
// Compares the GPU sum against a timed CPU reference.
int main(int argc, char **argv)
{
    CHECK(cudaSetDevice(DEVICENUM));

    // +1 makes nElem = 32769, an exact multiple of N_SEGMENT (3); the
    // per-stream segmentation below would silently drop a tail otherwise.
    int nElem = (1 << 15) + 1;
    size_t nBytes = sizeof(float) * nElem;

    double time_cpu, time_gpu;

    // Pinned (page-locked) host buffers: cudaMemcpyAsync only overlaps with
    // compute when the host side is pinned, and pinned transfers are faster.
    float *host_array_a = nullptr;
    float *host_array_b = nullptr;
    float *host_array_res = nullptr;
    float *res_from_gpu_h = nullptr;
    CHECK(cudaMallocHost((float **)&host_array_a, nBytes));
    CHECK(cudaMallocHost((float **)&host_array_b, nBytes));
    CHECK(cudaMallocHost((float **)&host_array_res, nBytes));
    CHECK(cudaMallocHost((float **)&res_from_gpu_h, nBytes));
    initialDataRandom(host_array_a, nElem);
    initialDataRandom(host_array_b, nElem);
    memset(host_array_res, 0, nBytes);
    memset(res_from_gpu_h, 0, nBytes);

    // CPU reference result + timing. No GPU work has been launched yet, so a
    // device synchronization inside the timed region would only add noise.
    time_cpu = get_time();
    summaryCPU(host_array_a, host_array_b, host_array_res, nElem);
    std::cout << "CPU Sum Array time:" << get_time() - time_cpu << "ms" << std::endl;

    // Device buffers.
    float *device_array_a = nullptr;
    float *device_array_b = nullptr;
    float *device_array_res = nullptr;
    CHECK(cudaMalloc((float **)&device_array_a, nBytes));
    CHECK(cudaMalloc((float **)&device_array_b, nBytes));
    CHECK(cudaMalloc((float **)&device_array_res, nBytes));

    // NOTE(review): the launch config is sized for the full nElem, yet each
    // launch below only processes iElem elements. The kernel's bounds check
    // keeps this correct (surplus blocks exit immediately), but a grid sized
    // for iElem would waste less — depends on block_dims/grid_dims semantics
    // in common.h, so left as-is.
    dim3 block_size = block_dims(nElem);
    dim3 grid_size = grid_dims(nElem);

    // One stream per segment so the H2D copy, kernel, and D2H copy of
    // different segments can overlap with each other.
    int iElem = nElem / N_SEGMENT;           // exact division, see nElem above
    size_t segBytes = sizeof(float) * iElem; // bytes per segment
    cudaStream_t stream[N_SEGMENT];
    for (int i = 0; i < N_SEGMENT; i++)
    {
        CHECK(cudaStreamCreate(&stream[i]));
    }

    // GPU timing; deliberately includes the host<->device transfers.
    time_gpu = get_time();
    for (int i = 0; i < N_SEGMENT; i++)
    {
        int ioffset = i * iElem;
        CHECK(cudaMemcpyAsync(&device_array_a[ioffset], &host_array_a[ioffset], segBytes, cudaMemcpyHostToDevice, stream[i]));
        CHECK(cudaMemcpyAsync(&device_array_b[ioffset], &host_array_b[ioffset], segBytes, cudaMemcpyHostToDevice, stream[i]));
        summaryGPU<<<grid_size, block_size, 0, stream[i]>>>(&device_array_a[ioffset], &device_array_b[ioffset], &device_array_res[ioffset], iElem);
        CHECK(cudaGetLastError()); // catch launch-configuration errors; non-blocking
        CHECK(cudaMemcpyAsync(&res_from_gpu_h[ioffset], &device_array_res[ioffset], segBytes, cudaMemcpyDeviceToHost, stream[i]));
    }
    CHECK(cudaDeviceSynchronize());
    std::cout << "GPU Sum Array time:" << get_time() - time_gpu << "ms" << std::endl;

    // Compare the GPU result (already copied back above) with the CPU reference.
    checkResult(host_array_res, res_from_gpu_h, nElem);

    // Destroy the streams.
    for (int i = 0; i < N_SEGMENT; i++)
    {
        CHECK(cudaStreamDestroy(stream[i]));
    }

    // Release device memory.
    CHECK(cudaFree(device_array_a));
    CHECK(cudaFree(device_array_b));
    CHECK(cudaFree(device_array_res));

    // Release pinned host memory.
    CHECK(cudaFreeHost(host_array_a));
    CHECK(cudaFreeHost(host_array_b));
    CHECK(cudaFreeHost(host_array_res));
    CHECK(cudaFreeHost(res_from_gpu_h));

    return 0;
}