// Project adapted from https://developer.nvidia.com/blog/even-easier-introduction-cuda/
// 3. Added explicit cudaMemcpy host<->device transfers
#include <iostream>
#include <math.h>
#include <chrono>  

// This will output the proper CUDA error strings in the event
// that a CUDA host call returns an error
#define checkGPUErrors(val) checkCudaResult((val), #val, __FILE__, __LINE__)

// Print a diagnostic (file, line, error code/name, failing expression) for a
// failed CUDA runtime call and terminate the process.
// Renamed from `__check`: identifiers beginning with a double underscore are
// reserved for the implementation in C++ and must not be used in user code.
// `static` keeps the helper's linkage internal to this translation unit.
static void checkCudaResult(cudaError_t result, char const *const func,
                            const char *const file, int const line) {
  if (result) {  // any non-zero cudaError_t is a failure (cudaSuccess == 0)
    fprintf(stderr, "CUDA error at %s:%d code=%d(%s) \"%s\" \n", file, line,
            static_cast<unsigned int>(result), cudaGetErrorName(result), func);
    exit(EXIT_FAILURE);
  }
}

// Main program begins

// GPU kernel: element-wise z[i] = x[i] + y[i].
// Uses a grid-stride loop, so any <<<grid, block>>> configuration (including
// <<<1,1>>>) correctly covers all array_size elements.
// The inputs are marked `const ... __restrict__` to tell the compiler they are
// read-only and non-aliased, enabling use of the read-only data cache; callers
// passing plain float* are unaffected (implicit qualification conversion).
__global__ void vector_add(int array_size, const float *__restrict__ x,
                           const float *__restrict__ y, float *__restrict__ z)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x; // total threads in the grid, e.g. 256*16
    for (int i = tid; i < array_size; i += stride)
        z[i] = x[i] + y[i];
}

// Driver: allocate host/device buffers, copy inputs to the GPU, time a
// vector-add launch with both CPU chrono and CUDA events, then verify the
// result (every element should be 1.0f + 2.0f = 3.0f).
int main(void)
{
    int array_size = 1 << 20; // 1 M elements
    size_t bytes = (size_t)array_size * sizeof(float);

    // Host buffers (checked: a silent malloc failure would segfault later).
    float *x = (float*)malloc(bytes);
    float *y = (float*)malloc(bytes);
    float *z = (float*)malloc(bytes);
    if (!x || !y || !z) {
        fprintf(stderr, "host malloc failed\n");
        return EXIT_FAILURE;
    }

    // Device buffers.
    float *x_gpu, *y_gpu, *z_gpu;
    checkGPUErrors(cudaMalloc(&x_gpu, bytes));
    checkGPUErrors(cudaMalloc(&y_gpu, bytes));
    checkGPUErrors(cudaMalloc(&z_gpu, bytes));

    // initialize x and y arrays on the host
    for (int i = 0; i < array_size; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }
    checkGPUErrors(cudaMemcpy(x_gpu, x, bytes, cudaMemcpyHostToDevice));
    checkGPUErrors(cudaMemcpy(y_gpu, y, bytes, cudaMemcpyHostToDevice));

    // Warm up the GPU so the timed launch below does not pay one-time costs
    // (context initialization, module load). Synchronize afterwards so the
    // warm-up's execution cannot bleed into the CPU-timed region.
    vector_add<<<1, 1>>>(1, x_gpu, y_gpu, z_gpu);
    checkGPUErrors(cudaGetLastError());
    checkGPUErrors(cudaDeviceSynchronize());

    cudaEvent_t gpu_start, gpu_stop;
    checkGPUErrors(cudaEventCreate(&gpu_start));
    checkGPUErrors(cudaEventCreate(&gpu_stop));

    auto start = std::chrono::high_resolution_clock::now();
    checkGPUErrors(cudaEventRecord(gpu_start));

    int block_dim = 256; // threads per block (multiple of warp size 32)
    int grid_dim = 16;   // blocks; the grid-stride loop covers the remainder

    vector_add<<<grid_dim, block_dim>>>(array_size, x_gpu, y_gpu, z_gpu);
    checkGPUErrors(cudaGetLastError()); // catch launch-configuration errors
    checkGPUErrors(cudaEventRecord(gpu_stop));
    checkGPUErrors(cudaDeviceSynchronize());

    float milliseconds = 0;
    checkGPUErrors(cudaEventElapsedTime(&milliseconds, gpu_start, gpu_stop));

    auto stop = std::chrono::high_resolution_clock::now();
    // Subtract timepoints to get the duration, then cast to microseconds.
    auto duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start);
    printf("Execution time measured on CPU: %ld us.\n", duration.count());
    printf("Execution time measured on GPU: %f us.\n", milliseconds * 1000);

    // Copy the result back (blocking cudaMemcpy also synchronizes) and verify.
    checkGPUErrors(cudaMemcpy(z, z_gpu, bytes, cudaMemcpyDeviceToHost));
    std::cout << "z[0]: " << z[0] << std::endl;
    float maxError = 0.0f;
    for (int i = 0; i < array_size; i++)
        maxError = fmax(maxError, fabs(z[i] - 3.0f));
    if (maxError < 1e-6)
        std::cout << "PASS: Max error: " << maxError << std::endl;
    else
        std::cout << "FAIL: Max error: " << maxError << std::endl;

    // Free GPU resources (events were previously leaked), then host memory.
    checkGPUErrors(cudaEventDestroy(gpu_start));
    checkGPUErrors(cudaEventDestroy(gpu_stop));
    checkGPUErrors(cudaFree(x_gpu));
    checkGPUErrors(cudaFree(y_gpu));
    checkGPUErrors(cudaFree(z_gpu));
    free(x);
    free(y);
    free(z);

    return 0;
}