#include <cstdlib>
#include <iostream>
#include <math.h>

#include "cuda_common.h"

// Kernel function to add the elements of two arrays
// Element-wise vector addition: y[i] = x[i] + y[i] for i in [0, n).
//
// Uses a grid-stride loop, so any 1-D launch configuration is valid —
// including grids with fewer total threads than n; each thread then
// processes multiple elements. x is read-only; __restrict__ asserts the
// two arrays do not alias, which lets the compiler use read-only-cache
// loads for x. Callers must pass non-overlapping device pointers.
__global__
void add(int n, const float* __restrict__ x, float* __restrict__ y)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = idx; i < n; i += stride)
    {
        y[i] = x[i] + y[i];
    }
}

// Host driver: adds two 1M-element vectors on the GPU through pinned
// host buffers and verifies every element equals 3.0f.
int main(void)
{
    // Problem size: 1M elements per array.
    const int N = 1 << 20;
    const size_t bytes = N * sizeof(float);

    float *d_x = nullptr, *d_y = nullptr;
    float *h_x = nullptr, *h_y = nullptr, *h_result = nullptr;

    // Abort with a readable message on any failed CUDA call. Kernel launch
    // errors do not come back from the <<<>>> expression itself; they are
    // picked up via cudaGetLastError() right after the launch below.
    auto check = [](cudaError_t err, const char* what) {
        if (err != cudaSuccess) {
            std::cerr << "CUDA error at " << what << ": "
                      << cudaGetErrorString(err) << std::endl;
            std::exit(EXIT_FAILURE);
        }
    };

    // Allocate pinned (page-locked) host memory: faster H2D/D2H transfers
    // than pageable memory.
    check(cudaMallocHost((void**)&h_x, bytes), "cudaMallocHost h_x");
    check(cudaMallocHost((void**)&h_y, bytes), "cudaMallocHost h_y");
    check(cudaMallocHost((void**)&h_result, bytes), "cudaMallocHost h_result");

    // Allocate device memory.
    check(cudaMalloc((void**)&d_x, bytes), "cudaMalloc d_x");
    check(cudaMalloc((void**)&d_y, bytes), "cudaMalloc d_y");

    // Initialize the input arrays on the host.
    for (int i = 0; i < N; i++) {
        h_x[i] = 1.0f;
        h_y[i] = 2.0f;
    }

    // Copy inputs to the device.
    check(cudaMemcpy(d_x, h_x, bytes, cudaMemcpyHostToDevice), "H2D d_x");
    check(cudaMemcpy(d_y, h_y, bytes, cudaMemcpyHostToDevice), "H2D d_y");

    // Launch configuration: cap the total thread count at what the GPU can
    // run (per the project helper get_gpu_max_threads()); the grid-stride
    // loop inside add() covers any elements beyond the grid size.
    int max_thread = get_gpu_max_threads();
    const int blockSize = 512;
    if (N < max_thread)
    {
        // Never launch more threads than there are elements.
        max_thread = N;
    }
    const int blockNum = (max_thread + blockSize - 1) / blockSize;  // ceil-div
    add<<<blockNum, blockSize>>>(N, d_x, d_y);
    check(cudaGetLastError(), "add kernel launch");

    // Block until the kernel finishes; this also surfaces asynchronous
    // execution errors before we read the result.
    check(cudaDeviceSynchronize(), "cudaDeviceSynchronize");

    // Copy the result back to the host.
    check(cudaMemcpy(h_result, d_y, bytes, cudaMemcpyDeviceToHost),
          "D2H h_result");

    // Verify: every element should be 1.0f + 2.0f == 3.0f.
    // Use the float overloads (fmaxf/fabsf) to stay in single precision.
    float maxError = 0.0f;
    for (int i = 0; i < N; i++) {
        maxError = fmaxf(maxError, fabsf(h_result[i] - 3.0f));
    }
    std::cout << "Max error: " << maxError << std::endl;

    // Release device and pinned host memory.
    check(cudaFree(d_x), "cudaFree d_x");
    check(cudaFree(d_y), "cudaFree d_y");
    check(cudaFreeHost(h_x), "cudaFreeHost h_x");
    check(cudaFreeHost(h_y), "cudaFreeHost h_y");
    check(cudaFreeHost(h_result), "cudaFreeHost h_result");

    return 0;
}