/*
目的: gpu 计算耗时统计

*/

#include <stdio.h>
#include <sys/time.h>
#include <cuda_runtime.h>

// cuda核函数: 计算数组的立方和
// CUDA kernel: serially accumulates the sum of cubes of data[0..size) and
// stores it into *result. Designed to be launched as <<<1, 1>>>; with more
// threads every thread would redundantly recompute and store the same value.
// *gpu_clock receives the elapsed device clock cycles spent in the loop
// (clock() on the device reads the per-SM cycle counter).
__global__ void cal_array_cube(float* data, int size, float* result, clock_t* gpu_clock)
{
    // 0.0f: float literal avoids a silent double-precision accumulator,
    // which the original `0.0` would have promoted each addition to.
    float sum = 0.0f;
    clock_t start = clock();
    for (int i = 0; i < size; i++) {
        sum += data[i] * data[i] * data[i];
    }

    *result = sum;
    *gpu_clock = clock() - start;
}

// Allocates a host array of `size` floats initialized to 0.0, 1.0, 2.0, ...
// Caller owns the returned buffer and must release it with delete[].
float* gen_data(int size)
{
    float* data = new float[size];
    for (int i = 0; i < size; i++) {
        // Direct float conversion; the original `i * 1.0` computed in
        // double and then narrowed on assignment.
        data[i] = static_cast<float>(i);
    }

    return data;
}

// Converts a device clock-cycle count into seconds using device 0's core
// clock frequency. Returns elapsed seconds as a double.
double gpu_cal_time_by_clock(clock_t gpu_clock)
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    // prop.clockRate is in kHz, so clockRate * 1000 is cycles/second.
    // The division must be done in floating point: the original integer
    // division truncated the result to 0 (see the recorded run output,
    // "gpu耗时= 0.000000").
    return (double)gpu_clock / ((double)prop.clockRate * 1000.0);
}

// Computes the sum of cubes of data[0..data_size) on the GPU with a
// single-thread kernel, then prints: the GPU result, the kernel time derived
// from device clock cycles, and the total host wall-clock time (which also
// includes allocation and transfer overhead). Returns the GPU-computed sum.
float gpu_cal(float* data, int data_size)
{
    float result = 0.0f;
    clock_t gpu_clock = 0;
    struct timeval start_tv, end_tv, cpu_tv;
    gettimeofday(&start_tv, NULL);

    // 1. Allocate device buffers and copy the host input to the device.
    float* d_data, * d_result;
    clock_t* d_gpu_clock;
    cudaMalloc((void**)&d_data, sizeof(float) * data_size);
    cudaMemcpy(d_data, data, sizeof(float) * data_size, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&d_result, sizeof(float));
    cudaMalloc((void**)&d_gpu_clock, sizeof(clock_t));

    // 2. Launch the kernel with a single thread (intentional: this harness
    // measures serial GPU throughput, not parallel speedup).
    cal_array_cube <<<1, 1 >>> (d_data, data_size, d_result, d_gpu_clock);

    // 3. Copy results back to the host. A blocking cudaMemcpy synchronizes
    // with the preceding kernel launch, so no explicit sync is needed.
    cudaMemcpy(&result, d_result, sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(&gpu_clock, d_gpu_clock, sizeof(clock_t), cudaMemcpyDeviceToHost);

    // 4. Free ALL device allocations. The original version leaked
    // d_gpu_clock by never freeing it.
    cudaFree(d_data);
    cudaFree(d_result);
    cudaFree(d_gpu_clock);

    gettimeofday(&end_tv, NULL);
    timersub(&end_tv, &start_tv, &cpu_tv);
    double cpu_time = cpu_tv.tv_sec + cpu_tv.tv_usec / 1000000.0;
    // Print the GPU result plus both timing measurements.
    fprintf(stdout, "gpu result = %f, gpu耗时= %lf, cpu耗时=%lf\n", result, gpu_cal_time_by_clock(gpu_clock), cpu_time);

    return result;
}

// Returns the current wall-clock time in seconds, with microsecond
// resolution, as a double.
double get_now_time(void)
{
    struct timeval now;
    gettimeofday(&now, NULL);
    double seconds = (double)now.tv_sec;
    seconds += now.tv_usec / 1000000.0;
    return seconds;
}


// CPU reference implementation: accumulates the sum of cubes of
// data[0..data_size), timing the loop with gettimeofday. Prints the result
// and the elapsed wall-clock seconds to stdout, then returns the sum.
float cpu_cal(float* data, int data_size)
{
    struct timeval begin_tv, finish_tv, diff_tv;
    gettimeofday(&begin_tv, NULL);

    float result = 0.0;
    for (int idx = 0; idx < data_size; ++idx) {
        float v = data[idx];
        result += v * v * v;
    }

    gettimeofday(&finish_tv, NULL);
    timersub(&finish_tv, &begin_tv, &diff_tv);
    double elapsed = diff_tv.tv_sec + diff_tv.tv_usec / 1000000.0;
    fprintf(stdout, "cpu result = %f,  耗时 = %lf\n", result, elapsed);

    return result;
}

// Entry point: builds the test array on the host, runs the GPU and CPU
// versions of the computation (each prints its own result and timing),
// then releases the host buffer.
int main()
{
    const int n = 1024;
    float* samples = gen_data(n);

    gpu_cal(samples, n);
    cpu_cal(samples, n);

    delete[] samples;
    return 0;
}
/* 运行结果:
gpu result = 274341281792.000000, gpu耗时= 0.000000, cpu耗时=0.191930
cpu result = 274341281792.000000,  耗时 = 0.000002

结论: gpu无并发的情况下, 计算速度是cpu的10倍左右
(注: 上面打印的 gpu耗时= 0.000000 是 gpu_cal_time_by_clock 中整数除法截断所致,
因此该结论无法仅凭这次输出验证, 需在修正除法后重新测量)
 */