#include <iostream>
#include <cuda_runtime.h>
#include <math.h>
using namespace std;

// CUDA kernel: replace each element of `data` with its square root, in place.
// Expects a 1-D launch where gridDim.x * blockDim.x >= size; each thread
// handles at most one element. `data` must be a device pointer to `size` floats.
__global__ void sqrtKernel(float* data, int size) {
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	// Bounds check: the grid is rounded up, so trailing threads must not write.
	if (idx < size) {
		// sqrtf: the float overload avoids a silent promotion to double,
		// which is significantly slower on most GPUs.
		data[idx] = sqrtf(data[idx]);
	}
}

// CPU reference: replace each element of `data` with its square root, in place.
// Mirrors sqrtKernel so GPU results can be validated against the host.
// data: pointer to `size` floats; size: element count (0 is a no-op).
void sqrt_by_cpu(float* data, int size)
{
	for (int i = 0; i < size; i++)
	{
		// sqrtf keeps the computation in single precision, matching the kernel.
		data[i] = sqrtf(data[i]);
	}
}

// Driver: computes sqrt over 1M floats on the GPU by default, or on the CPU
// when any command-line argument is given, then prints the first 16 results.
// Returns 0 on success, -1 on any CUDA failure.
int main(int argc, char* argv[])
{
	const int size = 1024 * 1024;   // 1M elements
	float* h_pinned = nullptr;
	float* d_data = nullptr;
	cudaError_t err;
	// Any extra argument selects the CPU reference path.
	bool gpu_cal_flag = (argc <= 1);

	// 1. Allocate pinned (page-locked) host memory for higher copy bandwidth.
	err = cudaMallocHost((void**)&h_pinned, size * sizeof(float));
	if (err != cudaSuccess)
	{
		cerr << "pinned mem malloc failed!" << endl;
		return -1;
	}
	// 2. Allocate device memory.
	err = cudaMalloc(&d_data, size * sizeof(float));
	if (err != cudaSuccess)
	{
		cerr << "device mem malloc failed!" << endl;
		cudaFreeHost(h_pinned);   // don't leak the pinned buffer on failure
		return -1;
	}

	// 3. Initialize host data: h_pinned[i] = i.
	for (int i = 0; i < size; i++)
	{
		h_pinned[i] = (float)i;
	}

	if (gpu_cal_flag)
	{
		// 4. Copy host -> device (blocking copy; pinned memory still gives
		// better bandwidth than pageable memory).
		err = cudaMemcpy(d_data, h_pinned, size * sizeof(float), cudaMemcpyHostToDevice);
		if (err != cudaSuccess)
		{
			cerr << "H2D copy failed: " << cudaGetErrorString(err) << endl;
			cudaFree(d_data);
			cudaFreeHost(h_pinned);
			return -1;
		}

		// 5. Launch the kernel: one thread per element, grid rounded up.
		int threadsPerBlock = 1024;
		int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock;
		sqrtKernel << <blocksPerGrid, threadsPerBlock >> > (d_data, size);

		// Kernel launches do not return an error directly; check explicitly.
		err = cudaGetLastError();
		if (err != cudaSuccess)
		{
			cerr << "kernel launch failed: " << cudaGetErrorString(err) << endl;
			cudaFree(d_data);
			cudaFreeHost(h_pinned);
			return -1;
		}

		// Wait for the GPU and surface any asynchronous execution errors.
		err = cudaDeviceSynchronize();
		if (err != cudaSuccess)
		{
			cerr << "kernel execution failed: " << cudaGetErrorString(err) << endl;
			cudaFree(d_data);
			cudaFreeHost(h_pinned);
			return -1;
		}
		printf("blocksPerGrid=%d, threadsPerBlock=%d\n", blocksPerGrid, threadsPerBlock);

		// 6. Copy device -> host.
		err = cudaMemcpy(h_pinned, d_data, size * sizeof(float), cudaMemcpyDeviceToHost);
		if (err != cudaSuccess)
		{
			cerr << "D2H copy failed: " << cudaGetErrorString(err) << endl;
			cudaFree(d_data);
			cudaFreeHost(h_pinned);
			return -1;
		}
	}
	else
	{
		sqrt_by_cpu(h_pinned, size);
	}
	// 7. Release device memory (previously leaked on the CPU path).
	cudaFree(d_data);

	// 8. Print the first 16 results for a quick sanity check.
	for (int i = 0; i < 16; i++)
	{
		cout << h_pinned[i] << " ";
	}
	cout << endl;

	// 9. Release pinned host memory.
	cudaFreeHost(h_pinned);

	return 0;
}
