#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include "cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>

#define MATRIX_SIZE 1000  // default matrix dimension (n x n) when not given on the command line
#define BLOCK_SIZE 32     // CUDA thread block is BLOCK_SIZE x BLOCK_SIZE (32x32 = 1024 threads)
// Index of the CUDA device selected by InitCUDA(); read later in main().
int DevicedChoosed = 0;

// Print the key properties of a CUDA device in human-readable form.
// prop: device properties previously filled in by cudaGetDeviceProperties().
void printDeviceProp(const cudaDeviceProp& prop)
{
	printf("Device Name : %s.\n", prop.name);
	// totalGlobalMem, sharedMemPerBlock, memPitch, totalConstMem and
	// textureAlignment are size_t: print with %zu, not %d (the original %d
	// is undefined behavior and truncates on 64-bit platforms).
	printf("totalGlobalMem : %zu.\n", prop.totalGlobalMem);
	printf("sharedMemPerBlock : %zu.\n", prop.sharedMemPerBlock);
	printf("regsPerBlock : %d.\n", prop.regsPerBlock);
	printf("warpSize : %d.\n", prop.warpSize);
	printf("memPitch : %zu.\n", prop.memPitch);
	printf("maxThreadsPerBlock : %d.\n", prop.maxThreadsPerBlock);
	printf("maxThreadsDim[0 - 2] : %d %d %d.\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
	printf("maxGridSize[0 - 2] : %d %d %d.\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
	printf("totalConstMem : %zu.\n", prop.totalConstMem);
	printf("major.minor : %d.%d.\n", prop.major, prop.minor);
	printf("clockRate : %d.\n", prop.clockRate);
	printf("textureAlignment : %zu.\n", prop.textureAlignment);
	printf("deviceOverlap : %d.\n", prop.deviceOverlap);
	printf("multiProcessorCount : %d.\n", prop.multiProcessorCount);
}

//CUDA 初始化
// CUDA initialization: enumerate devices and select the first one with
// compute capability >= 1.0.  On success sets the current device, records its
// index in the global DevicedChoosed, and returns true.  Returns false when
// no usable device is found.
bool InitCUDA()
{
	int count = 0;
	// Check the API result: a failed query would leave count uninitialized.
	if (cudaGetDeviceCount(&count) != cudaSuccess || count == 0) {
		fprintf(stderr, "There is no device.\n");
		return false;
	}
	int i;
	for (i = 0; i < count; i++) {
		cudaDeviceProp prop;
		// Query once per device (the original queried twice) and only print
		// or inspect the properties when the query succeeded.
		if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
			printDeviceProp(prop);
			if (prop.major >= 1) {
				break;
			}
		}
	}

	if (i == count) {
		fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
		return false;
	}
	cudaSetDevice(i);
	DevicedChoosed = i;
	return true;
}

// CPU reference implementation of C = A * B for n x n row-major matrices.
// Each output element is accumulated in double to limit rounding error;
// the outer (row) loop is parallelized with OpenMP.
void matMultCPU(const float* a, const float* b, float* c, int n)
{
#pragma omp parallel for
	for (int row = 0; row < n; row++)
	{
		const float* aRow = a + row * n;
		for (int col = 0; col < n; col++)
		{
			double acc = 0;
			for (int k = 0; k < n; k++)
				acc += (double)aRow[k] * b[k * n + col];
			c[row * n + col] = acc;
		}
	}
}

//GPU并行计算矩阵乘法
// Naive GPU matrix multiply: one thread computes one element of C = A * B.
// Expects a 2D launch of BLOCK_SIZE x BLOCK_SIZE blocks covering an n x n
// row-major output; threads past the matrix edge are guarded out.
__global__ void matMultCUDAKernel1(const float* a, const float* b, float* c, int n)
{
	// Output coordinates handled by this thread.
	const int col = blockIdx.x * blockDim.x + threadIdx.x;
	const int row = blockIdx.y * blockDim.y + threadIdx.y;

	if (row < n && col < n)
	{
		// Accumulate the dot product in a register.  The original code used
		// '=' inside the loop, so c ended up holding only the final product
		// term a[row][n-1] * b[n-1][col] instead of the full sum.
		float t = 0.0f;
		for (int i = 0; i < n; i++)
		{
			t += a[row * n + i] * b[i * n + col];
		}
		c[row * n + col] = t;
	}
}

// 优化GPU并行计算矩阵乘法 共享内存
__global__ void matMultCUDAKernel2(const float* a, const float* b, float* c, int n)
{
	//计算这个 thread 应该计算的 row 和 col
	const int col = blockIdx.x * blockDim.x + threadIdx.x;
	const int row = blockIdx.y * blockDim.y + threadIdx.y;

	//显式声明共享内存a，b子矩阵块
	__shared__ float shareA[BLOCK_SIZE][BLOCK_SIZE];
	__shared__ float shareB[BLOCK_SIZE][BLOCK_SIZE];
	float y = 0;

	//计算矩阵乘法 Kahan’s Summation Formula
	for (int m = 0; m < (n - 1) / BLOCK_SIZE + 1; m++)
	{
		// load data from global memory to shared memory
		shareA[threadIdx.y][threadIdx.x] = a[row * n + (m * BLOCK_SIZE + threadIdx.x)];
		shareB[threadIdx.y][threadIdx.x] = b[(m * BLOCK_SIZE + threadIdx.y) * n + col];
		// sync to wait for all threads in one block to finish loading datas
		__syncthreads();

		for (int i = 0; i < BLOCK_SIZE; i++)
		{
			y += shareA[threadIdx.y][i] * shareB[i][threadIdx.x];
		}
		// sync to wait for all threads in one block to finish compute
		__syncthreads();
	}
	// store results into global memory
	if (row < n && col < n)
		c[row * n + col] += y;
}

// Fill an n x n row-major matrix with pseudo-random values in roughly [0, 1],
// combining a coarse uniform sample with a finer-grained fractional part.
// Uses rand(); call srand() beforehand for reproducibility.
void genMat(float* arr, int n)
{
	// Compute the scale in double: the original evaluated
	// RAND_MAX * RAND_MAX as int * int, which overflows (undefined
	// behavior) when RAND_MAX is 2^31 - 1 as on glibc.
	const double scale = (double)RAND_MAX;

	for (int i = 0; i < n; i++)
	{
		for (int j = 0; j < n; j++)
		{
			arr[i * n + j] = (float)(rand() / scale + rand() / (scale * scale));
		}
	}
}

// Relative-error statistics produced by accuracyCheck().
typedef struct Error {
	float max;     // largest relative error observed over all elements
	float average; // mean relative error over all n*n elements
}Error;

// Compare matrix a against reference matrix b (both n x n, row-major) and
// return the maximum and mean element-wise relative error |a-b| / |b|.
// Elements where b is exactly 0 are skipped (relative error undefined) but
// the mean is still divided by n*n, matching the original behavior.
Error accuracyCheck(const float* a, const float* b, int n)
{
	Error err;
	err.max = 0;
	// Accumulate the sum in double: adding n*n small float deltas into a
	// float accumulator loses precision for large matrices.
	double sum = 0.0;
	for (int i = 0; i < n; i++)
	{
		for (int j = 0; j < n; j++)
		{
			if (b[i * n + j] != 0)
			{
				// Relative error of this element.
				float delta = fabs((a[i * n + j] - b[i * n + j]) / b[i * n + j]);
				if (err.max < delta) err.max = delta;
				sum += delta;
			}
		}
	}
	// (double)n * n: the int product n*n overflows for n > 46340.
	err.average = (float)(sum / ((double)n * n));
	return err;
}


// Benchmark driver: generates two random n x n matrices, multiplies them on
// the GPU (kernel selected by argv[2]: 1 = naive, 2 = shared-memory tiled),
// times the kernel with CUDA events, then multiplies on the CPU for timing
// and accuracy comparison.
// Usage: prog [n] [kernel]
int main(int argc, char** argv)
{
	// CUDA initialization: bail out if no usable device.
	if (!InitCUDA()) return 0;
	cudaDeviceProp prop;
	cudaGetDeviceProperties(&prop, DevicedChoosed);

	// Matrix size: argv[1] overrides the default when positive.
	float* a, * b, * c, * d;
	int n = MATRIX_SIZE;
	if (argc >= 2) n = atoi(argv[1]) > 0 ? atoi(argv[1]) : MATRIX_SIZE;
	const size_t bytes = sizeof(float) * (size_t)n * n;

	// Pinned host memory for a, b, c (faster transfers); plain malloc for the
	// CPU reference result d.
	cudaMallocHost((void**)&a, bytes);
	cudaMallocHost((void**)&b, bytes);
	cudaMallocHost((void**)&c, bytes);
	d = (float*)malloc(bytes);

	genMat(a, n);
	genMat(b, n);

	// Device buffers and input transfer.
	float* cuda_a, * cuda_b, * cuda_c;
	cudaMalloc((void**)&cuda_a, bytes);
	cudaMalloc((void**)&cuda_b, bytes);
	cudaMalloc((void**)&cuda_c, bytes);
	cudaMemcpy(cuda_a, a, bytes, cudaMemcpyHostToDevice);
	cudaMemcpy(cuda_b, b, bytes, cudaMemcpyHostToDevice);
	// Zero the output buffer: cudaMalloc memory is uninitialized, and a
	// kernel that accumulates into c would otherwise read garbage.
	cudaMemset(cuda_c, 0, bytes);

	// Create both timing events up front (the original created 'end' only
	// after the launch and recorded 'start' twice).
	cudaEvent_t start, end;
	cudaEventCreate(&start);
	cudaEventCreate(&end);

	// Launch configuration: 2D grid of BLOCK_SIZE x BLOCK_SIZE blocks
	// covering the n x n output (ceil division handles the edge tiles).
	dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE, 1);
	dim3 gridSize((n + BLOCK_SIZE - 1) / BLOCK_SIZE, (n + BLOCK_SIZE - 1) / BLOCK_SIZE, 1);
	// Kernel selection: only 2 selects the tiled kernel; anything else falls
	// back to the naive kernel (the original silently launched nothing for
	// values > 2 yet still reported a time).
	int flag = 1;
	if (argc >= 3 && atoi(argv[2]) == 2) flag = 2;

	// Time only the kernel itself.
	cudaEventRecord(start);
	switch (flag)
	{
	case 2:
		matMultCUDAKernel2 << <gridSize, blockSize >> > (cuda_a, cuda_b, cuda_c, n);
		break;
	default:
		matMultCUDAKernel1 << <gridSize, blockSize >> > (cuda_a, cuda_b, cuda_c, n);
		break;
	}
	cudaEventRecord(end);
	cudaEventSynchronize(end);

	float msec = 0.0f;
	cudaEventElapsedTime(&msec, start, end);

	// Copy the result back (blocking copy, so no extra sync needed).
	cudaMemcpy(c, cuda_c, bytes, cudaMemcpyDeviceToHost);
	// Release device resources.
	cudaFree(cuda_a);
	cudaFree(cuda_b);
	cudaFree(cuda_c);
	cudaEventDestroy(start);
	cudaEventDestroy(end);

	// Device memory actually allocated: three float buffers of n*n elements
	// (the original printed n*n*8, which matches none of the allocations).
	printf("GPU memory: %e MB\n", (double)(3.0 * bytes) / (1024. * 1024.));
	// cudaEventElapsedTime already returns milliseconds; the original divided
	// by CLOCKS_PER_SEC, which made the printed GPU time meaningless.
	printf("GPU time: %3f ms\n", (double)msec);

	// CPU reference timing.  This must actually run (the original had it
	// commented out, so accuracyCheck compared against uninitialized memory).
	clock_t begin, stop;
	begin = clock();
	matMultCPU(a, b, d, n);
	stop = clock();
	printf("CPU time: %3f ms\n", (double)(stop - begin) / CLOCKS_PER_SEC * 1000.0);

	// Accuracy of the GPU result against the CPU reference.
	Error error;
	error = accuracyCheck(c, d, n);
	printf("Max error: %g Average error: %g\n", error.max, error.average);

	// Release host memory (the original leaked all four buffers).
	cudaFreeHost(a);
	cudaFreeHost(b);
	cudaFreeHost(c);
	free(d);

	return 0;
}
