#include <hw3_common.h>
#include <stdlib.h>
#include <stdio.h>
#include <cublas_v2.h>

__global__ void transpose(float *gpuM, float *gpuM_t, int n){
	// Out-of-place transpose: gpuM_t = gpuM^T for a row-major n x n matrix.
	// Each thread owns a patchH x patchW rectangle of the output.
	// Assumes n is evenly divisible by gridDim*blockDim in each dimension
	// (no bounds checks are performed).
	const int patchW = n / gridDim.x / blockDim.x ;
	const int patchH = n / gridDim.y / blockDim.y ;
	const int rowBase = (blockIdx.y * blockDim.y + threadIdx.y) * patchH ;
	const int colBase = (blockIdx.x * blockDim.x + threadIdx.x) * patchW ;

	for (int dy = 0; dy < patchH; dy++){
		const int r = rowBase + dy ;
		for (int dx = 0; dx < patchW; dx++){
			const int c = colBase + dx ;
			gpuM_t[r * n + c] = gpuM[c * n + r] ;
		}
	}
}

__global__ void basicKernel(float *gpuM, float *gpuM_t, float *gpuRes, int n){
	// Naive multiply gpuRes = gpuM * gpuM_t (all row-major n x n), one
	// per-element dot product at a time. Each thread owns a patchH x patchW
	// rectangle of the output. Assumes n is evenly divisible by
	// gridDim*blockDim in each dimension (no bounds checks).
	const int patchW = n / gridDim.x / blockDim.x ;
	const int patchH = n / gridDim.y / blockDim.y ;
	const int rowBase = (blockIdx.y * blockDim.y + threadIdx.y) * patchH ;
	const int colBase = (blockIdx.x * blockDim.x + threadIdx.x) * patchW ;

	for (int dy = 0; dy < patchH; dy++){
		const int r = rowBase + dy ;
		for (int dx = 0; dx < patchW; dx++){
			const int c = colBase + dx ;

			// Dot product of row r of gpuM with column c of gpuM_t.
			float dot = 0 ;
			for (int k = 0; k < n; k++)
				dot += gpuM[r * n + k] * gpuM_t[k * n + c] ;
			gpuRes[r * n + c] = dot ;
		}
	}
}

__global__ void basicKernel_notranspose(float *gpuM, float *gpuRes, int n){
	// Naive multiply gpuRes = gpuM * gpuM^T without materializing the
	// transpose: element (r, c) is the dot product of rows r and c of gpuM.
	// Each thread owns a patchH x patchW rectangle of the output.
	// Assumes n is evenly divisible by gridDim*blockDim in each dimension
	// (no bounds checks).
	const int patchW = n / gridDim.x / blockDim.x ;
	const int patchH = n / gridDim.y / blockDim.y ;
	const int rowBase = (blockIdx.y * blockDim.y + threadIdx.y) * patchH ;
	const int colBase = (blockIdx.x * blockDim.x + threadIdx.x) * patchW ;

	for (int dy = 0; dy < patchH; dy++){
		const int r = rowBase + dy ;
		for (int dx = 0; dx < patchW; dx++){
			const int c = colBase + dx ;

			// Both operands are read row-wise from the same matrix.
			float dot = 0 ;
			for (int k = 0; k < n; k++)
				dot += gpuM[r * n + k] * gpuM[c * n + k] ;
			gpuRes[r * n + c] = dot ;
		}
	}
}

#define BLOCK_SIZE 16

__global__ void kernel_Share(float *gpuM, float *gpuRes, int n){
	// Shared-memory tiled multiply: gpuRes = gpuM * gpuM^T, one
	// BLOCK_SIZE x BLOCK_SIZE output tile per block.
	// Preconditions: blockDim == (BLOCK_SIZE, BLOCK_SIZE), grid covers
	// n/BLOCK_SIZE tiles per dimension, and n is a multiple of BLOCK_SIZE
	// (no bounds checks are performed).
	int blockRow = blockIdx.y ;
	int blockCol = blockIdx.x ;

	// Output submatrix this block produces.
	// BUG FIX: the result must go to gpuRes. The original wrote back into
	// gpuM, racing with other blocks still reading it and leaving gpuRes
	// (which the host copies out and compares) uninitialized.
	float *Msub = &gpuRes[blockRow * BLOCK_SIZE * n + blockCol * BLOCK_SIZE] ;

	float accu = 0 ;

	int row = threadIdx.y ;
	int col = threadIdx.x ;

	for (int m = 0; m < n / BLOCK_SIZE; m++){	//Loop over the submatrix band
		// Both operand tiles come from gpuM: the row band for the left
		// operand, and the row band whose rows act as columns of gpuM^T.
		float *MsubA = &gpuM[blockRow * BLOCK_SIZE * n + m * BLOCK_SIZE] ;
		float *MsubB = &gpuM[blockCol * BLOCK_SIZE * n + m * BLOCK_SIZE] ;

		__shared__ float aTile[BLOCK_SIZE][BLOCK_SIZE] ;
		__shared__ float bTile[BLOCK_SIZE][BLOCK_SIZE] ;

		aTile[row][col] = MsubA[row * n + col] ;
		bTile[row][col] = MsubB[row * n + col] ;

		__syncthreads() ;	// tiles fully populated before any thread reads them

		for (int i = 0; i < BLOCK_SIZE; i++)
			accu += aTile[row][i] * bTile[col][i] ;

		__syncthreads() ;	// all reads done before the next band overwrites the tiles
	}

	Msub[row * n + col] = accu ;
}

__global__ void kernel_Coalesce(float *gpuM, float *gpuRes, int n){
	// Shared-memory tiled multiply (gpuRes = gpuM * gpuM^T) storing the
	// second tile transposed, with +1 column padding on bTile to avoid
	// shared-memory bank conflicts on the inner-product reads.
	// Preconditions: blockDim == (BLOCK_SIZE, BLOCK_SIZE), grid covers
	// n/BLOCK_SIZE tiles per dimension, and n is a multiple of BLOCK_SIZE.
	int blockRow = blockIdx.y ;
	int blockCol = blockIdx.x ;

	// Output submatrix this block produces.
	// BUG FIX: the result must go to gpuRes. The original wrote back into
	// gpuM, racing with other blocks still reading it and leaving gpuRes
	// (which the host copies out and compares) uninitialized.
	float *Msub = &gpuRes[blockRow * BLOCK_SIZE * n + blockCol * BLOCK_SIZE] ;

	float accu = 0 ;

	int row = threadIdx.y ;
	int col = threadIdx.x ;

	for (int m = 0; m < n / BLOCK_SIZE; m++){	//Loop over the submatrix band
		float *MsubA = &gpuM[blockRow * BLOCK_SIZE * n + m * BLOCK_SIZE] ;
		float *MsubB = &gpuM[blockCol * BLOCK_SIZE * n + m * BLOCK_SIZE] ;

		__shared__ float aTile[BLOCK_SIZE][BLOCK_SIZE] ;
		__shared__ float bTile[BLOCK_SIZE][BLOCK_SIZE + 1] ;	// padded inner dim

		aTile[row][col] = MsubA[row * n + col] ;
		bTile[col][row] = MsubB[row * n + col] ;	// store transposed

		__syncthreads() ;	// tiles fully populated before any thread reads them

		for (int i = 0; i < BLOCK_SIZE; i++)
			accu += aTile[row][i] * bTile[i][col] ;

		__syncthreads() ;	// all reads done before the next band overwrites the tiles
	}

	Msub[row * n + col] = accu ;
}

int main(int argc, char **argv){

	// Benchmarks several GPU implementations of C = M * M^T (M symmetric,
	// so M * M^T == M * M) against a CPU reference, printing per-variant
	// wall-clock time and a CPU-vs-GPU error metric.
	// Usage: prog n [anything] — any second argument switches to
	// tab-separated "plot" output (timings only, no labels/error columns).
	// Helpers (safe, init, genData, cpuMult, compare, wall_clock,
	// validateDimensions) come from hw3_common.h.
	bool forPlot = false ;
  
	if (argc < 2){
		printf("no input n!\n") ;
		exit(-1) ;
	}
	
	if (argc > 2){
		forPlot = true ;
	}

	// Matrix dimension; no validation here beyond validateDimensions below.
	int n = atoi(argv[1]) ;

	if (forPlot)
		printf("%d\t", n) ;
	else
		printf("n : %d\n", n) ;

	// Launch configuration for the basic kernels (8x8 blocks of 16x16
	// threads => each thread handles an (n/128)^2 patch of the output).
	dim3 dimGrid(8, 8, 1) ;
	dim3 dimBlock(16, 16, 1) ;

	// Presumably aborts or warns if n is incompatible with the launch
	// shape — defined in hw3_common.h, TODO confirm.
	validateDimensions(dimGrid, dimBlock, n) ;

	int numCudaDevices = 0 ;
	struct cudaDeviceProp props ;
	init(&numCudaDevices, &props, true) ;//This will set device 0 as current GPU

	size_t matrixTotalSize = n * n * sizeof(float) ;

	// Host buffers (malloc results are not NULL-checked).
	float *matrix = (float *)malloc(matrixTotalSize) ;
	float *result_cuda = (float *)malloc(matrixTotalSize) ;
	float *result_cpu = (float *)malloc(matrixTotalSize) ;
	float *result_share = (float *)malloc(matrixTotalSize) ;
	float *result_coalesce = (float *)malloc(matrixTotalSize) ;

	float *gpuM, *gpuM_t, *gpuRes ;

	//Generate data, then mirror the lower triangle into the upper
	//triangle so the matrix is symmetric (M == M^T).
	genData(matrix, n * n) ;
	for (int i = 0; i < n; i++){
		for (int j = i + 1; j < n; j++){
			matrix[i * n + j] = matrix[j * n + i] ;
		}
	}

	if (!forPlot)
		printf("\t\tTime\t\tError\n") ;

	{	//CPU computation — reference result for all GPU variants.
		double start = wall_clock() ;
		cpuMult(matrix, result_cpu, n) ;
		if (forPlot)
			printf("%f\t", wall_clock() - start) ;
		else
			printf("CPU\t\t%f\n", wall_clock() - start) ;
	}

	{	//Basic gpu computation: explicit transpose, then naive multiply.
		// NOTE: all GPU timings in this file include cudaMalloc, the
		// host<->device copies, and cudaFree, not just kernel time.
		double start = wall_clock() ;

		safe(cudaMalloc((void**)&gpuM, matrixTotalSize), __LINE__) ;
		safe(cudaMalloc((void**)&gpuM_t, matrixTotalSize), __LINE__) ;
		safe(cudaMalloc((void**)&gpuRes, matrixTotalSize), __LINE__) ;
	
		safe(cudaMemcpy(gpuM, matrix, matrixTotalSize, cudaMemcpyHostToDevice), __LINE__) ;

		transpose<<<dimGrid, dimBlock>>>(gpuM, gpuM_t, n) ;
		safe(cudaGetLastError(), __LINE__) ;
	
		basicKernel<<<dimGrid, dimBlock>>>(gpuM, gpuM_t, gpuRes, n) ;
		safe(cudaGetLastError(), __LINE__) ;

		// Blocking copy — also synchronizes with the kernels above.
		safe(cudaMemcpy(result_cuda, gpuRes, matrixTotalSize, cudaMemcpyDeviceToHost), __LINE__) ;

		safe(cudaFree(gpuM), __LINE__) ;
		safe(cudaFree(gpuM_t), __LINE__) ;
		safe(cudaFree(gpuRes), __LINE__) ;

		if (forPlot)
			printf("%f\t", wall_clock() - start) ;
		else
			printf("Basic gpu\t%f\t%f\n", wall_clock() - start, compare(result_cuda, result_cpu, n)) ;

	}

	{	//Basic gpu computation - no transpose (relies on M being symmetric).
		double start = wall_clock() ;

		safe(cudaMalloc((void**)&gpuM, matrixTotalSize), __LINE__) ;
		safe(cudaMalloc((void**)&gpuRes, matrixTotalSize), __LINE__) ;
	
		safe(cudaMemcpy(gpuM, matrix, matrixTotalSize, cudaMemcpyHostToDevice), __LINE__) ;
	
		basicKernel_notranspose<<<dimGrid, dimBlock>>>(gpuM, gpuRes, n) ;
		safe(cudaGetLastError(), __LINE__) ;
	
		safe(cudaMemcpy(result_cuda, gpuRes, matrixTotalSize, cudaMemcpyDeviceToHost), __LINE__) ;

		safe(cudaFree(gpuM), __LINE__) ;
		safe(cudaFree(gpuRes), __LINE__) ;

		if (forPlot)
			printf("%f\t", wall_clock() - start) ;
		else
			printf("No transpose\t%f\t%f\n", wall_clock() - start, compare(result_cuda, result_cpu, n)) ;
	}

	{	//shared-memory tiled kernel
		double start = wall_clock() ;

		safe(cudaMalloc((void**)&gpuM, matrixTotalSize), __LINE__) ;
		safe(cudaMalloc((void**)&gpuRes, matrixTotalSize), __LINE__) ;
	
		safe(cudaMemcpy(gpuM, matrix, matrixTotalSize, cudaMemcpyHostToDevice), __LINE__) ;
	
		// Tiled launch shape shadows the outer dimGrid/dimBlock: one
		// BLOCK_SIZE x BLOCK_SIZE thread block per output tile
		// (requires n to be a multiple of BLOCK_SIZE).
		dim3 dimGrid(n / BLOCK_SIZE, n / BLOCK_SIZE, 1) ;
		dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1) ;
		kernel_Share<<<dimGrid, dimBlock>>>(gpuM, gpuRes, n) ;
		safe(cudaGetLastError(), __LINE__) ;
	
		safe(cudaMemcpy(result_share, gpuRes, matrixTotalSize, cudaMemcpyDeviceToHost), __LINE__) ;

		safe(cudaFree(gpuM), __LINE__) ;
		safe(cudaFree(gpuRes), __LINE__) ;

		if (forPlot)
			printf("%f\t", wall_clock() - start) ;
		else
			printf("Share\t\t%f\t%f\n", wall_clock() - start, compare(result_share, result_cpu, n)) ;
	}

	{	//coalesce: tiled kernel with padded, transposed second tile
		double start = wall_clock() ;

		safe(cudaMalloc((void**)&gpuM, matrixTotalSize), __LINE__) ;
		safe(cudaMalloc((void**)&gpuRes, matrixTotalSize), __LINE__) ;
	
		safe(cudaMemcpy(gpuM, matrix, matrixTotalSize, cudaMemcpyHostToDevice), __LINE__) ;
	
		dim3 dimGrid(n / BLOCK_SIZE, n / BLOCK_SIZE, 1) ;
		dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1) ;
		kernel_Coalesce<<<dimGrid, dimBlock>>>(gpuM, gpuRes, n) ;
		safe(cudaGetLastError(), __LINE__) ;
	
		safe(cudaMemcpy(result_coalesce, gpuRes, matrixTotalSize, cudaMemcpyDeviceToHost), __LINE__) ;

		safe(cudaFree(gpuM), __LINE__) ;
		safe(cudaFree(gpuRes), __LINE__) ;

		if (forPlot)
			printf("%f\t", wall_clock() - start) ;
		else
			printf("Coalesce\t%f\t%f\n", wall_clock() - start, compare(result_coalesce, result_cpu, n)) ;
	}

	{	//CUBLAS: SSYRK computes C = alpha*A*A^T + beta*C.
		// NOTE(review): result_coalesce is reused here as the cuBLAS
		// output buffer, overwriting the previous section's result.
		double start = wall_clock() ;

		safe(cudaMalloc((void**)&gpuM, matrixTotalSize), __LINE__) ;
		safe(cudaMalloc((void**)&gpuRes, matrixTotalSize), __LINE__) ;
	
		safe(cudaMemcpy(gpuM, matrix, matrixTotalSize, cudaMemcpyHostToDevice), __LINE__) ;
	
		cublasHandle_t t_Handle;
		cublasStatus_t status;

		status = cublasCreate(&t_Handle);
		if (status != CUBLAS_STATUS_SUCCESS){
			printf("Cublas failed!\n") ;
			exit(-1) ;
		}
		
		float alpha = 1;
		float beta = 0;
		
		// NOTE(review): SSYRK with CUBLAS_FILL_MODE_UPPER only writes one
		// triangle of gpuRes; the other triangle keeps whatever cudaMalloc
		// returned (uninitialized). If compare() inspects the full matrix,
		// part of the error metric is computed from garbage — confirm
		// compare()'s behavior in hw3_common.h. (Since M is symmetric,
		// row- vs column-major layout does not affect the product itself.)
		status = cublasSsyrk(t_Handle,CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, n, n, &alpha, gpuM, n, &beta, gpuRes, n);
		if (status != CUBLAS_STATUS_SUCCESS){
			printf("Cublas failed!\n") ;
			exit(-1) ;
		}

		safe(cudaGetLastError(), __LINE__) ;
	
		safe(cudaMemcpy(result_coalesce, gpuRes, matrixTotalSize, cudaMemcpyDeviceToHost), __LINE__) ;

    		cublasDestroy(t_Handle);
		safe(cudaFree(gpuM), __LINE__) ;
		safe(cudaFree(gpuRes), __LINE__) ;

		if (forPlot)
			printf("%f\n", wall_clock() - start) ;
		else
			printf("CUBLAS\t\t%f\t%f\n", wall_clock() - start, compare(result_coalesce, result_cpu, n)) ;
	}

	free(matrix) ;
	free(result_cuda) ;
	free(result_cpu) ;
	free(result_share) ;
	free(result_coalesce) ;

	return 0 ;
}
