// DEFINITIONS
#define ERROR_RETURN(retval) { fprintf(stderr, "Error %d %s:line %d: \n", retval,__FILE__,__LINE__);    exit(retval); }
#define TILEWIDTH 16

// INCLUDES
#include <cuda.h>
#include <matrix.h>
#include <papi.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

// PROTOTYPES
void matrixMulOnDevice(float*, float*, float*, int);
__global__ void matrixMulKernel(float*, float*, float*, int);

int
main(int argc, char** argv)
{
	printf("Running %s...\n", argv[0]);

	// Seed RNG.
	srand(time(NULL));

	// Process args: argv[1] is the square matrix width.
	if(argc < 2)
	{
		printf("ERRR Not enough arguments.\n");
		return -1;
	}

	int width = atoi(argv[1]);

	if(width < 1)
	{
		printf("ERRR Need positive size argument: %d.\n", width);
		return -1;
	}

	// FIX: compare against TILEWIDTH rather than a magic 16 so this check
	// stays correct if the tile size is ever changed.
	if(width % TILEWIDTH != 0)
	{
		printf("ERRR Need size to be multiple of %d: %d.\n", TILEWIDTH, width);
		return -1;
	}

	// Allocate the three width x width single-precision matrices on the host.
	int size = width * width * sizeof(float);

	float* M = (float*) malloc(size);
	float* N = (float*) malloc(size);
	float* P = (float*) malloc(size);

	// FIX: malloc can fail for large widths; bail out instead of faulting.
	if(M == NULL || N == NULL || P == NULL)
	{
		fprintf(stderr, "ERRR Host allocation of %d bytes failed.\n", size);
		free(P);
		free(N);
		free(M);
		return -1;
	}

	matrixRandInit(M, width);
	matrixRandInit(N, width);
	//matrixOnesInit(M, width);
	//matrixOnesInit(N, width);

	matrixZeroInit(P, width);

	// Set up PAPI.
	// Make sure PAPI library version is correct.
	int retval;

	if((retval = PAPI_library_init(PAPI_VER_CURRENT)) != PAPI_VER_CURRENT )
	{
		// FIX: the old code printed an uninitialized errstring buffer here;
		// report the actual return code instead.
		fprintf(stderr, "Error: PAPI_library_init returned %d\n", retval);
		free(P);
		free(N);
		free(M);
		return -1;
	}

	float rtime, ptime, mflops;
	long long flpins;

	// Start counting! (The first PAPI_flops call initializes the counters.)
	//if( (retval=PAPI_start(EventSet)) != PAPI_OK)
	if( (retval=PAPI_flops(&rtime, &ptime, &flpins, &mflops)) != PAPI_OK)
		ERROR_RETURN(retval);

	// Run mmm.
	matrixMulOnDevice(M, N, P, width);

	// Get counts (elapsed real/process time since the first call).
	if( (retval=PAPI_flops(&rtime, &ptime, &flpins, &mflops)) != PAPI_OK)
		ERROR_RETURN(retval);

	// Close PAPI.
	PAPI_shutdown();

	// Print out results...
	printf("width:\t%d\nrtime:\t%f\nptime:\t%f\n", width, rtime, ptime);

	// Free malloc'd memory.
	free(P);
	free(N);
	free(M);
	
	printf("Done!\n");
	
	return 0;
}

// Abort with a descriptive message if a CUDA runtime call failed.
// (Every cudaMalloc/cudaMemcpy below previously went unchecked, so a
// failed allocation or copy silently produced garbage results.)
static void
checkCuda(cudaError_t err, const char* what)
{
	if(err != cudaSuccess)
	{
		fprintf(stderr, "CUDA error (%s): %s\n", what, cudaGetErrorString(err));
		exit(-1);
	}
}

// Host wrapper: copy M and N to the device, launch the tiled MMM kernel,
// and copy the width x width product back into P. width must be a
// positive multiple of TILEWIDTH (validated by main).
void
matrixMulOnDevice(float* M, float* N, float* P, int width)
{
	// Set size of matrices in bytes.
	int size = width * width * sizeof(float);

	// Allocate and copy matrices to device.
	float* Md;
	float* Nd;
	float* Pd;

	/*
	printf("size:\t%d\n", size);
	printMatrix(M, width);
	printMatrix(N, width);
	*/

	checkCuda(cudaMalloc(&Md, size), "cudaMalloc Md");
	checkCuda(cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice), "copy M to device");

	checkCuda(cudaMalloc(&Nd, size), "cudaMalloc Nd");
	checkCuda(cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice), "copy N to device");

	checkCuda(cudaMalloc(&Pd, size), "cudaMalloc Pd");

	// Compute number of thread blocks (width is a multiple of TILEWIDTH,
	// so the grid exactly covers the matrix).
	int gridWidth = width / TILEWIDTH;

	// Invoke the MMM kernel: one TILEWIDTH x TILEWIDTH block per output tile.
	dim3 dimGrid(gridWidth, gridWidth);
	dim3 dimBlock(TILEWIDTH, TILEWIDTH);

	printf("Calling kernel with <<<(%d, %d), (%d, %d)>>>...\n",
		   gridWidth, gridWidth,
		   TILEWIDTH, TILEWIDTH);

	matrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, width);

	// FIX: launches do not return errors directly; catch bad launch
	// configurations here. Execution errors surface at the blocking
	// cudaMemcpy below.
	checkCuda(cudaGetLastError(), "kernel launch");

	// Get result from device (blocking copy also synchronizes the kernel).
	checkCuda(cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost), "copy P to host");

	//printMatrix(P, width);

	// Free malloc'd memory.
	cudaFree(Pd);
	cudaFree(Nd);
	cudaFree(Md);
}

// Tiled matrix-matrix multiply: Pd = Md * Nd, all square width x width,
// row-major. Preconditions (enforced by the host wrapper): blockDim is
// (TILEWIDTH, TILEWIDTH), gridDim is (width/TILEWIDTH, width/TILEWIDTH),
// and width is a multiple of TILEWIDTH. Uses 2 * TILEWIDTH^2 floats of
// static shared memory per block.
__global__ void
matrixMulKernel(float* Md, float* Nd, float* Pd, int width)
{
	int bx = blockIdx.x;
	int by = blockIdx.y;

	int tx = threadIdx.x;
	int ty = threadIdx.y;

	// The row_M and col_N to compute P_(row, col).
	int row = by*TILEWIDTH + ty;
	int col = bx*TILEWIDTH + tx;
	
	// Allocate shared memory for one tile of each input matrix.
	__shared__ float Mds[TILEWIDTH][TILEWIDTH];
	__shared__ float Nds[TILEWIDTH][TILEWIDTH];

	float Pvalue = 0;

	// March the two tiles across the shared dimension of Md and Nd.
	for(int m = 0; m < width/TILEWIDTH; m++)
	{
		// Load tiles into shared memory (coalesced: tx indexes the
		// contiguous dimension in both loads).
		Mds[ty][tx] = Md[row*width + (m*TILEWIDTH + tx)];
		Nds[ty][tx] = Nd[col + (m*TILEWIDTH + ty)*width];

		// All threads must finish loading before any thread reads the tile.
		__syncthreads();

		// FIX: the previous 16 hand-unrolled statements hard-coded the tile
		// size and would silently compute wrong results if TILEWIDTH ever
		// changed. The trip count is a compile-time constant, so
		// #pragma unroll produces the same fully-unrolled code.
		#pragma unroll
		for(int k = 0; k < TILEWIDTH; k++)
		{
			Pvalue += Mds[ty][k] * Nds[k][tx];
		}

		// Don't overwrite the tile until every thread is done with it.
		__syncthreads();
	}

	Pd[row*width + col] = Pvalue;
}
