#include <stdio.h>
#include "denseMatrix.cuh"
#include "sparseMatrix.cuh"

#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <time.h>
#include <cstring>


// Prints a human-readable CUDA error message when `status` is not cudaSuccess.
// `position` is a caller-supplied label naming the call site (all callers pass
// string literals, hence const — passing literals to `char*` is ill-formed in
// modern C++).
void logStatus(cudaError_t status, const char *position){
	if(status != cudaSuccess){
		printf("Error: %s at %s\n", cudaGetErrorString(status), position);
	}
}

// Multiplies sparse matrix `a` (COO triplets, sorted by row) by sparse matrix
// `b` (COO triplets, sorted by column) into the dense matrix `c`.
// For each output cell (i,j) it merge-walks row i of `a` against column j of
// `b`, accumulating products where the inner indices match.
// Assumes a->index[i] is the offset of the first stored element of row i
// (-1 for an empty row) and b->index[j] likewise for column j — presumably
// built by buildIndex(byRow)/buildIndex(byCol); verify against sparseMatrix.cuh.
void multiplySparseMatrixCPU(sparseMatrix_t *a, sparseMatrix_t *b, denseMatrix_t *c){
	int i,j,k_a,k_b, el_a, el_b;
	double temp;
	for(i=0;i<a->numRows;i++){
		for(j=0;j<b->numCols;j++){
			el_a = a->index[i];
			el_b = b->index[j];
			temp=0.0;
			// -1 marks an empty row/column: the dot product is zero.
			// `continue` skips the write below, so `c` must already be
			// zero-initialized (main() calls initMatrixToZero first).
			if(el_a==-1 || el_b == -1)continue;
			// Merge-walk: advance whichever side has the smaller inner index.
			// NOTE(review): termination relies on reading the element AFTER
			// the last entry of row i / column j — if the final stored
			// element belongs to that row/column, el_a/el_b can index one
			// past the end of the arrays. Confirm the allocation is padded.
			while(a->row[el_a] == i && b->col[el_b] == j){
				k_a = a->col[el_a];	// inner (column) index within a's row i
				k_b = b->row[el_b];	// inner (row) index within b's column j
				
				if(k_a == k_b){
					temp += a->data[el_a] * b->data[el_b];
					//printf("Multiplying [%d][%d] & [%d][%d]\n", a->row[el_a], a->col[el_a], b->row[el_b], b->col[el_b]);
					el_a++;
					el_b++;
					
				} else if(k_a > k_b){
					el_b++;
				} else if(k_a < k_b){
					el_a++;
				}
			}
			c->data[c->index(i,j)] = temp;
		}
	}
}

// Dense matrix product on the CPU: c[i][j] = sum_k a[i][k] * b[k][j].
// Fixed loop bounds: `i` iterates rows of `a`, `j` columns of `b`, and the
// inner dimension is a->numCols (== b->numRows). The previous code had
// numRows/numCols swapped, which was only correct for square matrices.
void multiplyDenseMatrixCPU(denseMatrix_t *a, denseMatrix_t *b, denseMatrix_t *c){
	int i,j,k;
	double temp;
	for(i=0; i<a->numRows; i++){
		for(j=0; j<b->numCols; j++){
			temp = 0.0;
			for(k=0; k<a->numCols; k++){
				temp += a->data[a->index(i,k)]*b->data[b->index(k,j)];
			}
			c->data[c->index(i,j)] = temp;
		}
	}
}

// One thread computes one output element c[i][j] of the dense product.
// Expects a 2D launch where x indexes output rows and y indexes output
// columns; threads beyond the matrix edge exit via the bounds guard.
__global__ void multiplyDenseMatrixKernel(denseMatrix_t *a, denseMatrix_t *b, denseMatrix_t *c){
	int i,j,k;
	double temp;
	i = blockIdx.x * blockDim.x + threadIdx.x;	// row of c
	j = blockIdx.y * blockDim.y + threadIdx.y;	// column of c
	if( i>=c->numRows || j>=c->numCols )return;

	temp = 0.0;
	// Inner dimension is a->numCols (== b->numRows). Buffers are row-major,
	// so row k of `b` starts at k * b->numCols. The previous code used
	// a->numRows / b->numRows here, which was only correct for square
	// matrices.
	for(k=0; k<a->numCols; k++){
		temp += a->data[a->numCols*i + k]*b->data[b->numCols*k + j];
	}
	c->data[c->numCols * i + j] = temp;
}

// GPU counterpart of multiplySparseMatrixCPU: one thread computes one dense
// output cell c[i][j] by merge-walking row i of `a` (COO sorted by row)
// against column j of `b` (COO sorted by column).
// Expects a 2D launch: x indexes output rows, y indexes output columns.
// Assumes a->index / b->index hold the first-element offset per row/column
// (-1 when empty) — presumably built by buildIndex(); verify in sparseMatrix.cuh.
__global__ void multiplySparseMatrixKernel(sparseMatrix_t *a, sparseMatrix_t *b, denseMatrix_t *c){
	int i, j, k_a, k_b, el_a, el_b;
	double temp;
	i = blockIdx.x * blockDim.x + threadIdx.x;	// row of c
	j = blockIdx.y * blockDim.y + threadIdx.y;	// column of c
	if( i>=c->numRows || j>=c->numCols )return;
	el_a = a->index[i];
	el_b = b->index[j];
	temp=0.0;
	// Empty row/column: the product is zero. Returning without writing
	// relies on `c` having been zero-initialized by the host.
	if(el_a==-1 || el_b == -1)return;
	// Merge-walk: advance whichever side has the smaller inner index.
	// NOTE(review): as in the CPU version, loop termination reads one element
	// past the final entry of the row/column — confirm the device arrays are
	// padded so el_a/el_b cannot index out of bounds.
	while(a->row[el_a] == i && b->col[el_b] == j){
		k_a = a->col[el_a];	// inner (column) index within a's row i
		k_b = b->row[el_b];	// inner (row) index within b's column j
				
		if(k_a == k_b){
			temp += a->data[el_a] * b->data[el_b];
			el_a++;
			el_b++;
					
		} else if(k_a > k_b){
			el_b++;
		} else if(k_a < k_b){
			el_a++;
		}
	}
	c->data[c->numCols * i + j] = temp;
}

// Host wrapper: copies the dense operands to the GPU, launches the dense
// multiply kernel over a 16x16-thread 2D grid, and copies the result back
// into h_c.
void multiplyDenseMatrixGPU(denseMatrix_t *h_a, denseMatrix_t *h_b, denseMatrix_t *h_c){
	dim3 blockDim, gridDim;
	denseMatrix_t *d_a,*d_b,*d_c;
	cudaError_t status = cudaSuccess;

	printf("\tTransfering Dense Matrix to GPU...\n");
	d_a = h_a->transferMatrixToGPU();
	d_b = h_b->transferMatrixToGPU();
	d_c = h_c->transferMatrixToGPU();
	printf("\tTransfering Dense Matrix to GPU...Success\n");	// fixed: said "...\n" twice

	// Size the grid off the OUTPUT matrix and ceil-divide so every element is
	// covered without a spurious extra block when the size divides evenly.
	// (Previously gridDim.y divided by blockDim.x and the grid was sized off
	// h_a — harmless only because everything was square and 16x16.)
	blockDim.x = 16;
	blockDim.y = 16;
	gridDim.x = (h_c->numRows + blockDim.x - 1) / blockDim.x;
	gridDim.y = (h_c->numCols + blockDim.y - 1) / blockDim.y;

	multiplyDenseMatrixKernel<<<gridDim, blockDim>>>(d_a->onDevice, d_b->onDevice, d_c->onDevice);
	status = cudaGetLastError();	// catches launch-configuration errors
	logStatus(status, "Multiply Dense Matrix Kernel");
	status = cudaDeviceSynchronize();	// surfaces async execution errors (previously ignored)
	logStatus(status, "DM Device Synchronise");
	h_c->copyDataToCPU(*d_c);
	// NOTE(review): d_a/d_b/d_c are never released — if transferMatrixToGPU()
	// allocates per call, this leaks device memory on every invocation.
}

// Host wrapper: copies the sparse operands and dense output to the GPU,
// launches the sparse multiply kernel over a 16x16-thread 2D grid, and copies
// the result back into h_c.
void multiplySparseMatrixGPU(sparseMatrix_t *h_a, sparseMatrix_t *h_b, denseMatrix_t *h_c){
	dim3 blockDim, gridDim;
	denseMatrix_t *d_c;
	sparseMatrix_t *d_a,*d_b;
	cudaError_t status = cudaSuccess;

	printf("Transfering Sparse Matrix to GPU...\n");
	d_a = h_a->transferMatrixToGPU();
	d_b = h_b->transferMatrixToGPU();
	d_c = h_c->transferMatrixToGPU();
	printf("Transfering Sparse Matrix to GPU...Success\n");

	// Size the grid off the OUTPUT matrix and ceil-divide so every element is
	// covered without a spurious extra block when the size divides evenly.
	// (Previously gridDim.y divided by blockDim.x — harmless only because the
	// block is square.)
	blockDim.x = 16;
	blockDim.y = 16;
	gridDim.x = (h_c->numRows + blockDim.x - 1) / blockDim.x;
	gridDim.y = (h_c->numCols + blockDim.y - 1) / blockDim.y;

	multiplySparseMatrixKernel<<<gridDim, blockDim>>>(d_a->onDevice, d_b->onDevice, d_c->onDevice);
	status = cudaGetLastError();	// catches launch-configuration errors
	logStatus(status, "Multiply Sparse Matrix Kernel");
	status = cudaDeviceSynchronize();	// surfaces async execution errors
	logStatus(status, "SM Device Synchronise");
	h_c->copyDataToCPU(*d_c);
	// NOTE(review): d_a/d_b/d_c are never released — if transferMatrixToGPU()
	// allocates per call, this leaks device memory on every invocation.
}

// Converts dense matrix `a` into COO triplets in `b`, scanning row by row so
// the resulting entries are ordered by row index. `b` must already be
// allocated for a->nonNullElements() entries.
void denseToSparseRow(denseMatrix_t *a, sparseMatrix_t *b){
	int out = 0;
	for(int r = 0; r < a->numRows; r++){
		for(int c = 0; c < a->numCols; c++){
			double v = a->data[a->index(r,c)];
			if(v == 0.0) continue;	// only non-zero entries are stored
			b->data[out] = v;
			b->row[out] = r;
			b->col[out] = c;
			out++;
		}
	}
}

// Converts dense matrix `a` into COO triplets in `b`, scanning column by
// column so the resulting entries are ordered by column index. `b` must
// already be allocated for a->nonNullElements() entries.
void denseToSparseCol(denseMatrix_t *a, sparseMatrix_t *b){
	int out = 0;
	for(int c = 0; c < a->numCols; c++){
		for(int r = 0; r < a->numRows; r++){
			double v = a->data[a->index(r,c)];
			if(v == 0.0) continue;	// only non-zero entries are stored
			b->data[out] = v;
			b->col[out] = c;
			b->row[out] = r;
			out++;
		}
	}
}

// Element-wise comparison of two equally-sized dense matrices over their flat
// row-major buffers; prints each mismatch with its (row, col) coordinates and
// a final correct/failed tally.
// NOTE(review): exact double equality is intentional here — both operands are
// products computed with sums in the same order; switch to a tolerance if the
// kernels ever reorder the accumulation.
void compareMatrices(denseMatrix_t *a, denseMatrix_t *b){
	int i;
	int succ, fail;
	succ = 0;
	fail = 0;
	for(i=0; i< a->numCols * a->numRows; i++){
		if(a->data[i] != b->data[i]){
			// Row-major: row = i / numCols, col = i % numCols.
			// (Previously divided by numRows, which mislabeled coordinates
			// for non-square matrices.)
			printf("a = %g, b = %g, at [%d , %d]\n", a->data[i], b->data[i], i/b->numCols, i%b->numCols);
			fail++;
		} else {
			succ++;
		}
	}
	printf("Correct = %d, Failed = %d\n", succ,fail);
}
// Sets every element of dense matrix `a` to zero.
void initMatrixToZero(denseMatrix_t *a){
	for(int r = 0; r < a->numRows; r++){
		for(int c = 0; c < a->numCols; c++){
			a->data[a->index(r,c)] = 0;
		}
	}
}

// Fills dense matrix `a` with a deterministic sparse pattern: every
// `density`-th element (by flat row-major position) gets the value
// row + col + 2; all other elements are zero. density == 1 fills every cell.
void initMatrixToValues(denseMatrix_t *a, int density){
	for(int r = 0; r < a->numRows; r++){
		for(int c = 0; c < a->numCols; c++){
			int flat = r * a->numCols + c;
			a->data[a->index(r,c)] = (flat % density == 0) ? (r + c + 2) : 0;
		}
	}
}


// Benchmark driver. Usage: <name> [<size> <density> <filename>]
// Builds two SIZE x SIZE matrices with a deterministic sparse pattern,
// multiplies them dense/sparse on CPU and GPU, verifies every result against
// the dense CPU product, and appends "SIZE;dense;sparse" timing rows (ms) to
// CPU<filename> and GPU<filename>.
int main(int argc, char** argv){
	denseMatrix_t d_a, d_b, d_c, d_d, d_e, d_f;	// d_ means "dense" here, not "device"
	sparseMatrix_t s_a, s_b;
	int REPEATS,SIZE,r,DENSITY;
	clock_t start, end;
	double timeDense=0, timeSparse=0;
	float timeDenseGPU=0, timeSparseGPU=0;
	FILE *fileCPU, *fileGPU;
	char *filename, *cpuFile, *gpuFile;
	cudaEvent_t cStart, cEnd;
	cudaError_t status = cudaSuccess;
	cudaEventCreate(&cStart);
	cudaEventCreate(&cEnd);

	filename = new char[100];
	cpuFile = new char[100];
	gpuFile = new char[100];

	if(argc == 1){
		printf("Starting with default arguments...\n");
		SIZE = 403;	//DEFAULT VALUES
		DENSITY =100; //DEFAULT VALUES
		strcpy(filename,"debug.csv");
	}
	else if(argc == 4){
		printf("Starting with 4 arguments...\n");
		SIZE = atoi(argv[1]);
		DENSITY = atoi(argv[2]);
		// "CPU"/"GPU" prefix (3) + name + NUL must fit in the 100-byte
		// buffers; previously an overlong argv[3] overflowed via strcpy.
		if(strlen(argv[3]) >= 96){
			printf("Filename too long (max 95 characters)\n");
			return -1;
		}
		strcpy(filename,argv[3]);
	}
	else {
		printf("Wrong Arguments, Use <name> <size> <density> <filenames>\n");
		return -1;
	}
	// Output files are the user-supplied name prefixed with CPU/GPU
	// (consolidated from the duplicated code in both branches above).
	strcpy(cpuFile, "CPU");
	strcpy(gpuFile, "GPU");
	strcpy(cpuFile+3,filename);
	strcpy(gpuFile+3,filename);
	fileCPU = fopen(cpuFile,"a");
	fileGPU = fopen(gpuFile,"a");
	if(fileCPU == NULL || fileGPU == NULL){	// previously unchecked -> NULL deref in fprintf
		printf("Could not open output files %s / %s\n", cpuFile, gpuFile);
		return -1;
	}

	// Scale repetitions inversely with problem size, clamped to [1, 100].
	REPEATS = 100000 / (SIZE * SIZE);
	if(REPEATS < 1) REPEATS = 1;
	if(REPEATS > 100) REPEATS = 100;

	printf("Allocating...\n");
	d_a.allocateData(SIZE, SIZE);
	d_b.allocateData(SIZE, SIZE);
	d_c.allocateData(SIZE, SIZE);
	d_d.allocateData(SIZE, SIZE);
	d_e.allocateData(SIZE, SIZE);
	d_f.allocateData(SIZE, SIZE);
	printf("Allocating...Success\n");
	printf("Initializing...\n");
	initMatrixToValues(&d_a, DENSITY);
	initMatrixToValues(&d_b, DENSITY);
	// Result matrices must start at zero: the sparse multiply paths skip
	// writing cells whose row/column is empty.
	initMatrixToZero(&d_c);
	initMatrixToZero(&d_d);
	initMatrixToZero(&d_e);
	initMatrixToZero(&d_f);
	printf("Initializing...Success\n");

	printf("Allocating...\n");
	s_a.allocateData(d_a.numRows, d_a.numCols, d_a.nonNullElements());
	s_b.allocateData(d_b.numRows, d_b.numCols, d_b.nonNullElements());
	printf("Allocating...Success\n");
	printf("Converting...\n");
	// a is walked by row and b by column during the multiply, so convert in
	// the matching order.
	denseToSparseRow(&d_a, &s_a);
	denseToSparseCol(&d_b, &s_b);
	printf("Converting...Success\n");

	s_a.buildIndex(byRow);
	s_b.buildIndex(byCol);

	s_a.testSort(byRow);
	s_b.testSort(byCol);

	//s_a.printMatrix(byRow);
	//s_b.printMatrix(byCol);

	printf("Multiplying Dense on CPU...\n");
	start = clock();
	for(r=0;r<REPEATS;r++){
		multiplyDenseMatrixCPU( &d_a, &d_b, &d_c );
	}
	end = clock();
	// Average per-repeat wall time in milliseconds.
	timeDense = (double)(end - start)/(double)((CLOCKS_PER_SEC * REPEATS) / 1000);
	printf("Multiplying Dense on CPU...Success\n");

	printf("Multiplying Sparse on CPU...\n");
	start = clock();
	for(r=0;r<REPEATS;r++){
		multiplySparseMatrixCPU( &s_a, &s_b, &d_d);
	}
	end = clock();
	timeSparse = (double)(end - start)/(double)((CLOCKS_PER_SEC * REPEATS) / 1000);
	printf("Multiplying Sparse on CPU...Success\n");

	// NOTE(review): the GPU timings below include the per-repeat host->device
	// transfers done inside the wrappers, not just kernel time.
	printf("Multiplying Dense on GPU...\n");
	status = cudaEventRecord(cStart);
	logStatus(status, "Timing");
	for(r=0;r<REPEATS;r++){
		multiplyDenseMatrixGPU( &d_a, &d_b, &d_e );
	}
	status = cudaEventRecord(cEnd);
	cudaEventSynchronize(cEnd);
	logStatus(status, "Timing");
	status = cudaEventElapsedTime(&timeDenseGPU,cStart,cEnd);
	logStatus(status, "Timing");
	timeDenseGPU /= (float)REPEATS;
	printf("Multiplying Dense on GPU...Success\n");

	printf("Multiplying Sparse on GPU...\n");
	status = cudaEventRecord(cStart);
	logStatus(status, "Timing");
	for(r=0;r<REPEATS;r++){
		multiplySparseMatrixGPU( &s_a, &s_b, &d_f);
	}
	status = cudaEventRecord(cEnd);
	logStatus(status, "Timing");
	cudaEventSynchronize(cEnd);
	status = cudaEventElapsedTime(&timeSparseGPU,cStart,cEnd);
	logStatus(status, "Timing");
	timeSparseGPU /= REPEATS;
	printf("Multiplying Sparse on GPU...Success\n");

	// Verify every variant against the dense CPU reference in d_c.
	printf("Comparing...\n");
	compareMatrices(&d_c, &d_d);
	printf("Comparing...Success\n");

	printf("Comparing...\n");
	compareMatrices(&d_c, &d_e);
	printf("Comparing...Success\n");

	printf("Comparing...\n");
	compareMatrices(&d_c, &d_f);
	printf("Comparing...Success\n");

	printf("Time for Dense Matrix Multiplication: %g ms\n", timeDense);
	printf("Time for Sparse Matrix Multiplication: %g ms\n", timeSparse);
	printf("Time for Dense Matrix Multiplication on GPU: %g ms\n", timeDenseGPU);
	printf("Time for Sparse Matrix Multiplication on GPU: %g ms\n", timeSparseGPU);
	fprintf(fileCPU,"%d;%g;%g\n",SIZE,timeDense,timeSparse);
	fprintf(fileGPU,"%d;%g;%g\n",SIZE,timeDenseGPU, timeSparseGPU);
	printf("Deallocation...\n");
	d_a.deallocate();
	d_b.deallocate();
	d_c.deallocate();
	d_d.deallocate();
	d_e.deallocate();
	d_f.deallocate();
	s_a.deallocate();
	s_b.deallocate();

	printf("Deallocation...Success\n");

	fclose(fileCPU);
	fclose(fileGPU);
	// Release host buffers and CUDA events (previously leaked).
	delete[] filename;
	delete[] cpuFile;
	delete[] gpuFile;
	cudaEventDestroy(cStart);
	cudaEventDestroy(cEnd);
	cudaDeviceReset();
	return 0;
}