
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

//#include <windows.h>
#include <stdio.h>
#include "cublas.h"

// kernels
#include "common.cuh"
#include "dgemm2.cuh"

// Matrix dimensions.
// All matrices are square N x N; the individual width/height variables
// are kept so the code below reads like a general (M,N,K) GEMM.
int N = 64;
int WA = N;   // Matrix A width
int HA = N;   // Matrix A height
int WB = N;   // Matrix B width

int HB = WA;  // Matrix B height
int WC = WB;  // Matrix C width
int HC = HA;  // Matrix C height

// Forward declarations.
int runGEMM(bool verbose);
bool shouldGetUserInput(int argc, char** argv);  // NOTE(review): declared but not defined in this file
void randomInit(_float*, int);
void printDiff(_float*, _float*, int, int);

// Set the benchmark problem size. All matrices are square n x n,
// so every dimension variable tracks the new value.
void setN(int n)
{
	N = n;
	WA = HA = WB = N;  // A is N x N, B has width N
	HB = WA;           // inner dimension must match
	WC = WB;
	HC = HA;
}

// Transpose a row-major width x height matrix in place.
// On return A holds the height x width transpose.
// BUGFIX: the malloc result was previously used without a NULL check;
// on allocation failure we now leave A untouched instead of crashing.
void transpose(_float* A, int width, int height)
{
	_float* tmp = (_float*) malloc(width*height*sizeof(_float));
	if (tmp == NULL)
		return;  // out of memory: leave A unchanged

	// scatter each element to its transposed position in the scratch buffer
	for (int i = 0; i < height; ++i)
		for (int j = 0; j < width; ++j)
			tmp[j*height + i] = A[i*width + j];

	// copy the transposed data back over A
	for (int i = 0; i < width*height; ++i)
		A[i] = tmp[i];

	free(tmp);
}

// Fill the first `size` entries of A with zero.
void zero_float(_float* A, int size)
{
	int idx = 0;
	while (idx < size)
		A[idx++] = 0.0;
}

// Parse the command line:
//   -r      enable the complete benchmark sweep (*complete = true)
//   -n <v>  set the matrix dimension (*n = atoi(v))
// Unknown arguments are ignored. Returns false only when -n has no value.
bool getOptions(int argc, char** argv, bool* complete, int *n)
{
	*complete = false;
	for (int i = 1; i < argc; ++i)
	{
		if (strcmp(argv[i], "-r") == 0)
		{
			*complete = true;
		}
		else if (strcmp(argv[i], "-n") == 0)
		{
			if (++i >= argc)
				return false;  // -n requires a following value
			*n = atoi(argv[i]);
		}
	}
	return true;
}

// Entry point. Flags: -n <size> sets the matrix dimension,
// -r runs the full sweep (N = 64, 128, ..., 5632) with terse output.
int main(int argc, char** argv)
{
	bool complete = false;  // -r : run the doubling sweep up to 5632
	int n = 6400;

	if (!getOptions(argc, argv, &complete, &n))
	{
		printf("command line error\n");
		return -1;
	}

	setN(n);
	if (!complete)
	{
		// single verbose run at the requested size
		runGEMM(true);
		return 0;
	}

	// sweep mode: terse output, N doubles each pass
	for (setN(64); N <= 5632; setN(N * 2))
		runGEMM(false);

	return 0;
}

// Run one DGEMM benchmark at the current global size N:
// generate random N x N matrices A and B, multiply with the DGEMM2
// kernel, time the kernel with CUDA events, and validate the result
// against cuBLAS. verbose=true prints a full report; false prints a
// single "N gflops" line for sweep mode.
// Returns cudaSuccess, or -1 if the kernel launch failed.
int runGEMM(bool verbose)
{
    srand(619);  // fixed seed for reproducible inputs

    // The kernels assume dimensions are positive multiples of 64;
    // round N up to the next multiple if needed.
    if (N <= 0 || N % 64 != 0) {
        if (N <= 0)
            setN(640);
        else
            setN(((N/64)+1)*64);
    }

    // allocate host memory for matrices A and B
    unsigned int size_A = WA * HA;
    unsigned int mem_size_A = sizeof(_float) * size_A;
    _float* h_A = (_float*) malloc(mem_size_A);
    unsigned int size_B = WB * HB;
    unsigned int mem_size_B = sizeof(_float) * size_B;
    _float* h_B = (_float*) malloc(mem_size_B);

    // initialize host memory with random data
    randomInit(h_A, size_A);
    randomInit(h_B, size_B);

    // allocate device memory and copy the inputs over
    _float* A;
    cudaMalloc((void**) &A, mem_size_A);
    _float* B;
    cudaMalloc((void**) &B, mem_size_B);
    cudaMemcpy(A, h_A, mem_size_A, cudaMemcpyHostToDevice);
    cudaMemcpy(B, h_B, mem_size_B, cudaMemcpyHostToDevice);

    // allocate device memory for the results:
    // C for our kernel, C2 for the cuBLAS reference
    unsigned int size_C = WC * HC;
    unsigned int mem_size_C = sizeof(_float) * size_C;
    _float* C;
    cudaMalloc((void**) &C, mem_size_C);
    _float* C2;
    cudaMalloc((void**) &C2, mem_size_C);

    // zero-initialize both result buffers on the device
    _float* h_C = (_float*) malloc(mem_size_C);
    zero_float(h_C, size_C);
    cudaMemcpy(C, h_C, mem_size_C, cudaMemcpyHostToDevice);

    _float* h_C2 = (_float*) malloc(mem_size_C);
    zero_float(h_C2, size_C);
    cudaMemcpy(C2, h_C2, mem_size_C, cudaMemcpyHostToDevice);

    // launch configurations for the available kernels
    // SGEMM
    dim3 blockSize(BLOCK_X, BLOCK_Y);
    dim3 grid(HC / VEC_LENGTH, WC / BY);
    // SGEMM2
    dim3 blockSize2(G2_BLOCK_X, G2_BLOCK_Y);
    dim3 grid2(HC / G2_SMEM_AX, WC / G2_SMEM_AX);
    // DGEMM2: one block per 64x64 tile of C
    dim3 dblockSize2(G2_BLOCK_X, G2_BLOCK_Y);
    dim3 dgrid2(HC / 64, WC / 64);

    dim3 blocks(N/16, N/16);
    dim3 threads(16, 16);

    // time the kernel with CUDA events
    cudaEvent_t start, stop;
    float processTimeMS;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    // alternative kernels kept for reference:
    //mulMatrix<<< blocks, threads >>>(A, B, C, WA, WB);
    //SGEMM<<< grid, blockSize >>>(A, B, C, HA, WA);
    //SGEMM2<<< grid2, blockSize2 >>>(A, B, C, HA, WA, WB);
    DGEMM2<<< dgrid2, dblockSize2 >>>(A, B, C, HA, WA, WB);

    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&processTimeMS, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // NOTE(review): on failure this returns -1 without freeing the host and
    // device buffers above — tolerable for a benchmark, but worth knowing.
    CheckConditionXR_(cudaGetLastError() == cudaSuccess, -1);

    // copy the kernel result back to the host
    cudaMemcpy(h_C, C, mem_size_C, cudaMemcpyDeviceToHost);

    // 2*K floating-point ops per output element, scaled to GFlop
    double gflops = ((2 * WA)/1000.0) * (WC/1000.0) * (HC/1000.0);
    double gFlopsPerS = gflops / (processTimeMS/(double)1000.0);

    // compute the reference solution with cuBLAS (legacy char-op API)
    cublasDgemm( 'N', 'N', HA, WB, WA, 1.0, A, HA, B, HB, 0.0, C2, HC );
    cudaMemcpy(h_C2, C2, mem_size_C, cudaMemcpyDeviceToHost);

    // compare against the reference: the run fails if any element differs
    // by 1e-4 or more; also accumulate the squared total error for reporting
    _float acc = 0;
    bool res = true;
    for (unsigned int i = 0; i < size_C; ++i)
    {
        _float temp = h_C2[i] - h_C[i];
        if (temp < 0)
            temp = -temp;  // BUGFIX: compare the ABSOLUTE difference;
                           // negative errors previously slipped through
        if (temp >= 1e-4)
            res = false;
        acc += temp * temp;
    }

    // print results
    if (verbose) {
        printf("N = %i\n", N);
        printf("Processing time: %f (ms) \n", processTimeMS);
        printf("Width(A) = %i, Width(C) = %i, Height(C) = %i\n", WA, WC, HC);
        printf("GFlops/s: %f\n", gFlopsPerS);  // BUGFIX: was "Glops/s"
        printf("Test results with CUBLAS: %s , squared total error = %f \n", (1 == res) ? "PASSED" : "FAILED", acc);
    } else
    {
        // N	GFlops/s
        printf("%6i %8f\n", N, gFlopsPerS);
    }

    //if (res!=1) printDiff(h_C2, h_C, WC, HC);

    // clean up memory
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_C2);

    cudaFree(A);
    cudaFree(B);
    cudaFree(C);
    cudaFree(C2);

    // cudaThreadExit is deprecated; cudaDeviceReset is the modern equivalent
    cudaDeviceReset();

    return cudaSuccess;
}

// Allocates a matrix with random float entries.
void randomInit(_float* data, int size)
{
    for (int i = 0; i < size; ++i)
       data[i] = rand() / (_float)RAND_MAX;
	   ////data[i] = rand() % 10;

	//for(int i=0; i < HC; ++i)
	//{
	//	for(int j=0; j < WC; ++j)
	//		data[i*WC+j] = 1.0;
	//	
	//}
}

// Print every element where data1 (CPU/reference) and data2 (GPU) differ
// under exact inequality, then the total mismatch count. Debug aid for
// FAILED runs; exact float comparison is intentional here — it dumps
// every discrepancy rather than applying a tolerance.
void printDiff(_float *data1, _float *data2, int width, int height)
{
  int error_count = 0;
  for (int j = 0; j < height; j++) {
    for (int i = 0; i < width; i++) {
      int k = j*width + i;
      if (data1[k] != data2[k]) {
         // BUGFIX: format strings ended in a literal " n" where "\n" was
         // intended, so the output ran together on one line
         printf("diff(%d,%d) CPU=%4.4f, GPU=%4.4f\n", i, j, data1[k], data2[k]);
         error_count++;
      }
    }
  }
  printf("\nTotal Errors = %d\n", error_count);
}
