/*
 * Spring 2012
 * ECE519 Massively Parallel Programming
 * 
 * Homework 4 skeleton code provided by Won-Ki Jeong (wkjeong@unist.ac.kr)
 *
 * compile option : nvcc -arch=sm_20
 *
 */
//#include "def.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
//#include <cutil.h>
#include <cuda_runtime.h>
//#include <cutil_inline.h>


#define KERNEL_RADIUS	20
#define KERNEL_SIZE		(2*KERNEL_RADIUS + 1)
#define COLS			4096
#define ROWS			4096
//
// 1D convolution using a given kernel
//
// direction 0 : along x
//           1 : along y
//
// CPU reference implementation of one 1D convolution pass over a
// ROWS x COLS image stored row-major, using clamp-to-edge boundary
// handling. Accumulates in double for precision, then truncates to float.
//
// direction 0 : convolve along x
//           otherwise : convolve along y
void kernel_1d_convolution_cpu(float *_out, const float *_in, const float *_kernel, int direction) 
{
	for(int row=0; row<ROWS; row++)
	{
		for(int col=0; col<COLS; col++)
		{
			// accumulate one output pixel
			double acc = 0.0;

			for(int k=0; k<KERNEL_SIZE; k++)
			{
				int sx = col;
				int sy = row;

				if(direction == 0)
				{
					// sample along x, clamped to the image border
					sx = col - KERNEL_RADIUS + k;
					if(sx < 0)        sx = 0;
					if(sx > COLS - 1) sx = COLS - 1;
				}
				else
				{
					// sample along y, clamped to the image border
					sy = row - KERNEL_RADIUS + k;
					if(sy < 0)        sy = 0;
					if(sy > ROWS - 1) sy = ROWS - 1;
				}

				acc += (double)_in[sy*COLS + sx] * (double)_kernel[k];
			}

			_out[row*COLS + col] = acc;
		}
	}
}

// GPU version of the 1D convolution pass: one thread per output pixel.
// Expected launch: a 2D grid of 2D blocks covering a ROWS x COLS row-major
// image; threads that land outside the image exit via the guard below.
// Boundary handling is clamp-to-edge; accumulation is done in double to
// match the CPU reference.
//
// direction 0 : convolve along x
//           otherwise : convolve along y
__global__
void kernel_1d_convolution_gpu(float *_out, const float *_in, const float *_kernel, int direction)
{
	const int col = blockIdx.x*blockDim.x + threadIdx.x;
	const int row = blockIdx.y*blockDim.y + threadIdx.y;

	// bounds guard: the grid may overshoot the image
	if(col >= COLS || row >= ROWS)
		return;

	double acc = 0.0;

	for(int k=0; k<KERNEL_SIZE; k++)
	{
		int sx, sy;

		if(direction == 0)
		{
			// clamp the x sample coordinate to the image border
			sx = max(min(col - KERNEL_RADIUS + k, COLS-1), 0);
			sy = row;
		}
		else
		{
			// clamp the y sample coordinate to the image border
			sx = col;
			sy = max(min(row - KERNEL_RADIUS + k, ROWS-1), 0);
		}

		acc += (double)_in[sy*COLS + sx] * (double)_kernel[k];
	}

	_out[row*COLS + col] = acc;
}



// Abort-on-error check for CUDA runtime calls; kernel launch errors are
// caught by passing cudaGetLastError() through this macro.
#define CUDA_CHECK(call)                                                    \
	do {                                                                    \
		cudaError_t err_ = (call);                                          \
		if (err_ != cudaSuccess) {                                          \
			fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,   \
			        cudaGetErrorString(err_));                              \
			exit(EXIT_FAILURE);                                             \
		}                                                                   \
	} while (0)

// Full 2D separable convolution on the GPU: uploads the image and the 1D
// kernel, runs one x-pass and one y-pass of kernel_1d_convolution_gpu, and
// copies the result back to the host buffer _out (ROWS x COLS floats).
// Note: allocates and frees device memory on every call; callers timing
// this function are timing the transfers and allocations too.
void kernel_2d_separable_convolution_gpu(float *_out, const float *_in, const float *kernel)
{
	float *d_in, *d_temp, *d_out, *d_kernel;
	const size_t imgBytes    = sizeof(float)*COLS*ROWS;
	const size_t kernelBytes = sizeof(float)*KERNEL_SIZE;

	CUDA_CHECK(cudaMalloc((void**)&d_in,     imgBytes));
	CUDA_CHECK(cudaMalloc((void**)&d_temp,   imgBytes));
	CUDA_CHECK(cudaMalloc((void**)&d_out,    imgBytes));
	CUDA_CHECK(cudaMalloc((void**)&d_kernel, kernelBytes));

	CUDA_CHECK(cudaMemcpy(d_in,     _in,    imgBytes,    cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_kernel, kernel, kernelBytes, cudaMemcpyHostToDevice));

	dim3 dimBlock(32, 32, 1);
	// ceil-div so the grid covers the image even when its dimensions are
	// not multiples of the block size (the kernel bounds-checks the tail)
	dim3 dimGrid((COLS + dimBlock.x - 1) / dimBlock.x,
	             (ROWS + dimBlock.y - 1) / dimBlock.y, 1);

	// Two 1D passes: x first into d_temp, then y into d_out
	kernel_1d_convolution_gpu<<< dimGrid, dimBlock >>>(d_temp, d_in, d_kernel, 0);
	CUDA_CHECK(cudaGetLastError());
	kernel_1d_convolution_gpu<<< dimGrid, dimBlock >>>(d_out, d_temp, d_kernel, 1);
	CUDA_CHECK(cudaGetLastError());

	// blocking copy also synchronizes, surfacing any execution error
	CUDA_CHECK(cudaMemcpy(_out, d_out, imgBytes, cudaMemcpyDeviceToHost));

	CUDA_CHECK(cudaFree(d_in));
	CUDA_CHECK(cudaFree(d_temp));
	CUDA_CHECK(cudaFree(d_out));
	CUDA_CHECK(cudaFree(d_kernel));
}


// Benchmark driver: builds a mean filter and a random image, computes a CPU
// reference once, then times the GPU separable convolution nTest times on
// each available device of the selected parity (even or odd device IDs),
// reporting average runtime and mean absolute error vs the CPU result.
int main(int argc, char **argv)
{
	srand ( (unsigned)time(NULL) );

	float *h_kernel, *h_src, *h_cpuout, *h_gpuout, *h_temp;

	bool checkaccuracy = true;
	int nTest = 5;

	h_kernel = (float*)malloc(KERNEL_SIZE*sizeof(float));
	h_src    = (float*)malloc(COLS*ROWS*sizeof(float));
	h_cpuout = (float*)malloc(COLS*ROWS*sizeof(float));
	h_gpuout = (float*)malloc(COLS*ROWS*sizeof(float));
	h_temp   = (float*)malloc(COLS*ROWS*sizeof(float));

	// fail fast on host allocation failure (256 MB total at 4096x4096)
	if(!h_kernel || !h_src || !h_cpuout || !h_gpuout || !h_temp)
	{
		fprintf(stderr, "Host memory allocation failed\n");
		return 1;
	}

	// Create 1D mean-filter kernel and a random input image
	for(int i=0; i<KERNEL_SIZE; i++) h_kernel[i] = 1.0f/(float)KERNEL_SIZE;
	for(int i=0; i<COLS*ROWS; i++) h_src[i] = (float)(rand()%1000);

	if(checkaccuracy)
	{
		clock_t startClock = clock(); 

		// CPU reference: two separable 1D passes
		kernel_1d_convolution_cpu(h_temp, h_src, h_kernel, 0);
		kernel_1d_convolution_cpu(h_cpuout, h_temp, h_kernel, 1);

		clock_t endClock = clock(); 
		printf("CPU time : %ld ms\n", (long)((endClock - startClock)*1000 / CLOCKS_PER_SEC)); 
	}

	float totalTime = 0;
	double maxErr = 0;
	int testdone = 0;
	printf("Timer starts...\n");

	// Only address devices that actually exist on this node
	int deviceCount = 0;
	if(cudaGetDeviceCount(&deviceCount) != cudaSuccess || deviceCount == 0)
	{
		fprintf(stderr, "No CUDA device available\n");
		return 1;
	}

	// test up to four GPUs of one parity (even or odd device IDs)
	int select = rand() % 2;
	for(int gpuid=0; gpuid<4; gpuid++)
	{  
		int dev = 2*gpuid + select;
		if(dev >= deviceCount) continue; // node has fewer GPUs than assumed

		if(cudaSetDevice(dev) != cudaSuccess)
		{
			fprintf(stderr, "cudaSetDevice(%d) failed, skipping\n", dev);
			continue;
		}

		printf("GPU ID : %d\n", dev);

		cudaEvent_t begin, end;
		cudaEventCreate(&begin);
		cudaEventCreate(&end);

		for(int iter=0; iter<nTest; iter++)
		{
			cudaEventRecord(begin, 0);

			// GPU convolution (includes transfers and device allocation)
			kernel_2d_separable_convolution_gpu(h_gpuout, h_src, h_kernel);

			testdone++;

			cudaEventRecord(end, 0);
			cudaEventSynchronize(end);

			float cTime;
			cudaEventElapsedTime(&cTime, begin, end);
			totalTime += cTime;

			if(checkaccuracy)
			{
				// mean absolute error vs the CPU reference
				double err = 0;
				for(int idx=0; idx<COLS*ROWS; idx++)
				{
					double diff = (double)h_gpuout[idx] - (double)h_cpuout[idx];
					err += fabs(diff); // same as sqrt(diff*diff)
				}
				double avgErr = err/(double)(COLS*ROWS);
				if(avgErr > maxErr) maxErr = avgErr;
			}
		}	       

		cudaEventDestroy(begin);
		cudaEventDestroy(end);
	}    

	if(testdone > 0)
	{
		// guard against divide-by-zero when no device of this parity ran
		printf("Filter size : %d x %d, Total average computing time : %f msec", KERNEL_SIZE, KERNEL_SIZE, totalTime/(float)testdone);
		if(checkaccuracy) printf(", Error : %f\n",  maxErr);
		else printf("\n");
	}
	else
	{
		printf("No GPU test was run.\n");
	}

	free(h_src);
	free(h_cpuout);
	free(h_gpuout);
	free(h_kernel);
	free(h_temp);

	return 0;
}
