/*
 * Spring 2012
 * ECE519 Massively Parallel Programming
 * 
 * Homework 4 skeleton code provided by Won-Ki Jeong (wkjeong@unist.ac.kr)
 *
 * compile option : nvcc -arch=sm_20
 *
 */
//#include "def.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <cutil.h>
#include <cuda_runtime.h>
#include <cutil_inline.h>

//
// 1D convolution using a given kernel
//
// direction 0 : along x
//           1 : along y
//
//
// CPU reference for one pass of a separable convolution.
//
// Convolves a width x height image (row-major) with a
// (2*halfkernelsize+1)-tap kernel along one axis:
//   direction 0 : along x (rows)
//   direction 1 : along y (columns)
// Samples outside the image are clamped to the nearest border pixel.
// Accumulation is done in double; the result is stored back as float.
//
void kernel_1d_convolution_cpu(float *_out, const float *_in, const float *_kernel, int width, int height, int halfkernelsize, int direction) 
{
   int kernelDim = 2*halfkernelsize+1;

	for(int y=0; y<height; y++)
	{
		for(int x=0; x<width; x++)
		{
			// accumulate one output pixel
			double acc = 0;

			for(int k=0; k<kernelDim; k++)
			{
				// sample coordinate: offset along the chosen axis,
				// clamped to the valid range (replicate-border)
				int sx = x;
				int sy = y;

				if(direction == 0)
				{
					sx = x - halfkernelsize + k;
					if(sx < 0)        sx = 0;
					if(sx > width-1)  sx = width-1;
				}
				else
				{
					sy = y - halfkernelsize + k;
					if(sy < 0)        sy = 0;
					if(sy > height-1) sy = height-1;
				}

				acc += (double)_in[sy*width + sx]*(double)_kernel[k];
			}
			_out[y*width + x] = acc;
		}
	}
}
//
// One pass of a separable convolution on the GPU: one thread per pixel.
//
// Convolves a width x height image (row-major) with a
// (2*halfkernelsize+1)-tap kernel along x (direction 0) or y (direction 1).
// Out-of-range samples are clamped to the image border, matching the CPU
// reference. Launch with a 2D grid of 2D blocks covering the image; extra
// threads past the image edge exit via the bounds guard.
//
__global__
void kernel_1d_convolution_gpu(float *_out, 
							   const float *_in, 
							   const float *_kernel, 
							   int width, 
							   int height, 
							   int halfkernelsize, 
							   int direction)
{
	const int px = blockIdx.x*blockDim.x + threadIdx.x;
	const int py = blockIdx.y*blockDim.y + threadIdx.y;

	// guard: the grid rarely divides the image exactly
	if(px >= width || py >= height)
		return;

	const int taps = 2*halfkernelsize + 1;
	double acc = 0;

	for(int t=0; t<taps; t++)
	{
		// sample coordinate, clamped to the border along the active axis
		int sx = px;
		int sy = py;

		if(direction == 0)
		{
			sx = px - halfkernelsize + t;
			sx = (sx < 0) ? 0 : ((sx > width-1)  ? width-1  : sx);
		}
		else
		{
			sy = py - halfkernelsize + t;
			sy = (sy < 0) ? 0 : ((sy > height-1) ? height-1 : sy);
		}

		acc += (double)_in[sy*width + sx] * (double)_kernel[t];
	}

	_out[py*width + px] = acc;
}

//
// 2D separable convolution on the GPU: x-pass then y-pass.
//
// _out/_in are host buffers (pinned memory recommended so the async copies
// actually overlap); _kernel holds (2*halfkernelsize+1) taps applied along
// both axes.
//
// Strategy: the x-pass touches each row independently, so the upload and the
// row convolution are chunked by rows across nStreams streams to overlap
// H2D copy with compute. The y-pass reads a clamped neighbourhood of rows
// that crosses chunk boundaries, so it must wait for EVERY row chunk and
// runs as a single full-image launch; the result is then copied back in row
// chunks, overlapping D2H transfers.
//
// Fixes vs. the original version:
//  - removed the invalid cudaMemcpyToSymbol on a cudaMalloc'd pointer
//    (cudaMemcpyToSymbol requires a __constant__/__device__ symbol);
//  - the y-pass previously chunked by columns while the kernel indexes with
//    the full row pitch (j*width+i), and reused the row-pass grid — both
//    produced wrong results; it is now one correctly-sized launch;
//  - cross-stream ordering is enforced with events instead of synchronizing
//    only the last stream;
//  - streams/events are destroyed (the original leaked 16 streams per call);
//  - grid dimensions use ceil-divide and the last row chunk may be short,
//    so sizes need not be multiples of the block/stream counts.
//
void kernel_2d_separable_convolution_gpu(float *_out, const float *_in, const float *_kernel, int width, int height, int halfkernelsize)
{
	int kernelDim = 2*halfkernelsize+1;
	int N = width*height;
	const int nStreams = 8;

	cudaStream_t streams[nStreams];
	cudaEvent_t  rowDone[nStreams];
	for(int i=0; i<nStreams; i++)
	{
		cudaStreamCreate(&streams[i]);
		cudaEventCreateWithFlags(&rowDone[i], cudaEventDisableTiming);
	}

	float *d_kernel, *d_in, *d_temp, *d_out;
	cudaMalloc((void**)&d_kernel, sizeof(float)*kernelDim);
	cudaMalloc((void**)&d_in,     sizeof(float)*N);
	cudaMalloc((void**)&d_temp,   sizeof(float)*N);
	cudaMalloc((void**)&d_out,    sizeof(float)*N);

	// d_kernel is an ordinary device allocation, so plain cudaMemcpy is the
	// correct (and only valid) way to fill it.
	cudaMemcpy(d_kernel, _kernel, sizeof(float)*kernelDim, cudaMemcpyHostToDevice);

	dim3 dimBlock(32, 32, 1);

	// Ceil-divide the rows over the streams; the last chunk may be short.
	int rowsPerChunk = (height + nStreams - 1)/nStreams;

	// Phase 1: per-chunk H2D copy + row (x-direction) convolution.
	for(int i=0; i<nStreams; i++)
	{
		int row0 = i*rowsPerChunk;
		int rows = height - row0;
		if(rows > rowsPerChunk) rows = rowsPerChunk;
		if(rows <= 0)
		{
			// empty chunk: still record the event so phase 2 can wait on it
			cudaEventRecord(rowDone[i], streams[i]);
			continue;
		}

		size_t offset = (size_t)row0*width;
		cudaMemcpyAsync(d_in + offset, _in + offset,
		                sizeof(float)*rows*width,
		                cudaMemcpyHostToDevice, streams[i]);

		dim3 rowGrid((width + dimBlock.x - 1)/dimBlock.x,
		             (rows  + dimBlock.y - 1)/dimBlock.y, 1);
		kernel_1d_convolution_gpu<<< rowGrid, dimBlock, 0, streams[i] >>>
			(d_temp + offset, d_in + offset, d_kernel,
			 width, rows, halfkernelsize, 0);

		cudaEventRecord(rowDone[i], streams[i]);
	}

	// Phase 2: the y-direction pass reads neighbouring rows from other
	// chunks, so make it wait for ALL row passes, then run it once over the
	// whole intermediate image.
	for(int i=0; i<nStreams; i++)
		cudaStreamWaitEvent(streams[0], rowDone[i], 0);

	dim3 colGrid((width  + dimBlock.x - 1)/dimBlock.x,
	             (height + dimBlock.y - 1)/dimBlock.y, 1);
	kernel_1d_convolution_gpu<<< colGrid, dimBlock, 0, streams[0] >>>
		(d_out, d_temp, d_kernel, width, height, halfkernelsize, 1);

	cudaEvent_t colDone;
	cudaEventCreateWithFlags(&colDone, cudaEventDisableTiming);
	cudaEventRecord(colDone, streams[0]);

	// Phase 3: copy the result back in row chunks, overlapped across streams.
	for(int i=0; i<nStreams; i++)
	{
		int row0 = i*rowsPerChunk;
		int rows = height - row0;
		if(rows > rowsPerChunk) rows = rowsPerChunk;
		if(rows <= 0) continue;

		cudaStreamWaitEvent(streams[i], colDone, 0);

		size_t offset = (size_t)row0*width;
		cudaMemcpyAsync(_out + offset, d_out + offset,
		                sizeof(float)*rows*width,
		                cudaMemcpyDeviceToHost, streams[i]);
	}

	// Surface launch-configuration errors (kernel launches don't return them).
	cudaError_t err = cudaGetLastError();
	if(err != cudaSuccess)
		fprintf(stderr, "kernel_2d_separable_convolution_gpu: %s\n",
		        cudaGetErrorString(err));

	// Block until every D2H copy has landed in _out, then release resources.
	for(int i=0; i<nStreams; i++)
	{
		cudaStreamSynchronize(streams[i]);
		cudaEventDestroy(rowDone[i]);
		cudaStreamDestroy(streams[i]);
	}
	cudaEventDestroy(colDone);

	/////////////////////////////////////////////////////////////////////
	cudaFree( d_in );
	cudaFree( d_temp );
	cudaFree( d_out );
	cudaFree( d_kernel );
}

//
// Driver: builds a 4096x4096 random image and a 1D mean filter, optionally
// computes a CPU reference, then times the GPU separable convolution over
// several devices and iterations.
//
// Usage: convolution.exe halfkernelsize checkaccuracy
//   halfkernelsize : kernel is (2*halfkernelsize+1) taps
//   checkaccuracy  : nonzero -> verify GPU output against the CPU reference
//
int main(int argc, char **argv)
{
	srand ( time(NULL) );

	int width, height;
	float *h_kernel, *h_src, *h_cpuout, *h_gpuout, *h_temp;
	
	// BUG FIX: both argv[1] and argv[2] are read below, so two arguments
	// are required (the original checked argc < 2 and then dereferenced
	// argv[2], reading past the argument vector when only one was given).
	if(argc < 3)
    {
      printf("Usage : convolution.exe halfkernelsize checkaccuracy\n");
      exit(0);
    }

	width = 4096;
	height = 4096;

	bool checkaccuracy = (atoi(argv[2]) == 0) ? false : true;
	int halfkernelsize = atoi(argv[1]);
	if(halfkernelsize < 0)
	{
		printf("halfkernelsize must be >= 0\n");
		exit(0);
	}
	int nTest = 5;
 
	h_kernel = (float*)malloc((2*halfkernelsize+1)*sizeof(float));
	//h_src = (float*)malloc(width*height*sizeof(float));
	h_cpuout = (float*)malloc(width*height*sizeof(float));
	//h_gpuout = (float*)malloc(width*height*sizeof(float));
	h_temp   = (float*)malloc(width*height*sizeof(float));

	// Source and GPU output use pinned host memory so the convolution's
	// cudaMemcpyAsync calls can actually run asynchronously.
	cudaHostAlloc((void**)&h_src, 
				   width*height*sizeof(float), 
                   cudaHostAllocDefault);
	cudaHostAlloc((void**)&h_gpuout, 
				   width*height*sizeof(float), 
                   cudaHostAllocDefault);						
	// Create 1D Mean filter kernel (all taps equal, sums to 1)
	for(int i=0; i<(2*halfkernelsize+1); i++) 
		h_kernel[i] = 1.0f/(float)(2*halfkernelsize+1);
	for(int i=0; i<width*height; i++) 
		h_src[i] = (float)(rand()%1000);

	if(checkaccuracy)
	{
		// CPU reference: x-pass into h_temp, then y-pass into h_cpuout
		clock_t startClock = clock(); 

		kernel_1d_convolution_cpu(h_temp, h_src, h_kernel, width, height, halfkernelsize, 0);
		kernel_1d_convolution_cpu(h_cpuout, h_temp, h_kernel, width, height, halfkernelsize, 1);
		
		clock_t endClock = clock(); 
		printf("CPU time : %ld ms\n", (endClock - startClock)*1000 / (CLOCKS_PER_SEC)); 
	}

	float totalTime = 0;
	double maxErr = 0;
	int testdone = 0;
	printf("Timer starts...\n");

    // test all four GPUs in a single node
 	int select = rand() % 2; // even or odd device IDs
	for(int gpuid=0; gpuid<4; gpuid++)
	{  
		cudaSetDevice( 2*gpuid + select );

		printf("GPU ID : %d\n",  2*gpuid + select );

		cudaEvent_t begin, end;

		cudaEventCreate(&begin);
		cudaEventCreate(&end);


		for(int iter=0; iter<nTest; iter++)
		{
			cudaEventRecord(begin, 0);
			
			// GPU convolution (synchronizes internally before returning)
			kernel_2d_separable_convolution_gpu(h_gpuout, h_src, h_kernel, width, height, halfkernelsize);

			testdone++;

			cudaEventRecord(end, 0);
			cudaEventSynchronize(end);
			
			float cTime;
			cudaEventElapsedTime(&cTime, begin, end);

			totalTime += cTime;

			if(checkaccuracy)
			{
				// mean absolute difference against the CPU reference;
				// track the worst run across devices/iterations
				double err = 0;
				for(int idx=0; idx<width*height; idx++)
				{
				  //	printf("CPU : %f, GPU : %f\n", h_gpuout[idx], h_cpuout[idx]);
				  double diff = h_gpuout[idx] - h_cpuout[idx];
				  err += sqrt(diff*diff);			    
				}
				maxErr = max(maxErr, err/(double)(width*height));
			}
		}	       

		cudaEventDestroy(begin);
		cudaEventDestroy(end);
	}    

	printf("Filter size : %d x %d, \nTotal average computing time : %f msec", 2*halfkernelsize+1, 2*halfkernelsize+1, totalTime/(float)testdone);
	if(checkaccuracy) printf(",\nError : %f\n",  maxErr);
	else printf("\n");
  
	cudaFreeHost(h_src);
	free(h_cpuout);
	cudaFreeHost(h_gpuout);
	free(h_kernel);
	free(h_temp);

	return 0;
}
