#include <assert.h>
#include "convolutionSeparable.h"

/*==============================================================================*/
// Convolution kernel storage
/*==============================================================================*/

__constant__ float c_VerticalKernel[KERNEL_LENGTH];
__constant__ float c_HorizontalKernel[KERNEL_LENGTH];

/*
 * Upload the two 1-D filter kernels (KERNEL_LENGTH taps each) into
 * __constant__ memory, where every thread of the convolution kernels can
 * broadcast-read them.
 *
 * h_Verticalkernel   - host array of KERNEL_LENGTH floats for the column pass
 * h_Horizontalkernel - host array of KERNEL_LENGTH floats for the row pass
 *
 * Fix: the original discarded the cudaMemcpyToSymbol return codes, so a
 * failed upload went unnoticed and later convolutions silently used
 * stale/uninitialized taps. Check each copy (debug builds trap via assert).
 */
void setConvolutionKernel(float *h_Verticalkernel, float *h_Horizontalkernel)
{
	cudaError_t err;

	err = cudaMemcpyToSymbol(c_VerticalKernel, h_Verticalkernel, KERNEL_LENGTH * sizeof(float));
	assert( err == cudaSuccess );

	err = cudaMemcpyToSymbol(c_HorizontalKernel, h_Horizontalkernel, KERNEL_LENGTH * sizeof(float));
	assert( err == cudaSuccess );

	(void)err; /* silence "unused" warning when asserts are compiled out */
}

/*==============================================================================*/
// Row convolution filter
/*==============================================================================*/

#define ROW_BLOCKDIM_X	16
#define ROW_BLOCKDIM_Y	4
#define ROW_THREAD_STEP 8
#define ROW_APRON_COUNT 1

// Row (horizontal) convolution pass using the constant-memory horizontal taps.
//
// Launch layout (see convolutionRowsGPU):
//   blockDim = (ROW_BLOCKDIM_X, ROW_BLOCKDIM_Y)
//   gridDim  = (imageW / (ROW_THREAD_STEP * ROW_BLOCKDIM_X), imageH / ROW_BLOCKDIM_Y)
// Each thread processes ROW_THREAD_STEP pixels of one image row; the block
// additionally stages ROW_APRON_COUNT halo tiles on each side so the filter
// support (KERNEL_RADIUS <= ROW_BLOCKDIM_X * ROW_APRON_COUNT, asserted by the
// host wrapper) never reads outside shared memory.
// Preconditions (asserted by the host wrapper): imageW is a multiple of
// ROW_THREAD_STEP * ROW_BLOCKDIM_X and imageH a multiple of ROW_BLOCKDIM_Y,
// so no per-pixel bounds guard is needed here.
__global__ void convolutionRowsKernel(float *d_Dst,
		float *d_Src, int imageW, int imageH, int pitch)
{
	// One shared row per threadIdx.y: main tiles plus a halo tile run on each side.
	__shared__ float s_Data[ROW_BLOCKDIM_Y][(ROW_THREAD_STEP + 2 * ROW_APRON_COUNT) * ROW_BLOCKDIM_X];

	//Offset to the left halo edge (baseX is negative for the first block column)
	const int baseX = (blockIdx.x * ROW_THREAD_STEP - ROW_APRON_COUNT) * ROW_BLOCKDIM_X + threadIdx.x;
	const int baseY = blockIdx.y * ROW_BLOCKDIM_Y + threadIdx.y;

	// Rebase both pointers so all further accesses are simple tile offsets.
	d_Src += baseY * pitch + baseX;
	d_Dst += baseY * pitch + baseX;

	//Load main data (always in range thanks to the tiling preconditions)
	for(int i = ROW_APRON_COUNT; i < ROW_APRON_COUNT + ROW_THREAD_STEP; i++)
	  s_Data[threadIdx.y][threadIdx.x + i * ROW_BLOCKDIM_X] = d_Src[i * ROW_BLOCKDIM_X];

	//Load left halo; zero-pad where the tile would fall left of column 0
	for(int i = 0; i < ROW_APRON_COUNT; i++)
	  s_Data[threadIdx.y][threadIdx.x + i * ROW_BLOCKDIM_X] = 
	  (baseX >= -i * ROW_BLOCKDIM_X ) ? d_Src[i * ROW_BLOCKDIM_X] : 0;

	//Load right halo; zero-pad where the tile would fall right of column imageW-1
	for(int i = ROW_APRON_COUNT + ROW_THREAD_STEP; i < ROW_APRON_COUNT + ROW_THREAD_STEP + ROW_APRON_COUNT; i++)
	  s_Data[threadIdx.y][threadIdx.x + i * ROW_BLOCKDIM_X] = 
	  (imageW - baseX > i * ROW_BLOCKDIM_X) ? d_Src[i * ROW_BLOCKDIM_X] : 0;

	//Compute and store results
	// Barrier: all staging writes above must be visible before any thread
	// reads its neighbors' shared-memory pixels below.
	__syncthreads();

	for(int i = ROW_APRON_COUNT; i < ROW_APRON_COUNT + ROW_THREAD_STEP; i++){
	  float sum = 0;
	
	  // KERNEL_RADIUS - j flips the tap order: taps are applied in reverse
	  // across the neighborhood (correlation written as convolution).
	  for(int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
		  sum += c_HorizontalKernel[KERNEL_RADIUS - j] * s_Data[threadIdx.y][threadIdx.x + i * ROW_BLOCKDIM_X + j];

	  d_Dst[i * ROW_BLOCKDIM_X] = sum;
	}
}

/*
 * Host wrapper for the row convolution pass.
 *
 * d_Dst, d_Src - device buffers of imageW * imageH floats (must not alias)
 * imageW/imageH - image dimensions; must tile exactly (asserted below),
 *                 because the kernel has no per-pixel bounds guards.
 *
 * The image is assumed densely packed, so the kernel's pitch is imageW.
 *
 * Fix: the original never inspected the launch result; an invalid
 * configuration or earlier sticky error went undetected. Check
 * cudaGetLastError() right after the launch (debug builds trap via assert).
 */
void convolutionRowsGPU(float *d_Dst, float *d_Src,
						int imageW, int imageH)
{
	// Halo tiles must cover the filter radius, and the grid must tile the image.
	assert( ROW_BLOCKDIM_X * ROW_APRON_COUNT >= KERNEL_RADIUS );
	assert( imageW % (ROW_THREAD_STEP * ROW_BLOCKDIM_X) == 0 );
	assert( imageH % ROW_BLOCKDIM_Y == 0 );

	dim3 blocks(imageW / (ROW_THREAD_STEP * ROW_BLOCKDIM_X), imageH / ROW_BLOCKDIM_Y);
	dim3 threads(ROW_BLOCKDIM_X, ROW_BLOCKDIM_Y);

	convolutionRowsKernel<<<blocks, threads>>>(d_Dst, d_Src,
									imageW, imageH, imageW);

	cudaError_t err = cudaGetLastError();
	assert( err == cudaSuccess );
	(void)err; /* silence "unused" warning when asserts are compiled out */
}



/*==============================================================================*/
// Column convolution filter
/*==============================================================================*/
#define COLUMN_BLOCKDIM_X	16
#define COLUMN_BLOCKDIM_Y	8
#define COLUMN_THREAD_STEP	8
#define COLUMN_APRON_COUNT	1

// Column (vertical) convolution pass using the constant-memory vertical taps.
//
// Launch layout (see convolutionColumnsGPU):
//   blockDim = (COLUMN_BLOCKDIM_X, COLUMN_BLOCKDIM_Y)
//   gridDim  = (imageW / COLUMN_BLOCKDIM_X, imageH / (COLUMN_THREAD_STEP * COLUMN_BLOCKDIM_Y))
// Each thread processes COLUMN_THREAD_STEP pixels of one image column; the
// block stages COLUMN_APRON_COUNT halo tiles above and below so the filter
// support (KERNEL_RADIUS <= COLUMN_BLOCKDIM_Y * COLUMN_APRON_COUNT, asserted
// by the host wrapper) never reads outside shared memory. Global loads stride
// by pitch (row-major), while the filter walks contiguous shared memory.
__global__ void convolutionColumnsKernel(float *d_Dst, float *d_Src,
							int imageW, int imageH, int pitch)
{
	// Transposed tile: first index is the x lane, second the column run.
	// The +1 pads the inner dimension (classic shared-memory bank-conflict
	// avoidance for column-wise access patterns).
	__shared__ float s_Data[COLUMN_BLOCKDIM_X][(COLUMN_THREAD_STEP + 2 * COLUMN_APRON_COUNT) * COLUMN_BLOCKDIM_Y + 1];

	//Offset to the upper halo edge (baseY is negative for the first block row)
	const int baseX = blockIdx.x * COLUMN_BLOCKDIM_X + threadIdx.x;
	const int baseY = (blockIdx.y * COLUMN_THREAD_STEP - COLUMN_APRON_COUNT) * COLUMN_BLOCKDIM_Y + threadIdx.y;
	// Rebase both pointers so all further accesses are simple tile offsets.
	d_Src += baseY * pitch + baseX;
	d_Dst += baseY * pitch + baseX;

	//Main data (always in range thanks to the tiling preconditions)
	for(int i = COLUMN_APRON_COUNT; i < COLUMN_APRON_COUNT + COLUMN_THREAD_STEP; i++)
		s_Data[threadIdx.x][threadIdx.y + i * COLUMN_BLOCKDIM_Y] = d_Src[i * COLUMN_BLOCKDIM_Y * pitch];

	//Upper halo; zero-pad where the tile would fall above row 0
	for(int i = 0; i < COLUMN_APRON_COUNT; i++)
		s_Data[threadIdx.x][threadIdx.y + i * COLUMN_BLOCKDIM_Y] = (baseY >= -i * COLUMN_BLOCKDIM_Y) ? d_Src[i * COLUMN_BLOCKDIM_Y * pitch] : 0;

	//Lower halo; zero-pad where the tile would fall below row imageH-1
	for(int i = COLUMN_APRON_COUNT + COLUMN_THREAD_STEP; i < COLUMN_APRON_COUNT + COLUMN_THREAD_STEP + COLUMN_APRON_COUNT; i++)
		s_Data[threadIdx.x][threadIdx.y + i * COLUMN_BLOCKDIM_Y]= (imageH - baseY > i * COLUMN_BLOCKDIM_Y) ? d_Src[i * COLUMN_BLOCKDIM_Y * pitch] : 0;

	//Compute and store results
	// Barrier: all staging writes above must be visible before any thread
	// reads its neighbors' shared-memory pixels below.
	__syncthreads();

	for(int i = COLUMN_APRON_COUNT; i < COLUMN_APRON_COUNT + COLUMN_THREAD_STEP; i++){
		float sum = 0;

		// KERNEL_RADIUS - j flips the tap order: taps are applied in reverse
		// across the neighborhood (correlation written as convolution).
		for(int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
			sum += c_VerticalKernel[KERNEL_RADIUS - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMN_BLOCKDIM_Y + j];

		d_Dst[i * COLUMN_BLOCKDIM_Y * pitch] = sum;
	}
}

/*
 * Host wrapper for the column convolution pass.
 *
 * d_Dst, d_Src - device buffers of imageW * imageH floats (must not alias)
 * imageW/imageH - image dimensions; must tile exactly (asserted below),
 *                 because the kernel has no per-pixel bounds guards.
 *
 * The image is assumed densely packed, so the kernel's pitch is imageW.
 *
 * Fix: the original never inspected the launch result; an invalid
 * configuration or earlier sticky error went undetected. Check
 * cudaGetLastError() right after the launch (debug builds trap via assert),
 * matching convolutionRowsGPU.
 */
void convolutionColumnsGPU(float *d_Dst, float *d_Src,
							int imageW, int imageH)
{
	// Halo tiles must cover the filter radius, and the grid must tile the image.
	assert( COLUMN_BLOCKDIM_Y * COLUMN_APRON_COUNT >= KERNEL_RADIUS );
	assert( imageW % COLUMN_BLOCKDIM_X == 0 );
	assert( imageH % (COLUMN_THREAD_STEP * COLUMN_BLOCKDIM_Y) == 0 );

	dim3 blocks(imageW / COLUMN_BLOCKDIM_X, imageH / (COLUMN_THREAD_STEP * COLUMN_BLOCKDIM_Y));
	dim3 threads(COLUMN_BLOCKDIM_X, COLUMN_BLOCKDIM_Y);

	convolutionColumnsKernel<<<blocks, threads>>>(d_Dst, d_Src,
										imageW, imageH, imageW);

	cudaError_t err = cudaGetLastError();
	assert( err == cudaSuccess );
	(void)err; /* silence "unused" warning when asserts are compiled out */
}

