/*
 * Copyright 1993-2007 NVIDIA Corporation.  All rights reserved.
 *
 * NOTICE TO USER:   
 *
 * This source code is subject to NVIDIA ownership rights under U.S. and 
 * international Copyright laws.  Users and possessors of this source code 
 * are hereby granted a nonexclusive, royalty-free license to use this code 
 * in individual and commercial software.
 *
 * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE 
 * CODE FOR ANY PURPOSE.  IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR 
 * IMPLIED WARRANTY OF ANY KIND.  NVIDIA DISCLAIMS ALL WARRANTIES WITH 
 * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF 
 * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
 * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, 
 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS 
 * OF USE, DATA OR PROFITS,  WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE 
 * OR OTHER TORTIOUS ACTION,  ARISING OUT OF OR IN CONNECTION WITH THE USE 
 * OR PERFORMANCE OF THIS SOURCE CODE.  
 *
 * U.S. Government End Users.   This source code is a "commercial item" as 
 * that term is defined at  48 C.F.R. 2.101 (OCT 1995), consisting  of 
 * "commercial computer  software"  and "commercial computer software 
 * documentation" as such terms are  used in 48 C.F.R. 12.212 (SEPT 1995) 
 * and is provided to the U.S. Government only as a commercial end item.  
 * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through 
 * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the 
 * source code with only those rights set forth herein. 
 *
 * Any use of this source code in individual and commercial software must 
 * include, in the user documentation and internal comments to the code,
 * the above Disclaimer and U.S. Government End Users Notice.
 */



#include "gabor.h"   //gpu configuration

//24-bit multiplication is faster on G80,
//but we must be sure to multiply integers
//only within [-8M, 8M - 1] range
#define IMUL(a, b) __mul24(a, b)

__device__ __constant__ float d_Kernel_X[KERNEL_W_X];
__device__ __constant__ float d_Kernel_Y[KERNEL_W_Y];
__device__ __constant__ float d_Kernel_T[KERNEL_W_T];



__device__ void complexMUL(Complexgf& a, Complexgf b){
	// In-place complex multiplication: a = a * b.
	const float re = a.x * b.x - a.y * b.y;
	const float im = a.x * b.y + a.y * b.x;
	a.x = re;
	a.y = im;
}



////////////////////////////////////////////////////////////////////////////////
// Loop unrolling templates, needed for best performance
////////////////////////////////////////////////////////////////////////////////

// anigaussian
// Compile-time-unrolled dot product with the X kernel held in constant
// memory.  Instantiated as convolutionRow<2 * KERNEL_RADIUS_X>(p), it
// expands to
//   sum over i = 0 .. 2*KERNEL_RADIUS_X of p[KERNEL_RADIUS_X - i] * d_Kernel_X[i]
// so `data` must point at the center pixel of the window; reads span
// data[-KERNEL_RADIUS_X] .. data[+KERNEL_RADIUS_X] (caller guarantees the
// shared-memory apron covers this range).
template<int i> __device__ float convolutionRow(float *data){
    return
        data[KERNEL_RADIUS_X - i] * d_Kernel_X[i]
        + convolutionRow<i - 1>(data);
}

// Recursion terminator: the i == -1 instantiation contributes nothing.
template<> __device__ float convolutionRow<-1>(float *data){
    return 0;
}

// Compile-time-unrolled dot product with the Y kernel in constant memory,
// stepping through shared memory with a row pitch of COLUMN_TILE_W
// elements.  `data` must point at the center element of the vertical
// window; reads span +/- KERNEL_RADIUS_Y rows around it.
template<int i> __device__ float convolutionColumn(float *data){
    return 
        data[(KERNEL_RADIUS_Y - i) * COLUMN_TILE_W] * d_Kernel_Y[i]
        + convolutionColumn<i - 1>(data);
}

// Recursion terminator: the i == -1 instantiation contributes nothing.
template<> __device__ float convolutionColumn<-1>(float *data){
    return 0;
}


// Same unrolling pattern as convolutionColumn, but using the temporal (T)
// kernel d_Kernel_T and KERNEL_RADIUS_T — used by the diagonal (45/135
// degree) column convolutions below.  Row pitch is COLUMN_TILE_W elements.
template<int i> __device__ float convolutionColumn3(float *data){
    return 
        data[(KERNEL_RADIUS_T - i) * COLUMN_TILE_W] * d_Kernel_T[i]
        + convolutionColumn3<i - 1>(data);
}

// Recursion terminator: the i == -1 instantiation contributes nothing.
template<> __device__ float convolutionColumn3<-1>(float *data){
    return 0;
}

////////////////////////////////////////////////////////////////////////////////
// Row convolution filter
////////////////////////////////////////////////////////////////////////////////
// Separable convolution of each image row with the X kernel in constant
// memory (d_Kernel_X).  One block handles a ROW_TILE_W-wide tile of one row
// (blockIdx.x = tile index, blockIdx.y = row).  Threads first stage the
// tile plus its KERNEL_RADIUS_X apron into shared memory, then each tile
// thread computes one output pixel with the unrolled convolutionRow<>.
// NOTE(review): the indexing assumes blockDim.x >= KERNEL_RADIUS_ALIGNED_X
// + ROW_TILE_W + KERNEL_RADIUS_X so the whole apron is loaded and every
// tile pixel written — confirm against the launch configuration.
__global__ void convolutionRowGPU(
    float *d_Result,
    float *d_Data,
    int dataW,
    int dataH   // NOTE(review): unused here; the row count comes from gridDim.y
){
    //Data cache
    __shared__ float data[KERNEL_RADIUS_X + ROW_TILE_W + KERNEL_RADIUS_X];

    //Current tile and apron limits, relative to row start
    const int         tileStart = IMUL(blockIdx.x, ROW_TILE_W);
    const int           tileEnd = tileStart + ROW_TILE_W - 1;
    const int        apronStart = tileStart - KERNEL_RADIUS_X;
    const int          apronEnd = tileEnd   + KERNEL_RADIUS_X;

    //Clamp tile and apron limits by image borders
    const int    tileEndClamped = min(tileEnd, dataW - 1);
    const int apronStartClamped = max(apronStart, 0);
    const int   apronEndClamped = min(apronEnd, dataW - 1);

    //Row start index in d_Data[]
    const int          rowStart = IMUL(blockIdx.y, dataW);

    //Aligned apron start. Assuming dataW and ROW_TILE_W are multiples 
    //of half-warp size, rowStart + apronStartAligned is also a 
    //multiple of half-warp size, thus having proper alignment 
    //for coalesced d_Data[] read.
    const int apronStartAligned = tileStart - KERNEL_RADIUS_ALIGNED_X;

    const int loadPos = apronStartAligned + threadIdx.x;
    //Set the entire data cache contents
    //Load global memory values, if indices are within the image borders,
    //or initialize with zeroes otherwise
    if(loadPos >= apronStart){
        const int smemPos = loadPos - apronStart;

        data[smemPos] = 
            ((loadPos >= apronStartClamped) && (loadPos <= apronEndClamped)) ?
            d_Data[rowStart + loadPos] : 0;
    }


    //Ensure the completness of the loading stage
    //because results, emitted by each thread depend on the data,
    //loaded by another threads
    __syncthreads();
    const int writePos = tileStart + threadIdx.x;
    //Assuming dataW and ROW_TILE_W are multiples of half-warp size,
    //rowStart + tileStart is also a multiple of half-warp size,
    //thus having proper alignment for coalesced d_Result[] write.
    if(writePos <= tileEndClamped){
        // smemPos points at the center pixel of this thread's window.
        const int smemPos = writePos - apronStart;
        float sum = 0;

        sum = convolutionRow<2 * KERNEL_RADIUS_X>(data + smemPos);

        d_Result[rowStart + writePos] = sum;
    }
}



////////////////////////////////////////////////////////////////////////////////
// Column convolution filter for second axis
////////////////////////////////////////////////////////////////////////////////
// Separable convolution of each image column with the Y kernel in constant
// memory (d_Kernel_Y).  One block handles a COLUMN_TILE_W x COLUMN_TILE_H
// tile (blockIdx.x = column tile, blockIdx.y = row tile); blockDim.y rows
// of threads sweep the tile vertically.  smemStride / gmemStride are the
// per-iteration cursor advances in elements — presumably
// COLUMN_TILE_W * blockDim.y and dataW * blockDim.y; confirm at the launch
// site.
__global__ void convolutionColumnGPU(
    float *d_Result,
    float *d_Data,
    int dataW,
    int dataH,
    int smemStride,
    int gmemStride
){
    //Data cache
    __shared__ float data[COLUMN_TILE_W * (KERNEL_RADIUS_Y + COLUMN_TILE_H + KERNEL_RADIUS_Y)];

    //Current tile and apron limits, in rows
    const int         tileStart = IMUL(blockIdx.y, COLUMN_TILE_H);
    const int           tileEnd = tileStart + COLUMN_TILE_H - 1;
    const int        apronStart = tileStart - KERNEL_RADIUS_Y;
    const int          apronEnd = tileEnd   + KERNEL_RADIUS_Y;

    //Clamp tile and apron limits by image borders
    const int    tileEndClamped = min(tileEnd, dataH - 1);
    const int apronStartClamped = max(apronStart, 0);
    const int   apronEndClamped = min(apronEnd, dataH - 1);

    //Current column index
    const int       columnStart = IMUL(blockIdx.x, COLUMN_TILE_W) + threadIdx.x;

    //Shared and global memory indices for current column
    int smemPos = IMUL(threadIdx.y, COLUMN_TILE_W) + threadIdx.x;
    int gmemPos = IMUL(apronStart + threadIdx.y, dataW) + columnStart;
    //Cycle through the entire data cache
    //Load global memory values, if indices are within the image borders,
    //or initialize with zero otherwise
    for(int y = apronStart + threadIdx.y; y <= apronEnd; y += blockDim.y){
        data[smemPos] = 
        ((y >= apronStartClamped) && (y <= apronEndClamped)) ? 
        d_Data[gmemPos] : 0;
        smemPos += smemStride;
        gmemPos += gmemStride;
    }

    //Ensure the completness of the loading stage
    //because results, emitted by each thread depend on the data, 
    //loaded by another threads
    __syncthreads();
    //Shared and global memory indices for current column
    smemPos = IMUL(threadIdx.y + KERNEL_RADIUS_Y, COLUMN_TILE_W) + threadIdx.x;
    gmemPos = IMUL(tileStart + threadIdx.y , dataW) + columnStart;
    //Cycle through the tile body, clamped by image borders
    //Calculate and output the results
    for(int y = tileStart + threadIdx.y; y <= tileEndClamped; y += blockDim.y){
        float sum = 0;

        sum = convolutionColumn<2 * KERNEL_RADIUS_Y>(data + smemPos);

        d_Result[gmemPos] = sum;
        smemPos += smemStride;
        gmemPos += gmemStride;
    }
}



////////////////////////////////////////////////////////////////////////////////
// Column convolution filter after 45 degree shifting, choosing the necessary blocks 
////////////////////////////////////////////////////////////////////////////////
// Column (T-axis) convolution applied only to the diagonal band of blocks
// produced by the 45-degree shear: blocks with
//   blockIdx.y*cof <= blockIdx.x < blockIdx.y*cof + interval
// do the work; all others exit immediately.  The band test depends only on
// blockIdx values, so it is uniform across each block — the __syncthreads()
// inside the guarded body is therefore safe (no divergent barrier).
// smemStride / gmemStride: per-iteration cursor advances, as in
// convolutionColumnGPU.
__global__ void convolutionColumnGPU45(
    float *d_Result,
    float *d_Data,
    int dataW,
    int dataH,
    int smemStride,
    int gmemStride,
    int interval,
    int cof
){
   int next = IMUL(blockIdx.y, cof);
   
   if((next <= blockIdx.x) && (blockIdx.x < (next + interval)))
   {

    //Data cache
    __shared__ float data[COLUMN_TILE_W * (KERNEL_RADIUS_T + COLUMN_TILE_H + KERNEL_RADIUS_T)];

    //Current tile and apron limits, in rows
    const int         tileStart = IMUL(blockIdx.y, COLUMN_TILE_H);
    const int           tileEnd = tileStart + COLUMN_TILE_H - 1;
    const int        apronStart = tileStart - KERNEL_RADIUS_T;
    const int          apronEnd = tileEnd   + KERNEL_RADIUS_T;

    //Clamp tile and apron limits by image borders
    const int    tileEndClamped = min(tileEnd, dataH - 1);
    const int apronStartClamped = max(apronStart, 0);
    const int   apronEndClamped = min(apronEnd, dataH - 1);

    //Current column index
    const int       columnStart = IMUL(blockIdx.x, COLUMN_TILE_W) + threadIdx.x;

    //Shared and global memory indices for current column
    int smemPos = IMUL(threadIdx.y, COLUMN_TILE_W) + threadIdx.x;
    int gmemPos = IMUL(apronStart + threadIdx.y, dataW) + columnStart;
    //Cycle through the entire data cache
    //Load global memory values, if indices are within the image borders,
    //or initialize with zero otherwise
    for(int y = apronStart + threadIdx.y; y <= apronEnd; y += blockDim.y){
        data[smemPos] = 
        ((y >= apronStartClamped) && (y <= apronEndClamped)) ? 
        d_Data[gmemPos] : 0;
        smemPos += smemStride;
        gmemPos += gmemStride;
    }

    //Ensure the completness of the loading stage
    //because results, emitted by each thread depend on the data, 
    //loaded by another threads
    __syncthreads();
    //Shared and global memory indices for current column
    smemPos = IMUL(threadIdx.y + KERNEL_RADIUS_T, COLUMN_TILE_W) + threadIdx.x;
    gmemPos = IMUL(tileStart + threadIdx.y , dataW) + columnStart;
    //Cycle through the tile body, clamped by image borders
    //Calculate and output the results
    for(int y = tileStart + threadIdx.y; y <= tileEndClamped; y += blockDim.y){
        float sum = 0;

        sum = convolutionColumn3<2 * KERNEL_RADIUS_T>(data + smemPos);

        d_Result[gmemPos] = sum;
        smemPos += smemStride;
        gmemPos += gmemStride;
    }
  }
  else
  {
	// Block lies outside the diagonal band: nothing to do.
	return;
  }
}

////////////////////////////////////////////////////////////////////////////////
// Column convolution filter after 135 degree shifting, choosing the necessary blocks 
////////////////////////////////////////////////////////////////////////////////
// Column (T-axis) convolution applied only to the anti-diagonal band of
// blocks produced by the 135-degree shear: blocks with
//   numblock_x - 1 - blockIdx.y*cof - interval < blockIdx.x
//                                   <= numblock_x - 1 - blockIdx.y*cof
// do the work; all others exit immediately.  The band test depends only on
// blockIdx values, so it is uniform per block and the __syncthreads()
// inside the guarded body is safe.
// Fix: the band bounds were previously computed in float (int -> float
// conversions and float comparisons against blockIdx.x); they are pure
// integer quantities, so exact integer arithmetic is used instead.  The
// explicit (int) casts on blockIdx.x keep the comparisons correct when
// lowbound is negative.
__global__ void convolutionColumnGPU135(
    float *d_Result,
    float *d_Data,
    int dataW,
    int dataH,
    int smemStride,
    int gmemStride,
    int interval,
    int cof
){
   // Number of COLUMN_TILE_W-wide block columns (assumes dataW is a
   // multiple of COLUMN_TILE_W, as in the original integer division).
   const int numblock_x = dataW / COLUMN_TILE_W;

   const int next      = IMUL(blockIdx.y, cof);
   const int lowbound  = numblock_x - 1 - next - interval;   // exclusive lower bound
   const int highbound = numblock_x - 1 - next;              // inclusive upper bound

   if((lowbound < (int)blockIdx.x) && ((int)blockIdx.x <= highbound))
   {

    //Data cache
    __shared__ float data[COLUMN_TILE_W * (KERNEL_RADIUS_T + COLUMN_TILE_H + KERNEL_RADIUS_T)];

    //Current tile and apron limits, in rows
    const int         tileStart = IMUL(blockIdx.y, COLUMN_TILE_H);
    const int           tileEnd = tileStart + COLUMN_TILE_H - 1;
    const int        apronStart = tileStart - KERNEL_RADIUS_T;
    const int          apronEnd = tileEnd   + KERNEL_RADIUS_T;

    //Clamp tile and apron limits by image borders
    const int    tileEndClamped = min(tileEnd, dataH - 1);
    const int apronStartClamped = max(apronStart, 0);
    const int   apronEndClamped = min(apronEnd, dataH - 1);

    //Current column index
    const int       columnStart = IMUL(blockIdx.x, COLUMN_TILE_W) + threadIdx.x;

    //Shared and global memory indices for current column
    int smemPos = IMUL(threadIdx.y, COLUMN_TILE_W) + threadIdx.x;
    int gmemPos = IMUL(apronStart + threadIdx.y, dataW) + columnStart;
    //Cycle through the entire data cache
    //Load global memory values, if indices are within the image borders,
    //or initialize with zero otherwise
    for(int y = apronStart + threadIdx.y; y <= apronEnd; y += blockDim.y){
        data[smemPos] = 
        ((y >= apronStartClamped) && (y <= apronEndClamped)) ? 
        d_Data[gmemPos] : 0;
        smemPos += smemStride;
        gmemPos += gmemStride;
    }

    //Ensure the completness of the loading stage
    //because results, emitted by each thread depend on the data, 
    //loaded by another threads
    __syncthreads();
    //Shared and global memory indices for current column
    smemPos = IMUL(threadIdx.y + KERNEL_RADIUS_T, COLUMN_TILE_W) + threadIdx.x;
    gmemPos = IMUL(tileStart + threadIdx.y , dataW) + columnStart;
    //Cycle through the tile body, clamped by image borders
    //Calculate and output the results
    for(int y = tileStart + threadIdx.y; y <= tileEndClamped; y += blockDim.y){
        float sum = 0;

        sum = convolutionColumn3<2 * KERNEL_RADIUS_T>(data + smemPos);

        d_Result[gmemPos] = sum;
        smemPos += smemStride;
        gmemPos += gmemStride;
    }
  }
  // Blocks outside the band simply exit.
}





////////////////////////////////////////////////////////////////////////////////
// shift45 for 45 degree diagonal filter
////////////////////////////////////////////////////////////////////////////////

// shift45
__global__ void shift45(float *odata, float* idata, int width, int height, int shift_width)
{
	// 45-degree shear: each input row y is written shifted right by one
	// extra element per row, i.e. output index = col + (shift_width + 1)*row.
	const unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
	const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;

	if (col < width && row < height)
	{
		odata[col + row + shift_width * row] = idata[row * width + col];
	}
}


////////////////////////////////////////////////////////////////////////////////
// shift135 for 135 degree diagonal filter
////////////////////////////////////////////////////////////////////////////////

//  shift135
__global__ void shift135(float *odata, float* idata, int width, int height, int shift_width)
{
	// 135-degree shear: each input row y starts at column (height - 1 - y)
	// plus a per-row advance of shift_width elements.
	const unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
	const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;

	if (col < width && row < height)
	{
		const unsigned int src = row * width + col;
		const unsigned int dst = col + height - 1 - row + shift_width * row;
		odata[dst] = idata[src];
	}
}

////////////////////////////////////////////////////////////////////////////////
// modulation
////////////////////////////////////////////////////////////////////////////////

// Modulate a real image by exp(-j*(omegax*x + omegay*y)), producing separate
// real and imaginary planes:
//   odata_real = idata * cos(phase),  odata_imag = -idata * sin(phase).
// Fast-math intrinsics __cosf/__sinf trade a few ulps for speed.
// Fixes: the pixel index, input load, and phase were each computed twice,
// and the old `(-1.0 * __sinf(...))` promoted to double; a float negation
// gives the bit-identical result without the double round-trip.
__global__ void modulation(float* odata_real, float* odata_imag, float* idata, float omegax, float omegay, int width, int height)
{
   unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
   unsigned int yIndex = blockDim.y * blockIdx.y + threadIdx.y;

   if (xIndex < width && yIndex < height)
   {
      const unsigned int idx = yIndex * width + xIndex;
      const float v     = idata[idx];
      const float phase = omegax * xIndex + omegay * yIndex;

      odata_real[idx] =  v * __cosf(phase);
      odata_imag[idx] = -v * __sinf(phase);
   }
}


__global__ void combine(Complexgf* whole, float* first, float* second, int width, int height)
{
	// Pack two real-valued planes into one complex plane:
	// whole[i] = (first[i], second[i]).
	const unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
	const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;

	if (col < width && row < height)
	{
		const int idx = row * width + col;
		whole[idx].x = first[idx];
		whole[idx].y = second[idx];
	}
}



__global__ void demodulation(Complexgf* idata, float omegax, float omegay, int width, int height)
{
	// Multiply each pixel in place by exp(+j*(omegax*x + omegay*y)),
	// undoing the phase applied by the modulation kernel.
	const unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
	const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;

	if (col < width && row < height)
	{
		Complexgf omega = {__cosf(omegax * col + omegay * row),
		                   __sinf(omegax * col + omegay * row)};
		complexMUL(idata[row * width + col], omega);
	}
}
	

////////////////////////////////////////////////////////////////////////////////
// shift135  choosing necessary blocks
////////////////////////////////////////////////////////////////////////////////

__global__ void shift135choosing(float *odata, float* idata, int width, int height, int shift_width, int interval)
{
	// Only a diagonal band of blocks does any work:
	// blockIdx.y <= blockIdx.x < blockIdx.y + interval.  The guard depends
	// solely on blockIdx, so it is uniform per block.
	if (blockIdx.x < blockIdx.y || blockIdx.x >= blockIdx.y + interval)
		return;

	const unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
	const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;

	if (col < width && row < height)
	{
		// Same 135-degree shear addressing as shift135.
		odata[col + height - 1 - row + shift_width * row] = idata[row * width + col];
	}
}

////////////////////////////////////////////////////////////////////////////////
// shift45  choosing necessary blocks
////////////////////////////////////////////////////////////////////////////////
// shift45
// 45-degree shear restricted to the anti-diagonal band of blocks:
//   numblock_x - 1 - blockIdx.y - interval < blockIdx.x
//                                 <= numblock_x - 1 - blockIdx.y.
// Fix: the band bounds were computed in float (int -> float conversions and
// float comparisons against blockIdx.x); they are pure integer quantities,
// so exact integer arithmetic is used.  The (int) casts on blockIdx.x keep
// the comparisons correct when lowbound is negative.  Assumes width is a
// multiple of BLOCK_DIM (integer division below).
__global__ void shift45choosing(float *odata, float* idata, int width, int height, int shift_width, int interval)
{
   const int numblock_x = width / BLOCK_DIM;
   const int lowbound   = numblock_x - 1 - (int)blockIdx.y - interval;  // exclusive
   const int highbound  = numblock_x - 1 - (int)blockIdx.y;            // inclusive

   if((lowbound < (int)blockIdx.x) && ((int)blockIdx.x <= highbound))
   {
	   unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
	   unsigned int yIndex = blockDim.y * blockIdx.y + threadIdx.y;

	   if (xIndex < width && yIndex < height)
	   {
		   // Same 45-degree shear addressing as shift45.
		   unsigned int index_in  = xIndex + width * yIndex;
		   unsigned int index_out = xIndex + yIndex + shift_width * yIndex;
		   odata[index_out] = idata[index_in];
	   }
   }
}



//cut function
__global__ void cut(float* aftercut, float* beforecut, int width, int height, int ori_width, int ori_height, int pad_kernel_radius_x, int pad_kernel_radius_y)
{
	// Crop a padded image (row pitch `width`) back to ori_width x ori_height
	// by dropping pad_kernel_radius_x/_y border pixels.
	const unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
	const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;

	if (col < ori_width && row < ori_height)
	{
		const unsigned int src = (row + pad_kernel_radius_y) * width + col + pad_kernel_radius_x;
		aftercut[row * ori_width + col] = beforecut[src];
	}
}


__global__ void cut(Complexgf* aftercut, Complexgf* beforecut, int width, int height, int ori_width, int ori_height, int pad_kernel_radius_x, int pad_kernel_radius_y)
{
	// Complex-valued overload of cut: crop a padded image (row pitch
	// `width`) back to ori_width x ori_height.
	const unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
	const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;

	if (col < ori_width && row < ori_height)
	{
		const unsigned int src = (row + pad_kernel_radius_y) * width + col + pad_kernel_radius_x;
		const float re = beforecut[src].x;
		const float im = beforecut[src].y;
		aftercut[row * ori_width + col].x = re;
		aftercut[row * ori_width + col].y = im;
	}
}

//and function
__global__ void andImage(Complexgf* odata,Complexgf* idata,Complexgf* imask, int width)
{
	// Masked copy: keep idata where the mask's squared magnitude exceeds 5,
	// zero elsewhere.
	// NOTE(review): unlike the other kernels there is no x/y bounds guard —
	// presumably the launch grid exactly covers the image; confirm with the
	// caller.
	const unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
	const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;
	const unsigned int idx = row * width + col;

	const float mr = imask[idx].x;
	const float mi = imask[idx].y;

	if (mr * mr + mi * mi > 5)
	{
		odata[idx].x = idata[idx].x;
		odata[idx].y = idata[idx].y;
	}
	else
	{
		odata[idx].x = 0;
		odata[idx].y = 0;
	}
}
//pad function
__global__ void padinside(float *odata, float* idata, int width, int height, int kernel_pad)
{
	// Copy a width x height image into the interior of an output image that
	// is kernel_pad larger on every side (output row pitch:
	// width + 2*kernel_pad).
	const unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
	const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;

	if (col < width && row < height)
	{
		const int paddedW = width + 2 * kernel_pad;
		odata[(row + kernel_pad) * paddedW + col + kernel_pad] = idata[row * width + col];
	}
}
// Batched (blockIdx.z-indexed) variant of padinside: copies slice z of a
// width x height stack into the interior of a padded output slice.
__global__ void padinsideND(float *odata, float* idata, int width, int height, int kernel_pad)
{	
	unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int yIndex = blockDim.y * blockIdx.y + threadIdx.y;
    
    // One z-slice per blockIdx.z.
    unsigned int zIndex = blockIdx.z; 

	if (xIndex < width && yIndex < height)
	{
		// NOTE(review): the output slice offset is zIndex*width*height, but
		// each padded output row uses pitch (width + 2*kernel_pad), so a
		// padded slice occupies more than width*height elements —
		// consecutive z slices would overlap unless the caller's output
		// layout accounts for this.  Verify against the launch site.
		odata[zIndex*width*height+(yIndex + kernel_pad) * (width + 2 * kernel_pad) + xIndex  + kernel_pad] = idata[zIndex*width*height+yIndex * width + xIndex];
	}
}

__global__ void padleftright(float *data, int width, int height, int kernel_pad)
{
	// Replicate the outermost interior columns into the left/right padding
	// bands, in place, for rows inside the vertical padding band.
	// NOTE(review): assumes kernel_pad and width are multiples of BLOCK_DIM
	// (integer divisions below), so pad regions align with whole blocks —
	// confirm with callers.
	const unsigned int padBlocks   = kernel_pad / BLOCK_DIM;
	const unsigned int totalBlocks = width / BLOCK_DIM;

	const unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
	const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;

	// Skip rows in the top/bottom pads.
	if (row < kernel_pad || row >= height - kernel_pad)
		return;

	if (blockIdx.x < padBlocks)
	{
		// Left pad: copy the first interior column.
		data[row * width + col] = data[row * width + kernel_pad];
	}
	else if (blockIdx.x >= totalBlocks - padBlocks && blockIdx.x < totalBlocks)
	{
		// Right pad: copy the last interior column.
		data[row * width + col] = data[row * width + width - (kernel_pad + 1)];
	}
}

// Batched (blockIdx.z-indexed) variant of padleftright: for each z-slice,
// replicate the outermost interior columns into the left/right padding
// bands, in place.
// NOTE(review): assumes kernel_pad and width are multiples of BLOCK_DIM
// (integer divisions below) — confirm with callers.
__global__ void padleftrightND(float *data, int width, int height, int kernel_pad)
{
	unsigned int x_start = kernel_pad / BLOCK_DIM;
	unsigned int x_interval = width / BLOCK_DIM;

	unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int yIndex = blockDim.y * blockIdx.y + threadIdx.y;
    
    // One z-slice (width*height elements) per blockIdx.z.
    unsigned int zIndex  = blockIdx.z;
    
	
	// Only rows between the top and bottom pads are touched.
	if((yIndex >= kernel_pad) && (yIndex < height - kernel_pad))
	{
		if(blockIdx.x < x_start)
		{
			// Left pad: copy the first interior column of this slice.
			data[zIndex*width*height+yIndex * width + xIndex] = data[zIndex*width*height+yIndex * width + kernel_pad];
		}
		else if((blockIdx.x >= x_interval - x_start) && (blockIdx.x < x_interval))
		{
			// Right pad: copy the last interior column of this slice.
			data[zIndex*width*height+yIndex * width + xIndex] = data[zIndex*width*height+yIndex * width + width - (kernel_pad + 1)];
		}
	}
}


__global__ void padupdown(float* data, int width, int height, int kernel_pad)
{
	// Replicate the first/last interior rows into the top/bottom padding
	// bands, in place.
	const unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
	const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;

	if (row < kernel_pad)
	{
		// Top pad: copy the first interior row.
		data[row * width + col] = data[kernel_pad * width + col];
	}
	else if (row >= height - kernel_pad && row < height)
	{
		// Bottom pad: copy the last interior row.
		data[row * width + col] = data[(height - (kernel_pad + 1)) * width + col];
	}
	// Interior rows: nothing to do.
}

// Batched (blockIdx.z-indexed) variant of padupdown: replicate the
// first/last interior rows of each z-slice into its top/bottom padding
// bands, in place.
// BUG FIX: in the bottom-pad branch the z-slice offset was previously
// placed inside the row-index parentheses and so got multiplied by width
// (data[(z*w*h + height - (kernel_pad+1)) * width + x]), addressing far
// outside the intended slice for every z > 0.  Compare the correct top-pad
// branch and the 2D padupdown kernel.
__global__ void padupdownND(float* data, int width, int height, int kernel_pad)
{
	unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
	unsigned int yIndex = blockDim.y * blockIdx.y + threadIdx.y;

	// One z-slice (width*height elements) per blockIdx.z.
	unsigned int zIndex = blockIdx.z;
	unsigned int slice  = zIndex * width * height;

	if (yIndex < kernel_pad)
	{
		// Top pad: copy the first interior row of this slice.
		data[slice + yIndex * width + xIndex] = data[slice + kernel_pad * width + xIndex];
	}
	else if ((yIndex >= height - kernel_pad) && (yIndex < height))
	{
		// Bottom pad: copy the last interior row of this slice.
		data[slice + yIndex * width + xIndex] = data[slice + (height - (kernel_pad + 1)) * width + xIndex];
	}
}
//pad function, pad the width of the image to multiple of 16

__global__ void padtomultiple16(float *odata, float* idata, int width, int height, int kernel_pad)
{
	// Widen an image of width (width - kernel_pad) to `width` by replicating
	// its last column into the extra kernel_pad columns on the right.
	const unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
	const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;

	if (col < width && row < height)
	{
		const int srcWidth = width - kernel_pad;
		// Clamp reads to the source's last column inside the padded region.
		const unsigned int srcCol = (col < srcWidth) ? col : (srcWidth - 1);
		odata[row * width + col] = idata[row * srcWidth + srcCol];
	}
}

// Batched (blockIdx.z-indexed) variant of padtomultiple16: widen each
// z-slice from (width - kernel_pad) to `width` columns by replicating the
// last source column on the right.
// Fix: zIndex was declared but never used (the body repeated blockIdx.z);
// it is now used consistently.
// NOTE(review): both buffers use a slice stride of width*height even though
// the input rows are only (width - kernel_pad) wide — presumably the input
// buffer is allocated with the padded slice stride; confirm with callers.
__global__ void padtomultiple16ND(float *odata, float* idata, int width, int height, int kernel_pad)
{
	unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
	unsigned int yIndex = blockDim.y * blockIdx.y + threadIdx.y;

	// One z-slice per blockIdx.z.
	unsigned int zIndex = blockIdx.z;

	if (xIndex < width && yIndex < height)
	{
		if (xIndex < width - kernel_pad)
		{
			odata[zIndex*width*height + yIndex * width + xIndex] =
				idata[zIndex*width*height + yIndex * (width - kernel_pad) + xIndex];
		}
		else
		{
			// Replicate the last source column into the padded region.
			odata[zIndex*width*height + yIndex * width + xIndex] =
				idata[zIndex*width*height + yIndex * (width - kernel_pad) + width - 1 - kernel_pad];
		}
	}
}

//minus_GPU result = input1 - input2;
__global__ void minus(float* result, float* input1, float* input2, int width, int height)
{
	// Element-wise difference: result = input1 - input2.
	const unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
	const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;

	if (col < width && row < height)
	{
		const unsigned int idx = row * width + col;
		result[idx] = input1[idx] - input2[idx];
	}
}

__global__ void minusND(float* result, float* input1, float* input2, int width, int height)
{
	// Batched element-wise difference: one width x height z-slice per
	// blockIdx.z; result = input1 - input2 within each slice.
	const unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
	const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;

	if (col < width && row < height)
	{
		const unsigned int idx = blockIdx.z * width * height + row * width + col;
		result[idx] = input1[idx] - input2[idx];
	}
}




