/* Box blur (simple smooth) filter implemented in CUDA as a separable
 * horizontal + vertical (motion-blur style) convolution pass. June 2014.
 * Felipe Osorio Thome 7696409 <f.o.thome@gmail.com>.
 * Thales Andrade 7573527.
 * Vitor Lima 7573361.
 *
 * This work is based on the separable image convolution whitepaper and
 * sample code by NVIDIA Corporation. */

#include <cstdio>

#include "smooth.h"

/* --------------- Horizontal Motion Blur --------------- */

__global__ void convolutionRowsKernel(int *d_Result, int *d_Data, int dataW, int dataH) {

  /* Horizontal box-blur pass over one ROW_TILE_W-wide tile of one row.
   * Grid layout (set by convolutionRowsGPU): blockIdx.x selects the tile,
   * blockIdx.y selects the row. The tile plus a KERNEL_RADIUS apron on
   * each side is staged in shared memory, then each thread averages one
   * output pixel over the KERNEL_LENGTH-wide window. */

  /* Shared staging buffer: tile plus left and right aprons. */
  __shared__ int s_line[KERNEL_RADIUS + ROW_TILE_W + KERNEL_RADIUS];

  /* Tile and apron boundaries for this block, as offsets within the row. */
  const int tileFirst = ROW_TILE_W * blockIdx.x;
  const int tileLast = tileFirst + (ROW_TILE_W - 1);
  const int apronFirst = tileFirst - KERNEL_RADIUS;
  const int apronLast = tileLast + KERNEL_RADIUS;

  /* Clamp everything that may fall outside the image borders. */
  const int tileLastClamped = min(tileLast, dataW - 1);
  const int apronFirstClamped = max(apronFirst, 0);
  const int apronLastClamped = min(apronLast, dataW - 1);

  /* Offset of this block's row inside the flat image array. */
  const int rowBase = dataW * blockIdx.y;

  /* Stage 1: cooperative load into shared memory. Loads start at an
   * aligned position left of the apron (KERNEL_RADIUS_ALIGNED) so global
   * reads coalesce; threads landing before the apron have nothing to do. */
  const int loadIdx = (tileFirst - KERNEL_RADIUS_ALIGNED) + threadIdx.x;
  if (loadIdx >= apronFirst) {
    const bool inImage = (loadIdx >= apronFirstClamped) && (loadIdx <= apronLastClamped);

    /* Apron cells outside the image are zero-filled. */
    s_line[loadIdx - apronFirst] = inImage ? d_Data[rowBase + loadIdx] : 0;
  }

  /* All of s_line must be populated before any thread reads it. */
  __syncthreads();

  /* Stage 2: each thread produces one output pixel of the tile. */
  const int outIdx = tileFirst + threadIdx.x;
  if (outIdx <= tileLastClamped) {
    const int center = outIdx - apronFirst;
    int acc = 0;

    /* Fixed-trip window sum; unrolled for performance. */
    #pragma unroll
    for (int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS; k++)
      acc += s_line[center + k];

    /* Box filter: plain average over the window. */
    d_Result[rowBase + outIdx] = acc / KERNEL_LENGTH;
  }
}

/* Launch the horizontal blur pass over a dataW x dataH image of ints.
 * Grid: one block per (row tile, row); block: ROW_TILE_W output threads
 * plus KERNEL_RADIUS_ALIGNED + KERNEL_RADIUS loader threads. */
extern "C" void convolutionRowsGPU(int *d_Dst, int *d_Src, int imageW, int imageH) {

  dim3 blocks(iDivUp(imageW, ROW_TILE_W), imageH);
  dim3 threads(KERNEL_RADIUS_ALIGNED + ROW_TILE_W + KERNEL_RADIUS);

  convolutionRowsKernel<<<blocks, threads>>>(d_Dst, d_Src, imageW, imageH);

  /* Kernel launches are asynchronous and report configuration errors only
   * through cudaGetLastError(); check so bad launches are not silent. */
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "convolutionRowsKernel launch failed: %s\n",
            cudaGetErrorString(err));
  }
}

/* --------------- Vertical Motion Blur --------------- */

/* Vertical box-blur pass over a COLUMN_TILE_W x COLUMN_TILE_H tile.
 * Grid layout (set by convolutionColumnsGPU): blockIdx.x selects the
 * column tile, blockIdx.y the row tile; blockDim is (COLUMN_TILE_W, 8).
 * smemStride/gmemStride are the per-iteration advances (COLUMN_TILE_W *
 * blockDim.y and dataW * blockDim.y) precomputed on the host. */
__global__ void convolutionColumnsKernel(int *d_Result, int *d_Data, int dataW, int dataH, int smemStride, int gmemStride) {

  /* First stage: loading data into shared memory.
   * Layout: (KERNEL_RADIUS + COLUMN_TILE_H + KERNEL_RADIUS) rows of
   * COLUMN_TILE_W columns — the tile plus a vertical apron. */
  __shared__ int data[COLUMN_TILE_W * (KERNEL_RADIUS + COLUMN_TILE_H + KERNEL_RADIUS)];

  /* Current tile and apron limits, in rows. */
  int tileStart = blockIdx.y * COLUMN_TILE_H;
  int tileEnd = tileStart + COLUMN_TILE_H - 1;
  int apronStart = tileStart - KERNEL_RADIUS;
  int apronEnd = tileEnd + KERNEL_RADIUS;

  /* Restrict tile and apron limits by image borders. */
  int tileEndClamped = min(tileEnd, dataH - 1);
  int apronStartClamped = max(apronStart, 0);
  int apronEndClamped = min(apronEnd, dataH - 1);

  /* Current column index. When imageW is not a multiple of COLUMN_TILE_W
   * the rightmost block covers columns past the image edge; those threads
   * must not touch global memory (fixes an out-of-bounds read/write in
   * the original, which never checked columnStart against dataW). */
  int columnStart = blockIdx.x * COLUMN_TILE_W + threadIdx.x;
  int columnInImage = columnStart < dataW;

  /* Shared and global memory indices for current column. */
  int smemPos = threadIdx.y * COLUMN_TILE_W + threadIdx.x;
  int gmemPos = ((apronStart + threadIdx.y) * dataW) + columnStart;

  if (columnInImage) {
    /* Each thread walks its column with stride blockDim.y; apron rows
     * outside the image are zero-filled. */
    for (int y = apronStart + threadIdx.y; y <= apronEnd; y += blockDim.y) {
      data[smemPos] = ((y >= apronStartClamped) && (y <= apronEndClamped)) ?
        d_Data[gmemPos] : 0;

      smemPos += smemStride;
      gmemPos += gmemStride;
    }
  }

  /* Barrier stays OUTSIDE the columnInImage guard so every thread of the
   * block reaches it, even in partially-filled edge blocks. */
  __syncthreads();

  /* Second stage: filter computation. Each thread only reads its own
   * column of shared memory, so columns skipped above are never read. */
  if (columnInImage) {
    /* Shared and global memory indices for current column. */
    smemPos = ((threadIdx.y + KERNEL_RADIUS) * COLUMN_TILE_W) + threadIdx.x;
    gmemPos = ((tileStart + threadIdx.y) * dataW) + columnStart;

    /* Calculate and output the results. */
    for (int y = tileStart + threadIdx.y; y <= tileEndClamped; y += blockDim.y) {
      int sum = 0;

      /* Unroll loop for better performance. */
      #pragma unroll
      for (int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS; k++) {
        sum += data[smemPos + k * COLUMN_TILE_W];
      }

      /* Box filter: average over KERNEL_LENGTH rows of this column. */
      d_Result[gmemPos] = sum / KERNEL_LENGTH;
      smemPos += smemStride;
      gmemPos += gmemStride;
    }
  }
}

/* Launch the vertical blur pass over a dataW x dataH image of ints.
 * Grid: one block per COLUMN_TILE_W x COLUMN_TILE_H tile; block is
 * (COLUMN_TILE_W, 8), with the stride arguments precomputed so the
 * kernel avoids per-thread multiplies when advancing rows. */
extern "C" void convolutionColumnsGPU(int *d_Dst, int *d_Src, int imageW, int imageH) {

  dim3 blocks(iDivUp(imageW, COLUMN_TILE_W), iDivUp(imageH, COLUMN_TILE_H));
  dim3 threads(COLUMN_TILE_W, 8);

  convolutionColumnsKernel<<<blocks, threads>>>(d_Dst, d_Src, imageW, imageH, COLUMN_TILE_W * threads.y, imageW * threads.y);

  /* Kernel launches are asynchronous and report configuration errors only
   * through cudaGetLastError(); check so bad launches are not silent. */
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "convolutionColumnsKernel launch failed: %s\n",
            cudaGetErrorString(err));
  }
}
