#include <iostream>
// 
//#define __CUDACC__
#include <cooperative_groups.h>
// Local Include
#include "GpuInterface.cuh"

namespace CoGroups = cooperative_groups;

// Naive: 
//      +-----------+ Every Block Deal a Tile of Result
//      |           |
//      +-----------+
//
// Shared:
//
//      +-----------+ Using Shared Mem to  +---+-----------+---+ And When we Need Input, reading from Shared Memory
//      |           | Cache D_Input        | L |     M     | R |
//      +-----------+                      +---+-----------+---+
//
// Tiled:
// 
//      +-----------+-----------+   Using Tiled for Threads
//      |           |           |   Each thread deals [TileX, TileY] Cells
//      +-----------+-----------+   Each block has [BlockDimX, BlockDimY] Cells
//      |           |           |   So, Each block deals [(TileX * BlockDimX), (TileY * BlockDimY)] Cells
//      +-----------+-----------+   
//          MENTION : Mem req is same, but more wavefronts!!!!!!
//                  : Reason is when we use thread for tiled,
//                      first thread  read [0, 0]
//                      second thread read [TileX, 0]
//                    So, in this warp each Reading Request has stride TileX,
//                      and we need more Wavefronts and Sectors!!!!!
// 
// TiledSwapLoop:
//      
//      0123456789ABCD  Base Method : TileY->TileX->R ---------> TileY->TileX+R->TileX
//      -----------|||  If kernel radius is 5, TileX is 4,
//       -----------||  so each TileX Cells in this Thread's reading 
//        -----------|  be like. So if we read All this in one kernel, 
//         -----------  we turn (KERNEL_RADIUS * TileX) req -to-> KERNEL_RADIUS * 2 + TileX
// 
// TiledNonmat:
// 
//      Like Tiled, but we don't pattern as rectangle, because of Memory Stride in a warp.
// 

// CUDA Kernel-------------------------------------------------------------------------------------------------------------------------
__global__ void NaiveX(float* D_Output, const float* D_Input, const int W, const int H) {
    // Row-direction naive convolution: blocks walk the W x H image tile by
    // tile in a grid-stride fashion; every thread handles one cell per tile
    // and reads all (2 * KERNEL_RADIUS + 1) neighbours straight from global
    // memory. Assumes W % blockDim.x == 0 and
    // (W * H) % (blockDim.x * blockDim.y) == 0 (unchecked preconditions).
    const unsigned int CellsPerBlock = blockDim.x * blockDim.y;
    const unsigned int TotalTiles = H * W / CellsPerBlock;

    for (int Tile = blockIdx.x; Tile < TotalTiles; Tile += gridDim.x) {
        const int TilesPerRow = W / blockDim.x;
        const int X = Tile % TilesPerRow * blockDim.x + threadIdx.x;
        const int Y = Tile / TilesPerRow * blockDim.y + threadIdx.y;
        const int Cell = Y * W + X;

        float Acc = 0;

#pragma unroll
        for (int Tap = -KERNEL_RADIUS; Tap <= KERNEL_RADIUS; Tap++) {
            // Zero padding: taps falling outside the row contribute nothing.
            if (X + Tap >= 0 && X + Tap < W)
                Acc += D_Input[Cell + Tap] * C_Kernel[Tap + KERNEL_RADIUS];
        }

        D_Output[Cell] = Acc;
    }
}

__global__ void NaiveY(float* D_Output, const float* D_Input, const int W, const int H) {
    // Column-direction naive convolution: identical tile walk to NaiveX, but
    // the filter taps step vertically (stride W in memory). Assumes
    // W % blockDim.x == 0 and (W * H) % (blockDim.x * blockDim.y) == 0.
    const unsigned int CellsPerBlock = blockDim.x * blockDim.y;
    const unsigned int TotalTiles = H * W / CellsPerBlock;

    for (int Tile = blockIdx.x; Tile < TotalTiles; Tile += gridDim.x) {
        const int TilesPerRow = W / blockDim.x;
        const int X = Tile % TilesPerRow * blockDim.x + threadIdx.x;
        const int Y = Tile / TilesPerRow * blockDim.y + threadIdx.y;
        const int Cell = Y * W + X;

        float Acc = 0;

#pragma unroll
        for (int Tap = -KERNEL_RADIUS; Tap <= KERNEL_RADIUS; Tap++) {
            // Zero padding: skip taps above the top / below the bottom edge.
            if (Y + Tap >= 0 && Y + Tap < H)
                Acc += D_Input[Cell + Tap * W] * C_Kernel[Tap + KERNEL_RADIUS];
        }

        D_Output[Cell] = Acc;
    }
}

__global__ void SharedX(float* D_Output, const float* D_Input, const int W, const int H) {
    // Row-direction convolution that stages each block tile (plus a
    // KERNEL_RADIUS-wide halo on either side) in shared memory, so each
    // input cell is fetched from global memory once per tile instead of
    // once per kernel tap.
    //
    // Shared Size -> 
    //      Height     *  (      Left        +       Mid       +       Right      )
    // 
    //    blockDim.y   *  (  KERNEL_RADIUS   +    blockDim.x   +   KERNEL_RADIUS  )
    extern __shared__ float S_Input[];
    // NOTE(review): SharedW carries one extra column beyond the two halos —
    // presumably padding to reduce shared-memory bank conflicts; the dynamic
    // shared-memory size passed by RunShared accounts for it.
    const unsigned int SharedW = blockDim.x + KERNEL_RADIUS * 2 + 1;

    //auto Block = CoGroups::this_thread_block();
    const unsigned int ThreadTileSize = blockDim.x * blockDim.y;
    const unsigned int TileNum = H * W / ThreadTileSize;
    // Loop For Tile (grid-stride: each block processes every gridDim.x-th tile)
    for (int TileIndex = blockIdx.x; TileIndex < TileNum; TileIndex += gridDim.x) {
        const int TileNumPerLine = W / blockDim.x;
        const int GlobalX = TileIndex % TileNumPerLine * blockDim.x + threadIdx.x;
        const int GlobalY = TileIndex / TileNumPerLine * blockDim.y + threadIdx.y;
        const int GlobalIndex = GlobalY * W + GlobalX;

        // Load Global To Shared
        const int CenterIndex = threadIdx.y * SharedW + KERNEL_RADIUS + threadIdx.x;
        // Mid: every thread copies its own cell.
        S_Input[CenterIndex] = D_Input[GlobalIndex];
        // Left halo: the KERNEL_RADIUS columns left of the tile, zero-filled
        // where they fall outside the left image border.
        int ThreadIndex = threadIdx.x;
        const int SharedBaseLeft = threadIdx.y * SharedW;
        const int GlobalBaseLeft = GlobalIndex - (int)threadIdx.x - KERNEL_RADIUS;
        while (ThreadIndex < KERNEL_RADIUS) {
            S_Input[SharedBaseLeft  + ThreadIndex] = GlobalX - (int)threadIdx.x - KERNEL_RADIUS + ThreadIndex >= 0 ? D_Input[GlobalBaseLeft  + ThreadIndex] : 0;

            ThreadIndex += blockDim.x;
        }
        // Right halo: mirrored logic, zero-filled past the right border.
        ThreadIndex = threadIdx.x;
        const int SharedBaseRight = threadIdx.y * SharedW + KERNEL_RADIUS + blockDim.x;
        const int GlobalBaseRight = GlobalIndex - (int)threadIdx.x + blockDim.x;
        while (ThreadIndex < KERNEL_RADIUS) {
            S_Input[SharedBaseRight + ThreadIndex] = GlobalX - (int)threadIdx.x + blockDim.x    + ThreadIndex <  W ? D_Input[GlobalBaseRight + ThreadIndex] : 0;

            ThreadIndex += blockDim.x;
        }
        // All shared-memory loads must land before any thread reads neighbours.
        __syncthreads();
        float Sum = 0;

#pragma unroll
        for (int R = -KERNEL_RADIUS; R <= KERNEL_RADIUS; R++)
            Sum += S_Input[CenterIndex + R] * C_Kernel[R + KERNEL_RADIUS];

        D_Output[GlobalIndex] = Sum;
        // Keep the tile resident until every thread has finished reading it,
        // before the next iteration overwrites S_Input.
        __syncthreads();
    }
}

__global__ void SharedY(float* D_Output, const float* D_Input, const int W, const int H) {
    // Column-direction convolution that stages each block tile (plus a
    // KERNEL_RADIUS-tall halo above and below) in shared memory, so each
    // input cell is fetched from global memory once per tile instead of
    // once per kernel tap.
    //
    // Shared Size -> 
    //      Width     *  (         Up        +       Mid       +        Down      )
    // 
    //    blockDim.x   *  (  KERNEL_RADIUS   +    blockDim.y   +   KERNEL_RADIUS  )
    extern __shared__ float S_Input[];
    const unsigned int SharedW = blockDim.x;
    //const unsigned int SharedH = blockDim.y + KERNEL_RADIUS * 2 + 1;

    //auto Block = CoGroups::this_thread_block();
    const unsigned int ThreadTileSize = blockDim.x * blockDim.y;
    const unsigned int TileNum = H * W / ThreadTileSize;
    // Loop For Tile (grid-stride: each block processes every gridDim.x-th tile)
    for (int TileIndex = blockIdx.x; TileIndex < TileNum; TileIndex += gridDim.x) {
        const int TileNumPerLine = W / blockDim.x;
        const int GlobalX = TileIndex % TileNumPerLine * blockDim.x + threadIdx.x;
        const int GlobalY = TileIndex / TileNumPerLine * blockDim.y + threadIdx.y;
        const int GlobalIndex = GlobalY * W + GlobalX;

        // Load Global To Shared
        const int CenterIndex = (threadIdx.y + KERNEL_RADIUS) * SharedW + threadIdx.x;
        // Mid: every thread copies its own cell.
        S_Input[CenterIndex] = D_Input[GlobalIndex];
        // Up halo: the KERNEL_RADIUS rows above the tile, zero-filled where
        // they fall above the top image border.
        int ThreadIndex = threadIdx.y;
        const int SharedBaseUp = threadIdx.x;
        const int GlobalBaseUp = (GlobalY - (int)threadIdx.y - KERNEL_RADIUS) * W + GlobalX;
        while (ThreadIndex < KERNEL_RADIUS) {
            S_Input[ThreadIndex * SharedW + SharedBaseUp] = GlobalY - (int)threadIdx.y - KERNEL_RADIUS + ThreadIndex >= 0 ? D_Input[GlobalBaseUp + ThreadIndex * W] : 0;

            ThreadIndex += blockDim.y;
        }
        // Down halo: mirrored logic, zero-filled past the bottom border.
        ThreadIndex = threadIdx.y;
        const int SharedBaseDown = (blockDim.y + KERNEL_RADIUS) * SharedW + threadIdx.x;
        const int GlobalBaseDown = (GlobalY - (int)threadIdx.y + blockDim.y) * W + GlobalX;
        while (ThreadIndex < KERNEL_RADIUS) {
            S_Input[ThreadIndex * SharedW + SharedBaseDown] = GlobalY - (int)threadIdx.y + blockDim.y + ThreadIndex < H ? D_Input[GlobalBaseDown + ThreadIndex * W] : 0;

            ThreadIndex += blockDim.y;
        }
        // All shared-memory loads must land before any thread reads neighbours.
        __syncthreads();
        float Sum = 0;

#pragma unroll
        for (int R = -KERNEL_RADIUS; R <= KERNEL_RADIUS; R++)
            Sum += S_Input[CenterIndex + R * SharedW] * C_Kernel[R + KERNEL_RADIUS];

        D_Output[GlobalIndex] = Sum;
        // Keep the tile resident until every thread has finished reading it,
        // before the next iteration overwrites S_Input.
        __syncthreads();
    }
}

// Row-direction convolution where each thread computes a rectangular
// ThreadTileX x ThreadTileY patch of adjacent output cells, accumulating into
// a per-thread register array (no shared memory). NOTE from the file header:
// this rectangular split makes a warp's loads stride by ThreadTileX, which
// costs extra wavefronts/sectors versus the non-rectangular variant.
template<int ThreadTileX, int ThreadTileY>
__global__ void TiledX(float* D_Output, const float* D_Input, const int W, const int H) {
    // A block covers BlockTileX x BlockTileY cells; tiles are walked
    // grid-stride. Assumes the image divides evenly into block tiles.
    const unsigned int BlockTileX = min(W, blockDim.x * ThreadTileX);
    const unsigned int BlockTileY = min(H, blockDim.y * ThreadTileY);
    const unsigned int ThreadTileSize = BlockTileX * BlockTileY;
    const unsigned int TileNum = H * W / ThreadTileSize;
    const int TileNumPerLine = W / BlockTileX;
    // Loop For Tile
    for (int TileIndex = blockIdx.x; TileIndex < TileNum; TileIndex += gridDim.x) {
        const int GlobalBaseX = TileIndex % TileNumPerLine * BlockTileX + threadIdx.x * ThreadTileX;
        const int GlobalBaseY = TileIndex / TileNumPerLine * BlockTileY + threadIdx.y * ThreadTileY;

        // Threads whose whole patch starts outside the image have no work.
        if (GlobalBaseX >= W || GlobalBaseY >= H) continue;

        float Sum[ThreadTileY][ThreadTileX] = { 0 };

#pragma unroll
        for (int R = -KERNEL_RADIUS; R <= KERNEL_RADIUS; R++) {
            float R_Kernel = C_Kernel[R + KERNEL_RADIUS];
#pragma unroll
            for (int TileY = 0; TileY < ThreadTileY; TileY++) {
                if (GlobalBaseY + TileY >= 0 && GlobalBaseY + TileY < H) {
#pragma unroll
                    for (int TileX = 0; TileX < ThreadTileX; TileX++) {
                        // Zero padding: out-of-row taps contribute nothing.
                        if (GlobalBaseX + TileX + R >= 0 && GlobalBaseX + TileX + R < W) {
                            const int GlobalIndex = (GlobalBaseY + TileY) * W + (GlobalBaseX + TileX);
                            Sum[TileY][TileX] += D_Input[GlobalIndex + R] * R_Kernel;
                        }
                    }
                }
            }
        }

        // Write the accumulated patch back to global memory.
#pragma unroll
        for (int TileX = 0; TileX < ThreadTileX; TileX++)
#pragma unroll
            for (int TileY = 0; TileY < ThreadTileY; TileY++) {
                const int GlobalIndex = (GlobalBaseY + TileY) * W + (GlobalBaseX + TileX);
                D_Output[GlobalIndex] = Sum[TileY][TileX];
            }
    }
}

// Row-direction convolution with a non-rectangular per-thread tile: the cells
// one thread handles are blockDim.x columns / blockDim.y rows apart, so each
// step's loads within a warp stay contiguous and coalesced (see the
// "TiledNonmat" note at the top of the file).
template<int ThreadTileX, int ThreadTileY>
__global__ void TiledNonmatX(float* D_Output, const float* D_Input, const int W, const int H) {
    const unsigned int BlockTileX = min(W, blockDim.x * ThreadTileX);
    const unsigned int BlockTileY = min(H, blockDim.y * ThreadTileY);
    const unsigned int ThreadTileSize = BlockTileX * BlockTileY;
    const unsigned int TileNum = H * W / ThreadTileSize;
    const int TileNumPerLine = W / BlockTileX;
    // Loop For Tile
    for (int TileIndex = blockIdx.x; TileIndex < TileNum; TileIndex += gridDim.x) {
        const int GlobalBaseX = TileIndex % TileNumPerLine * BlockTileX + threadIdx.x;
        const int GlobalBaseY = TileIndex / TileNumPerLine * BlockTileY + threadIdx.y;

        if (GlobalBaseX >= W || GlobalBaseY >= H) continue;

        // Fix: the previous per-thread accumulator array
        // `float Sum[ThreadTileY][ThreadTileX]` was shadowed by the scalar
        // below and never read — removed as dead state.
#pragma unroll
        for (int TileY = 0; TileY < ThreadTileY; TileY++) {
            const int GlobalY = GlobalBaseY + TileY * blockDim.y;
            if (GlobalY >= 0 && GlobalY < H) {
#pragma unroll
                for (int TileX = 0; TileX < ThreadTileX; TileX++) {
                    // Each cell accumulates into its own scalar and is written
                    // out immediately; no cross-cell state is needed.
                    float Sum = 0;
                    const int GlobalX = GlobalBaseX + TileX * blockDim.x;
                    const int GlobalIndex = GlobalY * W + GlobalX;
#pragma unroll
                    for (int R = -KERNEL_RADIUS; R <= KERNEL_RADIUS; R++) {
                        // Zero padding: out-of-row taps contribute nothing.
                        if (GlobalX + R >= 0 && GlobalX + R < W) {
                            Sum += D_Input[GlobalIndex + R] * C_Kernel[R + KERNEL_RADIUS];
                        }
                    }

                    D_Output[GlobalIndex] = Sum;
                }
            }
        }
    }
}

// Row-direction convolution using the "SwapLoop" reordering: instead of
// looping the taps R around every output cell (about KERNEL_RADIUS * TileX
// reads), loop once over the union of input columns the whole tile needs
// (ThreadTileX + 2 * KERNEL_RADIUS reads) and scatter each loaded value into
// every Sum it contributes to. See the "TiledSwapLoop" note in the header.
template<int ThreadTileX, int ThreadTileY>
__global__ void TiledSwapLoopX(float* D_Output, const float* D_Input, const int W, const int H) {
    const unsigned int BlockTileX = min(W, blockDim.x * ThreadTileX);
    const unsigned int BlockTileY = min(H, blockDim.y * ThreadTileY);
    const unsigned int ThreadTileSize = BlockTileX * BlockTileY;
    const unsigned int TileNum = H * W / ThreadTileSize;
    const int TileNumPerLine = W / BlockTileX;
    // Loop For Tile
    for (int TileIndex = blockIdx.x; TileIndex < TileNum; TileIndex += gridDim.x) {
        const int GlobalBaseX = TileIndex % TileNumPerLine * BlockTileX + threadIdx.x * ThreadTileX;
        const int GlobalBaseY = TileIndex / TileNumPerLine * BlockTileY + threadIdx.y * ThreadTileY;

        if (GlobalBaseX >= W || GlobalBaseY >= H) continue;

        float Sum[ThreadTileY][ThreadTileX] = { 0 };

        // TileX_Plus_R walks the tile's input footprint: each iteration loads
        // column GlobalBaseX + TileX_Plus_R - KERNEL_RADIUS exactly once.
#pragma unroll
        //for (int TileX = 0; TileX < ThreadTileX; TileX++) {
        for (int TileY = 0; TileY < ThreadTileY; TileY++) {
#pragma unroll
            for (int TileX_Plus_R = 0; TileX_Plus_R < ThreadTileX + KERNEL_RADIUS * 2; TileX_Plus_R++)
                if (GlobalBaseX + TileX_Plus_R - KERNEL_RADIUS >= 0 && GlobalBaseX + TileX_Plus_R - KERNEL_RADIUS < W) {
                    float R_Input = D_Input[(GlobalBaseY + TileY) * W + (GlobalBaseX + TileX_Plus_R - KERNEL_RADIUS)];
#pragma unroll
                    //for (int TileY = 0; TileY < ThreadTileY; TileY++) {
                    for (int TileX = 0; TileX < ThreadTileX; TileX++) {
                        // R = TileX_Plus_R - TileX is the kernel tap index
                        // (0 .. 2*KERNEL_RADIUS) this input takes for output
                        // cell TileX; out-of-range pairs are skipped.
                        const int R = TileX_Plus_R - TileX;
                        if (R >= 0 && R <= KERNEL_RADIUS * 2) {
                            //const int GlobalIndex = (GlobalBaseY + TileY) * W + (GlobalBaseX + TileX);
                            Sum[TileY][TileX] += R_Input * C_Kernel[R];
                        }
                    }
                }
        }

        // Write the accumulated patch back to global memory.
#pragma unroll
        for (int TileX = 0; TileX < ThreadTileX; TileX++)
#pragma unroll
            for (int TileY = 0; TileY < ThreadTileY; TileY++) {
                const int GlobalIndex = (GlobalBaseY + TileY) * W + (GlobalBaseX + TileX);
                D_Output[GlobalIndex] = Sum[TileY][TileX];
            }
    }
}

//template<int ThreadTileX, int ThreadTileY>
//__global__ void TiledSwapLoopSharedX(float* D_Output, const float* D_Input, const int W, const int H) {
//    extern __shared__ float S_Input[];
//    
//    const unsigned int BlockTileX = min(W, blockDim.x * ThreadTileX);
//    const unsigned int BlockTileY = min(H, blockDim.y * ThreadTileY);
//    const unsigned int ThreadTileSize = BlockTileX * BlockTileY;
//    const unsigned int TileNum = H * W / ThreadTileSize;
//    const int TileNumPerLine = W / BlockTileX;
//
//    const unsigned int SharedW = BlockTileX + KERNEL_RADIUS * 2 + 1;
//    //const unsigned int SharedH = BlockTileY;
//    // Loop For Tile
//    for (int TileIndex = blockIdx.x; TileIndex < TileNum; TileIndex += gridDim.x) {
//        const int GlobalBaseX = TileIndex % TileNumPerLine * BlockTileX + threadIdx.x * ThreadTileX;
//        const int GlobalBaseY = TileIndex / TileNumPerLine * BlockTileY + threadIdx.y * ThreadTileY;
//
//        if (GlobalBaseX >= W || GlobalBaseY >= H) continue;
//
//        // Load Global To Shared
//        const int CenterIndex = threadIdx.y * SharedW + KERNEL_RADIUS + threadIdx.x * ThreadTileX;
//        // Mid
//#pragma unroll
//        for (int TileY = 0; TileY < ThreadTileY; TileY++)
//#pragma unroll
//            for (int TileX = 0; TileX < ThreadTileX; TileX++) {
//                const int GlobalIndex = (GlobalBaseY + TileY) * W + (GlobalBaseX + TileX);
//                S_Input[CenterIndex] = D_Input[GlobalIndex];
//            }
//        // Left
//        int ThreadIndex = threadIdx.x;
//        const int SharedBaseLeft = threadIdx.y * SharedW;
//        const int GlobalBaseLeft = GlobalIndex - (int)threadIdx.x - KERNEL_RADIUS;
//        while (ThreadIndex < KERNEL_RADIUS) {
//            S_Input[SharedBaseLeft + ThreadIndex] = GlobalX - (int)threadIdx.x - KERNEL_RADIUS + ThreadIndex >= 0 ? D_Input[GlobalBaseLeft + ThreadIndex] : 0;
//
//            ThreadIndex += blockDim.x;
//        }
//        // Right
//        ThreadIndex = threadIdx.x;
//        const int SharedBaseRight = threadIdx.y * SharedW + KERNEL_RADIUS + BlockTileY;
//        const int GlobalBaseRight = GlobalIndex - (int)threadIdx.x + blockDim.x;
//        while (ThreadIndex < KERNEL_RADIUS) {
//            S_Input[SharedBaseRight + ThreadIndex] = GlobalX - (int)threadIdx.x + blockDim.x + ThreadIndex < W ? D_Input[GlobalBaseRight + ThreadIndex] : 0;
//
//            ThreadIndex += blockDim.x;
//        }
//        __syncthreads();
//
//        float Sum[ThreadTileY][ThreadTileX] = { 0 };
//
//#pragma unroll
//        for (int TileY = 0; TileY < ThreadTileY; TileY++) {
//#pragma unroll
//            for (int TileX_Plus_R = 0; TileX_Plus_R < ThreadTileX + KERNEL_RADIUS * 2; TileX_Plus_R++)
//                if (GlobalBaseX + TileX_Plus_R - KERNEL_RADIUS >= 0 && GlobalBaseX + TileX_Plus_R - KERNEL_RADIUS < W) {
//                    float R_Input = D_Input[(GlobalBaseY + TileY) * W + (GlobalBaseX + TileX_Plus_R - KERNEL_RADIUS)];
//#pragma unroll
//                    for (int TileX = 0; TileX < ThreadTileX; TileX++) {
//                        const int R = TileX_Plus_R - TileX;
//                        if (R >= 0 && R <= KERNEL_RADIUS * 2) {
//                            //const int GlobalIndex = (GlobalBaseY + TileY) * W + (GlobalBaseX + TileX);
//                            Sum[TileY][TileX] += R_Input * C_Kernel[R];
//                        }
//                    }
//                }
//        }
//
//#pragma unroll
//        for (int TileY = 0; TileY < ThreadTileY; TileY++)
//#pragma unroll
//            for (int TileX = 0; TileX < ThreadTileX; TileX++) {
//                const int GlobalIndex = (GlobalBaseY + TileY) * W + (GlobalBaseX + TileX);
//                D_Output[GlobalIndex] = Sum[TileY][TileX];
//            }
//    }
//}

// Column-direction counterpart of TiledX: each thread accumulates a
// ThreadTileX x ThreadTileY patch in registers, stepping the filter taps
// vertically (stride W in memory). The Y+R bounds check implements zero
// padding at the top/bottom image edges.
template<int ThreadTileX, int ThreadTileY>
__global__ void TiledY(float* D_Output, const float* D_Input, const int W, const int H) {
    const unsigned int BlockTileX = min(W, blockDim.x * ThreadTileX);
    const unsigned int BlockTileY = min(H, blockDim.y * ThreadTileY);
    const unsigned int ThreadTileSize = BlockTileX * BlockTileY;
    const unsigned int TileNum = H * W / ThreadTileSize;
    const int TileNumPerLine = W / BlockTileX;
    // Loop For Tile
    for (int TileIndex = blockIdx.x; TileIndex < TileNum; TileIndex += gridDim.x) {
        const int GlobalBaseX = TileIndex % TileNumPerLine * BlockTileX + threadIdx.x * ThreadTileX;
        const int GlobalBaseY = TileIndex / TileNumPerLine * BlockTileY + threadIdx.y * ThreadTileY;

        // Threads whose whole patch starts outside the image have no work.
        if (GlobalBaseX >= W || GlobalBaseY >= H) continue;

        float Sum[ThreadTileY][ThreadTileX] = { 0 };

#pragma unroll
        for (int R = -KERNEL_RADIUS; R <= KERNEL_RADIUS; R++) {
#pragma unroll
            for (int TileY = 0; TileY < ThreadTileY; TileY++)
#pragma unroll
                for (int TileX = 0; TileX < ThreadTileX; TileX++) {
                    if (GlobalBaseY + TileY + R >= 0 && GlobalBaseY + TileY + R < H) {
                        const int GlobalIndex = (GlobalBaseY + TileY) * W + (GlobalBaseX + TileX);
                        Sum[TileY][TileX] += D_Input[GlobalIndex + R * W] * C_Kernel[R + KERNEL_RADIUS];
                    }
                }
        }

        // Write the accumulated patch back to global memory.
#pragma unroll
        for (int TileY = 0; TileY < ThreadTileY; TileY++)
#pragma unroll
            for (int TileX = 0; TileX < ThreadTileX; TileX++) {
                const int GlobalIndex = (GlobalBaseY + TileY) * W + (GlobalBaseX + TileX);
                D_Output[GlobalIndex] = Sum[TileY][TileX];
            }
    }
}

// Column-direction convolution with the "SwapLoop" reordering: iterate once
// over the union of input rows [TileY - KERNEL_RADIUS, TileY + KERNEL_RADIUS]
// this thread's tile needs and scatter each loaded value into every Sum it
// contributes to, shrinking per-column global reads from roughly
// (KERNEL_RADIUS * ThreadTileY) to (ThreadTileY + 2 * KERNEL_RADIUS).
template<int ThreadTileX, int ThreadTileY>
__global__ void TiledSwapLoopY(float* D_Output, const float* D_Input, const int W, const int H) {
    const unsigned int BlockTileX = min(W, blockDim.x * ThreadTileX);
    const unsigned int BlockTileY = min(H, blockDim.y * ThreadTileY);
    const unsigned int ThreadTileSize = BlockTileX * BlockTileY;
    const unsigned int TileNum = H * W / ThreadTileSize;
    const int TileNumPerLine = W / BlockTileX;
    // Loop For Tile
    for (int TileIndex = blockIdx.x; TileIndex < TileNum; TileIndex += gridDim.x) {
        const int GlobalBaseX = TileIndex % TileNumPerLine * BlockTileX + threadIdx.x * ThreadTileX;
        const int GlobalBaseY = TileIndex / TileNumPerLine * BlockTileY + threadIdx.y * ThreadTileY;

        if (GlobalBaseX >= W || GlobalBaseY >= H) continue;

        float Sum[ThreadTileY][ThreadTileX] = { 0 };

#pragma unroll
        for (int TileX = 0; TileX < ThreadTileX; TileX++) {
#pragma unroll
            for (int TileY_Plus_R = 0; TileY_Plus_R < ThreadTileY + KERNEL_RADIUS * 2; TileY_Plus_R++)
                // BUGFIX: the row index must be bounded by the image height H
                // (the original compared against the width W), which dropped
                // rows in [W, H) when H > W and read out of bounds when H < W.
                if (GlobalBaseY + TileY_Plus_R - KERNEL_RADIUS >= 0 && GlobalBaseY + TileY_Plus_R - KERNEL_RADIUS < H) {
                    float R_Input = D_Input[(GlobalBaseY + TileY_Plus_R - KERNEL_RADIUS) * W + (GlobalBaseX + TileX)];
#pragma unroll
                    for (int TileY = 0; TileY < ThreadTileY; TileY++) {
                        // R = TileY_Plus_R - TileY is the kernel tap index
                        // (0 .. 2*KERNEL_RADIUS) this input row takes for
                        // output row TileY; out-of-range pairs are skipped.
                        const int R = TileY_Plus_R - TileY;
                        if (R >= 0 && R <= KERNEL_RADIUS * 2) {
                            Sum[TileY][TileX] += R_Input * C_Kernel[R];
                        }
                    }
                }
        }

        // Write the accumulated patch back to global memory.
#pragma unroll
        for (int TileX = 0; TileX < ThreadTileX; TileX++)
#pragma unroll
            for (int TileY = 0; TileY < ThreadTileY; TileY++) {
                const int GlobalIndex = (GlobalBaseY + TileY) * W + (GlobalBaseX + TileX);
                D_Output[GlobalIndex] = Sum[TileY][TileX];
            }
    }
}


// CUDA Kernel END-------------------------------------------------------------------------------------------------------------------------

// CUDA Kernel Interface-------------------------------------------------------------------------------------------------------------------

// Opt-out switches: even when PROFILE is defined globally, these keep the
// exhaustive launch-configuration sweeps disabled per kernel family.
#define FORCE_NO_PROFILE_Naive
#define FORCE_NO_PROFILE_Shared
#define FORCE_NO_PROFILE_Tiled

// Common signature shared by every convolution kernel in this file.
typedef void (*KernelType)(float*, const float*, const int, const int);

// Launch a convolution kernel that needs no dynamic shared memory. Under
// PROFILE (and without the force-off switch) every power-of-two launch shape
// with 32..1024 threads per block is swept; otherwise the caller-supplied
// tuned configuration is launched once.
void RunNaive(KernelType KernelFunc, float* D_Output, float* D_Input, const int& W, const int& H, const int IdealBlockx = 2048, const int IdealThreadx = 4, const int IdealThready = 4) {
#if (defined PROFILE) && !(defined FORCE_NO_PROFILE_Naive)
    for (unsigned int BlockNum = 1024; BlockNum <= 4096; BlockNum *= 2)
        for (unsigned int ThreadX = 1; ThreadX <= 1024; ThreadX *= 2)
            for (unsigned int ThreadY = 1; ThreadY <= 1024; ThreadY *= 2) {
                const unsigned int ThreadsPerBlock = ThreadX * ThreadY;
                if (ThreadsPerBlock < 32 || ThreadsPerBlock > 1024) continue;
                KernelFunc << <dim3(BlockNum), dim3(ThreadX, ThreadY) >> > (D_Output, D_Input, W, H);
            }
#else
    const dim3 Grid(IdealBlockx);
    const dim3 Block(IdealThreadx, IdealThready);
    KernelFunc << <Grid, Block >> > (D_Output, D_Input, W, H);
#endif
}

// Launch a shared-memory convolution kernel, supplying the dynamic shared
// allocation it needs: one float per thread for the tile body plus
// (2 * KERNEL_RADIUS + 1) floats of halo/padding per row (X pass) or per
// column (Y pass). bIsX selects which dimension carries the halo.
void RunShared(KernelType KernelFunc, float* D_Output, float* D_Input, const int& W, const int& H, bool bIsX, const int IdealBlockx = 2048, const int IdealThreadx = 4, const int IdealThready = 4) {
#if (defined PROFILE) && !(defined FORCE_NO_PROFILE_Shared)
    for (unsigned int BlockNum = 1024; BlockNum <= 4096; BlockNum *= 2)
        for (unsigned int ThreadX = 1; ThreadX <= 1024; ThreadX *= 2)
            for (unsigned int ThreadY = 1; ThreadY <= 1024; ThreadY *= 2) {
                if (ThreadX * ThreadY < 32 || ThreadX * ThreadY > 1024) continue;
                const size_t SharedBytes = (ThreadX * ThreadY + (2 * KERNEL_RADIUS + 1) * (bIsX ? ThreadY : ThreadX)) * sizeof(float);
                KernelFunc << <dim3(BlockNum), dim3(ThreadX, ThreadY), SharedBytes >> > (D_Output, D_Input, W, H);
            }
#else
    const size_t SharedBytes = (IdealThreadx * IdealThready + (2 * KERNEL_RADIUS + 1) * (bIsX ? IdealThready : IdealThreadx)) * sizeof(float);
    KernelFunc << <dim3(IdealBlockx), dim3(IdealThreadx, IdealThready), SharedBytes >> > (D_Output, D_Input, W, H);
#endif
}

// Launch a register-tiled convolution kernel (no dynamic shared memory).
// Under PROFILE (and without the force-off switch) every power-of-two launch
// shape with 32..1024 threads per block is swept; otherwise the caller's
// tuned shape is launched once.
void RunTiled(KernelType KernelFunc, float* D_Output, float* D_Input, const int& W, const int& H, const int IdealBlockx = 2048, const int IdealThreadx = 4, const int IdealThready = 4) {
#if (defined PROFILE) && !(defined FORCE_NO_PROFILE_Tiled)
    for (unsigned int BlockNum = 1024; BlockNum <= 4096; BlockNum *= 2)
        for (unsigned int ThreadX = 1; ThreadX <= 1024; ThreadX *= 2)
            for (unsigned int ThreadY = 1; ThreadY <= 1024; ThreadY *= 2) {
                if (ThreadX * ThreadY < 32 || ThreadX * ThreadY > 1024) continue;
                KernelFunc << <dim3(BlockNum), dim3(ThreadX, ThreadY) >> > (D_Output, D_Input, W, H);
            }
#else
    KernelFunc << <dim3(IdealBlockx), dim3(IdealThreadx, IdealThready) >> > (D_Output, D_Input, W, H);
#endif
}

// CUDA Kernel Interface END-------------------------------------------------------------------------------------------------------------------

void ConvGPU::SeparableConvX(float* D_Output, float* D_Input, const int& W, const int& H) {
    // Runs every row-direction (X) convolution variant back to back, each
    // with its hand-tuned launch configuration. Every launch writes the same
    // D_Output, so this reads as a benchmarking/comparison driver — the
    // buffer's final contents come from the last kernel run.
    RunNaive(NaiveX, D_Output, D_Input, W, H, 4096, 64, 2);
    RunShared(SharedX, D_Output, D_Input, W, H, true, 4096, 128, 1);

    RunTiled(TiledX<2, 2>, D_Output, D_Input, W, H, 4096, 256, 1);
    RunTiled(TiledSwapLoopX<2, 4>, D_Output, D_Input, W, H, 4096, 128, 1);
    RunTiled(TiledNonmatX<4, 2>, D_Output, D_Input, W, H, 4096, 128, 1);
}

void ConvGPU::SeparableConvY(float* D_Output, float* D_Input, const int& W, const int& H) {
    // Runs every column-direction (Y) convolution variant back to back, each
    // with its hand-tuned launch configuration. Every launch writes the same
    // D_Output, so this reads as a benchmarking/comparison driver — the
    // buffer's final contents come from the last kernel run.
    RunNaive(NaiveY, D_Output, D_Input, W, H, 4096, 32, 8);
    RunShared(SharedY, D_Output, D_Input, W, H, false, 4096, 4, 32);

    RunTiled(TiledY<1, 8>, D_Output, D_Input, W, H, 4096, 128, 1);
    RunTiled(TiledSwapLoopY<1, 8>, D_Output, D_Input, W, H, 2048, 16, 16);
}

// Uploads the KERNEL_LENGTH filter taps from host memory into the
// constant-memory symbol C_Kernel read by all convolution kernels above.
// Must be called before any of the kernels are launched; any CUDA error is
// routed through HANDLE_ERROR.
void setConvolutionKernel(float* H_Kernel) {
    HANDLE_ERROR(cudaMemcpyToSymbol(C_Kernel, H_Kernel, sizeof(float) * KERNEL_LENGTH));
}