#include <iostream>
// 
//#define __CUDACC__
#include <cooperative_groups.h>
//#include <cuda/barrier>
// Local Include
#include "GpuInterface.cuh"

namespace CoGroups = cooperative_groups;

// CUDA Kernel-------------------------------------------------------------------------------------------------------------------------
// Shared-memory round-trip test kernel: each thread writes its warp-lane id
// (threadIdx.x % 32) into a 32-slot shared array, barriers, reads the value
// back, and stores it to global memory at its flat 1D index.
//
// Grid/block layout: any launch; only threadIdx.x and blockIdx.x contribute
// to the output index, so threads differing only in threadIdx.y target the
// same element (benign here — they store the same value).
// D_Input is accepted to match the common kernel signature but is unused;
// W * H is taken as the element count of D_Output for bounds checking.
__global__ void SharedTestKernel(float* D_Output, const float* D_Input, const int W, const int H) {
    __shared__ float S_Input[32];

    // Each lane writes its own slot; threads that share a lane id store the
    // same value, so the concurrent writes do not race on distinct data.
    S_Input[threadIdx.x % 32] = threadIdx.x % 32;
    __syncthreads();  // publish shared-memory writes to the whole block before reading

    float x = S_Input[threadIdx.x % 32];

    // Bounds guard: callers launch this with grids far larger than the buffer
    // (e.g. 4096 blocks x up to 1024 threads), so without this check the
    // trailing threads would write out of bounds.
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < W * H)
        D_Output[idx] = x;
}

// CUDA Kernel END-------------------------------------------------------------------------------------------------------------------------

// CUDA Kernel Interface-------------------------------------------------------------------------------------------------------------------

#define FORCE_NO_PROFILE_Naive
#define FORCE_NO_PROFILE_Shared
#define FORCE_NO_PROFILE_Tiled
#define FORCE_NO_PROFILE_TiledShared

typedef void (*KernelType)(float*, const float*, const int, const int);

// Launches KernelFunc either once with the supplied "ideal" configuration, or
// (when PROFILE is defined and FORCE_NO_PROFILE_Naive is not) sweeps block
// counts 1024..4096 and 2D thread shapes whose product lies in [32, 1024] so
// an external profiler can compare configurations.
// Launch-configuration errors are surfaced via cudaGetLastError() and logged
// to stderr; kernel launches themselves are asynchronous and return no status.
void RunNaive(KernelType KernelFunc, float* D_Output, float* D_Input, const int& W, const int& H, const int IdealBlockx = 2048, const int IdealThreadx = 4, const int IdealThready = 4) {
#if (defined PROFILE) && !(defined FORCE_NO_PROFILE_Naive)
    for (unsigned int BlockNum = 1024; BlockNum <= 4096; BlockNum *= 2)
        for (unsigned int ThreadX = 1; ThreadX <= 1024; ThreadX *= 2)
            for (unsigned int ThreadY = 1; ThreadY <= 1024; ThreadY *= 2)
                if (ThreadX * ThreadY >= 32 && ThreadX * ThreadY <= 1024) {
                    KernelFunc << <dim3(BlockNum), dim3(ThreadX, ThreadY) >> > (D_Output, D_Input, W, H);
                    const cudaError_t Err = cudaGetLastError();  // catch bad launch configs per sweep point
                    if (Err != cudaSuccess)
                        std::cerr << "RunNaive launch failed: " << cudaGetErrorString(Err) << '\n';
                }
#else
    KernelFunc << <dim3(IdealBlockx), dim3(IdealThreadx, IdealThready) >> > (D_Output, D_Input, W, H);
    const cudaError_t Err = cudaGetLastError();  // catch bad launch configs
    if (Err != cudaSuccess)
        std::cerr << "RunNaive launch failed: " << cudaGetErrorString(Err) << '\n';
#endif
}

// CUDA Kernel Interface END-------------------------------------------------------------------------------------------------------------------

// Sweeps SharedTestKernel across block sizes 32..1024 (powers of two), 4096
// blocks each, checking every launch for configuration errors.
// W and H are forwarded to the kernel; D_Input is forwarded unchanged.
// Launches are asynchronous — callers must synchronize before reading D_Output.
void MemGPU::SharedMemTest(float* D_Output, float* D_Input, const int& W, const int& H) {
    for (int Thread = 32; Thread <= 1024; Thread *= 2) {
        SharedTestKernel << <4096, Thread >> > (D_Output, D_Input, W, H);
        const cudaError_t Err = cudaGetLastError();  // launch-config errors surface here, not as a return value
        if (Err != cudaSuccess)
            std::cerr << "SharedMemTest launch failed: " << cudaGetErrorString(Err) << '\n';
    }
}