#include "matrix3D.cuh"
#include <cstdio>
#include "timer.cuh"
#include "cuda_runtime.h"

#define TAG_MASK ( (1U << (32 - LOG2_WARP_SIZE)) - 1U )  // 0000 0111 1111 1111   1111 1111 1111 1111

// Software atomic increment for a warp-private shared-memory histogram,
// for hardware without shared-memory atomics (classic histogram256 trick).
//
// Each store tags the counter with the writer's lane id in the top
// LOG2_WARP_SIZE bits. When several lanes of one warp hit the same bin in
// the same step, only one store wins; the losers re-read, see a foreign
// tag (value != what they wrote), and retry — so each increment is applied
// exactly once.
//
// NOTE(review): assumed preconditions from the histogram256 pattern —
// confirm at call sites: _hist is this warp's private region, threadTag is
// unique per lane (lane_id << (32 - LOG2_WARP_SIZE)), and counts stay
// below 2^(32 - LOG2_WARP_SIZE) so they never carry into the tag bits.
inline __device__ void increment(volatile unsigned int* _hist, unsigned int data, unsigned int threadTag)
{
    unsigned int count;
    do{
        // Read the current value and strip the previous writer's tag.
        count = _hist[data] & TAG_MASK;
        // Bump the count and stamp it with our own lane tag.
        count = threadTag | (count + 1);
        _hist[data] = count;
        // Another lane's winning store carries a different tag; retry
        // until our tagged value is the one that sticks.
    }while(_hist[data] != count);
}

// Per-block partial histogram of 16-bit samples: bin = (sample + 32768) >> 8,
// i.e. 256 bins over the full short range.
//
// Launch config (see cuda_histogram1D): blockDim.x = warpCount * WARP_SIZE,
// dynamic shared memory = warpCount * binCount * sizeof(unsigned int).
// Each warp owns a private binCount-wide slice to cut intra-block contention.
// Output: histList[blockIdx.x * binCount + bin] = this block's count for bin.
// Precondition: binCount is a multiple of the warp size (the zero-init loop
// relies on it); binCount is 256 for the launches in this file.
__global__ void histogram_kernel(const short *source, int size, unsigned int *histList, int binCount)
{
    int thread = blockDim.x * blockIdx.x + threadIdx.x;

    // warpCount * binCount counters; one binCount-wide slice per warp.
    extern __shared__ unsigned int _hist[];

    // Start of this thread's warp-private histogram slice.
    unsigned int *warpStart = _hist + (threadIdx.x >> LOG2_WARP_SIZE) * binCount;

    // Cooperative zero-init: (binCount / WARP_SIZE) passes of blockDim.x
    // threads cover all warpCount * binCount slots exactly once.
    for(int k = 0; k < (binCount >> LOG2_WARP_SIZE); k++)
        _hist[threadIdx.x + (k * blockDim.x)] = 0;

    // Lane tag in the top LOG2_WARP_SIZE bits; only the lane-id bits of
    // threadIdx.x survive the left shift.
    const unsigned int tag = threadIdx.x << (32 - LOG2_WARP_SIZE);

    __syncthreads();    // shared memory must be zeroed before counting

    // Grid-stride loop over the input samples.
    for(int pos = thread; pos < size; pos += (blockDim.x * gridDim.x))
    {
        int data = source[pos];
#if __CUDA_ARCH__ >= 120
        // SM 1.2+ has shared-memory atomics; use them directly.
        // (The original test was inverted: it used the software emulation
        // on SM 2.0+, where hardware atomics are available, and atomicAdd
        // on pre-1.2 parts that lack shared-memory atomics entirely.)
        atomicAdd(warpStart + ((data + 32768) >> 8), 1);
#else
        // Pre-SM1.2: emulate the atomic with the tagged-write retry loop.
        increment(warpStart, (unsigned int)((data + 32768) >> 8), tag);
#endif
    }

    __syncthreads();    // all warps must finish counting before reduction

    // Reduce the per-warp slices into one per-block partial histogram.
    // Mask off any lane tags left behind by increment() so histList holds
    // clean counts.
    for(int bin = threadIdx.x; bin < binCount; bin += blockDim.x){
        int sum = 0;

        for(int i = 0; i < blockDim.x >> LOG2_WARP_SIZE; i++)
            sum += _hist[bin + i * binCount] & TAG_MASK;

        histList[blockIdx.x * binCount + bin] = sum;
    }
}

// Per-block partial 2-D joint histogram of (source, metric) sample pairs:
// bin = 16 * ((source + 32768) >> 12) + ((metric + 32768) >> 12), i.e. a
// 16x16 grid flattened to 256 bins (the min() clamp is a safety net; the
// expression already tops out at 15*16 + 15 = 255).
//
// Launch config (see cuda_histogram2D): blockDim.x = warpCount * WARP_SIZE,
// dynamic shared memory = warpCount * binCount * sizeof(unsigned int).
// Each warp owns a private binCount-wide slice to cut intra-block contention.
// Output: histList[blockIdx.x * binCount + bin] = this block's count for bin.
// Precondition: binCount is a multiple of the warp size; 256 in this file.
__global__ void multi_histogram_kernel(const short *source, const short *metric, int size, unsigned int *histList, int binCount)
{
    int thread = blockDim.x * blockIdx.x + threadIdx.x;

    // warpCount * binCount counters; one binCount-wide slice per warp.
    extern __shared__ unsigned int _hist[];

    // Start of this thread's warp-private histogram slice.
    unsigned int *warpStart = _hist + (threadIdx.x >> LOG2_WARP_SIZE) * binCount;

    // Cooperative zero-init: (binCount / WARP_SIZE) passes of blockDim.x
    // threads cover all warpCount * binCount slots exactly once.
    for(int k = 0; k < (binCount >> LOG2_WARP_SIZE); k++)
        _hist[threadIdx.x + (k * blockDim.x)] = 0;

    // Lane tag in the top LOG2_WARP_SIZE bits; only the lane-id bits of
    // threadIdx.x survive the left shift.
    const unsigned int tag = threadIdx.x << (32 - LOG2_WARP_SIZE);

    __syncthreads();    // shared memory must be zeroed before counting

    // Grid-stride loop over the input sample pairs.
    for(int pos = thread; pos < size; pos += (blockDim.x * gridDim.x))
    {
        int data = source[pos];
        int metr = metric[pos];
        // 16 coarse levels of data x 16 coarse levels of metric.
        unsigned int bin = (unsigned int)min( ((data + 32768) >> 12) * 16 + ((metr + 32768) >> 12), 255 );
#if __CUDA_ARCH__ >= 120
        // SM 1.2+ has shared-memory atomics; use them directly. (The
        // original left the arch guard commented out, running the slow
        // software emulation even on hardware with native atomics.)
        atomicAdd(warpStart + bin, 1);
#else
        // Pre-SM1.2: emulate the atomic with the tagged-write retry loop.
        increment(warpStart, bin, tag);
#endif
    }

    __syncthreads();    // all warps must finish counting before reduction

    // Reduce the per-warp slices into one per-block partial histogram.
    // Mask off any lane tags left behind by increment() so histList holds
    // clean counts.
    for(int bin = threadIdx.x; bin < binCount; bin += blockDim.x){
        int sum = 0;

        for(int i = 0; i < blockDim.x >> LOG2_WARP_SIZE; i++)
            sum += _hist[bin + i * binCount] & TAG_MASK;

        histList[blockIdx.x * binCount + bin] = sum;
    }
}

// Folds the per-block partial histograms into the final histogram.
// Launch config: <<<1, binCount>>> — thread i owns bin i.
// histList is laid out as consecutive blockDim.x-wide partials, so bin i of
// every partial lives at i, i + bins, i + 2*bins, ...; histSize is the total
// number of partial-histogram entries (binCount * block count).
// TAG_MASK strips any lane tags left in the partials by increment().
__global__ void merge_kernel(const unsigned int *histList, unsigned int *hist, int histSize)
{
    int i = threadIdx.x;
    int bins = blockDim.x;

    // Accumulate in a register and store once; the original did
    // `hist[i] += ...`, a global-memory read-modify-write per partial.
    unsigned int sum = 0;
    for(int k = i; k < histSize; k += bins)
    {
        sum += (histList[k] & TAG_MASK);
    }
    hist[i] = sum;
}

// Computes a binCount-bin 2-D joint histogram of the (material, metric)
// sample pairs on the GPU (binning as in multi_histogram_kernel) and copies
// the merged result into hist.
//
// material, metric : host arrays of sourceSize 16-bit samples each
// hist             : host output array of binCount bins
// warpCount        : warps per block; blockDim.x = warpCount * WARP_SIZE and
//                    shared memory per block = warpCount * 256 counters
//
// Errors are reported on stdout; on any failure all device allocations are
// released and the function returns without touching hist.
void cuda_histogram2D(const short *material, const short* metric, int sourceSize, unsigned int *hist, int binCount, int warpCount)
{
    int magic_number = 256;    // grid size == number of per-block partial histograms

    // NULL-init every device pointer so the single cleanup path can free
    // them unconditionally (cudaFree(NULL) is a documented no-op). The
    // original freed an uninitialized d_partial_hist after early failures,
    // and leaked every prior allocation when the d_partial_hist cudaMalloc
    // failed (it returned instead of jumping to Error).
    short* d_metric = NULL;
    short* d_material = NULL;
    unsigned int* d_hist = NULL;
    unsigned int* d_partial_hist = NULL;
    cudaError_t cudaStatus;

    // Declared before any goto so no initialization is jumped over (the
    // original's `cudaEvent_t start = t.go();` sat below several gotos).
    // NOTE(review): assumes Timer's constructor only sets up state and the
    // measurement starts at t.go() — confirm in timer.cuh.
    Timer t;
    cudaEvent_t start;

    cudaStatus = cudaMalloc((void**)&d_material, sourceSize * sizeof(short));
    if (cudaStatus != cudaSuccess)
    {
        printf("cudaMalloc failed for model memory request!\n");
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&d_metric, sourceSize * sizeof(short));
    if (cudaStatus != cudaSuccess)
    {
        printf("cudaMalloc failed for model memory request!\n");
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&d_hist, binCount * sizeof(unsigned int));
    if (cudaStatus != cudaSuccess)
    {
        printf("cudaMalloc failed for histogram memory request!\n");
        goto Error;
    }

    // One binCount-wide partial histogram per launched block.
    cudaStatus = cudaMalloc( (void**)&d_partial_hist, binCount * magic_number * sizeof(unsigned int) );
    if (cudaStatus != cudaSuccess)
    {
        printf("cudaMalloc failed!");
        goto Error;    // was `return;` — leaked every earlier allocation
    }

    // Upload both sample arrays.
    cudaStatus = cudaMemcpy(d_metric, metric, sourceSize * sizeof(short), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        printf("cudaMemcpy failed for sending model data on GPU!");
        goto Error;
    }

    cudaStatus = cudaMemcpy(d_material, material, sourceSize * sizeof(short), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        printf("cudaMemcpy failed for sending model data on GPU!");
        goto Error;
    }

    start = t.go();

    // Stage 1: per-block partial histograms in shared memory.
    multi_histogram_kernel<<< magic_number, (warpCount << LOG2_WARP_SIZE), (warpCount * 256)*sizeof(unsigned int)>>>
        (d_material, d_metric, sourceSize, d_partial_hist, binCount);

    // Stage 2: fold the partials into the final histogram.
    merge_kernel<<<1, binCount>>>(d_partial_hist, d_hist, binCount*magic_number);

    // Kernel launches do not return errors directly: pick up launch-config
    // failures here, execution failures at the synchronize below.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess)
    {
        printf("kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    cudaStatus = cudaDeviceSynchronize();
    printf("started with %d threads and %d groups",(warpCount << LOG2_WARP_SIZE), magic_number);
    printf("Histogram calculation complete after : %f Ms \n", t.get(start));

    if (cudaStatus != cudaSuccess)
    {
        printf("cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    cudaStatus = cudaMemcpy(hist, d_hist, binCount * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess)
    {
        printf("cudaMemcpy failed!");
        goto Error;
    }

Error:
    // All pointers are either valid device allocations or NULL.
    cudaFree(d_metric);
    cudaFree(d_material);
    cudaFree(d_partial_hist);
    cudaFree(d_hist);
}

// Computes a binCount-bin histogram of 16-bit samples on the GPU
// (bin = (sample + 32768) >> 8, as in histogram_kernel) and copies the
// merged result into hist.
//
// source    : host array of sourceSize 16-bit samples
// hist      : host output array of binCount bins
// warpCount : warps per block; blockDim.x = warpCount * WARP_SIZE and
//             shared memory per block = warpCount * 256 counters
//
// Errors are reported on stdout; on any failure all device allocations are
// released and the function returns without touching hist.
void cuda_histogram1D(const short *source, int sourceSize,unsigned int *hist, int binCount, int warpCount)
{
    int magic_number = 256;    // grid size == number of per-block partial histograms

    // NULL-init every device pointer so the single cleanup path can free
    // them unconditionally (cudaFree(NULL) is a documented no-op). The
    // original leaked d_source/d_hist when the d_partial_hist cudaMalloc
    // failed (early return) and freed an uninitialized d_partial_hist on
    // the memcpy failure path.
    short *d_source = NULL;
    unsigned int *d_hist = NULL;
    unsigned int *d_partial_hist = NULL;
    cudaError_t cudaStatus;

    // Declared before any goto so no initialization is jumped over (the
    // original's `cudaEvent_t start = t.go();` sat below several gotos).
    // NOTE(review): assumes Timer's constructor only sets up state and the
    // measurement starts at t.go() — confirm in timer.cuh.
    Timer t;
    cudaEvent_t start;

    cudaStatus = cudaMalloc((void**)&d_source, sourceSize * sizeof(short));
    if (cudaStatus != cudaSuccess)
    {
        printf("cudaMalloc failed for model memory request!\n");
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&d_hist, binCount * sizeof(unsigned int));
    if (cudaStatus != cudaSuccess)
    {
        printf("cudaMalloc failed for histogram memory request!\n");
        goto Error;
    }

    // One binCount-wide partial histogram per launched block.
    cudaStatus = cudaMalloc( (void**)&d_partial_hist, binCount * magic_number * sizeof(unsigned int) );
    if (cudaStatus != cudaSuccess)
    {
        printf("cudaMalloc failed!");
        goto Error;    // was `return;` — leaked d_source and d_hist
    }

    // Upload the sample array.
    cudaStatus = cudaMemcpy(d_source, source, sourceSize * sizeof(short), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        printf("cudaMemcpy failed for sending model data on GPU!");
        goto Error;
    }

    start = t.go();

    // Stage 1: per-block partial histograms in shared memory.
    histogram_kernel<<< magic_number, (warpCount << LOG2_WARP_SIZE), (warpCount * 256)*sizeof(unsigned int)>>>
        (d_source, sourceSize, d_partial_hist, binCount);

    // Stage 2: fold the partials into the final histogram.
    merge_kernel<<<1, binCount>>>(d_partial_hist, d_hist, binCount*magic_number);

    // Kernel launches do not return errors directly: pick up launch-config
    // failures here, execution failures at the synchronize below.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess)
    {
        printf("kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    cudaStatus = cudaDeviceSynchronize();
    printf("started with %d threads and %d groups",(warpCount << LOG2_WARP_SIZE), magic_number);
    printf("Histogram calculation complete after : %f Ms \n", t.get(start));

    if (cudaStatus != cudaSuccess)
    {
        printf("cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    cudaStatus = cudaMemcpy(hist, d_hist, binCount * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess)
    {
        printf("cudaMemcpy failed!");
        goto Error;
    }

Error:
    // All pointers are either valid device allocations or NULL.
    cudaFree(d_source);
    cudaFree(d_hist);
    cudaFree(d_partial_hist);
}
