#include <cutil_inline.h>
#include "sortingalgorithms.h"

/*
 * Conditionally swaps two elements according to the requested direction.
 * dir == 1: ascending order (swap when x > y); dir == 0: descending order
 * (swap when x <= y). Used as the basic comparator of the bitonic network.
 */
__device__ inline void compareAndExchange(int &x, int &y, unsigned int dir)
{
    // Normalize the comparison outcome to 0/1 and swap on a direction match.
    unsigned int outOfOrder = (x > y) ? 1u : 0u;
    if (outOfOrder == dir)
    {
        int swapped = x;
        x = y;
        y = swapped;
    }
}

/*
 * Bitonic sort of one SHARED_SIZE chunk held entirely in shared memory.
 *
 * items: device pointer to the array; block b works on the slice starting
 *        at items[b * SHARED_SIZE].
 * cnt:   number of elements to sort; the stage loop assumes it is a power
 *        of two and <= SHARED_SIZE (not range-checked here).
 *
 * Launch assumption (see host wrapper below): blockDim.x == SHARED_SIZE / 2,
 * so each thread owns two elements, one in each half of the shared buffer.
 */
__global__  void bitonicSortImpl(int * items, unsigned int cnt)
{
    // Copy the block's data from global memory into shared memory
    __shared__ int cudaItems[SHARED_SIZE];
    // Advance the pointer so this thread addresses its own two elements
    items += blockIdx.x * SHARED_SIZE + threadIdx.x;
    cudaItems[threadIdx.x] = items[0];
    cudaItems[threadIdx.x + (SHARED_SIZE / 2)] = items[SHARED_SIZE / 2];

    // Outer loop - one pass per bitonic stage (subsequence length i)
    for (unsigned int i = 2; i <= cnt; i <<= 1)
    {
	// Sort direction derived from the thread position and stage number
	unsigned int dir = (threadIdx.x & (i / 2)) == 0;
        // Inner loop - bitonic merge with halving stride j
	unsigned int position;
	for (unsigned int j = i / 2; j > 0; j >>= 1)
	{
	    __syncthreads();
            position = 2 * threadIdx.x - (threadIdx.x % j);
	    // Compare this thread's element with its partner j slots away
	    // (corresponds to the innermost loop of the OpenMP version)
	    compareAndExchange(cudaItems[position], cudaItems[position + j], dir);
	}
    }
    __syncthreads();
    // Copy the sorted data back to global memory
    items[0] = cudaItems[threadIdx.x];
    items[SHARED_SIZE / 2] = cudaItems[threadIdx.x + (SHARED_SIZE / 2)];
}

/*
 * Sorting pass for arrays that do not fit into on-chip memory. Works like
 * the in-shared-memory kernel above, except the last merge stage uses a
 * per-block alternating direction so neighbouring chunks form bitonic
 * sequences for the subsequent global merge phases.
 *
 * items:  device pointer to the full array.
 * offset: element offset of this launch wave into the array.
 * cnt:    NOTE(review) - never read inside the body (all loops are bounded
 *         by SHARED_SIZE); presumably kept for interface symmetry. Confirm
 *         before removing.
 */
__global__ void bitonicSortImplBig(int * items, unsigned long offset, int cnt = SHARED_SIZE)
{
    __shared__ int cudaItems[SHARED_SIZE];
    items += blockIdx.x * SHARED_SIZE + threadIdx.x + offset;
    cudaItems[threadIdx.x] = items[0];
    cudaItems[threadIdx.x + (SHARED_SIZE / 2)] = items[SHARED_SIZE / 2];

    // Outer loop - sorting stages; intentionally stops one stage short,
    // the final stage is the direction-aware merge below
    for (unsigned int i = 2; i < SHARED_SIZE; i <<= 1)
    {
	unsigned int dir = (threadIdx.x & (i / 2)) != 0;
        // Inner loop - bitonic merge
	for (unsigned int j = i / 2; j > 0; j >>= 1)
	{
	    __syncthreads();
	    unsigned int position = 2 * threadIdx.x - (threadIdx.x % j);
	    compareAndExchange(cudaItems[position], cudaItems[position + j], dir);
	}
    }

    unsigned int dir = blockIdx.x & 1; // Odd blocks ascending, even descending
    for (unsigned int j = SHARED_SIZE / 2; j > 0; j >>= 1)
    {
	__syncthreads();
	unsigned int position = 2 * threadIdx.x - (threadIdx.x % j);
	compareAndExchange(cudaItems[position], cudaItems[position + j], dir);
    }
    __syncthreads();
    items[0] = cudaItems[threadIdx.x];
    items[SHARED_SIZE / 2] = cudaItems[threadIdx.x + (SHARED_SIZE / 2)];
}

/*
 * One bitonic-merge step performed directly on global memory, used while the
 * merged subsequence is still too large for a shared-memory copy.
 *
 * items:  device pointer to the full array.
 * cnt:    total number of elements (the direction mask assumes a power of two).
 * size:   length of the subsequences being merged in this phase.
 * stride: distance between the two elements each thread compares.
 * dir:    base sort direction of this phase (0/1).
 * offset: element offset of this launch wave into the array.
 */
__global__  void globalMerge(int * items, unsigned long cnt, unsigned int size, unsigned int stride, unsigned short dir, unsigned long offset)
{
    // Global element index of this thread. Kept as unsigned long so adding
    // the unsigned long offset cannot silently truncate to 32 bits.
    unsigned long threadPos = blockIdx.x * blockDim.x + threadIdx.x + offset;
    if (threadPos >= cnt)
        return;
    // Direction flips with the element's position inside its size-sized run,
    // same scheme as in the shared-memory kernels.
    unsigned long dirPtr = threadPos & ((cnt / 2) - 1);
    unsigned int d = dir ^ ((dirPtr & (size / 2)) != 0);
    unsigned long position = 2 * threadPos - (threadPos & (stride - 1));
    // BUG FIX: the original condition had no 'return' and no braces, so the
    // exchange ran ONLY when the pair was out of bounds (out-of-range global
    // accesses) and never for valid pairs. Guard must be the positive check:
    // only touch pairs that lie entirely inside the array.
    if (position + stride < cnt)
        compareAndExchange(items[position], items[position + stride], d);
}

/*
 * Merge of a remaining subsequence that already fits into shared memory.
 *
 * items:  device pointer to the full array.
 * cnt:    total number of elements (direction mask assumes a power of two).
 * size:   length of the subsequences being merged in this phase.
 * dir:    base sort direction of this phase (0/1).
 * offset: element offset of this launch wave into the array.
 *
 * NOTE(review): the early return below is evaluated per thread, while the
 * remaining threads of the block proceed into __syncthreads(). If only part
 * of a block returns, the survivors wait at a barrier the others never reach
 * (undefined behavior). This is safe only if the check is uniform across
 * each block (e.g. cnt a multiple of SHARED_SIZE) - confirm with the callers.
 */
__global__ void sharedMerge(int * items, unsigned long cnt, unsigned int size, unsigned short dir, unsigned long offset)
{
    __shared__ int cudaItems[SHARED_SIZE];
    // Index of the first of the two elements this thread owns
    unsigned long threadPos = blockIdx.x * SHARED_SIZE + threadIdx.x + offset;
    if (threadPos >= cnt || threadPos + SHARED_SIZE / 2 >= cnt)
        return;
    items += threadPos;
    cudaItems[threadIdx.x] = items[0];
    cudaItems[threadIdx.x + (SHARED_SIZE / 2)] = items[SHARED_SIZE / 2];

    // Thread position used to derive the merge direction, as in globalMerge
    unsigned int dirPtr = (blockIdx.x * blockDim.x + threadIdx.x + offset) & ((cnt / 2) - 1);
    unsigned int d = dir ^ ((dirPtr & (size / 2)) != 0);
    // Bitonic merge
    for (unsigned int j = SHARED_SIZE / 2; j > 0; j >>= 1)
    {
	__syncthreads();
	unsigned int position = 2 * threadIdx.x - (threadIdx.x & (j - 1));
	compareAndExchange(cudaItems[position], cudaItems[position + j], d);
    }
    __syncthreads();
    items[0] = cudaItems[threadIdx.x];
    items[SHARED_SIZE / 2] = cudaItems[threadIdx.x + (SHARED_SIZE / 2)];
}

/*
 * Host entry point: bitonic sort of cnt integers already resident in device
 * memory. blockCnt blocks of SHARED_SIZE/2 threads are launched per wave;
 * per the comment below, blockCnt is expected to be a power of two.
 *
 * NOTE(review): no cudaGetLastError()/synchronization checks follow the
 * kernel launches, so launch failures pass silently - consider adding them.
 * NOTE(review): the outer stage counter 'i' is unsigned int while cnt is
 * unsigned long; for cnt >= 2^32 the loop condition would overflow.
 */
extern "C" void bitonicSort(int * items, unsigned long cnt, unsigned int blockCnt)
{
    if (cnt < 2)
	return;
    unsigned int threadCnt = SHARED_SIZE / 2;
    if (cnt <= SHARED_SIZE)
    {
	bitonicSortImpl<<<blockCnt, threadCnt>>>(items, cnt);
    } else
    {
	// First, sort SHARED_SIZE-sized chunks (alternating directions) that
	// still fit into shared memory.
	// Block counts must be given as powers of 2.
	// l = elements covered by one launch wave (blockCnt blocks * 2 per thread)
	unsigned long l = blockCnt * threadCnt * 2;
	// NOTE(review): the stride adds SHARED_SIZE/2 on top of l, which looks
	// like it would skip elements whenever more than one wave is needed
	// (likewise 'l + j' and 'l + j / 2' below) - TODO confirm intent.
        for (unsigned long i = 0; i < cnt; i += (l + SHARED_SIZE / 2))
	{
	    bitonicSortImplBig<<<blockCnt, threadCnt>>>(items, i);
	}

	// Then run bitonic merges; once a subsequence fits into shared
	// memory, finish the merge with the shared-memory kernel.
	unsigned short dir;
	for (unsigned int i = 2 * SHARED_SIZE; i <= cnt; i <<= 1)
	{
	    for (unsigned int j = i / 2; j > 0; j >>= 1)
	    {
		dir = (j & i) == 0;
		if (j >= SHARED_SIZE)
		{
		    for (unsigned long x = 0; x < cnt; x += (l + j))
		    {
			globalMerge<<<blockCnt, threadCnt>>>(items, cnt, i, j, dir, x);
		    }

		}
		else
		{
		    // The remainder fits into shared memory: merge it there,
		    // then move on to the next stage (or finish)
                    for (unsigned long x = 0; x < cnt; x += (l + j / 2))
		    {
			sharedMerge<<<blockCnt, threadCnt>>>(items, cnt, i, dir, x);
		    }
		    break;
		}
	    }
	}
    }
}
