#include "gpuLineDart.h"
#include "cuda_defs.h"
#include "curand_kernel.h"
#include "randUtilGpu.h"
#include "device_launch_parameters.h"

namespace GPULineDartCuda
{
// Per-run constants, uploaded once from the host in GPULineDart::init().
__device__ __constant__ DATA_TYPE c_radius;        // Poisson-disk radius r
__device__ __constant__ DATA_TYPE c_sqRadius;      // r*r, saves a multiply per test
__device__ __constant__ unsigned int c_dimensions; // runtime copy of DIMENSIONS

// Per-block snapshot of the global sample count taken at the start of
// updateIntervalsKernel; addSamplesKernel uses it to know which samples
// were already tested.
__device__ unsigned int d_checkedSamples[NUM_BLOCKS];
// Per-thread (per-dart) dimension index the dart line runs along;
// pickPointKernel sets it to DIMENSIONS as a "dart is dead" sentinel.
__device__ unsigned int d_dartDimensionIndex[NUM_BLOCKS*NUM_THREADS];
// Thread-strided scratch of (.x = squared half-chord length, .y = chord
// center) pairs written by updateIntervalsKernel, consumed by
// updateIntervals2Kernel.
__device__ DATA_TYPE2 d_intersectionArray[NUM_BLOCKS*NUM_THREADS*MAX_INTERVAL];
// Number of entries each thread wrote into d_intersectionArray.
__device__ unsigned int d_numOverlaps[NUM_BLOCKS*NUM_THREADS];
// Global spin lock serializing the sample-list update in addSamplesKernel.
__device__ int d_lock = 0;

// Seed one curand state per thread. All threads share RANDOM_SEED and use
// their global thread id as the curand sequence number, which gives each
// thread a statistically independent stream.
__global__ void initRandomStates(curandState* randStates)
{
	const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
	curand_init(RANDOM_SEED, tid, 0, &randStates[tid]);
}

// One thread == one dart (an axis-aligned line). Threads in the same warp
// share a randomly chosen "dart dimension" (the axis the line runs along);
// every other coordinate of the line's reference point is drawn via
// RandUtilGpu::rand0To1. Each dart also gets its free-interval linked list
// reset to the single interval [0, 1].
// NOTE(review): WRAP_SIZE looks like a typo of WARP_SIZE (the `// id % 32`
// comment suggests 32) — confirm against cuda_defs.h.
__global__ void setupGPUDarts(curandState* randStates, GPUDart::GPUInterval* intervals, GPUDart::GPUMDVector refPoints)
{
	unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
	curandState* localState = randStates + id;

	// One lane per WRAP_SIZE group draws the shared dart dimension; the rest
	// of the group reads it back after the block-wide barrier.
	__shared__ unsigned int sharedDartDimensionIndex[NUM_THREADS / WRAP_SIZE];
	if (threadIdx.x % WRAP_SIZE == 0) // id % 32 == 0
	{
		sharedDartDimensionIndex[threadIdx.x / WRAP_SIZE] = RandUtilGpu::randRange(localState, (unsigned int)DIMENSIONS);
	}
	__syncthreads();
	unsigned int dartDimensionIndex = sharedDartDimensionIndex[threadIdx.x / WRAP_SIZE];
	

	// Fix every coordinate except the dart dimension; that coordinate is
	// picked later by pickPointKernel from the surviving intervals.
	for (unsigned int d = 0; d < DIMENSIONS; ++d)
	{
		if (d != dartDimensionIndex)
		{
			refPoints(id, d) = RandUtilGpu::rand0To1(localState);
		}
	}
	d_dartDimensionIndex[id] = dartDimensionIndex;

	// Initialize the doubly linked interval list: node 0 = [0, 1], node 1 is
	// the tail sentinel. numUsed counts allocated nodes (interval + sentinel).
	GPUDart::GPUInterval& interval = intervals[id];
	interval.intervalBegins[0] = 0;
	interval.intervalEnds[0] = 1;
	interval.next[0] = 1;
	interval.head = 0;
	interval.tail = 1;
	interval.numIntervals = 1;
	interval.prev[0] = -1;
	interval.prev[1] = 0;
	interval.numUsed = 2;
}

// Pass 1 of interval pruning. Each thread (dart) scans every existing sample
// and, for samples whose off-line distance (all dimensions except the dart
// dimension) is within the disk radius, records the pair
//   .x = c_sqRadius - squared off-line distance   (>= 0)
//   .y = sample coordinate along the dart dimension
// into its thread-strided slice of d_intersectionArray. Pass 2
// (updateIntervals2Kernel) converts these into interval subtractions.
// Also snapshots the current sample count per block into d_checkedSamples
// for addSamplesKernel. `intervals` is unused here.
__global__ void updateIntervalsKernel(GPUDart::GPUMDVector samples, GPUDart::GPUInterval* intervals, unsigned int* pNumSamples, GPUDart::GPUMDVector refPoints)
{
	unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
	unsigned int numSamples = *pNumSamples;
	if (threadIdx.x == 0)
		d_checkedSamples[blockIdx.x] = numSamples;

	unsigned int dartDimensionIndex = d_dartDimensionIndex[id];

	// Register tile double-buffering the next batch of samples while the
	// current batch is processed out of shared memory.
	DATA_TYPE localCache[DIMENSIONS];

	__shared__ DATA_TYPE sharedSamples[DIMENSIONS][NUM_THREADS];
	DATA_TYPE refPoint[DIMENSIONS];

	for (unsigned int d = 0; d < DIMENSIONS; ++d)
		refPoint[d] = refPoints(id, d);

	DATA_TYPE2* __restrict__ intersectionArray = d_intersectionArray + id;
	unsigned int numOverlaps = 0;
	
	// load samples to register
	for (unsigned int d = 0; d < DIMENSIONS; ++d)
		localCache[d] = samples(threadIdx.x, d);
	
	// Stream all samples block-wide in tiles of NUM_THREADS; numSamples is
	// uniform across the block, so the barriers below are reached by everyone.
	for (unsigned int j = 0; j < numSamples; )
	{
		// copy register to shared memory
		for (unsigned int d = 0; d < DIMENSIONS; ++d)
			sharedSamples[d][threadIdx.x] = localCache[d];

		__syncthreads();

		j += NUM_THREADS;

		// load samples to register
		// NOTE(review): on the last tile this prefetch reads past numSamples;
		// presumably safe because the buffer is sized MAX_SAMPLES and the
		// garbage is never used — confirm.
		for (unsigned int d = 0; d < DIMENSIONS; ++d)
			localCache[d] = samples(j + threadIdx.x, d);

		// Valid samples in the tile currently resident in shared memory.
		int numIterationSamples = min(NUM_THREADS, numSamples - (j - NUM_THREADS));

		for (unsigned int i = 0; i < numIterationSamples; ++i)
		{
			// Start from r^2 and subtract the squared per-axis offsets,
			// skipping the dart dimension itself.
			DATA_TYPE distSum = c_sqRadius;

			for (unsigned int d = 0; d < DIMENSIONS; ++d)
			{
				if (d != dartDimensionIndex)
				{
					distSum -= (sharedSamples[d][i] - refPoint[d]) * (sharedSamples[d][i] - refPoint[d]);
				}
			}

			// >= 0 means the sample's disk intersects the dart line.
			// NOTE(review): numOverlaps is not clamped to MAX_INTERVAL —
			// presumably guaranteed by the sampling geometry; confirm, since
			// overflow would write past d_intersectionArray.
			if (distSum >= 0)
			{
				intersectionArray[numOverlaps * NUM_THREADS * NUM_BLOCKS].x = distSum;
				intersectionArray[numOverlaps * NUM_THREADS * NUM_BLOCKS].y = sharedSamples[dartDimensionIndex][i];
				++numOverlaps;
			}
		}
		
		__syncthreads();

	}

	d_numOverlaps[id] = numOverlaps;
}

// Pass 2 of interval pruning: consume the (squared half-chord, chord center)
// pairs written by updateIntervalsKernel and subtract each covered segment
// [x0, x1] from this thread's free-interval linked list.
// `samples`, `pNumSamples` and `refPoints` are unused here; the signature
// mirrors updateIntervalsKernel's.
__global__ void updateIntervals2Kernel(GPUDart::GPUMDVector samples, GPUDart::GPUInterval* intervals, unsigned int* pNumSamples, GPUDart::GPUMDVector refPoints)
{
	unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;

	GPUDart::GPUInterval& interval = intervals[id];

	DATA_TYPE2* __restrict__ intersectionArray = d_intersectionArray + id;
	unsigned int numOverlaps = d_numOverlaps[id];

	for (unsigned int j = 0; j < numOverlaps; ++j)
	{
		// Intersection of this sample's disk with the dart line:
		// .x is the squared half-chord length, .y the chord center.
		// Fix: sqrt was evaluated twice per overlap; hoist it into one call.
		DATA_TYPE halfChord = sqrt(intersectionArray->x);
		DATA_TYPE x0 = intersectionArray->y - halfChord;
		DATA_TYPE x1 = intersectionArray->y + halfChord;
		intersectionArray += NUM_BLOCKS * NUM_THREADS; // thread-strided layout

		// Locate the range of list nodes touched by [x0, x1] via the
		// interval list's own lower_bound/upper_bound helpers.
		short distance = interval.numIntervals;
		short low, up, tempIt;
		low = GPUDart::GPUInterval::lower_bound(interval.intervalBegins, interval.next, interval.head, interval.tail, x0, &distance);
		tempIt = low;
		if (low != interval.head)
		{
			tempIt = interval.prev[low];
			distance += 1;
		}
		up = GPUDart::GPUInterval::upper_bound(interval.intervalEnds, interval.next, tempIt, interval.tail, x1, &distance);

		DATA_TYPE min = interval.intervalBegins[interval.head];
		DATA_TYPE max = interval.intervalEnds[interval.prev[interval.tail]];

		tempIt = up;
		// [x0, x1] lies strictly inside a single interval: split it in two.
		if (up != interval.tail && interval.next[tempIt] == low)
		{
			DATA_TYPE splitEnd = interval.intervalEnds[up];
			interval.intervalEnds[up] = x0;
			interval.insert(low, x1, splitEnd);
		}
		else
		{
			// Trim the partially covered boundary intervals, then erase the
			// nodes in between (presumably erase(low, up) removes [low, up) —
			// confirm in GPUDart::GPUInterval).
			if (x0 > min)
			{
				tempIt = interval.prev[low];
				if (x0 < interval.intervalEnds[tempIt])
					interval.intervalEnds[tempIt] = x0;
			}
			if (x1 < max)
			{
				if (x1 > interval.intervalBegins[up])
					interval.intervalBegins[up] = x1;
			}
			interval.erase(low, up);
		}
	} 
}

// Draw the free coordinate of each dart uniformly over the union of its
// remaining free intervals and store it into refPoints. A dart whose
// interval list is empty is marked dead by writing the DIMENSIONS sentinel
// into d_dartDimensionIndex.
__global__ void pickPointKernel(curandState* randStates, GPUDart::GPUInterval* intervals, GPUDart::GPUMDVector refPoints)
{
	const unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
	GPUDart::GPUInterval& freeList = intervals[tid];
	const unsigned int dartDim = d_dartDimensionIndex[tid];

	if (freeList.numIntervals == 0)
	{
		// Nothing left uncovered on this line: retire the dart.
		d_dartDimensionIndex[tid] = DIMENSIONS;
		return;
	}

	// Total measure of the free set (walk the linked list head -> tail).
	DATA_TYPE freeLength = 0;
	int node = freeList.head;
	while (node != freeList.tail)
	{
		freeLength += freeList.intervalEnds[node] - freeList.intervalBegins[node];
		node = freeList.next[node];
	}

	// Draw a random offset within the total free length, then map it onto
	// the interval union by walking the list again.
	DATA_TYPE offset = RandUtilGpu::randRange(randStates + tid, freeLength);

	node = freeList.head;
	while (node != freeList.tail)
	{
		const DATA_TYPE span = freeList.intervalEnds[node] - freeList.intervalBegins[node];
		if (offset <= span)
		{
			refPoints(tid, dartDim) = freeList.intervalBegins[node] + offset;
			break;
		}
		offset -= span;
		node = freeList.next[node];
	}
}

// Commit surviving darts as new samples. Each active thread (one whose
// d_dartDimensionIndex is not the DIMENSIONS sentinel) proposes its refPoint.
// Phase 1: candidates conflicting with a lower-indexed candidate in the same
// block are discarded via shared memory.
// Phase 2 (thread 0 only, under the global d_lock spin lock): re-check the
// survivors against samples other blocks appended after this block's
// d_checkedSamples snapshot, append the remainder to the global list, and
// update the shared failure counter.
__global__ void addSamplesKernel(GPUDart::GPUMDVector samples,
	unsigned int *pNumSamples, unsigned int* pFailureCount, GPUDart::GPUMDVector refPoints)
{
	unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;

	__shared__ bool sharedHasSample[NUM_THREADS];
	__shared__ VectorGPU sharedSamples[NUM_THREADS];
	// DIMENSIONS is the "dart died" sentinel written by pickPointKernel.
	sharedHasSample[threadIdx.x] = (d_dartDimensionIndex[id] != DIMENSIONS);
	for (unsigned int d = 0; d < DIMENSIONS; ++d)
		sharedSamples[threadIdx.x][d] = refPoints(id, d);
	__syncthreads();

	VectorGPU diff;

	// Phase 1: discard in-block conflicts; lower thread index wins.
	// NOTE(review): sharedHasSample flags are read and cleared concurrently
	// with no sync between iterations — a race may over-discard candidates
	// (still a valid sample set, just fewer samples); confirm this is intended.
	for (unsigned int i = 0; i < NUM_THREADS; ++i)
	{
		if (i < threadIdx.x && sharedHasSample[i] && sharedHasSample[threadIdx.x])
		{
			VectorGPU otherSample = sharedSamples[i];
			VectorGPU thisSample = sharedSamples[threadIdx.x];
			for (int d = 0; d < DIMENSIONS; ++d)
				diff[d] = otherSample[d] - thisSample[d];
			if (diff.getSquaredNorm() < c_sqRadius)
			{
				sharedHasSample[threadIdx.x] = false;
				break;
			}
		}
	}
	__syncthreads();

	// Phase 2: one thread per block, serialized across blocks by d_lock.
	if (threadIdx.x == 0)
	{
		// Samples this block already tested in updateIntervalsKernel.
		unsigned int numCheckedSamples = d_checkedSamples[blockIdx.x];
		// Compact the surviving candidates' thread indices.
		unsigned int numBlockSamples = 0;
		unsigned int blockSampleIndices[NUM_THREADS];
		for (unsigned int s = 0; s < NUM_THREADS; ++s)
		{
			if (sharedHasSample[s])
			{
				blockSampleIndices[numBlockSamples++] = s;
			}
		}

		unsigned int numSamples;
		unsigned int numUncheckedSamples;

		// acquire lock
		// NOTE(review): spin lock via atomicExch with no __threadfence() on
		// acquire/release — presumably adequate on the target architecture,
		// but a fence pair would be the safe publication pattern; confirm.
		while (atomicExch(&d_lock, 1) != 0)
		{
		}
		numSamples = *pNumSamples;
		numUncheckedSamples = numSamples;

		// Re-test survivors against samples appended by other blocks since
		// our snapshot; conflicting candidates are removed by swapping the
		// last index into their slot (order is irrelevant).
		if (numUncheckedSamples != numCheckedSamples)
		{
			for (unsigned int i = numCheckedSamples; i < numUncheckedSamples; ++i)
			{
				VectorGPU sample;
				for (unsigned int d = 0; d < DIMENSIONS; ++d)
					sample[d] = samples(i, d);
				for (unsigned int s = 0; s < numBlockSamples; )
				{
					unsigned int sampleIndex = blockSampleIndices[s];
					VectorGPU refPoint = sharedSamples[sampleIndex];

					for (unsigned int d = 0; d < DIMENSIONS; ++d)
						diff[d] = sample[d] - refPoint[d];

					if (diff.getSquaredNorm() < c_sqRadius)
					{
						if (s != numBlockSamples - 1)
							blockSampleIndices[s] = blockSampleIndices[numBlockSamples - 1];
						--numBlockSamples;
					}
					else
					{
						++s;
					}
				}
			}
			numCheckedSamples = numUncheckedSamples; // dead store (local only)
		}

		// Append the surviving candidates to the global sample list.
		// NOTE(review): no guard against numSamples exceeding the samples
		// buffer capacity (MAX_SAMPLES) — presumably sized generously; confirm.
		for (unsigned int s = 0; s < numBlockSamples; ++s)
		{
			unsigned int sampleIndex = blockSampleIndices[s];
			VectorGPU refPoint = sharedSamples[sampleIndex];
			for (unsigned int d = 0; d < DIMENSIONS; ++d)
				samples(numSamples, d) = refPoint[d];
			++numSamples;
		}

		(*pNumSamples) = numSamples;

		// release lock
		atomicExch(&d_lock, 0);

		// Failure tracking: a block that added nothing counts all NUM_THREADS
		// of its darts as failures; any success resets the counter.
		if (numSamples - numUncheckedSamples == 0)
			atomicAdd(pFailureCount, NUM_THREADS);
		else
			atomicExch(pFailureCount, 0);
	}
}

};

////////////////////////////////////////////////////////////////////////////////


// Trivial constructor; all allocation and setup happens in init().
GPULineDart::GPULineDart()
{

}

// Release all host and device resources acquired in init().
GPULineDart::~GPULineDart()
{
	CPUFREE(h_buffer_);

	d_samples_.destroy();
	d_refPoints_.destroy();

	GPUFREE(d_randStates_);
	GPUFREE(d_intervals_);
	// Fix: d_numSamples and d_failureCount are allocated in init() but were
	// never released here, leaking device memory on teardown.
	GPUFREE(d_numSamples);
	GPUFREE(d_failureCount);
}

// Allocate device/host resources, upload constants, and seed the per-thread
// RNG states.
// m      : requested sample count (stored in h_m_).
// radius : Poisson-disk radius.
// d      : dimensionality; must equal the compile-time DIMENSIONS.
// Returns false (after printing a message) on dimension mismatch, true otherwise.
bool GPULineDart::init(unsigned int m, DATA_TYPE radius, unsigned int d)
{
	if (d != DIMENSIONS)
	{
		printf("Dimension mismatch!!!\n");
		return false;
	}

	// 8-byte shared-memory banks — presumably chosen for the DATA_TYPE2
	// scratch traffic; confirm against the profile.
	cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
	
	h_m_ = m;
	h_radius_ = radius;
	h_dimensions_ = d;

	d_samples_.init(MAX_SAMPLES, h_dimensions_);
	d_refPoints_.init(NUM_BLOCKS * NUM_THREADS, h_dimensions_);

	// Upload the per-run constants read by the kernels.
	TOGPU_CONSTANT(GPULineDartCuda::c_radius, &radius, sizeof(DATA_TYPE));
	DATA_TYPE sqRadius = radius * radius;
	TOGPU_CONSTANT(GPULineDartCuda::c_sqRadius, &sqRadius, sizeof(DATA_TYPE));
	TOGPU_CONSTANT(GPULineDartCuda::c_dimensions, &h_dimensions_, sizeof(unsigned int));

	GPUMALLOC(&d_randStates_, sizeof(curandState) * NUM_BLOCKS * NUM_THREADS);
	GPUMALLOC(&d_intervals_, sizeof(GPUInterval) * NUM_BLOCKS * NUM_THREADS);
	GPUMALLOC(&d_numSamples, sizeof(unsigned int));
	GPUMALLOC(&d_failureCount, sizeof(unsigned int));

	// Zero the device-side sample and failure counters.
	int i = 0;
	TOGPU((void*)d_numSamples, &i, sizeof(unsigned int));
	TOGPU((void*)d_failureCount, &i, sizeof(unsigned int));

	CPUMALLOC(&h_buffer_, sizeof(unsigned int));

	GPULineDartCuda::initRandomStates <<< NUM_BLOCKS, NUM_THREADS >>> (d_randStates_);
	// Fix: check for launch errors here too, matching the other kernel wrappers.
	CUDA_CHECK_ERROR();

	return true;
}

// Launch dart setup (one dart per thread); a single block is used when
// useMultipleBlocks_ is disabled.
void GPULineDart::setupDarts()
{
	int gridSize = 1;
	if (useMultipleBlocks_)
		gridSize = NUM_BLOCKS;

	GPULineDartCuda::setupGPUDarts <<< gridSize, NUM_THREADS >>> (d_randStates_, d_intervals_, d_refPoints_);
	CUDA_CHECK_ERROR();
}

// Launch pass 1 of interval pruning (per-dart overlap detection).
void GPULineDart::updateIntervals()
{
	const int gridSize = useMultipleBlocks_ ? NUM_BLOCKS : 1;
	GPULineDartCuda::updateIntervalsKernel <<< gridSize, NUM_THREADS >>> (d_samples_, d_intervals_, d_numSamples, d_refPoints_);
	CUDA_CHECK_ERROR();
}

// Launch pass 2 of interval pruning (per-dart interval subtraction).
void GPULineDart::updateIntervals2()
{
	int gridSize;
	if (useMultipleBlocks_)
		gridSize = NUM_BLOCKS;
	else
		gridSize = 1;

	GPULineDartCuda::updateIntervals2Kernel <<< gridSize, NUM_THREADS >>> (d_samples_, d_intervals_, d_numSamples, d_refPoints_);
	CUDA_CHECK_ERROR();
}

// Launch point picking: each dart draws its free coordinate from its
// remaining intervals.
void GPULineDart::pickPoint()
{
	const int gridSize = useMultipleBlocks_ ? NUM_BLOCKS : 1;
	GPULineDartCuda::pickPointKernel <<< gridSize, NUM_THREADS >>> (d_randStates_, d_intervals_, d_refPoints_);
	CUDA_CHECK_ERROR();
}

// Launch sample commitment: surviving darts are appended to the global
// sample list and the failure counter is updated.
void GPULineDart::addSamples()
{
	int gridSize = 1;
	if (useMultipleBlocks_)
		gridSize = NUM_BLOCKS;

	GPULineDartCuda::addSamplesKernel <<< gridSize, NUM_THREADS >>> (d_samples_, d_numSamples, d_failureCount, d_refPoints_);
	CUDA_CHECK_ERROR();
}

// Copy the device-side sample counter back to the host, cache it in
// h_numSamples_, and return it.
unsigned int GPULineDart::getNumGeneratedSamples()
{
	FROMGPU(h_buffer_, (unsigned int*)d_numSamples, sizeof(unsigned int));
	h_numSamples_ = *h_buffer_;
	return h_numSamples_;
}

// Copy the sample count and coordinates back to the host, optionally validate
// all pairwise distances against the Poisson-disk radius, and print the count.
// The device buffer is dimension-major (element (i, d) lives at
// d * size_ + i), so the copy transposes into per-sample VectorGPU records.
void GPULineDart::downloadSamples()
{
	FROMGPU(h_buffer_, (unsigned int*)d_numSamples, sizeof(unsigned int));
	h_numSamples_ = h_buffer_[0];

	if (h_numSamples_ == 0)
		return;

	DATA_TYPE* h_temp = new DATA_TYPE[d_samples_.size_ * h_dimensions_];
	FROMGPU(h_temp, d_samples_.p_, sizeof(DATA_TYPE) * d_samples_.size_ * h_dimensions_);
	VectorGPU* h_samples = new VectorGPU[h_numSamples_];
	// Transpose SoA device layout into per-sample vectors.
	for (unsigned int i = 0; i < h_numSamples_; ++i)
	{
		for (unsigned int d = 0; d < h_dimensions_; ++d)
		{
			h_samples[i][d] = h_temp[d * d_samples_.size_ + i];
		}
	}

	/*
	for (int i = 0; i < h_numSamples_; ++i)
	{
		printf("[%d] ", i);
		for (int d = 0; d < h_dimensions_; ++d)
		{
			printf("%f ", h_samples[i].elements_[d]);
		}
		printf("\n");
	}
	*/

	if (validateResults_)
	{
		printf("validation\n");
		// O(n^2) pairwise check: no two samples may be closer than the radius
		// (DATA_TYPE_EPS gives a small slack for floating-point error).
		for (unsigned int i = 0; i < h_numSamples_; ++i)
		{
			for (unsigned int j = i + 1; j < h_numSamples_; ++j)
			{
				DATA_TYPE sq = (h_samples[i] - h_samples[j]).getSquaredNorm();
				if (sq < h_radius_ * h_radius_ - DATA_TYPE_EPS)
				{
					// Fix: i and j are unsigned — %d was the wrong specifier.
					printf("!!!invalid %u-%u dist : %f\n", i, j, sqrt(sq));
				}
			}
		}
	}

	// Fix: h_numSamples_ is unsigned — %d was the wrong specifier.
	printf("# of samples : %u\n", h_numSamples_);

	delete[] h_samples;
	delete[] h_temp;
	
}

////////////////////////////////////////////////////////////////////////////////

