#include "stdafx.h"
#include "GpuPrimitives.h"
#include "RayTraceSystem.h"
#include "VectorMath.h"

using namespace GxLibMath;

namespace RayTracePro
{
	// Static kernel handles shared by all GpuSort_Int4 users; populated by Init(),
	// which must be called before Sort().
	cl::Kernel GpuSort_Int4::_kernel_LocalHistogram, GpuSort_Int4::_kernel_RadixLocalSort, GpuSort_Int4::_kernel_RadixPermute;

	// Integer ceiling division: smallest q such that q * denom >= num (for non-negative inputs).
	inline int roundUpDiv(int num, int denom) { return (num + denom - 1) / denom; }

	// Emits the OpenCL source for the int4 sort: the key/value type configuration
	// macros, followed by the generic kernel body loaded from Sort.cl.
	void GpuSort_Int4::GetSource(StringBuilder & sb)
	{
		// Adjacent string literals concatenate, so this writes the same bytes as
		// four separate WriteString calls would.
		sb.WriteString(
			"#define MAX_KV_TYPE (int4)(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0x7FFFFFFF)\n"
			"#define K_TYPE int\n"
			"#define KV_TYPE int4\n"
			"#define K_TYPE_IDENTITY 0\n");
		sb.WriteString(ReadTextFile(L"Sort.cl"));
	}

	// Looks up the three radix-sort kernels from the compiled program.
	// Must run before Sort() is first called.
	void GpuSort_Int4::Init()
	{
		_kernel_LocalHistogram = GpuDevice::CreateKernel("kernel__localHistogram");
		_kernel_RadixLocalSort = GpuDevice::CreateKernel("kernel__radixLocalSort");
		_kernel_RadixPermute   = GpuDevice::CreateKernel("kernel__radixPermute");
	}

	// Radix-sorts 'datasetSize' int4 elements from bufferIn, ping-ponging between
	// bufferIn and bufferOut each pass. Sorts on the low 16 bits only, 4 bits
	// (one hex digit, 16 histogram bins) per pass.
	void GpuSort_Int4::Sort(const cl::Buffer & bufferIn, const cl::Buffer & bufferOut, int datasetSize)
	{
		// Grow-only buffer policy: the histogram buffers are rebuilt only when the
		// dataset exceeds the largest size seen so far.
		bool reallocate = datasetSize > _datasetSize;
		_datasetSize = datasetSize;
		const int _bits = 16;	// sort up to 16 bits
		//---- Prepare some buffers
		if (reallocate)
		{
			unsigned int numBlocks = roundUpDiv(_datasetSize, _workgroupSize * 4);   
			// column size = 2^b = 16
			// row size = numblocks
			_clBuffer_radixHist1 = cl::Buffer(GpuDevice::GetContext(), CL_MEM_READ_WRITE, sizeof(int) * 16 * numBlocks);
			_clBuffer_radixHist2 = cl::Buffer(GpuDevice::GetContext(), CL_MEM_READ_WRITE, (_valueSize + _keySize) * 16 * numBlocks);
		}

		unsigned int numBlocks = roundUpDiv(_datasetSize, _workgroupSize * 4);
		unsigned int Ndiv4 = roundUpDiv(_datasetSize, 4); // each work item handles 4 elements

		size_t global[1] = {Align(Ndiv4, _workgroupSize)};
		size_t local[1] = {_workgroupSize};

		cl_mem dataA = bufferIn();
		cl_mem dataB = bufferOut();
		for(unsigned int bitOffset = 0; bitOffset < _bits; bitOffset += 4)
		{
			// 1) Each workgroup sorts its tile by using local memory
			radixLocal(global, local, dataA, _clBuffer_radixHist1(), _clBuffer_radixHist2(), bitOffset);
			// 2) Create an histogram of d=2^b digits entries
			localHistogram(global, local, dataA, _clBuffer_radixHist1(), _clBuffer_radixHist2(), bitOffset);
			// 3) Scan the p*2^b = p*(16) entry histogram table. Stored in column-major order, computes global digit offsets.
			GpuDevice::ExclusiveScan(_clBuffer_radixHist1, 0, 16 * numBlocks);
			// 4) Prefix sum results are used to scatter each work-group's elements to their correct position.
			radixPermute(global, local, dataA, dataB, _clBuffer_radixHist1(), _clBuffer_radixHist2(), bitOffset, numBlocks);
			std::swap(dataA, dataB);
		}
		// NOTE(review): 16 bits / 4 bits-per-pass = 4 passes with a swap after each,
		// so the final sorted data lands back in bufferIn, not bufferOut — confirm
		// callers read the result from the expected buffer.
	}

	
	// Launches kernel__radixLocalSort: each workgroup sorts its own tile of the
	// input by the current 4-bit digit using local memory.
	// NOTE(review): 'global', 'local', 'hist' and 'blockHists' are currently
	// unused — this launcher derives its own 128-wide NDRange below; confirm the
	// kernel no longer takes the histogram buffers.
	void GpuSort_Int4::radixLocal(const size_t* global, const size_t* local, cl_mem data, cl_mem hist, cl_mem blockHists, int bitOffset)
	{
		// FIX: clStatus was previously read uninitialized — the initial '='
		// assignment is commented out below, so the first live use was '|=' on an
		// indeterminate value (undefined behavior). Initialize it explicitly.
		cl_int clStatus = CL_SUCCESS;
		unsigned int a = 0;	// running kernel-argument index

		int workgroupSize = 128;

		unsigned int Ndiv = roundUpDiv(_datasetSize, 4); // Each work item handle 4 entries
		size_t global_128[1] = {Align(Ndiv, workgroupSize)};
		size_t local_128[1] = {workgroupSize};

		/*if (_keysOnly)
			clStatus  = clSetKernelArg(_kernel_RadixLocalSort, a++, _keySize * 2 * 4 * workgroupSize, (const void*)NULL);
		else
			clStatus  = clSetKernelArg(_kernel_RadixLocalSort, a++, (_valueSize+_keySize) * 2 * 4 * workgroupSize, (const void*)NULL);// 2 KV array of 128 items (2 for permutations)*/
		clStatus |= clSetKernelArg(_kernel_RadixLocalSort(), a++, sizeof(cl_mem), (const void*)&data);
		clStatus |= clSetKernelArg(_kernel_RadixLocalSort(), a++, sizeof(int), (const void*)&bitOffset);
		clStatus |= clSetKernelArg(_kernel_RadixLocalSort(), a++, sizeof(unsigned int), (const void*)&_datasetSize);
		clStatus |= clEnqueueNDRangeKernel(GpuDevice::GetQueue()(), _kernel_RadixLocalSort(), 1, NULL, global_128, local_128, 0, NULL, NULL);
	}

	void GpuSort_Int4::localHistogram(const size_t* global, const size_t* local, cl_mem data, cl_mem hist, cl_mem blockHists, int bitOffset)
	{
		cl_int clStatus;
		clStatus = clSetKernelArg(_kernel_LocalHistogram(), 0, sizeof(cl_mem), (const void*)&data);
		clStatus |= clSetKernelArg(_kernel_LocalHistogram(), 1, sizeof(int), (const void*)&bitOffset);
		clStatus |= clSetKernelArg(_kernel_LocalHistogram(), 2, sizeof(cl_mem), (const void*)&hist);
		clStatus |= clSetKernelArg(_kernel_LocalHistogram(), 3, sizeof(cl_mem), (const void*)&blockHists);
		clStatus |= clSetKernelArg(_kernel_LocalHistogram(), 4, sizeof(unsigned int), (const void*)&_datasetSize);
		clStatus |= clEnqueueNDRangeKernel(GpuDevice::GetQueue()(), _kernel_LocalHistogram(), 1, NULL, global, local, 0, NULL, NULL);	

	}

	void GpuSort_Int4::radixPermute(const size_t* global, const size_t* local, cl_mem dataIn, cl_mem dataOut, cl_mem histScan, cl_mem blockHists, int bitOffset, unsigned int numBlocks)
	{
		cl_int clStatus;
		clStatus  = clSetKernelArg(_kernel_RadixPermute(), 0, sizeof(cl_mem), (const void*)&dataIn);
		clStatus |= clSetKernelArg(_kernel_RadixPermute(), 1, sizeof(cl_mem), (const void*)&dataOut);
		clStatus |= clSetKernelArg(_kernel_RadixPermute(), 2, sizeof(cl_mem), (const void*)&histScan);
		clStatus |= clSetKernelArg(_kernel_RadixPermute(), 3, sizeof(cl_mem), (const void*)&blockHists);
		clStatus |= clSetKernelArg(_kernel_RadixPermute(), 4, sizeof(int), (const void*)&bitOffset);
		clStatus |= clSetKernelArg(_kernel_RadixPermute(), 5, sizeof(unsigned int), (const void*)&_datasetSize);
		clStatus |= clSetKernelArg(_kernel_RadixPermute(), 6, sizeof(unsigned int), (const void*)&numBlocks);
		clStatus |= clEnqueueNDRangeKernel(GpuDevice::GetQueue()(), _kernel_RadixPermute(), 1, NULL, global, local, 0, NULL, NULL);
	}

	/*************************************************************************************************************/
	/* GPU Scan */
	/*************************************************************************************************************/
	//All three kernels run 512 threads per workgroup
	//Must be a power of two
	// NOTE(review): the comment above says 512, but WORKGROUP_SIZE below is 256 —
	// confirm which value the scan kernels were actually compiled for.
	const uint MAX_BATCH_ELEMENTS = 64 * 1048576;       // hard cap on total scanned elements per batch
	const uint MIN_SHORT_ARRAY_SIZE = 4;                // smallest power-of-two array the short path accepts
	const uint WORKGROUP_SIZE = 256;                    // work-items per workgroup; each item handles 4 elements
	const uint MIN_SCAN_ELEMENTS = 4 * WORKGROUP_SIZE;  // ChooseArraySize never returns less than this
	const uint MAX_SHORT_ARRAY_SIZE = 4 * WORKGROUP_SIZE;               // single-level scan limit
	const uint MIN_LARGE_ARRAY_SIZE = 8 * WORKGROUP_SIZE;               // two-level scan lower bound
	const uint MAX_LARGE_ARRAY_SIZE = 4 * WORKGROUP_SIZE * WORKGROUP_SIZE; // two-level scan upper bound

	// Builds the four scan kernels, verifies the device supports the required
	// workgroup size, and allocates the inter-level block-sum buffer (d_Buffer).
	// Throws a const char* message on any initialization failure.
	GpuScan::GpuScan(cl_context cxGPUContext, cl_command_queue cqParamCommandQue)
	{
		cl_int ciErrNum = 0;
		ckScanExclusiveLocal1 = GpuDevice::CreateKernel("scanExclusiveLocal1");
		ckScanExclusiveLocal2 = GpuDevice::CreateKernel("scanExclusiveLocal2");
		ckUniformUpdate = GpuDevice::CreateKernel("uniformUpdate");
		ckFinalUpdate = GpuDevice::CreateKernel("finalUpdate");
		//Check for work group size
		cl_device_id device;
		size_t szScanExclusiveLocal1, szScanExclusiveLocal2, szUniformUpdate;

		ciErrNum |= clGetCommandQueueInfo(cqParamCommandQue, CL_QUEUE_DEVICE, sizeof(cl_device_id), &device, NULL);
		ciErrNum |= clGetKernelWorkGroupInfo(ckScanExclusiveLocal1(),  device, CL_KERNEL_WORK_GROUP_SIZE, sizeof(size_t), &szScanExclusiveLocal1, NULL);
		ciErrNum |= clGetKernelWorkGroupInfo(ckScanExclusiveLocal2(), device, CL_KERNEL_WORK_GROUP_SIZE, sizeof(size_t), &szScanExclusiveLocal2, NULL);
		ciErrNum |= clGetKernelWorkGroupInfo(ckUniformUpdate(), device, CL_KERNEL_WORK_GROUP_SIZE, sizeof(size_t), &szUniformUpdate, NULL);
		if( ciErrNum != CL_SUCCESS || (szScanExclusiveLocal1 < WORKGROUP_SIZE) || (szScanExclusiveLocal2 < WORKGROUP_SIZE) || (szUniformUpdate < WORKGROUP_SIZE) )
		{
			throw "Cannot initialize GPU scan utility.";
		}
		// One uint per 4*WORKGROUP_SIZE tile — enough block sums for the largest batch.
		d_Buffer = clCreateBuffer(cxGPUContext, CL_MEM_READ_WRITE, (MAX_BATCH_ELEMENTS / (4 * WORKGROUP_SIZE)) * sizeof(uint), NULL, &ciErrNum);
		// FIX: the allocation status was previously ignored; a failed allocation
		// would only surface later as a crash inside the scan kernels.
		if (ciErrNum != CL_SUCCESS || d_Buffer == NULL)
		{
			throw "Cannot initialize GPU scan utility.";
		}
	}

	// Releases the inter-level block-sum buffer allocated in the constructor.
	// NOTE(review): copy construction/assignment are not suppressed anywhere
	// visible here, so copying a GpuScan would double-release d_Buffer — confirm
	// the class is never copied (or delete the copy operations in the header).
	GpuScan::~GpuScan()
	{
		clReleaseMemObject(d_Buffer);
	}

	// Throws when an OpenCL status (or boolean precondition, passed as cl_int)
	// does not match the expected success value.
	void oclCheckError(cl_int errNum, cl_int succNum)
	{
		if (errNum == succNum)
			return;
		throw "OpenCL Failed";
	}

	// Rounds 'dividend' up to the nearest multiple of 'divisor'.
	static uint iSnapUp(uint dividend, uint divisor){
		const uint rem = dividend % divisor;
		return (rem == 0) ? dividend : dividend + (divisor - rem);
	}

	// Factors L as (odd remainder) * 2^log2L: writes the exponent of the largest
	// power of two dividing L into log2L and returns the odd remainder
	// (1 exactly when L is a power of two). L == 0 yields 0 with log2L = 0.
	static uint factorRadix2(uint& log2L, uint L){
		log2L = 0;
		if (L == 0)
			return 0;
		while ((L & 1) == 0) {
			L >>= 1;
			++log2L;
		}
		return L;
	}

	////////////////////////////////////////////////////////////////////////////////
	// Short scan launcher
	////////////////////////////////////////////////////////////////////////////////
	// Bottom-level exclusive scan: each workgroup scans one 4*WORKGROUP_SIZE tile
	// of d_Src into d_Dst (4 elements per work item). Returns the local work size.
	// NOTE(review): assumes n * size is divisible by 4 and a multiple of the
	// workgroup size — callers validate this (see scanExclusiveShort).
	size_t GpuScan::scanExclusiveLocal1(cl_command_queue cqCommandQueue, cl_mem d_Dst, cl_mem d_Src, uint n, uint size)
	{
		cl_kernel kernel = ckScanExclusiveLocal1();
		cl_int err = clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&d_Dst);
		err |= clSetKernelArg(kernel, 1, sizeof(cl_mem), (void *)&d_Src);
		err |= clSetKernelArg(kernel, 2, 2 * WORKGROUP_SIZE * sizeof(uint), NULL);	// local-memory scratch
		err |= clSetKernelArg(kernel, 3, sizeof(uint), (void *)&size);
		oclCheckError(err, CL_SUCCESS);

		const size_t localWork = WORKGROUP_SIZE;
		const size_t globalWork = (n * size) / 4;	// 4 elements per work item

		err = clEnqueueNDRangeKernel(cqCommandQueue, kernel, 1, NULL, &globalWork, &localWork, 0, NULL, NULL);
		oclCheckError(err, CL_SUCCESS);

		return localWork;
	}

	// Single-level scan launcher: validates the preconditions of the short path,
	// then runs one scanExclusiveLocal1 pass. Returns the local work size used.
	size_t GpuScan::scanExclusiveShort(cl_command_queue cqCommandQueue, cl_mem d_Dst, cl_mem d_Src, uint batchSize, uint arrayLength)
	{
		uint log2L = 0;
		const uint remainder = factorRadix2(log2L, arrayLength);

		// arrayLength must be a power of two ...
		oclCheckError( remainder == 1, 1);
		// ... within the short-path size range ...
		oclCheckError( (arrayLength >= MIN_SHORT_ARRAY_SIZE) && (arrayLength <= MAX_SHORT_ARRAY_SIZE), 1 );
		// ... the batch must not exceed the global cap ...
		oclCheckError( (batchSize * arrayLength) <= MAX_BATCH_ELEMENTS, 1 );
		// ... and every workgroup must be fully packed with data.
		oclCheckError( (batchSize * arrayLength) % (4 * WORKGROUP_SIZE) == 0, 1);

		return scanExclusiveLocal1(cqCommandQueue, d_Dst, d_Src, batchSize, arrayLength);
	}

	////////////////////////////////////////////////////////////////////////////////
	// Large scan launcher
	////////////////////////////////////////////////////////////////////////////////
	// Second-level scan: scans the per-workgroup block sums (sampled from
	// d_Dst/d_Src at stride 'stepSize') into d_Buffer.
	void GpuScan::scanExclusiveLocal2(
		cl_command_queue cqCommandQueue,
		cl_mem d_Buffer,
		cl_mem d_Dst,
		cl_mem d_Src,
		uint n,
		uint size,
		uint stepSize
		)
	{
			uint elements = n * size;	// number of block sums to scan

			cl_kernel kernel = ckScanExclusiveLocal2();
			cl_int err = clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&d_Buffer);
			err |= clSetKernelArg(kernel, 1, sizeof(cl_mem), (void *)&d_Dst);
			err |= clSetKernelArg(kernel, 2, sizeof(cl_mem), (void *)&d_Src);
			err |= clSetKernelArg(kernel, 3, 2 * WORKGROUP_SIZE * sizeof(uint), NULL);	// local-memory scratch
			err |= clSetKernelArg(kernel, 4, sizeof(uint), (void *)&elements);
			err |= clSetKernelArg(kernel, 5, sizeof(uint), (void *)&size);
			err |= clSetKernelArg(kernel, 6, sizeof(uint), (void *)&stepSize);
			oclCheckError(err, CL_SUCCESS);

			const size_t localWork = WORKGROUP_SIZE;
			const size_t globalWork = iSnapUp(elements, WORKGROUP_SIZE);	// pad to a whole workgroup

			err = clEnqueueNDRangeKernel(cqCommandQueue, kernel, 1, NULL, &globalWork, &localWork, 0, NULL, NULL);
			oclCheckError(err, CL_SUCCESS);
	}

	// Adds the scanned block sums (d_Buffer) uniformly onto each of the n blocks
	// of d_Dst, completing the two-level scan. Returns the local work size used.
	size_t GpuScan::uniformUpdate(
		cl_command_queue cqCommandQueue,
		cl_mem d_Dst,
		cl_mem d_Buffer,
		uint n
		)
	{
		cl_kernel kernel = ckUniformUpdate();
		cl_int err = clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&d_Dst);
		err |= clSetKernelArg(kernel, 1, sizeof(cl_mem), (void *)&d_Buffer);
		oclCheckError(err, CL_SUCCESS);

		const size_t localWork = WORKGROUP_SIZE;
		const size_t globalWork = n * WORKGROUP_SIZE;	// one workgroup per block

		err = clEnqueueNDRangeKernel(cqCommandQueue, kernel, 1, NULL, &globalWork, &localWork, 0, NULL, NULL);
		oclCheckError(err, CL_SUCCESS);

		return localWork;
	}

	// Two-level scan launcher: scans each 4*WORKGROUP_SIZE tile, scans the
	// per-tile totals, then folds the totals back in with uniformUpdate.
	size_t GpuScan::scanExclusiveLarge(
		cl_command_queue cqCommandQueue,
		cl_mem d_Dst,
		cl_mem d_Src,
		uint batchSize,
		uint arrayLength
	){
		uint log2L = 0;

		// arrayLength must be a power of two within the large-path size range,
		// and the batch must not exceed the global cap.
		oclCheckError( factorRadix2(log2L, arrayLength) == 1, 1);
		oclCheckError( (arrayLength >= MIN_LARGE_ARRAY_SIZE) && (arrayLength <= MAX_LARGE_ARRAY_SIZE), 1 );
		oclCheckError( (batchSize * arrayLength) <= MAX_BATCH_ELEMENTS, 1 );

		const uint tile = 4 * WORKGROUP_SIZE;	// elements handled per workgroup
		const uint numTiles = (batchSize * arrayLength) / tile;

		// 1) Scan each tile independently.
		scanExclusiveLocal1(cqCommandQueue, d_Dst, d_Src, numTiles, tile);
		// 2) Scan the per-tile totals into d_Buffer.
		scanExclusiveLocal2(cqCommandQueue, d_Buffer, d_Dst, d_Src, batchSize, arrayLength / tile, tile);
		// 3) Add the scanned totals back onto every tile.
		return uniformUpdate(cqCommandQueue, d_Dst, d_Buffer, numTiles);
	}

	// Rounds 'size' up to the next power of two, clamped below by
	// MIN_SCAN_ELEMENTS (the smallest array the scan kernels can handle).
	int ChooseArraySize(int size)
	{
		int rounded = MIN_SHORT_ARRAY_SIZE;
		while (rounded < size)
			rounded *= 2;
		if (rounded < MIN_SCAN_ELEMENTS)
			return MIN_SCAN_ELEMENTS;
		return rounded;
	}


	// Returns the buffer length (in elements) that must be allocated so that
	// Scan(size) can safely read/write its rounded-up working range.
	int GpuScan::GetScanArrayAllocationSize(int size)
	{
		int arraySize = ChooseArraySize(size);
		// FIX: was 'arraySize < MAX_LARGE_ARRAY_SIZE'. Scan() uses '<=' for the
		// scanExclusiveLarge branch, which scans the full arraySize; with '<',
		// arraySize == MAX_LARGE_ARRAY_SIZE fell into the blocked branch below and
		// could return blocks*arraySize < arraySize — an under-allocation that let
		// the scan kernels run past the end of the buffer.
		if (arraySize <= MAX_LARGE_ARRAY_SIZE)
			return arraySize;
		else
		{
			// Mirror Scan()'s blocked decomposition for huge arrays:
			//arraySize > 4 * WORKGROUP_SIZE * WORKGROUP_SIZE
			arraySize /= WORKGROUP_SIZE; // > 4*WORKGROUP_SIZE, satisfies scanExclusiveLarge
			int blocks = (size+arraySize-1)/arraySize; // blocks <= WORKGROUP_SIZE
			return blocks*arraySize;
		}
	}

	// Top level of the 3-level scan path in Scan(): adds the scanned per-block
	// totals in d_Buffer back onto the n blocks of d_Dst.
	// The kernel receives log2(stepSize) - 2, i.e. log2(stepSize / 4) — presumably
	// the per-work-item block stride; confirm against the finalUpdate kernel.
	// NOTE(review): log2StepSize is uint and starts at 1, so stepSize <= 2 would
	// underflow after '-= 2'; the only caller (Scan) passes large power-of-two
	// sizes, where this is safe.
	void GpuScan::finalUpdate(cl_command_queue cqCommandQueue, cl_mem d_Dst, cl_mem d_Buffer, uint n, uint stepSize)
	{
		cl_int ciErrNum;
		size_t localWorkSize, globalWorkSize;
		uint log2StepSize = 1;
		// Smallest exponent with 2^log2StepSize >= stepSize (== log2 for powers of two).
		while ((1U<<log2StepSize) < stepSize) log2StepSize ++;
		log2StepSize -= 2;
		ciErrNum  = clSetKernelArg(ckFinalUpdate(), 0, sizeof(cl_mem), (void *)&d_Dst);
		ciErrNum |= clSetKernelArg(ckFinalUpdate(), 1, sizeof(cl_mem), (void *)&d_Buffer);
		ciErrNum |= clSetKernelArg(ckFinalUpdate(), 2, sizeof(uint), (void *)&log2StepSize);
		oclCheckError(ciErrNum, CL_SUCCESS);

		localWorkSize = WORKGROUP_SIZE;
		globalWorkSize = n * WORKGROUP_SIZE;

		ciErrNum = clEnqueueNDRangeKernel(cqCommandQueue, ckFinalUpdate(), 1, NULL, &globalWorkSize, &localWorkSize, 0, NULL, NULL);
		oclCheckError(ciErrNum, CL_SUCCESS);
	}
	// Exclusive prefix scan of 'size' uint elements from bufferIn into bufferOut.
	// Rounds the problem up to a power-of-two length (ChooseArraySize) and picks
	// the 1-, 2- or 3-level launcher by magnitude.
	// NOTE(review): the kernels therefore touch arraySize >= size elements — the
	// buffers must be sized via GetScanArrayAllocationSize, not 'size'.
	void GpuScan::Scan(cl_mem bufferOut, cl_mem bufferIn, int size)
	{
		int arraySize = ChooseArraySize(size);
		if (arraySize <= MAX_SHORT_ARRAY_SIZE)
		{
			// Single-level scan: fits in one pass of scanExclusiveLocal1.
			scanExclusiveShort(GpuDevice::GetQueue()(), bufferOut, bufferIn, 1, arraySize);
		}
		else if (arraySize <= MAX_LARGE_ARRAY_SIZE)
		{
			// Two-level scan.
			scanExclusiveLarge(GpuDevice::GetQueue()(), bufferOut, bufferIn, 1, arraySize);
		}
		else if (arraySize <= WORKGROUP_SIZE * MAX_LARGE_ARRAY_SIZE)
		{
			// Three-level scan: split into 'blocks' chunks, two-level-scan each,
			// then scan the per-chunk totals and fold them back in.
			//arraySize > 4 * WORKGROUP_SIZE * WORKGROUP_SIZE
			arraySize /= WORKGROUP_SIZE; // > 4*WORKGROUP_SIZE, satisfies scanExclusiveLarge
			int blocks = (size+arraySize-1)/arraySize; // blocks <= WORKGROUP_SIZE
			scanExclusiveLarge(GpuDevice::GetQueue()(), bufferOut, bufferIn, blocks, arraySize);
			// Note: here 'arraySize' is the per-chunk length, passed as stepSize.
			scanExclusiveLocal2(GpuDevice::GetQueue()(), d_Buffer, bufferOut, bufferIn, 1, WORKGROUP_SIZE, arraySize);
			finalUpdate(GpuDevice::GetQueue()(), bufferOut, d_Buffer, arraySize*blocks/(4*WORKGROUP_SIZE), arraySize);
		}
		else
			throw "Scan array too large.";
	}
}
