#ifndef CUDA_KERNEL
#define CUDA_KERNEL

#include <cuda.h>
#include <cuda_runtime_api.h>
#include <driver_functions.h>
#include <vector_functions.h>
#include <channel_descriptor.h>

#define BLOCK_SIZE_X 32
#define BLOCK_SIZE_Y 16


//textures
texture<float, cudaTextureType2D, cudaReadModeElementType> texEvals;
texture<float, cudaTextureType2D, cudaReadModeElementType> texEvecs;


//camera position, declared as symbol
__device__ __constant__ float3 pov;
//face number
__device__ __constant__ int fn;
//edge number
__device__ __constant__ int en;

//scaling factor
__device__ __constant__ float scaleT;
//source vertex index used by the distance kernels
__device__ __constant__ int source;
//cluster index
__device__ __constant__ int clusterIndex;

//dot product of two 3D vectors
__device__ float dot ( float3 a, float3 b ) {
	float s = a.x * b.x;
	s += a.y * b.y;
	s += a.z * b.z;
	return s;
}

/**
 * 1st Kernel
 * For every face, given its normal and one of its vertices, decides whether
 * the face is visible from the observer position stored in the `pov` symbol.
 * Expects a 2D block of BLOCK_SIZE_X x BLOCK_SIZE_Y threads; one thread per face.
 */
__global__ void testVisibility( float3* faceNormals, float3* facePoints, bool* output ) {

	//flatten the 2D launch configuration into a linear face index
	int col = blockIdx.x * BLOCK_SIZE_X + threadIdx.x;
	int row = blockIdx.y * BLOCK_SIZE_Y + threadIdx.y;
	int idx = col + row * blockDim.x * gridDim.x;

	//guard: the grid may contain more threads than faces
	if ( idx >= fn ) return;

	//direction from the observer to a point on the face
	float3 dir = facePoints[idx];
	dir.x -= pov.x;
	dir.y -= pov.y;
	dir.z -= pov.z;

	//normalize the view direction
	float len = sqrtf( dot(dir, dir) );
	dir.x /= len;
	dir.y /= len;
	dir.z /= len;

	//a face is visible when its normal points towards the observer
	output[idx] = ( dot(dir, faceNormals[idx]) < 0.0f );
}


/**
 * 2nd kernel
 * An edge belongs to the silhouette when exactly one of its two adjacent
 * faces is visible: stores that XOR of the two visibility flags in output.
 */
__global__ void testSilhouette( bool* faceVisibility, int2* edgeFaces, bool* output ) {

	//flatten the 2D launch configuration into a linear edge index
	int col = blockIdx.x * BLOCK_SIZE_X + threadIdx.x;
	int row = blockIdx.y * BLOCK_SIZE_Y + threadIdx.y;
	int idx = col + row * blockDim.x * gridDim.x;

	//guard against the surplus threads of the last block
	if ( idx >= en ) return;

	int2 adjacent = edgeFaces[idx];
	output[idx] = ( faceVisibility[adjacent.x] != faceVisibility[adjacent.y] );
}


/**
 * Kernel for computing diffusion distance.
 * Eigenvalue coefficients are fetched from the texEvals texture; the
 * eigenvectors live in global memory (evecs, nVert * evecSize floats).
 * For every vertex selected by mask, computes the squared diffusion
 * distance to the vertex stored in the `source` symbol and keeps the
 * running minimum in output.
 **/
__global__ void diffusionDistance( float* evecs, bool* mask, int nVert, int evecSize, int nEvals, float* output ) {

	//flatten the 2D launch configuration into a linear vertex index
	int col = blockIdx.x * BLOCK_SIZE_X + threadIdx.x;
	int row = blockIdx.y * BLOCK_SIZE_Y + threadIdx.y;
	int idx = col + row * blockDim.x * gridDim.x;

	//skip surplus threads and vertices excluded by the mask
	if ( idx >= nVert ) return;
	if ( !mask[idx] ) return;

	//sum over the first nEvals spectral components
	float dist = 0.0f;
	for ( int i = 0; i < nEvals; i ++ ) {
		float lambda = tex2D(texEvals, i, 0);
		float diff = evecs[source*evecSize + i] - evecs[idx*evecSize + i];
		dist += lambda * diff * diff;
	}

	//keep the smallest distance seen so far for this vertex
	output[idx] = fminf(output[idx], dist);
}

/**
 * Kernel for computing diffusion distance AND the closest cluster.
 * Same distance computation as diffusionDistance, but when a vertex
 * finds a new minimum distance it also records, in outputIndices,
 * the cluster (symbol `clusterIndex`) that produced it.
 **/
__global__ void closestCluster( float* evecs, bool* mask, int nVert, int evecSize, int nEvals, float* outputDistance, int* outputIndices ) {

	//flatten the 2D launch configuration into a linear vertex index
	int col = blockIdx.x * BLOCK_SIZE_X + threadIdx.x;
	int row = blockIdx.y * BLOCK_SIZE_Y + threadIdx.y;
	int idx = col + row * blockDim.x * gridDim.x;

	//skip surplus threads and vertices excluded by the mask
	if ( idx >= nVert ) return;
	if ( !mask[idx] ) return;

	//squared diffusion distance between idx and the source vertex
	float dist = 0.0f;
	for ( int i = 0; i < nEvals; i ++ ) {
		float lambda = tex2D(texEvals, i, 0);
		float diff = evecs[source*evecSize + i] - evecs[idx*evecSize + i];
		dist += lambda * diff * diff;
	}

	//a new minimum: remember both the distance and the winning cluster
	if ( dist < outputDistance[idx] ) {
		outputDistance[idx] = dist;
		outputIndices[idx] = clusterIndex;
	}
}

/**
 * Slice-wise minimum reduction: thread i scans the slice
 * data[i*sliceSize .. min(dataSize, (i+1)*sliceSize)) and writes the
 * slice minimum (and the matching entry of indices) to output[i].
 * output/outputIndices must hold ceil(dataSize/sliceSize) elements.
 *
 * BUG FIX: the original guard was `index >= dataSize`, which allowed
 * every thread whose slice starts past the end of the data to read
 * data[start] out of bounds and to write past the end of output.
 * The guard must be on the slice start instead.
 **/
__global__ void minimumSlice( float* data, int* indices, int dataSize, int sliceSize, float* output, int* outputIndices ) {

	int x = blockIdx.x * BLOCK_SIZE_X + threadIdx.x;        //pixel column
	int y = blockIdx.y * BLOCK_SIZE_Y + threadIdx.y;        //pixel row

	//Recover the index of the current slice
	int index = x + y*blockDim.x*gridDim.x;

	int start = index * sliceSize;
	//only threads whose slice actually overlaps the data may proceed
	if ( start >= dataSize ) return;

	int end = min(dataSize, start + sliceSize);

	//running minimum over the slice, seeded with its first element
	float r = data[start];
	int in = indices[start];

	for ( int i = start + 1; i < end; i ++ ) {
		if ( data[i] < r ) {
			r = data[i];
			in = indices[i];
		}
	}

	output[index] = r;
	outputIndices[index] = in;
}

/**
 * Host wrapper that computes the silhouette of a mesh as seen from
 * `observer`: first marks every face as visible/hidden, then flags the
 * edges whose two adjacent faces disagree on visibility.
 *
 * faceNormals, facePoints, edgeFaces and output must be device pointers;
 * output must hold edgeNumber booleans.
 */
void computeSilhouetteCUDA( int faceNumber, float3* faceNormals, float3* facePoints,
							int edgeNumber, int2* edgeFaces, bool* output,
							float3 observer ) {

	//publish camera position, face count and edge count to constant memory
	cudaMemcpyToSymbol( pov, &observer, sizeof(observer), 0, cudaMemcpyHostToDevice );
	cudaMemcpyToSymbol( fn, &faceNumber, sizeof(faceNumber), 0, cudaMemcpyHostToDevice );
	cudaMemcpyToSymbol( en, &edgeNumber, sizeof(edgeNumber), 0, cudaMemcpyHostToDevice );

	//temporary per-face visibility flags
	bool* faceVisibility;
	cudaMalloc((void**) &faceVisibility, faceNumber * sizeof(bool));

	const int threadsPerGrid = BLOCK_SIZE_X * BLOCK_SIZE_Y;
	dim3 block(BLOCK_SIZE_X, BLOCK_SIZE_Y);
	//ceil-division: exactly enough blocks, no systematic extra block
	//(at least 1 so a zero-sized input still yields a valid launch config)
	int blocks = (faceNumber + threadsPerGrid - 1) / threadsPerGrid;
	dim3 grid(blocks > 0 ? blocks : 1, 1);

	testVisibility<<<grid, block>>>(faceNormals, facePoints, faceVisibility);

	//no intermediate synchronization needed: kernels issued on the same
	//(default) stream execute in order, so testSilhouette sees the results
	blocks = (edgeNumber + threadsPerGrid - 1) / threadsPerGrid;
	grid.x = blocks > 0 ? blocks : 1;

	testSilhouette<<<grid, block>>>(faceVisibility, edgeFaces, output);

	//cudaThreadSynchronize() is deprecated; wait for completion before freeing
	cudaDeviceSynchronize();

	cudaFree( faceVisibility );
}


/**
 * Host wrapper around the diffusionDistance kernel: for every masked
 * vertex, computes the diffusion distance to vertex src and keeps the
 * running minimum in outputGPU.
 *
 * evecsGPU, mask and outputGPU must be device pointers; prepareCUDA must
 * have bound the eigenvalue texture beforehand.
 **/
void computeDDistanceCUDA( float* evecsGPU, bool* mask, float* outputGPU, int nVert, int sizeEvals, int nEvals, int src ) {

	//publish the source vertex index to constant memory
	cudaMemcpyToSymbol( source, &src, sizeof(src), 0, cudaMemcpyHostToDevice );

	const int threadsPerGrid = BLOCK_SIZE_X * BLOCK_SIZE_Y;
	dim3 block(BLOCK_SIZE_X, BLOCK_SIZE_Y);
	//ceil-division: exactly enough blocks to cover nVert vertices (min 1)
	int blocks = (nVert + threadsPerGrid - 1) / threadsPerGrid;
	dim3 grid(blocks > 0 ? blocks : 1, 1);

	diffusionDistance<<<grid, block>>>(evecsGPU, mask, nVert, sizeEvals, nEvals, outputGPU);

	//cudaThreadSynchronize() is deprecated — use cudaDeviceSynchronize()
	cudaDeviceSynchronize();
}


/**
 * Host wrapper around the closestCluster kernel: for every masked vertex,
 * computes the diffusion distance to vertex src and, when it improves on
 * outputDistanceGPU, records `cluster` in outputIndicesGPU.
 *
 * evecsGPU, mask, outputDistanceGPU and outputIndicesGPU must be device
 * pointers; prepareCUDA must have bound the eigenvalue texture beforehand.
 **/
void computeClosestClusterCUDA( float* evecsGPU, bool* mask, float* outputDistanceGPU, int* outputIndicesGPU, 
								int nVert, int sizeEvals, int nEvals, int src, int cluster ) {

	//publish the source vertex and its cluster index to constant memory
	cudaMemcpyToSymbol( source, &src, sizeof(src), 0, cudaMemcpyHostToDevice );
	cudaMemcpyToSymbol( clusterIndex, &cluster, sizeof(cluster), 0, cudaMemcpyHostToDevice );

	const int threadsPerGrid = BLOCK_SIZE_X * BLOCK_SIZE_Y;
	dim3 block(BLOCK_SIZE_X, BLOCK_SIZE_Y);
	//ceil-division: exactly enough blocks to cover nVert vertices (min 1)
	int blocks = (nVert + threadsPerGrid - 1) / threadsPerGrid;
	dim3 grid(blocks > 0 ? blocks : 1, 1);

	closestCluster<<<grid, block>>>(evecsGPU, mask, nVert, sizeEvals, nEvals, outputDistanceGPU, outputIndicesGPU);

	//cudaThreadSynchronize() is deprecated — use cudaDeviceSynchronize()
	cudaDeviceSynchronize();
}

// One-time setup for the distance kernels: binds the eigenvalue array to
// the texEvals texture and publishes the scaling factor to constant memory.
// NOTE(review): texEvecs is declared above but never bound here — confirm
// it is bound elsewhere or is unused.
void prepareCUDA ( cudaArray* evals_array, cudaChannelFormatDesc& evals_channel, float scaleFactor ) {

	//bind the eigenvalue array so kernels can fetch coefficients via tex2D
	cudaBindTextureToArray( &texEvals, evals_array, &evals_channel );

	//copy the scaling factor into the scaleT constant symbol
	cudaMemcpyToSymbol( scaleT, &scaleFactor, sizeof(scaleFactor), 0, cudaMemcpyHostToDevice );
}

/**
 * Host-side driver that finds the minimum element (and its companion
 * index) of a host array by repeatedly reducing it on the GPU in slices
 * of 100 elements until a single value remains.
 *
 * NOTE: when size > 1 this function takes ownership of `data` and
 * `indices` and releases them with free(), so the caller must have
 * allocated them with malloc/calloc. When size <= 1 the buffers are
 * left untouched.
 *
 * BUG FIXES vs. the original:
 *  - indexGPU was never cudaFree'd (device memory leaked every iteration);
 *  - the host buffers produced by the last iteration were never freed.
 **/
float getMinimumElement( float* data, int* indices, int size, int* outputIndex ) {

	int currentSize = size;
	float* currentData = data;
	int* currentIndices = indices;
	float* currentDataGPU;
	int* currentIndicesGPU;
	int nThreads;
	int sliceSize = 100;


	while ( currentSize > 1 ) {

		//allocate space for currentData on the GPU and copy it over
		cudaMalloc( (void**) &currentDataGPU, currentSize*sizeof(float) );
		cudaMemcpy( currentDataGPU, currentData, currentSize*sizeof(float), cudaMemcpyHostToDevice );
		//the host copy is no longer needed
		free( currentData );

		//allocate space for currentIndices on the GPU and copy it over
		cudaMalloc( (void**) &currentIndicesGPU, currentSize*sizeof(int) );
		cudaMemcpy( currentIndicesGPU, currentIndices, currentSize*sizeof(int), cudaMemcpyHostToDevice );
		//the host copy is no longer needed
		free( currentIndices );

		//number of slices (= threads) needed to cover the array
		nThreads = (currentSize + sliceSize - 1) / sliceSize;
		//compute grid and block dimensions
		dim3 block(BLOCK_SIZE_X, BLOCK_SIZE_Y);
		dim3 grid(nThreads/(BLOCK_SIZE_X * BLOCK_SIZE_Y) + 1, 1);

		//allocate device memory for the per-slice results
		float* tmpGPU;
		int* indexGPU;
		cudaMalloc( (void**) &tmpGPU, nThreads*sizeof(float) );
		cudaMalloc( (void**) &indexGPU, nThreads*sizeof(int) );

		//launch the reduction kernel
		minimumSlice<<<grid, block>>>( currentDataGPU, currentIndicesGPU, currentSize, sliceSize, tmpGPU, indexGPU );

		//copy the partial minima back to the host
		float* tmp = (float*) calloc ( nThreads, sizeof(float) );
		cudaMemcpy( tmp, tmpGPU, nThreads*sizeof(float), cudaMemcpyDeviceToHost );

		int* tmpIndices = (int*) calloc ( nThreads, sizeof(int) );
		cudaMemcpy( tmpIndices, indexGPU, nThreads*sizeof(int), cudaMemcpyDeviceToHost );

		//release all device buffers (indexGPU was leaked in the original)
		cudaFree( tmpGPU );
		cudaFree( indexGPU );
		cudaFree( currentIndicesGPU );
		cudaFree( currentDataGPU );

		//the partial results become the input of the next round
		currentSize = nThreads;
		currentData = tmp;
		currentIndices = tmpIndices;

	}

	//read the result, then free the last temporary buffers — but only if
	//they are not the caller's original arrays (i.e. the loop ran at least once)
	float result = *currentData;
	*outputIndex = *currentIndices;
	if ( currentData != data ) {
		free( currentData );
		free( currentIndices );
	}
	return result;
}

#endif
