/**
  *
  * Date         11 june 2009
  * ====
  *
  * Authors      Vincent Garcia
  * =======      Eric    Debreuve
  *              Michel  Barlaud
  *
  * Description  Given a reference point set and a query point set, the program returns
  * ===========  the distance between each query point and its k-th nearest neighbors in
  *              the reference point set. Only the distance is provided. The computation
  *              is performed using the API NVIDIA CUDA.
  *
  * Paper        Fast k nearest neighbor search using GPU
  * =====
  *
  * BibTeX       @INPROCEEDINGS{2008_garcia_cvgpu,
  * ======         author = {V. Garcia and E. Debreuve and M. Barlaud},
  *                title = {Fast k nearest neighbor search using GPU},
  *                booktitle = {CVPR Workshop on Computer Vision on GPU},
  *                year = {2008},
  *                address = {Anchorage, Alaska, USA},
  *                month = {June}
  *              }
  *
  */


// Includes
#include <stdio.h>
#include <tchar.h>
#include <cuda.h>
#include "CudaUtils.h"
#include "knn.h"


// Constants used by the program
#define MAX_PITCH_VALUE_IN_BYTES       262144
#define MAX_TEXTURE_WIDTH_IN_BYTES     65536
#define MAX_TEXTURE_HEIGHT_IN_BYTES    32768
#define MAX_PART_OF_FREE_MEMORY_USED   0.9
#define BLOCK_DIM                      16


// Texture containing the reference points (if it is possible)
texture<float, 2, cudaReadModeElementType> texA;




//-----------------------------------------------------------------------------------------------//
//                                            KERNELS                                            //
//-----------------------------------------------------------------------------------------------//



/**
* Computes the distance between two matrix A (reference points) and
* B (query points) containing respectively wA and wB points.
* The matrix A is a texture.
*
* @param wA    width of the matrix A = number of points in A
* @param B     pointer on the matrix B
* @param wB    width of the matrix B = number of points in B
* @param pB    pitch of matrix B given in number of columns
* @param dim   dimension of points = height of matrices A and B
* @param AB    pointer on the matrix containing the wA*wB distances computed
*/
__global__ void cuComputeDistanceTexture(int wA, float * B, int wB, int pB, int dim, float* AB){
	// One thread per (reference point, query point) pair:
	// x indexes the query point in B, y indexes the reference point in texA.
	unsigned int queryIdx = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned int refIdx   = blockIdx.y * blockDim.y + threadIdx.y;

	// Guard against the partial blocks at the grid edges
	if ( queryIdx >= wB || refIdx >= wA )
		return;

	// Sum of squared differences over all 'dim' coordinates
	float ssd = 0;
	for (int d = 0; d < dim; d++){
		float diff = tex2D(texA, (float)refIdx, (float)d) - B[ d * pB + queryIdx ];
		ssd += diff * diff;
	}

	// AB shares its pitch with B (pB): row = reference point, column = query point
	AB[refIdx * pB + queryIdx] = ssd;
}


/**
* Computes the distance between two matrix A (reference points) and
* B (query points) containing respectively wA and wB points.
*
* @param A     pointer on the matrix A
* @param wA    width of the matrix A = number of points in A
* @param pA    pitch of matrix A given in number of columns
* @param B     pointer on the matrix B
* @param wB    width of the matrix B = number of points in B
* @param pB    pitch of matrix B given in number of columns
* @param dim   dimension of points = height of matrices A and B
* @param AB    pointer on the matrix containing the wA*wB distances computed
*/
__global__ void cuComputeDistanceGlobal( float* A, int wA, int pA, float* B, int wB, int pB, int dim,  float* AB){

	// Tiles of A and B staged in shared memory (BLOCK_DIM x BLOCK_DIM each)
	__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
	__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];

	// Thread index within the block
	int tx = threadIdx.x;
	int ty = threadIdx.y;

	// Loop parameters, kept in per-thread registers.
	// FIX: the original declared these __shared__ and had every thread write
	// them concurrently with no barrier — a data race (all threads happened to
	// write identical values, so results were right, but it is undefined
	// behavior and wastes shared memory). Registers are both correct and free.
	int begin_A = BLOCK_DIM * blockIdx.y;   // first column of A handled by this block
	int begin_B = BLOCK_DIM * blockIdx.x;   // first column of B handled by this block
	int step_A  = BLOCK_DIM * pA;           // advance A by one BLOCK_DIM-row tile
	int step_B  = BLOCK_DIM * pB;           // advance B by one BLOCK_DIM-row tile
	int end_A   = begin_A + (dim-1) * pA;   // offset of the last row tile of A

	// Running sum of squared differences for this thread's (A point, B point) pair
	float ssd = 0;

	// Guards: the matrices are padded to their pitch, so out-of-range columns
	// must be masked both when loading tiles and when writing the result
	int cond0 = (begin_A + tx < wA); // this thread loads a valid column of A
	int cond1 = (begin_B + tx < wB); // this thread loads a valid column of B / writes a valid output column
	int cond2 = (begin_A + ty < wA); // this thread writes a valid output row

	// Walk the 'dim' dimension one BLOCK_DIM-row tile at a time
	for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {

		// Stage one tile of A and one tile of B; rows beyond 'dim' and columns
		// beyond the matrix widths are zero-filled so they contribute nothing
		// to the squared distance
		if (a/pA + ty < dim){
			shared_A[ty][tx] = (cond0)? A[a + pA * ty + tx] : 0;
			shared_B[ty][tx] = (cond1)? B[b + pB * ty + tx] : 0;
		}
		else{
			shared_A[ty][tx] = 0;
			shared_B[ty][tx] = 0;
		}

		// Wait until the whole tile is loaded
		__syncthreads();

		// Accumulate the partial squared distance contributed by this tile
		if (cond2 && cond1){
			for (int k = 0; k < BLOCK_DIM; ++k){
				float tmp = shared_A[k][ty] - shared_B[k][tx];
				ssd += tmp*tmp;
			}
		}

		// Make sure the tile is fully consumed before the next iteration overwrites it
		__syncthreads();
	}

	// One result per thread: squared distance between A point (begin_A+ty) and
	// B point (begin_B+tx). AB shares its pitch with B (pB).
	if (cond2 && cond1)
		AB[ (begin_A + ty) * pB + begin_B + tx ] = ssd;
}


// Optional distance cutoff uploaded by SetKnnParams.
// NOTE(review): currently only referenced by the commented-out fminf lines
// below, so the value has no effect on the kernel as written.
__constant__ float g_MaxDistance;
/**
* Keeps, for each column of the distance matrix, the k smallest distances
* sorted in increasing order in the first k rows; the matching row indexes
* (1-based) are written to the index matrix.
* Launched with one thread per column (256x1 blocks, see knn()).
*
* @param dist        distance matrix
* @param dist_pitch  pitch of the distance matrix given in number of columns
* @param ind         index matrix
* @param ind_pitch   pitch of the index matrix given in number of columns
* @param width       width of the distance matrix and of the index matrix
* @param height      height of the distance matrix and of the index matrix
* @param k           number of neighbors to consider
*/
__global__ void cuInsertionSort(float *dist, int dist_pitch, int *ind, int ind_pitch, int width, int height, int k){

	// Variables
	int l,i,j;
	float *p_dist;      // this thread's column in the distance matrix
	int   *p_ind;       // this thread's column in the index matrix
	float v_dist;       // distance currently being inserted
	int   i_dist, i_max;
	float max_value;    // current insertion threshold (largest kept distance)
	unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;

	if (xIndex<width){

		// Pointer shift to this thread's column; the first element seeds the threshold
		p_dist    = dist + xIndex;
		p_ind     = ind  + xIndex;
		max_value = p_dist[0];
// 		max_value = fminf(p_dist[0], g_MaxDistance);

		// Initialize the indexes (1-based: row 0 holds reference point #1)
		p_ind[0] = 1;

		// Part 1 : insertion-sort the k first elements in place
		for (l=1; l<k; l++){
			i_dist = l * dist_pitch;
			v_dist = p_dist[i_dist];
			if (v_dist<max_value){
				// Find the insertion position, then shift larger entries down
				i=0; while ( i<l && p_dist[i*dist_pitch]<=v_dist ) i++;
				for (j=l; j>i; j--){
					p_dist[j*dist_pitch] = p_dist[(j-1)*dist_pitch];
					p_ind[j*ind_pitch]   = p_ind[(j-1)*ind_pitch];
				}
				p_dist[i*dist_pitch] = v_dist;
				p_ind[i*ind_pitch]   = l+1;  // 1-based index of the inserted reference point
			}
			// The threshold grows as more of the first k rows become sorted
			max_value = p_dist[i_dist];
// 			max_value = fminf(p_dist[i_dist], g_MaxDistance);
		}

		// Part 2 : insert each remaining element into the k first lines if it beats the threshold
		i_max = (k-1)*dist_pitch;
		for (l=k; l<height; l++){
			i_dist = l * dist_pitch;
			v_dist = p_dist[i_dist];
			if (v_dist<max_value){
				// Find the insertion position, shift rows i..k-2 down, drop row k-1
				i=0; while ( i<k && p_dist[i*dist_pitch]<=v_dist ) i++;
				for (j=k-1; j>i; j--){
					p_dist[j*dist_pitch] = p_dist[(j-1)*dist_pitch];
					p_ind[j*ind_pitch]   = p_ind[(j-1)*ind_pitch];
				}
				p_dist[i*dist_pitch] = v_dist;
				p_ind[i*ind_pitch]   = l+1;  // 1-based index of the inserted reference point
				max_value            = p_dist[i_max];
// 				max_value            = fminf(p_dist[i_max], g_MaxDistance);
			}
		}
	}
}

//-----------------------------------------------------------------------------------------------//
//                                   K-th NEAREST NEIGHBORS                                      //
//-----------------------------------------------------------------------------------------------//



/**
  * Prints the error message returned during a memory allocation.
  *
  * @param error        error value returned by the memory allocation function
  * @param memorySize   size of the memory that was requested, in bytes
  */
void printErrorMessage(cudaError_t error, int memorySize){
    _tprintf(_T("==================================================\n"));
    _tprintf(_T("MEMORY ALLOCATION ERROR  : %s\n"), cudaGetErrorString(error));
    // FIX: typo in the user-facing message ("Whished" -> "Wished");
    // padding adjusted so the colons stay aligned.
    _tprintf(_T("Wished allocated memory  : %d\n"), memorySize);
    _tprintf(_T("==================================================\n"));
}

// Device memory and bookkeeping shared between PreparekNNSearch(), knn() and CleanUp().
float        *query_dev;   // pitched device buffer: query points on top, distance matrix below
float        *ref_dev;     // pitched device buffer for reference points (global-memory path only)
float        *dist_dev;    // alias into query_dev: start of the distance matrix
int          *ind_dev;     // pitched device buffer for the k nearest-neighbor indexes
cudaArray    *ref_array;   // reference points as a CUDA array bound to texA (texture path only)

size_t       query_pitch;            // pitch of query/distance rows, in elements (floats)
size_t	     query_pitch_in_bytes;   // same pitch, in bytes
size_t       ind_pitch;              // pitch of index rows, in elements (ints)
size_t       ind_pitch_in_bytes;     // same pitch, in bytes
size_t       max_nb_query_traited;   // max query points that fit in GPU memory per chunk
size_t       actual_nb_query_width;  // query points processed in the current chunk

size_t       ref_pitch;              // pitch of reference rows, in elements (global-memory path)
size_t       ref_pitch_in_bytes;     // same pitch, in bytes

unsigned int use_texture;            // nonzero: reference points live in texture memory (texA)


// Uploads the maximum-distance threshold to constant memory (g_MaxDistance).
// NOTE(review): g_MaxDistance is currently only read by commented-out fminf
// lines in cuInsertionSort, so this value has no effect at present.
void SetKnnParams(float p_MaxDist)
{
	cudaMemcpyToSymbol(g_MaxDistance, &p_MaxDist, sizeof(float));
}

// Allocates all device memory needed for the kNN search and uploads the
// reference points; must be called once before knn(). CleanUp() releases
// everything afterwards.
//
// ref_host, ref_width --> our points (linear matrix, ref_width x height)
// query_width --> number of queries (# of estimations)
// height --> dimension - always 3
// k --> number of neighbors that knn() will later be asked for
void PreparekNNSearch(float* ref_host, int ref_width, int query_width, int height, int k)
{
	unsigned int size_of_float = sizeof(float);
	unsigned int size_of_int   = sizeof(int);
	cudaError_t  result;
	unsigned int memory_total;
	unsigned int memory_free;

	// Use texture memory for the reference points only if they fit within the
	// hardware texture limits; otherwise fall back to pitched global memory
	use_texture = ( ref_width*size_of_float<=MAX_TEXTURE_WIDTH_IN_BYTES && height*size_of_float<=MAX_TEXTURE_HEIGHT_IN_BYTES );

	// CUDA Initialisation (driver API, required for cuMemGetInfo below)
	cuInit(0);

	// Check free memory using driver API ; only (MAX_PART_OF_FREE_MEMORY_USED*100)% of memory will be used
	// NOTE(review): unsigned int overflows for GPUs with more than 4 GB free;
	// newer toolkits declare cuMemGetInfo with size_t — confirm against the CUDA version in use.
	CUcontext cuContext;
	CUdevice  cuDevice=0;
	cuCtxCreate(&cuContext, 0, cuDevice);
	cuMemGetInfo(&memory_free, &memory_total);
	cuCtxDetach (cuContext);

	// Determine maximum number of query points that can be treated at once:
	// (budgeted free memory minus the reference matrix) divided by the
	// per-query cost, rounded down to a multiple of 16 for block alignment.
	// NOTE(review): the index matrix is allocated with k rows below, so the
	// per-query cost presumably should use size_of_int * k, not * height — verify.
	max_nb_query_traited = (size_t)( memory_free * MAX_PART_OF_FREE_MEMORY_USED - size_of_float * ref_width*height ) / ( size_of_float * (height + ref_width) + size_of_int * height);
	max_nb_query_traited = __min( query_width, (max_nb_query_traited / 16) * 16 );

	// Allocation of global memory for query points and for distances:
	// one pitched buffer of (height + ref_width) rows — queries on top,
	// distance matrix below (dist_dev aliases into it)
	result = CudaMallocPitchRec( (void **) &query_dev, &query_pitch_in_bytes, max_nb_query_traited * size_of_float, height + ref_width);
	if (result){
		printErrorMessage(result, (int)(max_nb_query_traited*size_of_float*(height+ref_width)));
		return;
	}
	query_pitch = query_pitch_in_bytes/size_of_float;
	dist_dev    = query_dev + height * query_pitch;

	// Allocation of global memory for the k rows of nearest-neighbor indexes
	result = CudaMallocPitchRec( (void **) &ind_dev, &ind_pitch_in_bytes, max_nb_query_traited * size_of_int, k);
	if (result){
		CudaFreeRec(query_dev);
		printErrorMessage(result, (int)(max_nb_query_traited*size_of_int*k));
		return;
	}
	ind_pitch = ind_pitch_in_bytes/size_of_int;

	// Allocation of memory (global or texture) for reference points
	if (use_texture){

		// Allocation of texture memory
		cudaChannelFormatDesc channelDescA = cudaCreateChannelDesc<float>();
		result = CudaMallocArrayRec( &ref_array, &channelDescA, ref_width, height );
		if (result){
			printErrorMessage(result, ref_width*height*size_of_float);
			CudaFreeRec(ind_dev);
			CudaFreeRec(query_dev);
			return;
		}
		cudaMemcpyToArray( ref_array, 0, 0, ref_host, ref_width * height * size_of_float, cudaMemcpyHostToDevice );

		// Set texture parameters and bind texture to array
		// (unnormalized coordinates, point sampling: exact element fetches)
		texA.addressMode[0] = cudaAddressModeClamp;
		texA.addressMode[1] = cudaAddressModeClamp;
		texA.filterMode     = cudaFilterModePoint;
		texA.normalized     = 0;
		cudaBindTextureToArray(texA, ref_array);

	}
	else{

		// Allocation of pitched global memory and row-by-row upload
		result = CudaMallocPitchRec( (void **) &ref_dev, &ref_pitch_in_bytes, ref_width * size_of_float, height);
		if (result){
			printErrorMessage(result,  ref_width*size_of_float*height);
			CudaFreeRec(ind_dev);
			CudaFreeRec(query_dev);
			return;
		}
		ref_pitch = ref_pitch_in_bytes/size_of_float;
		cudaMemcpy2D(ref_dev, ref_pitch_in_bytes, ref_host, ref_width*size_of_float,  ref_width*size_of_float, height, cudaMemcpyHostToDevice);
	}

}



/**
  * K nearest neighbor algorithm
  * - Copy the query points from host to device memory (in chunks)
  * - Compute the distance between each query point and every reference point
  * - Keep the k smallest distances (and their indexes) per query point
  * - Copy distances and indexes from device to host memory
  *
  * PreparekNNSearch() must have been called beforehand; it uploads the
  * reference points and allocates all device buffers.
  *
  * @param ref_width       number of reference points (as passed to PreparekNNSearch)
  * @param query_host      query points ; pointer to linear matrix
  * @param query_width     number of query points ; width of the matrix
  * @param height          dimension of points ; height of the matrices
  * @param k               number of neighbors to consider
  * @param p_OutputSqDist  squared distances to the k nearest neighbors ; pointer to linear matrix (k x query_width)
  * @param p_OutputIdx     1-based indexes of the k nearest neighbors ; pointer to linear matrix (k x query_width)
  *
  */
void knn(int ref_width, float* query_host, int query_width, int height, int k, float* p_OutputSqDist, int* p_OutputIdx)
{    
	const unsigned int size_of_float = sizeof(float);
	unsigned int size_of_int   = sizeof(int);

	// Split queries into chunks of at most max_nb_query_traited points so
	// everything fits in the device buffers sized by PreparekNNSearch()
	for(int i=0; i<query_width; i += (int)max_nb_query_traited)
	{
		// Number of query points considered in this chunk
		actual_nb_query_width = __min( max_nb_query_traited, query_width-i );

		// Copy the chunk of query points being treated to the pitched device buffer
		cudaMemcpy2D(query_dev, query_pitch_in_bytes, &query_host[i], query_width*size_of_float, actual_nb_query_width*size_of_float, height, cudaMemcpyHostToDevice);

		// Grids and threads
		// 16x16 blocks covering the full distance matrix (queries x references),
		// rounded up to include the partial edge blocks
		dim3 g_16x16(actual_nb_query_width/16, ref_width/16, 1);
		dim3 t_16x16(16, 16, 1);
		if (actual_nb_query_width%16 != 0) g_16x16.x += 1;
		if (ref_width  %16 != 0) g_16x16.y += 1;
		// 256x1 blocks: one thread per query column for the sort kernel
		dim3 g_256x1(actual_nb_query_width/256, 1, 1);
		dim3 t_256x1(256, 1, 1);
		if (actual_nb_query_width%256 != 0) g_256x1.x += 1;
		// FIX: removed the g_k_16x16 / t_k_16x16 grid computed here in the
		// original — it was never passed to any kernel launch (dead code).

		// Kernel 1: compute all the squared distances
		if (use_texture)
			cuComputeDistanceTexture<<<g_16x16,t_16x16>>>(ref_width, query_dev, actual_nb_query_width, query_pitch, height, dist_dev);
		else
			cuComputeDistanceGlobal<<<g_16x16,t_16x16>>>(ref_dev, ref_width, ref_pitch, query_dev, actual_nb_query_width, query_pitch, height, dist_dev);

		// Kernel 2: partial insertion sort keeping the k smallest distances per column
		cuInsertionSort<<<g_256x1,t_256x1>>>(dist_dev, query_pitch, ind_dev, ind_pitch, actual_nb_query_width, ref_width, k);

		// Copy the k best squared distances and their 1-based indexes back to the host
		// (blocking cudaMemcpy2D also synchronizes with the kernels above)
		cudaMemcpy2D(&p_OutputSqDist[i], query_width*size_of_float, dist_dev, query_pitch_in_bytes, actual_nb_query_width*size_of_float, k, cudaMemcpyDeviceToHost);
		cudaMemcpy2D(&p_OutputIdx[i],  query_width*size_of_int,   ind_dev,  ind_pitch_in_bytes,   actual_nb_query_width*size_of_int,   k, cudaMemcpyDeviceToHost);
	}
}

// Releases every device resource allocated by PreparekNNSearch().
void CleanUp()
{
	// Reference points live either in a CUDA array (texture path)
	// or in pitched global memory — free whichever was used.
	if (use_texture)
		CudaFreeArrayRec(ref_array);
	else
		CudaFreeRec(ref_dev);

	// Index buffer and the combined query/distance buffer.
	CudaFreeRec(query_dev);
	CudaFreeRec(ind_dev);
}

