///////////////////////////////////////////////////////////////////////////////////////////////////////
//  This file currently contains the fastest iteration of CUDA code for AEPD
//  It uses the look up table system developed in fastAEPD_fit() and the optimal thread summing scheme
//  (8x64)
///////////////////////////////////////////////////////////////////////////////////////////////////////

#include "math_functions.h"
#include "fastCalculate.h"
#include <iostream>

using namespace std;
//////////////////////////////////////////////////////////////////////////////////////////////
//
// kernelTableCalculateXplusXminus will calculate X_minus[k] and X_plus[k] for each k = blockId,
// and each alpha value, so it will calculate 500 x 80 = 40,000 values.
//
// The results will be stored back to global memories in dev_X_minus[] and dev_X_plus[]
// each storing 40,000*8 bytes = 320,000 bytes
//
// Note that on the host side: X_minus[k] = (sum from 0 to k) X_minus[k]
// So we need to launch another Kernel to do the final calculations of X_minus.
//
// The following code is from AEPD_Fit(), in our case, we change X_left = k to X_left = blockId
//
//	for(int k=EDGE-1 ; k <= N-EDGE-1; k=k+stepX){
//		p1 = data[k];
//		X_left = k;
//		for( int q = 0; q< N; q++){
//			if (q < X_left)
//				x_minus_tmp[q] = abs(data[q] - p1);
//			else
//				x_plus_tmp[q - X_left] = data[q] - p1;
//		}
//
//		for ( int q = 0; q < sizeA; q++){
//			for (int t = 0; t < N; t++){
//				if (t < X_left)
//					X_minus[q] += pow(x_minus_tmp[t], alpha) / N;
//				else
//					X_plus[q] += pow(x_plus_tmp[t - X_left], alpha)/ N;
//			}
//		}
//	}
//
//  EX: fix k; say k = 5
//
//  This gets to a set of 80 blocks
//  these block needs to calculate x_minus_tmp and x_plus_tmp constant for k = 5
//
//  Then fix q, say q = 10
//  Then we have a single block that needs to calculate pow(x_minus_tmp[], alpha)
//
//  Sum up that result and put it in dev_X_minus and dev_X_plus
//
/////////////////////////////////////////////////////////////////////////////////////////////////////

__global__ void  kernelTableCalculateXplusXminus (int * dev_tableLength, int *dev_tableNumberElements, int *dev_intTableData, double *dev_alpha, double * dev_deviationMatrix, double *dev_X_plus, double *dev_X_minus, double *dev_debugBig, double *dev_debug, int * dev_debugIntP1)
{
//////////////////////////////////////////////////////////////////////////////////////////////
//
// Computes one partial X_minus / X_plus pair per block using the look-up-table scheme
// described in the file header (one block per (k, alpha) pair, 500 x 80 = 40,000 results).
//
//   blockID    = blockIdx.x*gridDim.y + blockIdx.y
//   dataIndex  = blockID / 80     -> k (position in the sorted data)
//   alphaIndex = blockID % 80     -> q (index into the alpha grid)
//
// Launch code: dim3 block = (tableLength, 1).  The shared arrays below are sized 8,
// so this assumes tableLength <= 8 -- TODO confirm on the host side.
//
// Thread 0 walks the run-length table to decide, for each distinct data value, how many
// of its occurrences fall left of / at-or-right of position k (numXminus / numXplus),
// and picks p1 = the table value containing position k.  Every thread then looks up the
// precomputed deviation D[|value - p1|][alpha] from dev_deviationMatrix, and thread 1
// writes the weighted sums to dev_X_minus[blockID] and dev_X_plus[blockID].
//
// Note (from file header): the host still has to prefix-sum X_minus over k afterwards;
// this kernel only produces the per-k contribution.
//
/////////////////////////////////////////////////////////////////////////////////////////////////////

	int tid =threadIdx.x; // Launch code : dim3 block = (tableLength, 1);
	int blockID = blockIdx.x*gridDim.y + blockIdx.y;
	int dataIndex = blockID / 80;			// k
	int alphaIndex = blockID % 80;			// q

	__shared__ int p1;				// table value at data position k (set by tid 0)
	int tmp;					// running cumulative count over table entries
	int tmp1;					// cumulative count before the current entry
	int tmpIndex = 0;				// index of the table entry containing position k
	__shared__ int tableLength;
	if (tid == 0)
		tableLength = dev_tableLength[0];

	__syncthreads();
	__shared__ int tableNumberElements [8];		// run length of each distinct data value
	__shared__ int intTableData[8];			// the distinct (integerized) data values
	__shared__ double temp_X_minus[8];		// per-entry deviation looked up from the matrix
	__shared__ int numXplus[8];			// occurrences of entry i counted on the plus side
	__shared__ int numXminus[8];			// occurrences of entry i counted on the minus side

	tableNumberElements[tid] = dev_tableNumberElements[tid];
	intTableData [tid] = dev_intTableData[tid];
	numXplus[tid] = tableNumberElements[tid];
	numXminus[tid] = 0;
	temp_X_minus [tid] = 0;

	__syncthreads();
	if (tid == 0){
		// Walk the table until the entry containing data position dataIndex is found.
		// Entries fully to the left move all their occurrences to the minus side.
		// Assumes dataIndex < total element count of the table -- TODO confirm on host.
		tmp = tableNumberElements[tmpIndex];
		tmp1 = 0;
		while (dataIndex >= tmp){
			numXminus[tmpIndex] = tableNumberElements[tmpIndex];
			numXplus[tmpIndex] = 0;
			tmp1 = tmp;
			tmp += tableNumberElements[++tmpIndex];
		}
		// The entry containing k is split: positions <= k count minus, the rest plus.
		// (Position k appears in both counts, but this entry's value equals p1, so its
		// deviation lookup is D[0] -- presumably 0, making the overlap harmless; verify.)
		numXminus[tmpIndex] = dataIndex - tmp1 + 1;
		numXplus[tmpIndex] = tmp - dataIndex;
		p1 = intTableData[tmpIndex];
	}
	__syncthreads();

	// Each active thread looks up pow(|value - p1|, alpha) for its table entry.
	if (tid < tableLength){
		temp_X_minus[tid] = dev_deviationMatrix[(abs(intTableData[tid]-p1))*sizeAlpha + alphaIndex];
	}

	// BUGFIX: this barrier was commented out.  Thread 1 below reads temp_X_minus[0..7]
	// written by the other threads; without a block-wide barrier that read races with
	// the writes (implicit warp synchrony cannot be relied on under Volta+ independent
	// thread scheduling).
	__syncthreads();

	if (tid == 1){
		// Weighted sum over the (at most) 8 table entries.
		dev_X_minus[blockID] = temp_X_minus[0]*numXminus[0] + temp_X_minus[1]*numXminus[1] + temp_X_minus[2]*numXminus[2] + temp_X_minus[3]*numXminus[3] + temp_X_minus[4]*numXminus[4] + temp_X_minus[5]*numXminus[5] + temp_X_minus[6]*numXminus[6] + temp_X_minus[7]*numXminus[7] ;
		dev_X_plus[blockID] = temp_X_minus[0]*numXplus[0] + temp_X_minus[1]*numXplus[1] + temp_X_minus[2]*numXplus[2] + temp_X_minus[3]*numXplus[3] + temp_X_minus[4]*numXplus[4] + temp_X_minus[5]*numXplus[5]+ temp_X_minus[6]*numXplus[6]+ temp_X_minus[7]*numXplus[7];
	}

} // end kernelTableCalculateXplusXminus

__global__ void  kernelFastLowMemCalculateXplusXminus (int * dev_intData, double *dev_alpha, double * dev_deviationMatrix, double *dev_X_plus, double *dev_X_minus, double *dev_debugBig, double *dev_debug)
{
	/***********************************************************************
	 *  Low-memory X_plus / X_minus kernel.
	 *
	 *  Launch code:
	 *      dim3 Grid(lengthDevData, lengthDevAlpha);
	 *      dim3 Block(16, 32);
	 *
	 *  Unique block id:  blockID = blockIdx.x*gridDim.y + blockIdx.y
	 *      dataIndex  = blockID / 80   (k  -- position in the data)
	 *      alphaIndex = blockID % 80   (q  -- index into the alpha grid)
	 *
	 *  Each thread t of a block looks up the precomputed deviation
	 *  D[|data[t] - data[k]|][alpha[q]] in dev_deviationMatrix and stores it
	 *  into the minus bucket (t <= k) or the plus bucket (k < t < windowSize);
	 *  the other bucket and all threads past windowSize hold zero.
	 *
	 *  The threadsPerBlock bucket entries are then reduced with the 8 x 64
	 *  scheme (fastest measured variant, see file header): 8 threads each
	 *  serially sum one 64-element stripe, then thread 0 adds the 8 partials
	 *  and writes dev_X_plus[blockID] / dev_X_minus[blockID].
	 **************************************************************************/

	const int tid = threadIdx.x * blockDim.y + threadIdx.y;
	const int blockID = blockIdx.x * gridDim.y + blockIdx.y;
	const int dataIndex = blockID / 80;		// k
	const int alphaIndex = blockID % 80;		// q
	const int p1 = dev_intData[dataIndex];		// integerized data value at position k

	__shared__ double temp_X_plus [threadsPerBlock];
	__shared__ double temp_X_minus [threadsPerBlock];

	// Fill exactly one bucket per thread; the other bucket gets zero.
	if (tid <= dataIndex){
		temp_X_minus[tid] = dev_deviationMatrix[(abs(dev_intData[tid]-p1))*sizeAlpha + alphaIndex];
		temp_X_plus[tid] = 0;
	}
	else if (tid < windowSize){
		temp_X_plus[tid] = dev_deviationMatrix[(abs(dev_intData[tid]-p1))*sizeAlpha + alphaIndex];
		temp_X_minus[tid] = 0;
	}
	else {
		// Padding threads (windowSize <= tid < threadsPerBlock) contribute nothing.
		temp_X_minus[tid] = 0;
		temp_X_plus[tid] = 0;
	}

	__syncthreads();

	////////////////////////////////////////////////////////////////////////
	// 8 x 64 reduction: threads 0..7 each sum one 64-element stripe.
	// The serial left-to-right loops preserve the addition order of the
	// original fully unrolled expressions, so results are bit-identical.
	////////////////////////////////////////////////////////////////////////
	__shared__ double tempSumXplus [8];
	__shared__ double tempSumXminus [8];
	if (tid < 8){
		const int base = tid * 64;
		double stripePlus = 0;
		double stripeMinus = 0;
		for (int j = 0; j < 64; ++j){
			stripePlus += temp_X_plus[base + j];
			stripeMinus += temp_X_minus[base + j];
		}
		tempSumXplus[tid] = stripePlus;
		tempSumXminus[tid] = stripeMinus;
	}
	__syncthreads();

	//////////////////////////////////////////////////////////////////////////////////////
	// Final pass: one thread adds the 8 stripe partials and writes the block's results.
	//////////////////////////////////////////////////////////////////////////////////////
	if (tid == 0){
		double totalPlus = 0;
		double totalMinus = 0;
		for (int j = 0; j < 8; ++j){
			totalPlus += tempSumXplus[j];
			totalMinus += tempSumXminus[j];
		}
		dev_X_plus[blockID] = totalPlus;
		dev_X_minus[blockID] = totalMinus;
	}
	__syncthreads();

}// kernelFastLowMemCalculateXplusXminus





///////////////////////////////////////////////////////////////////////////////////////////////////////
// kernelCalculateParameters() is a CUDA kernel
// it calculates Kappa[k][q], Sigma[k][q], and H[k][q]
// where k and q make up the index to dev_X_minus and dev_X_plus
//
// This Kernel uses the values of dev_X_minus[] and dev_X_plus[] that were calculated by
// the kernelCalculateXplusXminus() Kernel.
//
// currently launches with Blocks(16,32) Grid(10,8)
// This is done to be able to debug as each block simulates one alpha
// because of the fact that we are trying to find a minimum over all H[k][q]
// the launch could be varied
//
///////////////////////////////////////////////////////////////////////////////////////////////////////

__global__ void kernelCalculateParameters (double *dev_X_minus, double *dev_X_plus, double *dev_H, double *dev_Kappa, double *dev_Sigma, double *dev_dataParam, double *dev_data, double *dev_alpha, double *dev_lnGammaofAlpha, double *dev_debugBig)
{
	int tid =threadIdx.x *blockDim.y + threadIdx.y;
	int blockID = blockIdx.x*gridDim.y + blockIdx.y;
	int threadsInBlock = blockDim.y*blockDim.x;

	///////////////////////////////////////////////////////////////////////////////////////////
	// One block per alpha value (80 blocks of threadsPerBlock threads).
	//
	// dev_X_plus[] / dev_X_minus[] are laid out 500 x 80:
	//   k = 0: dev_X_plus[0..79]   computed with alpha[0..79]
	//   k = 1: dev_X_plus[80..159] computed with alpha[0..79]
	// so block b, thread t maps to element [t*80 + b] (fixed alpha, varying k).
	//
	// Each thread computes Kappa/Sigma/H for its k; the block then min-reduces H
	// (carrying Kappa, Sigma and the winning index along) and thread 0 writes one
	// result per alpha to dev_Kappa/dev_Sigma/dev_dataParam/dev_H.
	//
	// Threads with no valid k (outside the EDGE window, or tid >= windowSize when
	// threadsInBlock > windowSize) get sentinel values so they cannot win the
	// min-reduction.
	////////////////////////////////////////////////////////////////////////////////////////////

	double alpha = dev_alpha[blockID]; 							// map alpha[i] to block[i]
	double LnGamma = dev_lnGammaofAlpha[blockID];				// map log-gamma(alpha[i]) to block[i]
	double X_minus;
	double X_plus;

	// Mapping scheme (see layout note above): X_plus = dev_X_plus[tid*80 + blockID].
	int offset = tid * 80;
	if (tid < windowSize){
		X_minus = dev_X_minus[offset + blockID];
		X_plus = dev_X_plus[offset + blockID];
	}
	else{
		X_minus = 0;
		X_plus = 0;
	}

	////////////// IMPORTANT: threadsPerBlock == threadsInBlock ////////////////////////////////
	////////////// IMPORTANT: close to the limit of space for shared variables /////////////////

	__shared__ double tempKappa [threadsPerBlock];
	__shared__ double tempSigma [threadsPerBlock];
	__shared__ double tempH [threadsPerBlock];
	__shared__ int indexValue[threadsPerBlock];


	indexValue[tid] = tid;

	/**********************************************************************************
	 * From AEPD_Fit():
	 *
	 *   Kappa[k] = pow( X_minus / X_plus, 1 / (2*(alpha +1)));
	 *   Sigma[k] = pow((alpha * pow(X_minus*X_plus, alpha/(2*(alpha+1)))
	 *              * (pow(X_plus, 1/(alpha+1)) + pow(X_minus, 1/(alpha+1)))), 1/alpha);
	 *   H[k]     = log(Sigma[k]) + lngamma(1/alpha) + log(Kappa[k] + 1/Kappa[k])
	 *              + 1/alpha - log(alpha);
	 *
	 * NOTE(review): the host loop runs k < windowSize - EDGE - 1 (exclusive) while
	 * this kernel uses <= (inclusive) -- confirm which bound is intended.
	 ***********************************************************************************/

	if (tid >=EDGE-1 && tid <= windowSize - EDGE - 1){
		tempKappa[tid] = pow(X_minus / X_plus, 1/(2*(alpha+1)));
		tempSigma[tid] = pow((alpha*  pow( X_minus*X_plus, alpha/ (2*(alpha+1)))\
			* ( pow(X_plus, 1/(alpha+1)) + pow(X_minus, 1/(alpha+1)))), 1/alpha);

		// NaN guard: an isnan-style self-comparison did not work under this nvcc,
		// so a zero Kappa (the observed failure mode) is mapped to a huge H so
		// this k can never be selected as the minimum.
		if(tempKappa[tid] == 0){
			tempH[tid] = 1e10;
		}

		else{
			tempH[tid] = log(tempSigma[tid]) + LnGamma\
					+ log(tempKappa[tid] + 1/tempKappa[tid]) + 1/alpha - log(alpha);
		}
	}
	else{
		tempKappa[tid] = 0;
		tempSigma[tid] = 0;
		// BUGFIX: this was 0, but real H values can be below 0, so a dummy entry
		// could beat every valid H in the min-reduction below.  Use the same
		// exclusion sentinel as the NaN guard so out-of-range threads never win.
		tempH[tid] = 1e10;
	}

	// BUGFIX: barrier added.  The reduction below reads tempH / tempKappa /
	// tempSigma / indexValue entries written by OTHER threads; without a block-wide
	// barrier the first reduction pass races with those shared-memory writes.
	__syncthreads();

	////////////////////////////////////////////////////////////////////////////////////////
	// Min-reduce H over the block, carrying Kappa, Sigma and the winning index along.
	// After the loop, element 0 of each shared array holds the block's minimum-H entry.
	////////////////////////////////////////////////////////////////////////////////////////

	int i = threadsInBlock/2;  // number of active threads in this reduction pass
	while (i != 0) {
		if (tid < i){
			if (tempH[tid] > tempH[tid+i]){
				tempH[tid] = tempH[tid+i];
				tempKappa[tid] = tempKappa[tid+i];
				tempSigma[tid] = tempSigma[tid+i];
				indexValue[tid] = indexValue[tid+i];
			}
		}
		__syncthreads();
		i /= 2;
	}

	////////////////////////////////////////////////////////////////////////////////////////
	// Each block writes its winner to Kappa[], Sigma[], dataParam[], and H[].
	////////////////////////////////////////////////////////////////////////////////////////
	if (tid == 0){
		dev_Kappa[blockID] = tempKappa[0];
		dev_Sigma[blockID] = tempSigma[0];
		dev_dataParam[blockID] = dev_data[indexValue[0]];
		dev_H [blockID] = tempH[0];
	}

} // end kernelCalculateParameters



///////////////////////////////////////////////////////////////////////////////////////////////////////
// kernelNoDataCalculateParameters() is a CUDA kernel
// it calculates Kappa[k][q], Sigma[k][q], and H[k][q]
// where k and q make up the index to dev_X_minus and dev_X_plus
//
// This Kernel uses the values of dev_X_minus[] and dev_X_plus[] that were calculated by
// the kernelCalculateXplusXminus() Kernel.
//
// currently launches with Blocks(16,32) Grid(10,8)
// This is done to be able to debug as each block simulates one alpha
// because of the fact that we are trying to find a minimum over all H[k][q]
// the launch could be varied
//
///////////////////////////////////////////////////////////////////////////////////////////////////////

__global__ void kernelNoDataCalculateParameters (double *dev_X_minus, double *dev_X_plus, double *dev_H, double *dev_Kappa, double *dev_Sigma, int *dev_dataParam, double *dev_alpha, double *dev_lnGammaofAlpha, double *dev_debugBig)
{
	int tid =threadIdx.x *blockDim.y + threadIdx.y;
	int blockID = blockIdx.x*gridDim.y + blockIdx.y;
	int threadsInBlock = blockDim.y*blockDim.x;

	///////////////////////////////////////////////////////////////////////////////////////////
	// Twin of kernelCalculateParameters, except dev_dataParam receives the winning
	// data INDEX (int) instead of the data value itself.
	//
	// One block per alpha value (80 blocks of threadsPerBlock threads).
	//
	// dev_X_plus[] / dev_X_minus[] are laid out 500 x 80:
	//   k = 0: dev_X_plus[0..79]   computed with alpha[0..79]
	//   k = 1: dev_X_plus[80..159] computed with alpha[0..79]
	// so block b, thread t maps to element [t*80 + b] (fixed alpha, varying k).
	//
	// Each thread computes Kappa/Sigma/H for its k; the block then min-reduces H
	// (carrying Kappa, Sigma and the winning index along) and thread 0 writes one
	// result per alpha to dev_Kappa/dev_Sigma/dev_dataParam/dev_H.
	//
	// Threads with no valid k (outside the EDGE window, or tid >= windowSize when
	// threadsInBlock > windowSize) get sentinel values so they cannot win the
	// min-reduction.
	////////////////////////////////////////////////////////////////////////////////////////////

	double alpha = dev_alpha[blockID]; 							// map alpha[i] to block[i]
	double LnGamma = dev_lnGammaofAlpha[blockID];				// map log-gamma(alpha[i]) to block[i]
	double X_minus;
	double X_plus;

	// Mapping scheme (see layout note above): X_plus = dev_X_plus[tid*80 + blockID].
	int offset = tid * 80;
	if (tid < windowSize){
		X_minus = dev_X_minus[offset + blockID];
		X_plus = dev_X_plus[offset + blockID];
	}
	else{
		X_minus = 0;
		X_plus = 0;
	}

	////////////// IMPORTANT: threadsPerBlock == threadsInBlock ////////////////////////////////
	////////////// IMPORTANT: close to the limit of space for shared variables /////////////////

	__shared__ double tempKappa [threadsPerBlock];
	__shared__ double tempSigma [threadsPerBlock];
	__shared__ double tempH [threadsPerBlock];
	__shared__ int indexValue[threadsPerBlock];


	indexValue[tid] = tid;

	/**********************************************************************************
	 * From AEPD_Fit():
	 *
	 *   Kappa[k] = pow( X_minus / X_plus, 1 / (2*(alpha +1)));
	 *   Sigma[k] = pow((alpha * pow(X_minus*X_plus, alpha/(2*(alpha+1)))
	 *              * (pow(X_plus, 1/(alpha+1)) + pow(X_minus, 1/(alpha+1)))), 1/alpha);
	 *   H[k]     = log(Sigma[k]) + lngamma(1/alpha) + log(Kappa[k] + 1/Kappa[k])
	 *              + 1/alpha - log(alpha);
	 *
	 * NOTE(review): the host loop runs k < windowSize - EDGE - 1 (exclusive) while
	 * this kernel uses <= (inclusive) -- confirm which bound is intended.
	 ***********************************************************************************/

	if (tid >=EDGE-1 && tid <= windowSize - EDGE - 1){
		tempKappa[tid] = pow(X_minus / X_plus, 1/(2*(alpha+1)));
		tempSigma[tid] = pow((alpha*  pow( X_minus*X_plus, alpha/ (2*(alpha+1)))\
			* ( pow(X_plus, 1/(alpha+1)) + pow(X_minus, 1/(alpha+1)))), 1/alpha);

		// NaN guard: an isnan-style self-comparison did not work under this nvcc,
		// so a zero Kappa (the observed failure mode) is mapped to a huge H so
		// this k can never be selected as the minimum.
		if(tempKappa[tid] == 0){
			tempH[tid] = 1e10;
		}

		else{
			tempH[tid] = log(tempSigma[tid]) + LnGamma\
					+ log(tempKappa[tid] + 1/tempKappa[tid]) + 1/alpha - log(alpha);
		}
	}
	else{
		tempKappa[tid] = 0;
		tempSigma[tid] = 0;
		// BUGFIX: this was 0, but real H values can be below 0, so a dummy entry
		// could beat every valid H in the min-reduction below.  Use the same
		// exclusion sentinel as the NaN guard so out-of-range threads never win.
		tempH[tid] = 1e10;
	}

	// BUGFIX: barrier added.  The reduction below reads tempH / tempKappa /
	// tempSigma / indexValue entries written by OTHER threads; without a block-wide
	// barrier the first reduction pass races with those shared-memory writes.
	__syncthreads();

	////////////////////////////////////////////////////////////////////////////////////////
	// Min-reduce H over the block, carrying Kappa, Sigma and the winning index along.
	// After the loop, element 0 of each shared array holds the block's minimum-H entry.
	////////////////////////////////////////////////////////////////////////////////////////

	int i = threadsInBlock/2;  // number of active threads in this reduction pass
	while (i != 0) {
		if (tid < i){
			if (tempH[tid] > tempH[tid+i]){
				tempH[tid] = tempH[tid+i];
				tempKappa[tid] = tempKappa[tid+i];
				tempSigma[tid] = tempSigma[tid+i];
				indexValue[tid] = indexValue[tid+i];
			}
		}
		__syncthreads();
		i /= 2;
	}

	////////////////////////////////////////////////////////////////////////////////////////
	// Each block writes its winner to Kappa[], Sigma[], dataParam[], and H[].
	// Here dataParam gets the winning index (no data array in this variant).
	////////////////////////////////////////////////////////////////////////////////////////
	if (tid == 0){
		dev_Kappa[blockID] = tempKappa[0];
		dev_Sigma[blockID] = tempSigma[0];
		dev_dataParam[blockID] = indexValue[0];
		dev_H [blockID] = tempH[0];
	}

} // end kernelNoDataCalculateParameters













/*******************************************************************************************************************
 *
 * This is a wrapper function that will handle calls from C++ to Cuda
 * A function from another class will call this function and this function will
 * handle all the memory allocations, copies, and call the device kernels
 *
 * Specifically, this wrapper function will be used to implement and test the CUDA version of AEPD_Fit()
 * from AEPD_Dist.cpp
 *
 ****************************************************************************************************************/




void CUDAfastAEPD_DistWrapper (double *data, double *param, double *dev_alpha, double *dev_lnGammaofAlpha, double *dev_deviationMatrix){

	////////////////////////////////////////////////////////////////////////////
	// CUDA implementation of AEPD_Fit() for a raw data window.
	//
	// data:                host array of windowSize samples (read only)
	// param:               output, param[0..3] = theta, sigma, kappa, alpha
	// dev_alpha:           device array [80] of alpha values (copied once at startup)
	// dev_lnGammaofAlpha:  device array [80] of pre-computed log-gamma values
	// dev_deviationMatrix: device look-up table consumed by the X+/X- kernel
	//
	// NOTE(review): no CUDA API return codes are checked anywhere in this
	// wrapper; a CUDA_CHECK macro plus cudaGetLastError() after each launch
	// would surface failures instead of silently producing garbage results.
	////////////////////////////////////////////////////////////////////////////

	////////////////////////////////////////////////////////////////////////////
	// Sort a private copy of the data, ascending, as localAEPD_Fit() does.
	// (Simple O(n^2) exchange sort; windowSize is small.)
	////////////////////////////////////////////////////////////////////////////
	double tmp;
	double *datatmp;
	datatmp = (double *) malloc (sizeof(double)*windowSize);
	for(int i = 0; i < windowSize ;i++){
		datatmp[i] = data[i];
	}
	for(int i = 0; i < windowSize-1 ;i++){
		for (int j = i + 1;j < windowSize;j++){
			if (datatmp[i] > datatmp[j]){
				tmp  = datatmp[i];
				datatmp[i] = datatmp[j];
				datatmp[j] = tmp;
			}
		}
	}

	/////////////////////////////////////////////////////////////////////////
	// Scale each sample by FACTOR and round to the nearest integer so the
	// device can index the pre-computed deviation look-up table.
	/////////////////////////////////////////////////////////////////////////
	int * integerData;
	integerData = (int *) malloc (sizeof(int) * windowSize);

	for(int i = 0; i < windowSize; i++){
		integerData[i] = round(datatmp[i] * FACTOR);
	}

	////////////////////////////////////////////////////////////////////////////////
	// kernelFastLowMemCalculateXplusXminus fills the [windowSize x 80] tables
	// dev_X_plus / dev_X_minus (one entry per data index k and alpha index q).
	////////////////////////////////////////////////////////////////////////////////
	int *dev_intData;
	double *dev_data, *dev_X_plus, *dev_X_minus, *dev_debugBig, *dev_debug;

	int lengthDevData = windowSize;
	int lengthDevAlpha = 80;	// number of alpha steps; keep in sync with dev_alpha

	// allocate device memory (dev_debugBig / dev_debug are kernel scratch
	// parameters and must stay allocated even when host-side dumps are off)
	cudaMalloc( (void **)&dev_data, sizeof(double) * lengthDevData );
	cudaMalloc( (void **)&dev_intData, sizeof(int) * lengthDevData );
	cudaMalloc( (void **)&dev_X_plus, sizeof(double) * lengthDevAlpha * lengthDevData );
	cudaMalloc( (void **)&dev_X_minus, sizeof(double) * lengthDevAlpha * lengthDevData );
	cudaMalloc( (void **)&dev_debugBig, sizeof(double) * lengthDevAlpha * lengthDevData );
	cudaMalloc( (void **)&dev_debug, sizeof(double) * lengthDevAlpha );

	// Copy the integer data (kernel input) and the sorted doubles (needed
	// later by kernelCalculateParameters to recover theta).
	cudaMemcpy( dev_intData, integerData, sizeof(int) * lengthDevData, cudaMemcpyHostToDevice );
	cudaMemcpy( dev_data, datatmp, sizeof(double) * lengthDevData, cudaMemcpyHostToDevice );

	// One block per (k, alpha) pair; 16x32 = 512 threads per block.
	dim3 Grid(lengthDevData, lengthDevAlpha);
	dim3 Block(16, 32);

	// dev_deviationMatrix and dev_alpha were copied once when the program
	// launched; dev_X_plus / dev_X_minus are written by this kernel.
	kernelFastLowMemCalculateXplusXminus <<<Grid, Block>>> (dev_intData, dev_alpha, dev_deviationMatrix, dev_X_plus, dev_X_minus, dev_debugBig, dev_debug);

	///////////////////////////////////////////////////////////////////////////////////////////
	// kernelCalculateParameters consumes dev_X_minus / dev_X_plus, computes
	// Kappa[k][q], Sigma[k][q], H[k][q], and reduces them down to one
	// minimum-entropy candidate per alpha (80 values each).
	// Launch: Block(16,32), Grid(10,8) -> 80 blocks, one per alpha value.
	///////////////////////////////////////////////////////////////////////////////////////////
	int KCP_devSize = 80;	// TODO: derive from lengthDevAlpha instead of duplicating

	double *dev_Kappa, *dev_Sigma, *dev_dataParam, *dev_H;
	double *Kappa, *Sigma, *dataParam, *H;

	// Allocate host memory for the per-alpha reduction results.
	Kappa = (double *) malloc(sizeof(double) * lengthDevAlpha);
	Sigma = (double *) malloc(sizeof(double) * lengthDevAlpha);
	dataParam = (double *) malloc(sizeof(double) * lengthDevAlpha);
	H = (double *) malloc(sizeof(double)*lengthDevAlpha);

	// Allocate device memory for the per-alpha reduction results.
	cudaMalloc( (void **)&dev_H, sizeof(double) * KCP_devSize);
	cudaMalloc( (void **)&dev_Kappa, sizeof(double) * KCP_devSize);
	cudaMalloc( (void **)&dev_Sigma, sizeof(double) * KCP_devSize);
	cudaMalloc( (void **)&dev_dataParam, sizeof(double) * KCP_devSize);

	dim3 BlockKCP(16,32);
	dim3 GridKCP(10,8);

	kernelCalculateParameters <<<GridKCP, BlockKCP>>> (dev_X_minus, dev_X_plus, dev_H, dev_Kappa, dev_Sigma, dev_dataParam, dev_data, dev_alpha, dev_lnGammaofAlpha, dev_debugBig);

	// Blocking copies double as synchronization with the kernels above.
	cudaMemcpy(Kappa , dev_Kappa, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );
	cudaMemcpy(Sigma , dev_Sigma, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );
	cudaMemcpy(dataParam , dev_dataParam, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );
	cudaMemcpy(H , dev_H, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );

	////////////////////////////////////////////////////////////////////////
	// Final host-side reduction: find the minimum entropy over all alpha.
	// The range [1, lengthDevAlpha-3] mirrors the original AEPD_Fit()
	// condition (q >= 1 && q < sizeA-2): indices 0, 78, 79 are excluded.
	////////////////////////////////////////////////////////////////////////
	double minH = 1e10;
	int indexAlpha = 0;
	for (int i = 1; i<lengthDevAlpha-2; i++){
		if (H[i] < minH){
			minH = H[i];
			indexAlpha = i;
		}
	}

	param[0] = dataParam[indexAlpha]; // theta
	param[1] = Sigma[indexAlpha]; // sigma
	param[2] = Kappa[indexAlpha]; // kappa
	param[3] = 0.7 + indexAlpha * 0.1; // alpha = start_A + q * inc_A

	////////////////////////////////////////////////////////////////////////////////////////////////
	// Clear memory (fixed: integerData was previously leaked)
	///////////////////////////////////////////////////////////////////////////////////////////////
	free (datatmp);
	free (integerData);
	free (Kappa);
	free (Sigma);
	free (dataParam);
	free (H);
	cudaFree (dev_data);
	cudaFree (dev_intData);
	cudaFree (dev_X_plus);
	cudaFree (dev_X_minus);
	cudaFree (dev_debugBig);
	cudaFree (dev_debug);
	cudaFree (dev_Kappa);
	cudaFree (dev_Sigma);
	cudaFree (dev_dataParam);
	cudaFree (dev_H);
	return;

} // end CUDAfastAEPD_DistWrapper (Version with Data)


void CUDAfastAEPD_DistWrapper (int tableLength, ForexTable * TablePointer, double *param, double *dev_alpha, double *dev_lnGammaofAlpha, double *dev_deviationMatrix){

	////////////////////////////////////////////////////////////////////////////
	// Table-driven CUDA implementation of AEPD_Fit().
	//
	// Instead of a raw data window, the input is a run-length table:
	// TablePointer[i].ValueofElement occurs TablePointer[i].NumberofElements
	// times in the (already sorted) data window.
	//
	// tableLength:         number of valid entries in TablePointer
	// param:               output, param[0..3] = theta, sigma, kappa, alpha
	// dev_alpha:           device array [80] of alpha values (copied once at startup)
	// dev_lnGammaofAlpha:  device array [80] of pre-computed log-gamma values
	// dev_deviationMatrix: device look-up table consumed by the X+/X- kernel
	//
	// NOTE(review): no CUDA API return codes are checked; a CUDA_CHECK macro
	// plus cudaGetLastError() after each launch is recommended.
	////////////////////////////////////////////////////////////////////////////

	int *dev_tableLength, *dev_tableNumberElements, *dev_intTableData, *dev_debugIntP1;
	int *host_tableLength, *host_tableNumberElements, *host_intTableData;
	double *dev_X_plus, *dev_X_minus, *dev_debugBig, *dev_debug;

	int lengthDevData = windowSize;
	int lengthDevAlpha = 80;	// number of alpha steps; keep in sync with dev_alpha

	////////////////////////////////////////////////////////////////////////////
	// The staging buffers below (and the kernel block width) have a fixed
	// capacity of 8 table slots.  Clamp tableLength so an oversized table can
	// no longer write past the end of the 8-entry buffers (previously the
	// copy loop overflowed host_tableNumberElements / host_intTableData
	// silently when tableLength > 8).
	////////////////////////////////////////////////////////////////////////////
	const int TABLE_CAPACITY = 8;
	if (tableLength > TABLE_CAPACITY){
		cerr << "CUDAfastAEPD_DistWrapper: tableLength " << tableLength
			<< " exceeds capacity " << TABLE_CAPACITY << ", truncating" << endl;
		tableLength = TABLE_CAPACITY;
	}

	// allocate host staging memory
	host_tableLength = (int *) malloc (sizeof(int));
	host_tableNumberElements = (int *) malloc (TABLE_CAPACITY * sizeof(int));
	host_intTableData = (int *) malloc (TABLE_CAPACITY * sizeof(int));

	// Copy from ForexTable into flat arrays (could be done pre-processing);
	// values are scaled by FACTOR and rounded so the device can index the
	// deviation look-up table.
	host_tableLength[0] = tableLength;

	for (int i= 0; i < tableLength; i++){
		host_tableNumberElements [i] = TablePointer[i].NumberofElements;
		host_intTableData [i] = round(TablePointer[i].ValueofElement * FACTOR);
	}
	for (int i = tableLength; i < TABLE_CAPACITY; i++){	// zero-pad unused slots
		host_tableNumberElements [i] = 0;
		host_intTableData [i] = 0;
	}

	// allocate device memory (dev_debugBig / dev_debug / dev_debugIntP1 are
	// kernel scratch parameters and must stay allocated)
	cudaMalloc( (void **)&dev_tableLength, sizeof(int) );
	cudaMalloc( (void **)&dev_tableNumberElements, sizeof(int) * TABLE_CAPACITY );
	cudaMalloc( (void **)&dev_intTableData, sizeof(int) * TABLE_CAPACITY );
	cudaMalloc( (void **)&dev_debugIntP1, sizeof(int) * lengthDevAlpha * lengthDevData );
	cudaMalloc( (void **)&dev_X_plus, sizeof(double) * lengthDevAlpha * lengthDevData );
	cudaMalloc( (void **)&dev_X_minus, sizeof(double) * lengthDevAlpha * lengthDevData );
	cudaMalloc( (void **)&dev_debugBig, sizeof(double) * lengthDevAlpha * lengthDevData );
	cudaMalloc( (void **)&dev_debug, sizeof(double) * lengthDevAlpha );

	// copy the staged table to the device
	cudaMemcpy( dev_tableLength, host_tableLength, sizeof(int), cudaMemcpyHostToDevice );
	cudaMemcpy( dev_tableNumberElements, host_tableNumberElements, sizeof(int)*TABLE_CAPACITY, cudaMemcpyHostToDevice );
	cudaMemcpy( dev_intTableData, host_intTableData, sizeof(int)*TABLE_CAPACITY, cudaMemcpyHostToDevice );

	// One block per (k, alpha) pair; one thread per table slot.
	dim3 Grid(lengthDevData, lengthDevAlpha);
	dim3 Block(TABLE_CAPACITY, 1);	// TODO: make dependent on actual table length

	// dev_deviationMatrix and dev_alpha were copied once when the program
	// launched; dev_X_plus / dev_X_minus are written by this kernel.
	kernelTableCalculateXplusXminus <<<Grid, Block>>> (dev_tableLength, dev_tableNumberElements, dev_intTableData, dev_alpha, dev_deviationMatrix, dev_X_plus, dev_X_minus, dev_debugBig, dev_debug, dev_debugIntP1 );

	///////////////////////////////////////////////////////////////////////////////////////////
	// kernelNoDataCalculateParameters consumes dev_X_minus / dev_X_plus and
	// reduces Kappa, Sigma, H down to one minimum-entropy candidate per
	// alpha.  dataParam holds the winning flattened data index (int) per
	// alpha.  Launch: Block(16,32), Grid(10,8) -> 80 blocks, one per alpha.
	///////////////////////////////////////////////////////////////////////////////////////////
	int KCP_devSize = 80;	// TODO: derive from lengthDevAlpha instead of duplicating

	int *dev_dataParam, *dataParam;
	double *dev_Kappa, *dev_Sigma, *dev_H;
	double *Kappa, *Sigma, *H;

	// Allocate host memory for the per-alpha reduction results.
	Kappa = (double *) malloc(sizeof(double) * lengthDevAlpha);
	Sigma = (double *) malloc(sizeof(double) * lengthDevAlpha);
	dataParam = (int *) malloc(sizeof(int) * lengthDevAlpha);
	H = (double *) malloc(sizeof(double)*lengthDevAlpha);

	// Allocate device memory for the per-alpha reduction results.
	cudaMalloc( (void **)&dev_H, sizeof(double) * KCP_devSize);
	cudaMalloc( (void **)&dev_Kappa, sizeof(double) * KCP_devSize);
	cudaMalloc( (void **)&dev_Sigma, sizeof(double) * KCP_devSize);
	cudaMalloc( (void **)&dev_dataParam, sizeof(int) * KCP_devSize);

	dim3 BlockKCP(16,32);
	dim3 GridKCP(10,8);

	kernelNoDataCalculateParameters <<<GridKCP, BlockKCP>>> (dev_X_minus, dev_X_plus, dev_H, dev_Kappa, dev_Sigma, dev_dataParam, dev_alpha, dev_lnGammaofAlpha, dev_debugBig);

	// Blocking copies double as synchronization with the kernels above.
	cudaMemcpy(Kappa , dev_Kappa, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );
	cudaMemcpy(Sigma , dev_Sigma, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );
	cudaMemcpy(dataParam , dev_dataParam, sizeof(int) * lengthDevAlpha, cudaMemcpyDeviceToHost );
	cudaMemcpy(H , dev_H, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );

	////////////////////////////////////////////////////////////////////////
	// Final host-side reduction: find the minimum entropy over all alpha.
	// The range [1, lengthDevAlpha-3] mirrors the original AEPD_Fit()
	// condition (q >= 1 && q < sizeA-2): indices 0, 78, 79 are excluded.
	////////////////////////////////////////////////////////////////////////
	double minH = 1e10;
	int indexAlpha = 0;
	for (int i = 1; i<lengthDevAlpha-2; i++){
		if (H[i] < minH){
			minH = H[i];
			indexAlpha = i;
		}
	}

	/////////////////////////////////////////////////////////////////////
	// Determine param[0] (theta) from indexAlpha.  Remember that
	// param[0] = sorted data array [dataParam[indexAlpha]]: walk the
	// run-length table until the cumulative element count covers the
	// winning index.
	// NOTE(review): assumes dataParam[indexAlpha] is less than the total
	// element count, otherwise dataIndex walks past the staged table —
	// confirm against the kernel's output range.
	/////////////////////////////////////////////////////////////////////
	int dataIndex = 0;
	int dataTmp = host_tableNumberElements[dataIndex];
	while (dataParam[indexAlpha]> dataTmp-1)	// dataTmp-1 because the index is 0-based while the count is 1-based
		dataTmp += host_tableNumberElements[++dataIndex];

	param[0] = TablePointer[dataIndex].ValueofElement; // theta
	param[1] = Sigma[indexAlpha]; // sigma
	param[2] = Kappa[indexAlpha]; // kappa
	param[3] = 0.7 + indexAlpha * 0.1; // alpha = start_A + q * inc_A

	////////////////////////////////////////////////////////////////////////////////////////////
	// Free memories
	////////////////////////////////////////////////////////////////////////////////////////////
	cudaFree (dev_Kappa);
	cudaFree (dev_Sigma);
	cudaFree (dev_dataParam);
	cudaFree (dev_H);
	cudaFree (dev_tableLength);
	cudaFree (dev_tableNumberElements);
	cudaFree (dev_intTableData);
	cudaFree (dev_X_plus);
	cudaFree (dev_X_minus);
	cudaFree (dev_debugBig);
	cudaFree (dev_debug);
	cudaFree (dev_debugIntP1);
	free (Kappa);
	free (Sigma);
	free (dataParam);
	free (H);
	free (host_tableLength);
	free (host_tableNumberElements);
	free (host_intTableData);

	return;
}// end CUDAfastAEPD_DistWrapper (Version with Table)

/////////////////////////////////////////////////////////////////////////////
// AEPD_fit from Aleks.
/////////////////////////////////////////////////////////////////////////////

void localAEPD_Fit(double * x, int N, double * params){

	////////////////////////////////////////////////////////////////////////////
	// Host-side reference implementation of AEPD_Fit (from Aleks), used to
	// validate the CUDA versions.  Scans candidate split points k and alpha
	// values and keeps the (k, q) pair with the minimum entropy H.
	//
	// x:      input samples.  NOTE(review): the sort below iterates over
	//         windowSize while the fit loops use N — this assumes
	//         N == windowSize; TODO confirm for all callers.
	// N:      number of samples
	// params: output, params[0..3] = theta, sigma, kappa, alpha
	////////////////////////////////////////////////////////////////////////////

	////////////////////////////////////////////////////////////////////////////
	// Sort a private copy of the data, ascending (O(n^2) exchange sort).
	///////////////////////////////////////////////////////////////////////////
	double tmp;
	double *data;
	data = (double *) malloc (sizeof(double)*windowSize);
	for(int i = 0; i < windowSize ;i++){
		data[i] = x[i];
	}
	for(int i = 0; i < windowSize-1 ;i++){
		for (int j = i + 1;j < windowSize;j++){
			if (data[i] > data[j]){
				tmp  = data[i];
				data[i] = data[j];
				data[j] = tmp;
			}
		}
	}

	double INCREMENT_ALPHA = 0.1;	// step between successive alpha candidates
	int sizeALPHA = 80;		// number of alpha candidates
	int NUM_STEPS = 500;		// target number of split points k to test

	double inc_A = INCREMENT_ALPHA;	// iteration increment of alpha parameter
	double start_A = 0.7;		// value to start alpha iteration
	int sizeA = sizeALPHA;		// number of iterations to perform

	// Per-(alpha, k) parameter candidates.
	double ** Kappa = new double*[sizeA];
	double ** Sigma = new double*[sizeA];
	double ** H = new double*[sizeA];
	for (int i = 0 ; i< sizeA ; i++){
		Kappa[i] = new double[N];
		Sigma[i] = new double[N];
		H[i] = new double[N];
	}

	double * X_minus = new double[sizeA];
	double * X_plus = new double[sizeA];

	// NOTE(review): if the k-loop below never executes (N < 2*EDGE), params
	// are read from uninitialized Kappa[0][0]/Sigma[0][0] — confirm callers
	// always pass N large enough.
	int idx_k = 0;
	int idx_q = 0;
	double min_H = 10000000000;

	int stepX = N/NUM_STEPS;	// stride between tested split points
	int X_right = 0;
	int X_left = 0;
	if (stepX <1)
		stepX=1;

	for(int k=EDGE-1 ; k <= N-EDGE-1; k=k+stepX)
	{

		double p1 = data[k];	// candidate location parameter (theta)

		X_right = N- k;		// number of samples at or right of k
		X_left = k ;		// number of samples left of k

		double * x_minus_tmp = new double[X_left];
		double * x_plus_tmp = new double[X_right];

		// data[] is sorted, so deviations right of k are already >= 0;
		// deviations left of k need abs().
		for( int q = 0; q< N; q++){
			if (q < X_left)
				x_minus_tmp[q] = abs(data[q] - p1);
			else
				x_plus_tmp[q - X_left] = data[q] - p1;
		}

		// For each alpha[q] = start_A + q*inc_A:
		//   X_minus[q] = sum(x_minus_tmp .^ alpha) / N
		//   X_plus[q]  = sum(x_plus_tmp  .^ alpha) / N
		for ( int q = 0; q < sizeA; q++){
			X_minus[q] = 0;
			X_plus[q] = 0;

			double alpha = start_A + q * inc_A;

			for (int t = 0; t < N; t++){
				if (t < X_left)
					X_minus[q] += pow(x_minus_tmp[t], alpha) / N;
				else
					X_plus[q] += pow(x_plus_tmp[t - X_left], alpha)/ N;
			}
		}

		// Produce candidate parameter sets (Kappa, Sigma, H) as a function
		// of alpha at this k, keeping a running minimum-entropy index.
		for (int q = 0; q < sizeA; q++){

			double alpha = start_A + q * inc_A;
			Kappa[q][k] = pow( X_minus[q] / X_plus[q], 1 / (2*(alpha +1)));

			Sigma[q][k] = pow((alpha*  pow( X_minus[q]*X_plus[q], alpha/ (2*(alpha+1)))\
				* ( pow(X_plus[q], 1/(alpha+1)) + pow(X_minus[q], 1/(alpha+1)))), 1/alpha);

			double sgngam = 0;
			H[q][k] = log(Sigma[q][k]) + locallngamma(1/alpha, &sgngam) \
				+ log(Kappa[q][k] + 1/Kappa[q][k]) + 1/alpha - log(alpha);

			// keep record of the minimum entropy index; the boundary
			// alpha indices (0, sizeA-2, sizeA-1) are excluded
			if(q < sizeA -2 && q >=1 && H[q][k] < min_H ){
				min_H = H[q][k];
				idx_k = k;
				idx_q = q;
			}
		}

		delete [] x_minus_tmp;
		delete [] x_plus_tmp;
	}

	params[0] = data[idx_k]; // theta
	params[1] = Sigma[idx_q][idx_k]; // sigma
	params[2] = Kappa[idx_q][idx_k]; // kappa
	params[3] = start_A + idx_q * inc_A; // alpha

	for (int i = 0 ; i< sizeA ; i++){
		delete [] Kappa[i];
		delete [] Sigma[i];
		delete [] H[i];

	}

	delete [] Kappa;
	delete [] Sigma;
	delete [] H;
	delete [] X_minus;
	delete [] X_plus;
	free (data);	// fixed: data was previously leaked
}

////////////////////////////////////////////////////////////////////
// Log gamma function from Aleks
////////////////////////////////////////////////////////////////////


//////////////////////////////////////////////////////////////////////////////////////////////////////
//
// structureAEPD_fit
// This function mimics AEPD_Fit() using data structures,
// This functions uses pre-processed log-gamma alpha values.
//
//////////////////////////////////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////////////////////////////////
// structureAEPD_Fit
//
// Host-side AEPD parameter fit over run-length-compressed (table) data, mimicking AEPD_Fit().
//
// TablePointer[0..tableLength-1] is a run-length encoding of the sorted data vector:
// entry i holds a value (ValueofElement) and how many times it repeats (NumberofElements).
// The counts are assumed to sum to N — TODO confirm with caller.
//
// For each pivot index k (stepped so roughly NUM_STEPS pivots are visited) and each alpha
// on the grid [0.7, 0.7 + 80*0.1), the routine accumulates
//     X_minus[alpha] = sum_{q <  k} |data[q] - data[k]|^alpha / N
//     X_plus [alpha] = sum_{q >= k} (data[q] - data[k])^alpha / N
// directly from the table (count * |value - p1|^alpha per distinct value), then derives
// Kappa, Sigma and the entropy H for every (alpha, k) pair and keeps the pair minimizing H.
//
// Results are written into params[0..3]: theta, sigma, kappa, alpha.
////////////////////////////////////////////////////////////////////////////////////////////////////

void structureAEPD_Fit(int tableLength, ForexTable * TablePointer,  int N,  double * params){

	/////////////////////////////////////////////////
	//  Per-table-entry left/right counts for X_minus / X_plus
	/////////////////////////////////////////////////

	int * numValuesXplus, *numValuesXminus;
	double *tableValues, *changeTableValues;
	numValuesXplus = (int *)malloc(sizeof(int)*tableLength);
	numValuesXminus = (int *)malloc(sizeof(int)*tableLength);
	tableValues= (double *)malloc(sizeof(double)*tableLength);
	changeTableValues= (double *)malloc(sizeof(double)*tableLength);

	// Initially the pivot sits at the far left: every element counts toward X_plus.
	for (int i = 0; i < tableLength; i++){
		numValuesXplus[i] = TablePointer[i].NumberofElements;
		numValuesXminus[i] = 0;
		tableValues[i] = TablePointer[i].ValueofElement;
	}

	double INCREMENT_ALPHA = 0.1;	// alpha grid spacing
	int sizeALPHA = 80;		// number of alpha grid points
	int NUM_STEPS = 500;		// target number of pivot positions k

	double inc_A = INCREMENT_ALPHA;	// iteration increment of the alpha parameter
	double start_A = 0.7;		// value at which the alpha iteration starts
	int sizeA = sizeALPHA;		// number of alpha iterations to perform

	// Kappa/Sigma/H are [alpha][k] tables of candidate parameter sets.
	double ** Kappa = new double*[sizeA];
	double ** Sigma = new double*[sizeA];
	double ** H = new double*[sizeA];
	for (int i = 0 ; i< sizeA ; i++){
		Kappa[i] = new double[N];
		Sigma[i] = new double[N];
		H[i] = new double[N];
	}

	double * X_minus = new double[sizeA];
	double * X_plus = new double[sizeA];

	int idx_k = 0;		// pivot index of the current minimum-entropy candidate
	int idx_q = 0;		// alpha index of the current minimum-entropy candidate
	double min_H = 10000000000;	// running minimum entropy

	int stepX = N/NUM_STEPS;	// pivot stride; at least 1 so small N still iterates
	if (stepX <1)
		stepX=1;

	for(int k=EDGE-1 ; k <= N-EDGE-1; k=k+stepX)
	{
		/////////////////////////////////////////////////////////////////////
		// Locate the table entry that owns expanded index k; its value is p1.
		/////////////////////////////////////////////////////////////////////

		int dataIndex = 0;
		int dataTmp = TablePointer[dataIndex].NumberofElements;	// cumulative count through entry dataIndex
		int dataLeftover = 0;	// (cumulative count through the previous entry) - 1; 0 if the loop never runs
		while (k > dataTmp-1){	// dataTmp-1 because k is a 0-based index while dataTmp is a 1-based count
			dataLeftover = dataTmp-1;
			dataTmp += TablePointer[++dataIndex].NumberofElements;
		}
		double p1 = TablePointer[dataIndex].ValueofElement;

		// Split every entry's count into a left-of-k part (X_minus) and a
		// right-of-k part (X_plus), and precompute each entry's distance to p1.
		//
		// NOTE(review): the two counts assigned to entry dataIndex itself are not an
		// exact partition of its NumberofElements, but that entry's distance
		// fabs(tableValues[dataIndex] - p1) is exactly 0, so pow(0, alpha) == 0 for
		// every alpha on the grid (alpha >= 0.7) and the miscount contributes nothing.
		for (int i = 0; i < tableLength; i++){
			if (i < dataIndex){
				numValuesXminus[i] = TablePointer[i].NumberofElements;
				numValuesXplus[i] = 0;
			}
			if (i == dataIndex){
				numValuesXminus[i] = k-dataLeftover;
				numValuesXplus[i] = TablePointer[i].NumberofElements- (k-dataTmp+1);
			}
			// Entries i > dataIndex keep their previous full-X_plus assignment,
			// which stays valid because dataIndex only grows as k grows.
			changeTableValues[i] = fabs(tableValues[i]-p1);	// fabs: unambiguous double |.|
		}

		/* Reference (uncompressed) algorithm from AEPD_Fit(), with data[] sorted:
		 *   x_minus_tmp[0..k)    = |data[q] - p1|   for q <  k
		 *   x_plus_tmp[0..N-k)   =  data[q] - p1    for q >= k
		 *   X_minus[q] = sum(x_minus_tmp .^ alpha[q]) / N
		 *   X_plus[q]  = sum(x_plus_tmp  .^ alpha[q]) / N
		 * Here those sums collapse to count * distance^alpha per distinct value.
		 */

		for ( int q = 0; q < sizeA; q++){
			X_minus[q] = 0;
			X_plus[q] = 0;

			double alpha = start_A + q * inc_A;	// alpha[q] = 0.7 + q * 0.1
			for (int i = 0; i < tableLength; i++){
				X_minus[q] += numValuesXminus[i]*(pow(changeTableValues[i],alpha));
				X_plus[q] += numValuesXplus[i]*(pow(changeTableValues[i],alpha));
			}

			X_minus[q] = X_minus[q] / N;
			X_plus[q] = X_plus[q] / N;
		}

		// Produce candidate parameter sets as a function of (alpha, Sigma, Kappa),
		// used for the entropy minimization below.
		for (int q = 0; q < sizeA; q++){

			double alpha = start_A + q * inc_A;
			Kappa[q][k] = pow( X_minus[q] / X_plus[q], 1 / (2*(alpha +1)));

			Sigma[q][k] = pow((alpha*  pow( X_minus[q]*X_plus[q], alpha/ (2*(alpha+1)))\
				* ( pow(X_plus[q], 1/(alpha+1)) + pow(X_minus[q], 1/(alpha+1)))), 1/alpha);

			double sgngam = 0;	// sign output required by locallngamma; unused here
			H[q][k] = log(Sigma[q][k]) + locallngamma(1/alpha, &sgngam) \
				+ log(Kappa[q][k] + 1/Kappa[q][k]) + 1/alpha - log(alpha);

			// Keep record of the minimum-entropy index; the first and last two alpha
			// grid points are excluded as boundary values.
			if(q < sizeA -2 && q >=1 && H[q][k] < min_H ){
				min_H = H[q][k];
				idx_k = k;
				idx_q = q;
			}
		}
	}

	// Recover theta = data[idx_k] by walking the table the same way as above.
	int dataIndex2 = 0;
	int dataTmp2 = TablePointer[dataIndex2].NumberofElements;
	while (idx_k > dataTmp2-1)	// dataTmp2-1: 0-based index vs 1-based count
		dataTmp2 += TablePointer[++dataIndex2].NumberofElements;
	params[0] = TablePointer[dataIndex2].ValueofElement; // theta
	params[1] = Sigma[idx_q][idx_k]; // sigma
	params[2] = Kappa[idx_q][idx_k]; // kappa
	params[3] = start_A + idx_q * inc_A; // alpha

	for (int i = 0 ; i< sizeA ; i++){
		delete [] Kappa[i];
		delete [] Sigma[i];
		delete [] H[i];
	}

	delete [] Kappa;
	delete [] Sigma;
	delete [] H;
	delete [] X_minus;
	delete [] X_plus;
	free (numValuesXplus);
	free (numValuesXminus);
	free (tableValues);
	free (changeTableValues);
	return;
}
