/************************************************************************
 * This file is made to write and test GPU Kernels launching different
 * alpha values.
 *
 * At the peak there will be 80 x 500 x 512 threads launched.
 *************************************************************************/

#include <algorithm>
#include <fstream>
#include <iostream>
#include <vector>
#include <stdlib.h>
#include <string.h>
#include "math_functions.h"
#define windowSize 500
#define threadsPerBlock 512
#define EDGE 5
#define ae_pi 3.1415926535897932384626433832795
using namespace std;

double locallngamma(double x, double* sgngam);
double onealphaAEPD_Fit(double *x, int N, double *params, double alpha );
void localAEPD_Fit(double * x, int N, double * params);

/***********************************************************************
 * KernelTest2D tests launching 500x80 blocks of 512 threads (80x500x512 total)
 *
 **********************************************************************/

__global__ void  kernelTest2D(double * dev_data, double *dev_alpha, double *dev_plusMinus, double *dev_debugBig, double *dev_debug)
{
	/***********************************************************************
	 * Test kernel for 2D grid addressing.
	 *
	 * Expected launch configuration:
	 *   dim3 Grid(lengthDevData, lengthDevAlpha);   // e.g. (500, 80)
	 *   dim3 Block(16, 32);                         // 512 == threadsPerBlock
	 *
	 * Each block computes
	 *   sum over tid <= dataIndex of fabs(dev_data[tid] - p1) * alpha
	 * where p1 = dev_data[dataIndex] and alpha = dev_alpha[alphaIndex],
	 * and writes the scalar result to dev_plusMinus[blockID].
	 *
	 * blockID = blockIdx.x*gridDim.y + blockIdx.y gives each block a unique
	 * id over the 2D grid; blockID % windowSize selects the data point and
	 * blockID / windowSize selects the alpha (first 500 blocks use alpha[0],
	 * next 500 use alpha[1], ...).
	 *
	 * dev_debugBig / dev_debug are unused here (kept for a uniform signature).
	 ************************************************************************/

	int tid = threadIdx.x*blockDim.y + threadIdx.y;		// flat thread id within the block
	int blockID = blockIdx.x*gridDim.y + blockIdx.y;	// unique block id over the 2D grid
	int dataIndex = blockID % windowSize;				// which data point this block handles
	int alphaIndex = blockID / windowSize;				// which alpha this block handles
	int threadsInBlock = blockDim.y*blockDim.x;			// must equal threadsPerBlock (512)

	__shared__ double temp[threadsPerBlock];

	double alpha = dev_alpha[alphaIndex];
	double p1 = dev_data[dataIndex];

	// Threads at or below dataIndex contribute one term; everyone else
	// contributes 0 (the original's two zeroing branches were identical).
	// fabs() is used instead of abs() to guarantee the double overload.
	if (tid <= dataIndex)
		temp[tid] = fabs(dev_data[tid] - p1)*alpha;
	else
		temp[tid] = 0;

	__syncthreads();

	// Standard shared-memory tree reduction; requires threadsInBlock to be
	// a power of two (16*32 = 512 satisfies this).
	for (int i = threadsInBlock/2; i != 0; i /= 2) {
		if (tid < i)
			temp[tid] += temp[tid + i];
		__syncthreads();
	}

	// temp[0] now holds the block's sum; one thread writes it out.
	if (tid == 0)
		dev_plusMinus[blockID] = temp[0];
} // end kernelTest2D


/********************************************************************************
 * testCudaCopyWrapper is a wrapper function that will handle calls from C++ to Cuda
 * A function from another class will call this function and this function will
 * handle all the memory allocations, copies, and call the device kernels
 *
 *******************************************************************************/


void testCudaLaunchWrapper(double *data, double *manipulatedData)
{
	/***************************************************************************
	 * Host-side test driver for kernelTest2D.
	 *
	 * Copies data[0..499] and a synthetic alpha table (1..80) to the device,
	 * launches kernelTest2D with Grid(500, 80) x Block(16, 32), copies the
	 * 500x80 result array back, and prints the first 5 alpha-columns of
	 * every row for visual inspection.
	 *
	 * manipulatedData is currently unused (reserved for returning results).
	 ***************************************************************************/
	double *dev_data, *dev_alpha, *dev_plusMinus, *dev_debugBig, *dev_debug;
	double *alpha, *debugBig;

	int lengthDevData = 500;
	int lengthDevAlpha = 80;

	// allocate host memory (the unused debug1/debug2 buffers were removed)
	alpha = (double *) malloc (lengthDevAlpha*sizeof(double));
	debugBig = (double *) malloc (lengthDevData*lengthDevAlpha*sizeof(double));

	// synthetic alpha values 1, 2, ..., 80
	for (int i = 0; i < lengthDevAlpha; i++)
		alpha[i] = i+1;

	// allocate device memory
	cudaMalloc( (void **)&dev_data, sizeof(double) * lengthDevData );
	cudaMalloc( (void **)&dev_alpha, sizeof(double) * lengthDevAlpha );
	cudaMalloc( (void **)&dev_plusMinus, sizeof(double) * lengthDevAlpha * lengthDevData );
	cudaMalloc( (void **)&dev_debugBig, sizeof(double) * lengthDevAlpha * lengthDevData );
	cudaMalloc( (void **)&dev_debug, sizeof(double) * lengthDevAlpha );

	// host -> device copies
	cudaMemcpy( dev_data, data, sizeof(double) * lengthDevData, cudaMemcpyHostToDevice );
	cudaMemcpy( dev_alpha, alpha, sizeof(double) * lengthDevAlpha, cudaMemcpyHostToDevice );

	dim3 Grid(lengthDevData, lengthDevAlpha);
	dim3 Block(16, 32);		// 512 threads == threadsPerBlock

	kernelTest2D <<<Grid, Block>>> (dev_data, dev_alpha, dev_plusMinus, dev_debugBig, dev_debug);

	// Kernel launches do not return an error directly; surface
	// launch-configuration failures explicitly.
	cudaError_t cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess)
		cout << "kernelTest2D launch failed: " << cudaGetErrorString(cudaStatus) << endl;

	// Blocking D2H copy also synchronizes with the kernel above.
	cudaMemcpy(debugBig , dev_plusMinus, sizeof(double) * lengthDevData * lengthDevAlpha, cudaMemcpyDeviceToHost );
	for (int i = 0; i < lengthDevData; i++){
		for (int j = 0; j < 5; j++){
			cout << "      temp[" << j*lengthDevData + i << "] = " << debugBig[j*lengthDevData + i];
		}
		cout << endl;
	}

	// Free memories on host and device
	cudaFree(dev_data);
	cudaFree(dev_alpha);
	cudaFree(dev_plusMinus);
	cudaFree(dev_debugBig);
	cudaFree(dev_debug);
	free(alpha);
	free(debugBig);

	return;
} // end testCudaLaunchWrapper

/***********************************************************************************************************
 * kernelCalculateXplusXminus will calculate X_minus[k] and X_plus[k] for each k = blockId,
 * and each alpha value, so it will calculate 500 x 80 = 40,000 values.
 *
 * The results will be stored back to global memories in dev_X_minus[] and dev_X_plus[]
 * each storing 40,000*8 bytes = 320,000 bytes
 *
 * Note that on the host side: X_minus[k] = (sum from 0 to k) X_minus[k]
 * So we need to launch another Kernel to do the final calculations of X_minus.
 *
 * The following code is from AEPD_Fit(), in our case, we change X_left = k to X_left = blockId
 *	for( int q = 0; q< 512; q++){
 *		if (q < X_left)
 *			x_minus_tmp[q] = abs(data[q] - p1);
 *		else
 *			x_plus_tmp[q - X_left] = data[q] - p1;
 *	}
 *	for (int t = 0; t < 512; t++){
 *		if (t < X_left)
 *			X_minus += pow(x_minus_tmp[t], alpha) / 512;
 *		else
 *			X_plus += pow(x_plus_tmp[t - X_left], alpha)/ 512;
 *************************************************************************************************************/

__global__ void  kernelCalculateXplusXminus (double * dev_data, double *dev_alpha, double *dev_X_plus, double *dev_X_minus, double *dev_debugBig, double *dev_debug)
{
	/***********************************************************************
	 * Computes the per-(k, alpha) terms X_minus[k] and X_plus[k] for every
	 * (data point, alpha) pair: 500 x 80 = 40,000 scalar results.
	 *
	 * Expected launch configuration:
	 *   dim3 Grid(lengthDevData, lengthDevAlpha);   // (500, 80)
	 *   dim3 Block(16, 32);                         // 512 == threadsPerBlock
	 *
	 * Host equivalent (from AEPD_Fit(), with X_left = dataIndex):
	 *   tid in window, tid <= X_left : X_minus += pow(fabs(data[tid]-p1), alpha)/windowSize
	 *   tid in window, tid >  X_left : X_plus  += pow(fabs(data[tid]-p1), alpha)/windowSize
	 * (tid == X_left contributes pow(0, alpha) == 0 either way for alpha > 0.)
	 *
	 * blockID = blockIdx.x*gridDim.y + blockIdx.y is unique per block;
	 * blockID % windowSize selects the data point, blockID / windowSize the
	 * alpha.  The per-block sums go to dev_X_minus[blockID] and
	 * dev_X_plus[blockID]; a later kernel performs the running sums over k.
	 *
	 * dev_debugBig / dev_debug are unused (kept for a uniform signature).
	 ************************************************************************/

	int tid = threadIdx.x*blockDim.y + threadIdx.y;		// flat thread id within the block
	int blockID = blockIdx.x*gridDim.y + blockIdx.y;	// unique block id over the 2D grid
	int dataIndex = blockID % windowSize;				// which data point this block handles
	int alphaIndex = blockID / windowSize;				// which alpha this block handles
	int threadsInBlock = blockDim.y*blockDim.x;			// must equal threadsPerBlock (512)
	double alpha = dev_alpha[alphaIndex];
	double p1 = dev_data[dataIndex];

	__shared__ double temp_X_plus [threadsPerBlock];
	__shared__ double temp_X_minus [threadsPerBlock];

	// Split the window at dataIndex: lower part feeds X_minus, upper part
	// feeds X_plus; threads past windowSize contribute nothing (the
	// original's two redundant zeroing branches were merged).
	// fabs() is used instead of abs() to guarantee the double overload.
	if (tid <= dataIndex){
		temp_X_minus[tid] = pow(fabs(dev_data[tid]-p1), alpha)/windowSize;
		temp_X_plus[tid] = 0;
	}
	else if (tid < windowSize){
		temp_X_minus[tid] = 0;
		temp_X_plus[tid] = pow(fabs(dev_data[tid]-p1), alpha)/windowSize;
	}
	else {
		temp_X_minus[tid] = 0;
		temp_X_plus[tid] = 0;
	}

	__syncthreads();

	// Shared-memory tree reduction over both arrays at once; requires
	// threadsInBlock to be a power of two (16*32 = 512 satisfies this).
	for (int i = threadsInBlock/2; i != 0; i /= 2) {
		if (tid < i){
			temp_X_minus[tid] += temp_X_minus[tid + i];
			temp_X_plus[tid] += temp_X_plus[tid + i];
		}
		__syncthreads();
	}

	// One thread per block publishes the two sums at the block's unique slot.
	if (tid == 0){
		dev_X_plus[blockID] = temp_X_plus[0];
		dev_X_minus[blockID] = temp_X_minus[0];
	}
}// end kernelCalculateXplusXminus


/***********************************************************************************************
 * kernelCalculateSumXplusXminus() sums the values in
 * global memories dev_X_minus[] and dev_X_plus[]
 *
 * There are 500 * 80 elements in dev_X_minus[] and dev_X_plus[]
 *
 * int blockID = blockIdx.x*gridDim.y + blockIdx.y;	// blockID range = [0 <-> 500*80 -1]
 * int blockVaryingIndex = blockID % windowSize; 	// blockVaryingIndex range = [ 0 <-> 500 -1]
 * int dataVaryingIndex = blockID / windowSize;		// dataVaryingIndex range = [ 0 <-> 80 -1]
 *
 * EX: for blocks with dataVaryingIndex = 0 (500 blocks)
 *
 * These 500 blocks get the same data from dev_X_minus[] and dev_X_plus[]
 * Then each block within those 500 blocks takes a portion of the data:
 *
 * Ex; For blockVaryingIndex = i, k = i, then X_minus will sum dev_X_minus from 0 to i and put the result in
 *
 * This Kernel does not need any data to be copied in or out.  It is simply a calculation Kernel.
 *
 **********************************************************************************************/

__global__ void kernelCalculateSumXplusXminus (double * dev_X_plus, double * dev_X_minus, double * dev_sumXplus, double * dev_sumXminus, double *dev_debugBig)
{
	///////////////////////////////////////////////////////////////////////
	//
	//  Turns the per-(k, alpha) terms in dev_X_plus[] / dev_X_minus[] into
	//  the running sums X_plus[k] / X_minus[k] that AEPD_Fit() needs: block
	//  (k, alpha) sums elements EDGE-1 .. k of its alpha's windowSize-element
	//  chunk and writes one scalar per output array.
	//
	//  Expected launch configuration (same geometry as the producer kernel):
	//    dim3 Grid(lengthDevData, lengthDevAlpha);   // (500, 80)
	//    dim3 Block(16, 32);                         // 512 == threadsPerBlock
	//
	//  blockID = blockIdx.x*gridDim.y + blockIdx.y gives each block a unique
	//  id over the 2D grid; no data is copied in or out around this kernel --
	//  it reads and writes device-global arrays only.
	//
	//  dev_debugBig is unused (kept for a uniform signature).
	//
	////////////////////////////////////////////////////////////////////////

	int tid =threadIdx.x *blockDim.y + threadIdx.y;
	int blockID = blockIdx.x*gridDim.y + blockIdx.y;
	int blockVaryingIndex = blockID % windowSize;		// which k this block sums up to; range [0, windowSize-1]
	int dataVaryingIndex = blockID / windowSize;		// which alpha chunk this block reads; range [0, 79]
	int threadsInBlock = blockDim.y*blockDim.x;

	// NOTE(review): the shared arrays are sized by the compile-time constant
	// threadsPerBlock, so the launch must use exactly 512 threads per block
	// (threadsInBlock == threadsPerBlock) -- Block(16, 32) satisfies this.
	__shared__ double temp [threadsPerBlock];
	__shared__ double temp2 [threadsPerBlock];
	temp[tid] = 0;
	temp2[tid] = 0;

	////////////////////////////////////////////////////////////////////////////////////
	//
	// Load stage: thread tid stages element tid of this block's alpha chunk
	// so the block can sum it, but only when
	//   - the block's target index k is inside the EDGE window
	//     (mirrors the host loop: for k = EDGE-1 .. windowSize-EDGE-1), and
	//   - tid <= k (running sum up to k) and tid itself is inside the window.
	// All other threads leave their slots at 0 so they contribute nothing.
	//
	// Chunk layout: dev_X_minus / dev_X_plus hold 80 chunks of windowSize
	// elements; chunk j occupies indices [j*windowSize, j*windowSize + windowSize-1].
	//
	////////////////////////////////////////////////////////////////////////////////////

	if(blockVaryingIndex >= EDGE-1 && blockVaryingIndex <= windowSize-EDGE-1){
		if (tid <= blockVaryingIndex && tid >= EDGE-1 && tid <= windowSize-EDGE-1){
			// index element tid of this block's alpha chunk
			temp[tid] = dev_X_minus[dataVaryingIndex*windowSize + tid];
			temp2[tid] = dev_X_plus[dataVaryingIndex*windowSize + tid];
		}
		else{
			temp2[tid] = 0;
			temp[tid] = 0;
		}
	}
	else{
		temp2[tid] = 0;
		temp[tid] = 0;
	}
	__syncthreads();

	///////////////////////////////////////////////////////////////////////////////////////
	// Shared-memory tree reduction over both arrays; after the loop temp[0]
	// and temp2[0] hold this block's sums.  Requires threadsInBlock to be a
	// power of two (512 satisfies this).
	///////////////////////////////////////////////////////////////////////////////////////

	int i = threadsInBlock/2;  // first reduction stride: half the threads in the block
	while (i != 0) {
		if (tid < i){
			temp[tid] += temp[tid + i];
			temp2[tid] += temp2[tid + i];
		}
		__syncthreads();
		i /= 2;
	}

	/////////////////////////////////////////////////////////////////////////////////////////
	//
	// One thread per block writes the two sums to the block's unique slot
	// dataVaryingIndex*windowSize + blockVaryingIndex (same chunk layout as
	// the input arrays).
	//
	///////////////////////////////////////////////////////////////////////////////////////
	if(tid == 0){
		dev_sumXminus[dataVaryingIndex*windowSize + blockVaryingIndex] = temp[0];
		dev_sumXplus[dataVaryingIndex*windowSize + blockVaryingIndex] = temp2[0];
	}

} // end kernelCalculateSumXplusXminus

///////////////////////////////////////////////////////////////////////////////////////////////////////
// kernelcalculateParameters() is a CUDA kernel
// it calculates Kappa[k][q], Sigma[k][q], and H[k][q]
// where k and q make up the index to dev_X_minus and dev_X_plus
//
// This Kernel uses the values of dev_sumXminus[] and dev_sumXplus[] that were calculated by
// the kernelCalculateSumXplusXminus() Kernel.
//
// currently launches with Blocks(16,32) Grid(10,8)
// This is done to be able to debug as each block simulates one alpha
// because of the fact that we are trying to find a minimum over all H[k][q]
// the launch could be varied
//
///////////////////////////////////////////////////////////////////////////////////////////////////////

__global__ void kernelCalculateParameters (double *dev_sumXminus, double *dev_sumXplus, double *dev_H, double *dev_Kappa, double *dev_Sigma, double *dev_dataParam, double *dev_data, double *dev_alpha, double *dev_lnGammaofAlpha, double *dev_debugKCP, double *dev_debugBig)
{
	///////////////////////////////////////////////////////////////////////////////////////////
	// Computes Kappa[k], Sigma[k], H[k] for every k in the EDGE window, then
	// min-reduces H within the block.  Each block handles one alpha value and
	// writes that alpha's minimal H (with its Kappa, Sigma, and the data
	// point at the minimizing k) to dev_H / dev_Kappa / dev_Sigma /
	// dev_dataParam[blockID].
	//
	// Expected launch configuration:
	//   dim3 GridKCP(10, 8);     // 80 blocks: one per alpha
	//   dim3 BlockKCP(16, 32);   // 512 threads == threadsPerBlock
	//
	// dev_sumXminus / dev_sumXplus layout: 80 chunks of windowSize values;
	// chunk i (computed with alpha[i]) occupies indices
	// [i*windowSize, i*windowSize + windowSize-1].  Thread tid of block i
	// reads element tid of chunk i.
	//
	// dev_debugKCP / dev_debugBig are unused (kept as debugging hooks).
	///////////////////////////////////////////////////////////////////////////////////////////

	int tid = threadIdx.x*blockDim.y + threadIdx.y;		// flat thread id within the block
	int blockID = blockIdx.x*gridDim.y + blockIdx.y;	// one block per alpha: range [0, 79]
	int threadsInBlock = blockDim.y*blockDim.x;			// must equal threadsPerBlock (512)

	double alpha = dev_alpha[blockID];					// map alpha[i] to block[i]
	double LnGamma = dev_lnGammaofAlpha[blockID];		// precomputed log-gamma(1/alpha[i])
	double X_minus;
	double X_plus;

	// Guard: tid runs to 511 but each chunk has only windowSize (500) entries.
	if (tid < windowSize){
		X_minus = dev_sumXminus[blockID*windowSize + tid];
		X_plus = dev_sumXplus[blockID*windowSize + tid];
	}
	else{
		X_minus = 0;
		X_plus = 0;
	}

	// NOTE: four shared arrays of threadsPerBlock entries -- close to the
	// shared-memory limit; threadsInBlock must equal threadsPerBlock.
	__shared__ double tempKappa [threadsPerBlock];
	__shared__ double tempSigma [threadsPerBlock];
	__shared__ double tempH [threadsPerBlock];
	__shared__ int indexValue[threadsPerBlock];

	indexValue[tid] = tid;	// remember which k produced each H, for the dataParam lookup

	/**********************************************************************************
	 * From AEPD_Fit():
	 *
	 * for (int k = EDGE-1 ; k < windowSize - EDGE - 1; k++){
	 *		Kappa[k] = pow( X_minus / X_plus, 1 / (2*(alpha +1)));
	 *		Sigma[k] = pow((alpha*  pow( X_minus*X_plus, alpha/ (2*(alpha+1)))\
	 *         * ( pow(X_plus, 1/(alpha+1)) + pow(X_minus, 1/(alpha+1)))), 1/alpha);
	 *		H[k] = log(Sigma[k]) + locallngamma(1/alpha, &sgngam) \
	 *	  		+ log(Kappa[k] + 1/Kappa[k]) + 1/alpha - log(alpha);
	 * }
 	 ***********************************************************************************/

	if (tid >=EDGE-1 && tid <= windowSize - EDGE - 1){
		tempKappa[tid] = pow(X_minus / X_plus, 1/(2*(alpha+1)));
		tempSigma[tid] = pow((alpha*  pow( X_minus*X_plus, alpha/ (2*(alpha+1)))\
			* ( pow(X_plus, 1/(alpha+1)) + pow(X_minus, 1/(alpha+1)))), 1/alpha);
		if(tempKappa[tid] == 0){
			// degenerate fit: disqualify this k with a large sentinel
			tempH[tid] = 1e10;
		}
		else{
			tempH[tid] = log(tempSigma[tid]) + LnGamma\
					+ log(tempKappa[tid] + 1/tempKappa[tid]) + 1/alpha - log(alpha);
		}
	}
	else{
		tempKappa[tid] = 0;
		tempSigma[tid] = 0;
		// BUG FIX: was 0 -- a zero here could beat every genuine positive H
		// in the min-reduction below, so an out-of-window thread would win.
		// Use the same sentinel as the degenerate-Kappa case instead.
		tempH[tid] = 1e10;
	}

	// BUG FIX: barrier added.  The reduction below reads tempH / tempKappa /
	// tempSigma / indexValue entries written by OTHER threads; without a
	// block-wide sync between the writes above and those reads, this is a
	// shared-memory data race.
	__syncthreads();

	////////////////////////////////////////////////////////////////////////////////////////
	// Min-reduction on H; Kappa, Sigma, and the originating index k travel
	// with the minimum.  Requires threadsInBlock to be a power of two.
	////////////////////////////////////////////////////////////////////////////////////////

	int i = threadsInBlock/2;
	while (i != 0) {
		if (tid < i){
			if (tempH[tid] > tempH[tid+i]){
				tempH[tid] = tempH[tid+i];
				tempKappa[tid] = tempKappa[tid+i];
				tempSigma[tid] = tempSigma[tid+i];
				indexValue[tid] = indexValue[tid+i];
			}
		}
		__syncthreads();
		i /= 2;
	}

	// One thread per block publishes this alpha's best parameters.
	if (tid == 0){
		dev_Kappa[blockID] = tempKappa[0];
		dev_Sigma[blockID] = tempSigma[0];
		dev_dataParam[blockID] = dev_data[indexValue[0]];	// theta = data point at the minimizing k
		dev_H [blockID] = tempH[0];
	}

} // end kernelCalculateParameters



/*******************************************************************************************************************
 *
 * This is a wrapper function that will handle calls from C++ to Cuda
 * A function from another class will call this function and this function will
 * handle all the memory allocations, copies, and call the device kernels
 *
 * Specifically, this wrapper function will be used to implement and test the CUDA version of AEPD_Fit()
 * from AEPD_Dist.cpp
 *
 ****************************************************************************************************************/


void CUDAAEPD_DistWrapper (double *data, double *param){

	/****************************************************************************
	 * CUDA implementation of AEPD_Fit(): fits an AEPD to the windowSize data
	 * points in data[] over 80 candidate alpha values and returns the best
	 * parameters in param[0..3] = { theta, sigma, kappa, alpha }.
	 *
	 * NOTE: data[] is sorted in place (callers see the sorted array), exactly
	 * as the original did.
	 *
	 * Pipeline:
	 *   1. sort data ascending (as onealphaAEPD_Fit() does)
	 *   2. kernelCalculateXplusXminus    -> per-(k, alpha) partial terms
	 *   3. kernelCalculateSumXplusXminus -> running sums X_minus / X_plus
	 *   4. kernelCalculateParameters    -> per-alpha minimal H, Kappa, Sigma
	 *   5. host-side min over the 80 per-alpha minima
	 ****************************************************************************/

	// std::sort replaces the original O(n^2) exchange sort; same result.
	std::sort(data, data + windowSize);

	double *dev_data, *dev_alpha, *dev_X_plus, *dev_X_minus, *dev_debugBig, *dev_debug, *dev_lnGammaofAlpha;
	double *alpha, *debugBig, *logGammaofAlpha;

	int lengthDevData = windowSize;
	int lengthDevAlpha = 80;

	// allocate host memory (unused debug1/debug2 buffers were removed;
	// debugBig is retained for re-enabling the device-dump debugging below)
	alpha = (double *) malloc (lengthDevAlpha*sizeof(double));
	debugBig = (double *) malloc (lengthDevData*lengthDevAlpha*sizeof(double));
	logGammaofAlpha = (double *) malloc (lengthDevAlpha*sizeof(double));

	// candidate alphas 0.7, 0.8, ... and their precomputed log-gamma(1/alpha)
	double sgngam = 0;
	for (int i = 0; i < lengthDevAlpha; i++){
		alpha[i] = 0.7 + i*0.1;
		logGammaofAlpha[i] = locallngamma(1/alpha[i], &sgngam);
	}

	// allocate device memory
	cudaMalloc( (void **)&dev_data, sizeof(double) * lengthDevData );
	cudaMalloc( (void **)&dev_alpha, sizeof(double) * lengthDevAlpha );
	cudaMalloc( (void **)&dev_lnGammaofAlpha, sizeof(double) * lengthDevAlpha );
	cudaMalloc( (void **)&dev_X_plus, sizeof(double) * lengthDevAlpha * lengthDevData );
	cudaMalloc( (void **)&dev_X_minus, sizeof(double) * lengthDevAlpha * lengthDevData );
	cudaMalloc( (void **)&dev_debugBig, sizeof(double) * lengthDevAlpha * lengthDevData );
	cudaMalloc( (void **)&dev_debug, sizeof(double) * lengthDevAlpha );

	// host -> device copies.  BUG FIX: the original issued an extra
	// cudaMemcpy to dev_lnGammaofAlpha BEFORE cudaMalloc'ing it (a copy
	// through an uninitialized pointer); that premature copy is removed and
	// only the post-allocation copy remains.
	cudaMemcpy( dev_data, data, sizeof(double) * lengthDevData, cudaMemcpyHostToDevice );
	cudaMemcpy( dev_alpha, alpha, sizeof(double) * lengthDevAlpha, cudaMemcpyHostToDevice );
	cudaMemcpy( dev_lnGammaofAlpha, logGammaofAlpha, sizeof(double)*lengthDevAlpha, cudaMemcpyHostToDevice );

	dim3 Grid(lengthDevData, lengthDevAlpha);
	dim3 Block(16, 32);		// 512 threads == threadsPerBlock

	// step 2: per-(k, alpha) partial terms of X_plus / X_minus
	kernelCalculateXplusXminus <<<Grid, Block>>> (dev_data, dev_alpha, dev_X_plus, dev_X_minus, dev_debugBig, dev_debug);

	// step 3: running sums, same launch geometry as step 2
	double *dev_sumXplus, *dev_sumXminus;

	cudaMalloc( (void **)&dev_sumXplus, sizeof(double) * lengthDevAlpha * lengthDevData );
	cudaMalloc( (void **)&dev_sumXminus, sizeof(double) * lengthDevAlpha * lengthDevData );

	kernelCalculateSumXplusXminus <<< Grid, Block >>> (dev_X_plus, dev_X_minus, dev_sumXplus, dev_sumXminus, dev_debugBig);

	// step 4: per-alpha minimal parameters; 80 blocks, one per alpha
	int KCP_devSize = 80;	// == lengthDevAlpha; make dynamic if the alpha grid changes

	double *dev_Kappa, *dev_Sigma, *dev_dataParam, *dev_debugKCP, *dev_H;
	double *Kappa, *Sigma, *dataParam, *H;

	// host-side result buffers
	Kappa = (double *) malloc(sizeof(double) * lengthDevAlpha);
	Sigma = (double *) malloc(sizeof(double) * lengthDevAlpha);
	dataParam = (double *) malloc(sizeof(double) * lengthDevAlpha);
	H = (double *) malloc(sizeof(double)*lengthDevAlpha);

	cudaMalloc( (void **)&dev_H, sizeof(double) * KCP_devSize);
	cudaMalloc( (void **)&dev_Kappa, sizeof(double) * KCP_devSize);
	cudaMalloc( (void **)&dev_Sigma, sizeof(double) * KCP_devSize);
	cudaMalloc( (void **)&dev_dataParam, sizeof(double) * KCP_devSize);
	cudaMalloc( (void **)&dev_debugKCP, sizeof(double) * KCP_devSize);

	dim3 BlockKCP(16,32);
	dim3 GridKCP(10,8);		// 10*8 = 80 blocks: one per alpha

	kernelCalculateParameters <<<GridKCP, BlockKCP>>> (dev_sumXminus, dev_sumXplus, dev_H, dev_Kappa, dev_Sigma, dev_dataParam, dev_data, dev_alpha, dev_lnGammaofAlpha, dev_debugKCP, dev_debugBig);

	// Blocking D2H copies implicitly synchronize with the kernels above.
	cudaMemcpy(Kappa , dev_Kappa, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );
	cudaMemcpy(Sigma , dev_Sigma, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );
	cudaMemcpy(dataParam , dev_dataParam, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );
	cudaMemcpy(H , dev_H, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );

	// report any launch/execution error accumulated across the pipeline
	cudaError_t cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess)
		cout << "CUDAAEPD_DistWrapper: CUDA error: " << cudaGetErrorString(cudaStatus) << endl;

	// step 5: host-side min over the 80 per-alpha minima of H
	double minH = 1e10;
	int indexAlpha = 0;
	for (int i = 0; i < lengthDevAlpha; i++){
		if (H[i] < minH){
			minH = H[i];
			indexAlpha = i;
		}
	}

	param[0] = dataParam[indexAlpha]; // theta
	param[1] = Sigma[indexAlpha]; // sigma
	param[2] = Kappa[indexAlpha]; // kappa
	param[3] = alpha[indexAlpha]; // alpha

	// Free memories on host and device
	cudaFree(dev_data);
	cudaFree(dev_alpha);
	cudaFree(dev_X_plus);
	cudaFree(dev_X_minus);
	cudaFree(dev_sumXplus);
	cudaFree(dev_sumXminus);
	cudaFree(dev_debugBig);
	cudaFree(dev_debug);
	cudaFree(dev_lnGammaofAlpha);
	cudaFree(dev_Kappa);
	cudaFree(dev_Sigma);
	cudaFree(dev_dataParam);
	cudaFree(dev_debugKCP);
	cudaFree(dev_H);

	free(alpha);
	free(logGammaofAlpha);
	free(debugBig);
	free(H);
	free(Kappa);
	free(Sigma);
	free(dataParam);
	return;
}// end CUDAAEPD_DistWrapper



int main(void){
	/////////////////////////////////////////////////////////////////////////
	// Test to see the maximum # of threads launched is exceeded.
	/////////////////////////////////////////////////////////////////////////
	double *data, *manipulatedData;
	data = (double *) malloc(512*sizeof(double));
	manipulatedData = (double *) malloc(512*sizeof(double));
	if (data == NULL || manipulatedData == NULL){
		cout << "Error allocating host test buffers" << endl;
		free(data);
		free(manipulatedData);
		return 1;
	}
	for (int i = 0; i<512; i++){
		data[i] = i*0.01;
	}
//	testCudaLaunchWrapper(data, manipulatedData);

	//////////////////////////////////////////////////////////////////////
	// Now test alek's file with one launch
	// First read in the data array (500 points) and then pass
	// it to the CUDA wrapper
	/////////////////////////////////////////////////////////////////////
	double *dataArray, *param;
	dataArray = (double *) malloc (windowSize*sizeof(double));
	param = (double *) malloc (4 * sizeof(double));
	fstream myfile("gbpusd.txt", ios::in);
	if (!myfile.is_open()){
		cout << "Error opening gbpusd text file " << endl;
		// FIX: free every host buffer on this early-exit path; the
		// original leaked data, manipulatedData, dataArray and param.
		free(data);
		free(manipulatedData);
		free(dataArray);
		free(param);
		return 1;
	}
	double d = 0;
	int index= 0;
	while (index < windowSize){
		// FIX: verify each extraction so a short or corrupt file is
		// reported instead of silently repeating the last value read.
		if (!(myfile >> d)){
			cout << "Error reading gbpusd text file " << endl;
			myfile.close();
			free(data);
			free(manipulatedData);
			free(dataArray);
			free(param);
			return 1;
		}
		dataArray[index++] = d;
	}
	// Finished reading and assigning data

	///////////////////////////////////////////////////////////////////////
	// Call Cuda version of AEPDFit();
	///////////////////////////////////////////////////////////////////////

	CUDAAEPD_DistWrapper (dataArray, param);

	cout << "dataParam = " << param[0] << endl;
	cout << "Sigma = " << param[1] << endl;
	cout << "Kappa = " << param[2] << endl;
	cout << "Alpha = " << param[3] << endl;

	////////////////////////////////////////////////////////////////////////////
	// Host-side reference: sweep the full alpha grid with the C++
	// one-alpha fit and report the minimum-entropy parameter set.
	///////////////////////////////////////////////////////////////////////////
	double *alpha;
	alpha = (double *) malloc(sizeof(double) * 80);

	double minH = 1e10;
	int indexAlpha = 0;

	double Kappa, Sigma, dataParam, H;

	Kappa = 0;
	Sigma = 0;
	dataParam = 0;
	H = 1e10;

	for (int i = 0; i< 80; i++){
		alpha[i] = 0.7 + i*0.1;
		H = onealphaAEPD_Fit(dataArray, windowSize, param, alpha[i]);

		// The first alpha and the last two are excluded from the
		// minimum search (same windowing as the device-side selection).
		if (i<80-2 && i > 0 && H< minH){
			minH = H;
			indexAlpha = i;

			dataParam = param[0];
			Sigma = param[1];
			Kappa = param[2];
		}
	}
	cout << "Calculations from host side:" << endl;

	cout << "Kappa = " << Kappa;
	cout << "     Sigma = " << Sigma;
	cout << "     dataParam = " << dataParam;
	cout << "     alpha = " << alpha[indexAlpha];
	cout << endl;

	myfile.close();
	// FIX: the original leaked data and manipulatedData on the normal path.
	free (data);
	free (manipulatedData);
	free (dataArray);
	free (param);
	free (alpha);
	return 0;
}



/***************************************************************
 * onealphaAEPD_Fit calculates the best AEPD parameters (theta, sigma,
 * kappa) for one fixed value of alpha, mirroring the per-alpha work
 * that localAEPD_Fit() performs over the whole alpha grid.  It is
 * written to aid in testing of the CUDA code.
 *
 * x      : input data window (not modified; a sorted copy is used)
 * N      : number of samples in x
 * params : out; params[0]=theta, params[1]=sigma, params[2]=kappa,
 *          params[3]=alpha
 * alpha  : the fixed alpha value to evaluate
 *
 * it returns the minimum entropy value found over the candidate
 * split points k.
 ***************************************************************/

double onealphaAEPD_Fit(double *x, int N, double * params, double alpha){
	// Work on a sorted copy so the caller's buffer stays untouched.
	vector<double> data(x, x+N);
	double tmp;
	for(int i = 0;i < N-1 ;i++){
		for (int j = i + 1;j < N;j++){
			if (data[i] > data[j]){
				tmp  = data[i];
				data[i] = data[j];
				data[j] = tmp;
			}
		}
	}

	int NUM_STEPS = 500;

	int idx_k = 0;          // split index minimizing the entropy H
	double min_H = 10000000000;
	double bestKappa = 0;   // Kappa at idx_k
	double bestSigma = 0;   // Sigma at idx_k

	int stepX = N/NUM_STEPS; // stride over candidate split points
	if (stepX <1)
		stepX=1;

	// EDGE points on each side are excluded as split candidates.
	for(int k=EDGE-1 ; k <= N-EDGE-1; k=k+stepX)
	{
		double p1 = data[k];
		int X_left = k; // number of samples strictly below the split

		// BUG FIX: X_minus / X_plus must be reset for every k.  The
		// original declared them once before the loop and kept
		// accumulating across all split points, unlike localAEPD_Fit()
		// which zeroes them per k.
		double X_minus = 0;
		double X_plus = 0;

		/* data[] is sorted, so (data[t] - p1) is <= 0 to the left of
		 * the split and >= 0 to the right; the absolute value is only
		 * needed on the left.  fabs (not abs) is used so the double
		 * overload is always taken.
		 */
		for (int t = 0; t < N; t++){
			if (t < X_left)
				X_minus += pow(fabs(data[t] - p1), alpha) / N;
			else
				X_plus += pow(data[t] - p1, alpha)/ N;
		}

		double Kappa = pow( X_minus / X_plus, 1 / (2*(alpha +1)));

		double Sigma = pow((alpha*  pow( X_minus*X_plus, alpha/ (2*(alpha+1)))\
			* ( pow(X_plus, 1/(alpha+1)) + pow(X_minus, 1/(alpha+1)))), 1/alpha);

		double sgngam = 0;
		double H = log(Sigma) + locallngamma(1/alpha, &sgngam) \
			+ log(Kappa + 1/Kappa) + 1/alpha - log(alpha);

		// keep record of the minimum entropy split
		if (H < min_H){
			min_H = H;
			idx_k = k;
			bestKappa = Kappa;
			bestSigma = Sigma;
		}
	}

	params[0] = data[idx_k]; // theta
	params[1] = bestSigma; // sigma
	params[2] = bestKappa; // kappa
	params[3] = alpha; // alpha
	return min_H;
} // End onealphaAEPD_Fit()



/*************************************************************************************
 * Log-Gamma function adapted from the Alglib project.
 *
 * Returns log(|Gamma(x)|); the sign of Gamma(x) is written to *sgngam
 * (+1 or -1).  Three regimes are handled: the reflection formula for
 * x < -34, a rational approximation after argument reduction for x < 13,
 * and a Stirling-series expansion for large x.
 ***************************************************************************************/

double locallngamma(double x, double* sgngam)
{
    const double pi_value = 3.1415926535897932384626433832795;
    const double logpi = 1.14472988584940017414;  // log(pi)
    const double ls2pi = 0.91893853320467274178;  // log(sqrt(2*pi))

    *sgngam = 1;

    // Large negative arguments: reflection formula
    // Gamma(x) * Gamma(1-x) = pi / sin(pi*x).
    if( x<-34.0 )
    {
        double q = -x;
        double tmp;
        double w = locallngamma(q, &tmp);
        double p = floor(q);
        int i = (int)floor(p+0.5);
        *sgngam = (i%2==0) ? -1 : 1;
        double z = q-p;
        if( z>0.5 )
        {
            p = p+1;
            z = p-q;
        }
        z = q*sin(pi_value*z);
        return logpi-log(z)-w;
    }

    // Moderate arguments: reduce x into [2,3) collecting the product in z,
    // then apply a rational approximation.
    if( x<13 )
    {
        double z = 1;
        double p = 0;
        double u = x;
        while(u>=3)
        {
            p = p-1;
            u = x+p;
            z = z*u;
        }
        while(u<2)
        {
            z = z/u;
            p = p+1;
            u = x+p;
        }
        if( z<0 )
        {
            *sgngam = -1;
            z = -z;
        }
        else
        {
            *sgngam = 1;
        }
        if( u==2 )
        {
            return log(z);
        }
        p = p-2;
        x = x+p;

        // Horner evaluation of the rational approximation x*B(x)/C(x).
        static const double B[6] = {
            -1378.25152569120859100,
            -38801.6315134637840924,
            -331612.992738871184744,
            -1162370.97492762307383,
            -1721737.00820839662146,
            -853555.664245765465627 };
        static const double C[7] = {
            1,
            -351.815701436523470549,
            -17064.2106651881159223,
            -220528.590553854454839,
            -1139334.44367982507207,
            -2532523.07177582951285,
            -2018891.41433532773231 };
        double b = B[0];
        for (int j = 1; j < 6; j++)
            b = B[j]+x*b;
        double c = C[0];
        for (int j = 1; j < 7; j++)
            c = C[j]+x*c;
        return log(z)+x*b/c;
    }

    // Large arguments: Stirling's expansion.
    double q = (x-0.5)*log(x)-x+ls2pi;
    if( x>100000000 )
    {
        return q;
    }
    double p = 1/(x*x);
    if( x>=1000.0 )
    {
        q = q+((7.9365079365079365079365*0.0001*p-2.7777777777777777777778*0.001)*p+0.0833333333333333333333)/x;
    }
    else
    {
        // Horner evaluation of the asymptotic correction series in p = 1/x^2.
        static const double A[5] = {
            8.11614167470508450300*0.0001,
            -5.95061904284301438324*0.0001,
            7.93650340457716943945*0.0001,
            -2.77777777730099687205*0.001,
            8.33333333333331927722*0.01 };
        double a = A[0];
        for (int j = 1; j < 5; j++)
            a = A[j]+p*a;
        q = q+a/x;
    }
    return q;

} // end locallngamma

//////////////////////////////////////////////////////////////////////////////////////////////////////
//
// localAEPD_Fit
// CPU reference implementation of AEPD_Fit() with no external calls: it
// iterates over the whole grid of alpha values (0.7 + q*0.1, q = 0..79)
// and all candidate split points k, and returns the parameter set that
// minimizes the entropy H.
//
// x      : input data window (not modified; a sorted copy is used)
// N      : number of samples in x
// params : out; params[0]=theta, params[1]=sigma, params[2]=kappa,
//          params[3]=alpha
//
//////////////////////////////////////////////////////////////////////////////////////////////////////

void localAEPD_Fit(double * x, int N, double * params){

	// Sort a copy of the data; the split-point logic below relies on order.
	vector<double> data(x, x+N);
	double tmp;
	for(int i = 0;i < N-1 ;i++){
		for (int j = i + 1;j < N;j++){
			if (data[i] > data[j]){
				tmp  = data[i];
				data[i] = data[j];
				data[j] = tmp;
			}
		}
	}

	double INCREMENT_ALPHA = 0.1;
	int sizeALPHA = 80;
	int NUM_STEPS = 500;

	double inc_A = INCREMENT_ALPHA; // iteration increment of the alpha parameter
	double start_A = 0.7;           // value to start the alpha iteration
	int sizeA = sizeALPHA;          // number of alpha values to evaluate

	// Per-(alpha, split) parameter tables; only the visited k entries are filled.
	double ** Kappa = new double*[sizeA];
	double ** Sigma = new double*[sizeA];
	double ** H = new double*[sizeA];
	for (int i = 0 ; i< sizeA ; i++){
		Kappa[i] = new double[N];
		Sigma[i] = new double[N];
		H[i] = new double[N];
	}

	double * X_minus = new double[sizeA];
	double * X_plus = new double[sizeA];

	int idx_k = 0;            // best split index
	int idx_q = 0;            // best alpha index
	double min_H = 10000000000;

	int stepX = N/NUM_STEPS;  // stride over candidate split points
	if (stepX <1)
		stepX=1;

	// EDGE points on each side are excluded as split candidates.
	for(int k=EDGE-1 ; k <= N-EDGE-1; k=k+stepX)
	{
		double p1 = data[k];
		int X_left = k; // number of samples strictly below the split

		/* For each alpha accumulate the one-sided moments:
		 *   X_minus[q] = sum(|data[t] - p1|^alpha) / N  over t <  k
		 *   X_plus[q]  = sum((data[t] - p1)^alpha) / N  over t >= k
		 * data[] is sorted, so the left-side deviations are <= 0 and the
		 * right-side deviations are >= 0.  FIX: fabs (not abs) is used so
		 * the double overload is always taken -- the plain abs() here could
		 * resolve to the integer overload and truncate every deviation
		 * below 1.0 to zero.  The per-iteration scratch arrays of the
		 * original are gone; the deviations are computed inline.
		 */
		for ( int q = 0; q < sizeA; q++){
			X_minus[q] = 0;
			X_plus[q] = 0;

			double alpha = start_A + q * inc_A;
			for (int t = 0; t < N; t++){
				if (t < X_left)
					X_minus[q] += pow(fabs(data[t] - p1), alpha) / N;
				else
					X_plus[q] += pow(data[t] - p1, alpha)/ N;
			}
		}

		// Produce potential parameter sets as a function of Alpha, Sigma,
		// Kappa which will be used for the entropy minimization.
		for (int q = 0; q < sizeA; q++){

			double alpha = start_A + q * inc_A;
			Kappa[q][k] = pow( X_minus[q] / X_plus[q], 1 / (2*(alpha +1)));

			Sigma[q][k] = pow((alpha*  pow( X_minus[q]*X_plus[q], alpha/ (2*(alpha+1)))\
				* ( pow(X_plus[q], 1/(alpha+1)) + pow(X_minus[q], 1/(alpha+1)))), 1/alpha);

			double sgngam = 0;
			H[q][k] = log(Sigma[q][k]) + locallngamma(1/alpha, &sgngam) \
				+ log(Kappa[q][k] + 1/Kappa[q][k]) + 1/alpha - log(alpha);

			// keep record of the minimum entropy index; the first alpha and
			// the last two are excluded (matches the host-side search in main).
			if(q < sizeA -2 && q >=1 && H[q][k] < min_H ){
				min_H = H[q][k];
				idx_k = k;
				idx_q = q;
			}
		}
	}

	params[0] = data[idx_k]; // theta
	params[1] = Sigma[idx_q][idx_k]; // sigma
	params[2] = Kappa[idx_q][idx_k]; // kappa
	params[3] = start_A + idx_q * inc_A; // alpha

	for (int i = 0 ; i< sizeA ; i++){
		delete [] Kappa[i];
		delete [] Sigma[i];
		delete [] H[i];
	}

	delete [] Kappa;
	delete [] Sigma;
	delete [] H;
	delete [] X_minus;
	delete [] X_plus;
}
