/************************************************************************
 *
 * Compile for debugging: nvcc -arch=sm_13 -g -G localAEPD_fit.cu
 * Compile for performance: nvcc -arch=sm_20 -O localAEPD_fit.cu
 *
 *
 * Compile for extra performance with nvcc -use_fast_math -arch=sm_20 -O localAEPD_fit.cu
 * -use_fast_math slightly improves performance (40733 to 40333) so about 1% improvement
 *
 * This file is made to write and test GPU Kernels launching different
 * alpha values.
 *
 * At the peak there will be 80 x 500 x 512 threads launched.
 *************************************************************************/
#include <sys/resource.h>
#include <sys/time.h>
#include <unistd.h>

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>
#include <fstream>
#include <iostream>
#include <vector>
//#include "math_functions.h"
#define windowSize 500
#define threadsPerBlock 512
#define EDGE 5
#define ae_pi 3.1415926535897932384626433832795
using namespace std;

double locallngamma(double x, double* sgngam);
void localAEPD_Fit(double * x, int N, double * params);

/////////////////////////////////////////////////////////////////////////////
// Code imported from CPUTime.cpp:
// This code is used to check performance
//////////////////////////////////////////////////////////////////////////////

// Aliases for struct timeval (fields: tv_sec seconds, tv_usec microseconds).
typedef struct timeval TimeVal, *TimeValPtr;

/*
 * GetTime: store the calling process's user-mode CPU time in pTime.
 *
 * Returns 0 on success, or the non-zero error code from getrusage() on
 * failure, in which case pTime is left untouched.
 */
int GetTime (TimeVal &pTime)
{
  struct rusage selfUsage;

  const int rc = getrusage(RUSAGE_SELF, &selfUsage);
  if (rc == 0)
    pTime = selfUsage.ru_utime;   /* user time only; kernel time excluded */

  return rc;
}

/*
 * GetTimeDifference: return (pStop - pStart) in microseconds.
 *
 * Both arguments are user-CPU timestamps as filled in by GetTime().
 * A negative tv_usec difference is fine: it is absorbed by the seconds
 * term when the two are combined below.
 *
 * Fix: removed the stray debug print that used to run on every call —
 * printing inside a timing helper adds overhead and pollutes stdout.
 */
long GetTimeDifference (TimeVal &pStart, TimeVal &pStop)
{
  long usec, sec;
  sec=pStop.tv_sec-pStart.tv_sec;    /* difference in seconds */
  usec=pStop.tv_usec-pStart.tv_usec; /* difference in microseconds */

  return ( sec*1000000l +  usec); /* result in microseconds */
}

////////////////////////////////////////////////////////////////////////
// End of code to check time
////////////////////////////////////////////////////////////////////////




// Device constant memory holding the (sorted) data window, read-only and
// broadcast-friendly — kept as an alternative to global-memory dev_data so
// the two access paths can be benchmarked against each other.
// Fix: sized by the windowSize macro instead of a duplicated magic 500, so
// the array cannot silently drift out of sync with the rest of the file.
__constant__ double constantData[windowSize];

/***********************************************************************************************************
 * kernelCalculateXplusXminus will calculate X_minus[k] and X_plus[k] for each k = blockId,
 * and each alpha value, so it will calculate 500 x 80 = 40,000 values.
 *
 * The results will be stored back to global memories in dev_X_minus[] and dev_X_plus[]
 * each storing 40,000*8 bytes = 320,000 bytes
 *
 * Note that on the host side: X_minus[k] = (sum from 0 to k) X_minus[k]
 * So we need to launch another Kernel to do the final calculations of X_minus.
 *
 * The following code is from AEPD_Fit(), in our case, we change X_left = k to X_left = blockId
 *
 *	for(int k=EDGE-1 ; k <= N-EDGE-1; k=k+stepX){
 *		p1 = data[k];
 *		X_left = k;
 *		for( int q = 0; q< N; q++){
 *			if (q < X_left)
 *				x_minus_tmp[q] = abs(data[q] - p1);
 *			else
 *				x_plus_tmp[q - X_left] = data[q] - p1;
 *		}
 *
 *		for ( int q = 0; q < sizeA; q++){
 *			for (int t = 0; t < N; t++){
 *				if (t < X_left)
 *					X_minus[q] += pow(x_minus_tmp[t], alpha) / N;
 *				else
 *					X_plus[q] += pow(x_plus_tmp[t - X_left], alpha)/ N;
 *			}
 *		}
 *	}
 *
 *  EX: fix k; say k = 5
 *
 *  This gets to a set of 80 blocks
 *  these block needs to calculate x_minus_tmp and x_plus_tmp constant for k = 5
 *
 *  Then fix q, say q = 10
 *  Then we have a single block that needs to calculate pow(x_minus_tmp[], alpha)
 *
 *  Sum up that result and put it in dev_X_minus and dev_X_plus
 *
 *************************************************************************************************************/


__global__ void  kernelCalculateXplusXminus (double * dev_data, double *dev_alpha, double *dev_X_plus, double *dev_X_minus, double *dev_debugBig, double *dev_debug)
{

	/***********************************************************************
	 *  Computes the per-pair partial sums X_minus and X_plus for every
	 *  (data index k, alpha index q) combination — one block per pair.
	 *
	 *  Launch code for this kernel:
	 *
	 *  dim3 Grid(lengthDevData, lengthDevAlpha);    // (500, 80)
	 *  dim3 Block(16, 32);                          // 512 threads == threadsPerBlock
	 *
	 *  Unique block ID:
	 *  blockID = blockIdx.x*gridDim.y + blockIdx.y  // == blockIdx.x*80 + blockIdx.y
	 *
	 *  With gridDim.y == 80 that means:
	 *    dataIndex  = blockID / 80 == blockIdx.x    (k, indexes dev_data[0..499])
	 *    alphaIndex = blockID % 80 == blockIdx.y    (q, indexes dev_alpha[0..79])
	 *
	 *  Each block reduces its 512 per-thread terms to two scalars and writes
	 *  them to dev_X_plus[blockID] / dev_X_minus[blockID], i.e. a row-major
	 *  [k*80 + q] layout consumed by kernelCalculateParameters.
	 *
	 *  Note: the terms are NOT divided by windowSize here; the consumer
	 *  kernel performs that division once per value instead.
	 *
	 *  dev_debugBig and dev_debug are only touched by the commented-out
	 *  debugging section below.
	 ************************************************************************/

	int tid =threadIdx.x *blockDim.y + threadIdx.y;     // flat thread id, 0..511
	int blockID = blockIdx.x*gridDim.y + blockIdx.y;
	int dataIndex = blockID / 80;			// k — index of the pivot sample
	int alphaIndex = blockID % 80;			// q — index into the alpha grid
	int threadsInBlock = blockDim.y*blockDim.x;         // 512 (== threadsPerBlock)
	double alpha = dev_alpha[alphaIndex];
	double p1 = dev_data[dataIndex];                    // pivot; host wrapper sorts data ascending before upload

	__shared__ double temp_X_plus [threadsPerBlock];
	__shared__ double temp_X_minus [threadsPerBlock];

	/*************************************************************************
	 * CPU reference this kernel parallelizes (from AEPD_Fit()):
	 *
	 *	for(int k=EDGE-1 ; k <= N-EDGE-1; k=k+stepX){
	 *		p1 = data[k];
	 *		X_left = k;
	 *		for ( int q = 0; q < sizeA; q++){
	 *			for (int t = 0; t < N; t++){
	 *				if (t < X_left)
	 *					X_minus[q] += pow(abs(data[t]-p1), alpha) / N;
	 *				else
	 *					X_plus[q] += pow(abs(data[t]-p1), alpha)/ N;
	 *			}
	 *		}
	 *	}
	 *
	 *  Here the (k, q) loop pair becomes the grid and the t loop becomes the
	 *  512 threads of one block: thread t computes one |data[t]-p1|^alpha
	 *  term, and the tree reduction below sums the 512 terms.
	 **************************************************************************/

	// Each thread handles sample t == tid of the window:
	//   t <= dataIndex            -> contributes |data[t]-p1|^alpha to X_minus
	//   dataIndex < t < windowSize -> contributes (data[t]-p1)^alpha to X_plus
	//   t >= windowSize (threads 500..511) -> zero padding so the 512-wide
	//                                         reduction below stays correct.
	if (tid <= dataIndex){
		// abs() here resolves to the CUDA device double overload
		temp_X_minus[tid] = pow(abs(dev_data[tid]-p1), alpha);
//		temp_X_minus[tid] = pow(abs(dev_data[tid]-p1), alpha)/windowSize;
		temp_X_plus[tid] = 0;
	}
	else if (tid >dataIndex && tid < windowSize){
		temp_X_minus[tid] = 0;
		// no abs() on this side: dev_data is sorted ascending by the host
		// wrapper, so dev_data[tid] >= p1 whenever tid > dataIndex
		temp_X_plus[tid] = pow((dev_data[tid]-p1), alpha);
//		temp_X_plus[tid] = pow(abs(dev_data[tid]-p1), alpha)/windowSize;

	}
	else {
		temp_X_minus[tid] = 0;
		temp_X_plus[tid] = 0;
	}

	__syncthreads();   // all shared-memory writes must land before the reduction reads them

	/////////////////////////////////////////////////////////////////////////
	// Debugging for temp_X_minus (disabled)
	/////////////////////////////////////////////////////////////////////////
/*
	if (dataIndex == 400 && alphaIndex == 40){
		//dev_debug[tid] = temp_X_minus[tid];
		//dev_debug[0] = p1;
		//dev_debug[1] = alpha;
		dev_debug[tid] = 1;
	}
	__syncthreads();
*/
	/////////////////////////////////////////////////////////////////////////
	// end debugging
	//////////////////////////////////////////////////////////////////////////

	// Tree reduction: sum the 512 per-thread terms down into slot 0.
	int i = threadsInBlock/2;  // half the block width; halves every pass
	while (i != 0) {
		if (tid < i){
			temp_X_minus[tid] += temp_X_minus[tid + i];
			temp_X_plus[tid] += temp_X_plus[tid + i];
		}
		__syncthreads();   // outside the divergent if, so every thread reaches it
		i /= 2;
	}

	// Thread 0 publishes this block's sums into the [500 x 80] result arrays,
	// indexed by the block's unique ID (k*80 + q).
	if (tid == 0){
		dev_X_plus[blockID] = temp_X_plus[0];
		dev_X_minus[blockID] = temp_X_minus[0];
	}
}// end kernelCalculateXplusXminus

///////////////////////////////////////////////////////////////////////////////////////////////////////
// kernelcalculateParameters() is a CUDA kernel
// it calculates Kappa[k][q], Sigma[k][q], and H[k][q]
// where k and q make up the index to dev_X_minus and dev_X_plus
//
// This Kernel uses the values of dev_X_minus[] and dev_X_plus[] that were calculated by
// the kernelCalculateXplusXminus() Kernel.
//
// currently launches with Blocks(16,32) Grid(10,8)
// This is done to be able to debug as each block simulates one alpha
// because of the fact that we are trying to find a minimum over all H[k][q]
// the launch could be varied
//
///////////////////////////////////////////////////////////////////////////////////////////////////////

__global__ void kernelCalculateParameters (double *dev_X_minus, double *dev_X_plus, double *dev_H, double *dev_Kappa, double *dev_Sigma, double *dev_dataParam, double *dev_data, double *dev_alpha, double *dev_lnGammaofAlpha, double *dev_debugBig)
{
	///////////////////////////////////////////////////////////////////////////////////////////
	// Computes Kappa, Sigma and H for every (k, alpha) pair, then min-reduces
	// over k so each block emits the minimum-H entry for its alpha.
	//
	// Launch: Grid(10,8) x Block(16,32) -> 80 blocks of 512 threads;
	// one block per alpha value, one thread per data index k
	// (threads 500..511 are padding).
	//
	// Input layout (produced by kernelCalculateXplusXminus):
	//   dev_X_minus/dev_X_plus element [k*80 + q] belongs to data index k
	//   and alpha index q, so block q / thread k reads element [k*80 + blockID].
	//
	// Outputs, one element per alpha (indexed by blockID):
	//   dev_Kappa[q], dev_Sigma[q], dev_H[q]  - values at the minimizing k
	//   dev_dataParam[q]                      - dev_data[k_min] (theta)
	//
	// dev_debugBig is only referenced by commented-out debugging code.
	////////////////////////////////////////////////////////////////////////////////////////////

	int tid =threadIdx.x *blockDim.y + threadIdx.y;     // flat thread id == data index k, 0..511
	int blockID = blockIdx.x*gridDim.y + blockIdx.y;    // alpha index q, 0..79
	int threadsInBlock = blockDim.y*blockDim.x;         // 512 (must equal threadsPerBlock)

	double alpha = dev_alpha[blockID]; 						// alpha[q] for this block
	// NOTE(review): per the CPU formula below this should hold lngamma(1/alpha[q]),
	// not lngamma(alpha[q]) — confirm against the caller that fills dev_lnGammaofAlpha.
	double LnGamma = dev_lnGammaofAlpha[blockID];
	double X_minus;
	double X_plus;

	// Each thread k loads its own (X_minus, X_plus) pair.  The /windowSize
	// applies the "/N" that the producer kernel deferred (one division per
	// value instead of one per accumulated term).
	int offset = tid * 80;
	if (tid < windowSize){
		X_minus = dev_X_minus[offset + blockID]/windowSize;
		X_plus = dev_X_plus[offset + blockID]/windowSize;
	}
	else{
		// padding threads 500..511
		X_minus = 0;
		X_plus = 0;
	}

	////////////// IMPORTANT: threadsPerBlock == threadsInBlock ////////////////////////////////
	////////////// IMPORTANT: close to the limit of space for shared variables /////////////////

	__shared__ double tempKappa [threadsPerBlock];
	__shared__ double tempSigma [threadsPerBlock];
	__shared__ double tempH [threadsPerBlock];
	__shared__ int indexValue[threadsPerBlock];

	indexValue[tid] = tid;   // tracks which k survives the min-reduction

	/**********************************************************************************
	 * CPU reference (AEPD_Fit), restricted to k in [EDGE-1, windowSize-EDGE-1]:
	 *
	 *   Kappa[k] = pow( X_minus / X_plus, 1 / (2*(alpha +1)));
	 *   Sigma[k] = pow((alpha * pow( X_minus*X_plus, alpha/(2*(alpha+1)))
	 *              * ( pow(X_plus, 1/(alpha+1)) + pow(X_minus, 1/(alpha+1)))), 1/alpha);
	 *   H[k] = log(Sigma[k]) + lngamma(1/alpha) + log(Kappa[k] + 1/Kappa[k])
	 *          + 1/alpha - log(alpha);
	 ***********************************************************************************/

	if (tid >=EDGE-1 && tid <= windowSize - EDGE - 1){
		tempKappa[tid] = pow(X_minus / X_plus, 1/(2*(alpha+1)));
		tempSigma[tid] = pow((alpha*  pow( X_minus*X_plus, alpha/ (2*(alpha+1)))\
			* ( pow(X_plus, 1/(alpha+1)) + pow(X_minus, 1/(alpha+1)))), 1/alpha);

		// Kappa == 0 means pow() underflowed (or X_minus == 0); 1/Kappa would
		// divide by zero, so park this entry at a huge H so it can never win
		// the min-reduction below.
		if(tempKappa[tid] == 0){
			tempH[tid] = 1e10;
		}

		else{
			tempH[tid] = log(tempSigma[tid]) + LnGamma\
					+ log(tempKappa[tid] + 1/tempKappa[tid]) + 1/alpha - log(alpha);
		}
	}
	else{
		tempKappa[tid] = 0;
		tempSigma[tid] = 0;
		// BUG FIX: these out-of-range / padding slots used to be set to 0,
		// which the min-reduction could select whenever every real H was
		// positive, yielding Kappa = Sigma = 0 results.  Use the same large
		// sentinel as the Kappa==0 case so padding entries never win.
		tempH[tid] = 1e10;
	}

	// BUG FIX: barrier added.  Without it the reduction below reads
	// tempH[tid+i] (and friends) that other warps may not have written yet —
	// a shared-memory race producing nondeterministic results.
	__syncthreads();

	////////////////////////////////////////////////////////////////////////////////////////
	// Min-reduction over k: keep the smaller H (and its Kappa/Sigma/k index)
	// in slot tid; after log2(512) passes slot 0 holds this alpha's minimum.
	////////////////////////////////////////////////////////////////////////////////////////

	int i = threadsInBlock/2;  // half the block width; halves every pass
	while (i != 0) {
		if (tid < i){
			if (tempH[tid] > tempH[tid+i]){
				tempH[tid] = tempH[tid+i];
				tempKappa[tid] = tempKappa[tid+i];
				tempSigma[tid] = tempSigma[tid+i];
				indexValue[tid] = indexValue[tid+i];
			}
		}
		__syncthreads();   // outside the divergent if, so every thread reaches it
		i /= 2;
	}

	////////////////////////////////////////////////////////////////////////////////////////
	// Thread 0 publishes this block's (this alpha's) best entry.
	////////////////////////////////////////////////////////////////////////////////////////
	if (tid == 0){
		dev_Kappa[blockID] = tempKappa[0];
		dev_Sigma[blockID] = tempSigma[0];
		dev_dataParam[blockID] = dev_data[indexValue[0]];   // theta = data value at the minimizing k
		dev_H [blockID] = tempH[0];
	}

} // end kernelCalculateParameters


/*******************************************************************************************************************
 *
 * This is a wrapper function that will handle calls from C++ to Cuda
 * A function from another class will call this function and this function will
 * handle all the memory allocations, copies, and call the device kernels
 *
 * Specifically, this wrapper function will be used to implement and test the CUDA version of AEPD_Fit()
 * from AEPD_Dist.cpp
 *
 ****************************************************************************************************************/




/*
 * CUDAAEPD_DistWrapper: host-side driver for the CUDA AEPD fit.
 *
 * data                - [windowSize] samples; sorted in place (ascending).
 * param               - out: {theta, sigma, kappa, alpha} of the best fit.
 * dev_alpha           - device array [80] of alpha grid values (caller-owned).
 * dev_lnGammaofAlpha  - device array [80] of precomputed log-gamma values
 *                       matching dev_alpha (caller-owned).
 */
void CUDAAEPD_DistWrapper (double *data, double *param, double *dev_alpha, double *dev_lnGammaofAlpha){

	////////////////////////////////////////////////////////////////////////////
	// Sort the data ascending, as localAEPD_Fit() does.  The kernels rely on
	// this ordering (the X_plus side omits abs()).  std::sort replaces the
	// previous hand-rolled O(n^2) bubble sort.
	///////////////////////////////////////////////////////////////////////////
	std::sort(data, data + windowSize);

	/***************************************************************************
	 * kernelCalculateXplusXminus computes X_plus / X_minus for every
	 * (k, alpha) pair.
	 *
	 * dev_data is [500]; dev_alpha and dev_lnGammaofAlpha are [80] (allocated
	 * and filled by the caller); dev_X_plus / dev_X_minus are [500 x 80],
	 * laid out as element [k*80 + q].
	 ***************************************************************************/
    double *dev_data, *dev_X_plus, *dev_X_minus, *dev_debugBig, *dev_debug;
    double *debug1, *debug2, *debugBig;   // host staging, only used by the commented-out debug dumps

    int lengthDevData = windowSize;
    int lengthDevAlpha = 80;              // number of alpha grid points (see param[3] below)

    // allocate host memory (debug scratch)
    debug1 = (double *) malloc (lengthDevData*sizeof(double));
    debug2 = (double *) malloc (lengthDevData*sizeof(double));
    debugBig = (double *) malloc (lengthDevData*lengthDevAlpha*sizeof(double));

    // allocate device memory
    cudaMalloc( (void **)&dev_data, sizeof(double) * lengthDevData );
    cudaMalloc( (void **)&dev_X_plus, sizeof(double) * lengthDevAlpha * lengthDevData );
    cudaMalloc( (void **)&dev_X_minus, sizeof(double) * lengthDevAlpha * lengthDevData );
    cudaMalloc( (void **)&dev_debugBig, sizeof(double) * lengthDevAlpha * lengthDevData );
    cudaMalloc( (void **)&dev_debug, sizeof(double) * lengthDevAlpha );

    // copy the (now sorted) data to the device
    cudaMemcpy( dev_data, data, sizeof(double) * lengthDevData, cudaMemcpyHostToDevice );

    // One block per (k, alpha) pair: 500 x 80 blocks of 16x32 = 512 threads.
    dim3 Grid(lengthDevData, lengthDevAlpha);
    dim3 Block(16, 32);

    kernelCalculateXplusXminus <<<Grid, Block>>> (dev_data, dev_alpha, dev_X_plus, dev_X_minus, dev_debugBig, dev_debug);

    // Kernel launches fail silently; check for launch-configuration errors.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        std::cerr << "kernelCalculateXplusXminus launch failed: " << cudaGetErrorString(err) << std::endl;

	/////////////////////////////////////////////////////////////////////////
	// Debugging hooks for kernelCalculateXplusXminus: copy dev_debug /
	// dev_X_minus back via debug1 / debugBig and print — see source history.
    /////////////////////////////////////////////////////////////////////////

    ///////////////////////////////////////////////////////////////////////////////////////////////////////
    // kernelCalculateParameters computes Kappa[k][q], Sigma[k][q], H[k][q]
    // from dev_X_minus / dev_X_plus and min-reduces over k, emitting one
    // (Kappa, Sigma, H, theta) candidate per alpha.
    //
    // Launched with Grid(10,8) x Block(16,32): 80 blocks (one per alpha)
    // of 512 threads (one per data index k).
    ///////////////////////////////////////////////////////////////////////////////////////////////////////

	int KCP_devSize =  80; // one output slot per alpha value (== lengthDevAlpha)

    double *dev_Kappa, *dev_Sigma, *dev_dataParam, *dev_H;
    double *Kappa, *Sigma, *dataParam, *H;

    // host-side result arrays (one entry per alpha)
    Kappa = (double *) malloc(sizeof(double) * lengthDevAlpha);
    Sigma = (double *) malloc(sizeof(double) * lengthDevAlpha);
    dataParam = (double *) malloc(sizeof(double) * lengthDevAlpha);
    H = (double *) malloc(sizeof(double)*lengthDevAlpha);

    // device-side result arrays
    cudaMalloc( (void **)&dev_H, sizeof(double) * KCP_devSize);
    cudaMalloc( (void **)&dev_Kappa, sizeof(double) * KCP_devSize);
    cudaMalloc( (void **)&dev_Sigma, sizeof(double) * KCP_devSize);
    cudaMalloc( (void **)&dev_dataParam, sizeof(double) * KCP_devSize);

    dim3 BlockKCP(16,32);
    dim3 GridKCP(10,8);

    kernelCalculateParameters <<<GridKCP, BlockKCP>>> (dev_X_minus, dev_X_plus, dev_H, dev_Kappa, dev_Sigma, dev_dataParam, dev_data, dev_alpha, dev_lnGammaofAlpha, dev_debugBig);

    err = cudaGetLastError();
    if (err != cudaSuccess)
        std::cerr << "kernelCalculateParameters launch failed: " << cudaGetErrorString(err) << std::endl;

	// Blocking copies: these also synchronize the host with the kernels above.
	cudaMemcpy(Kappa , dev_Kappa, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );
	cudaMemcpy(Sigma , dev_Sigma, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );
	cudaMemcpy(dataParam , dev_dataParam, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );
	cudaMemcpy(H , dev_H, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );

	////////////////////////////////////////////////////////////////////////
	// Final min over alpha, on the host (H holds 80 per-alpha minimums).
	// Mirrors the CPU code's restriction
	//     q >= 1 && q < sizeA - 2
	// so alpha indices 0, 78 and 79 can never be selected.
	/////////////////////////////////////////////////////////////////////////
	double minH = 1e10;
	int indexAlpha = 0;
	for (int i = 1; i<lengthDevAlpha-2; i++){
		if (H[i] < minH){
			minH = H[i];
			indexAlpha = i;
		}
	}

	// Pack the fitted parameters.
	// NOTE(review): the 0.7 + 0.1*q formula hard-codes the alpha grid; it must
	// match how the caller filled dev_alpha — confirm against the caller.
	param[0] = dataParam[indexAlpha]; // theta
	param[1] = Sigma[indexAlpha]; // sigma
	param[2] = Kappa[indexAlpha]; // kappa
	param[3] = 0.7 + indexAlpha * 0.1; // alpha

	// Free device memory allocated here.
	// dev_alpha / dev_lnGammaofAlpha are caller-owned and deliberately NOT freed.
	cudaFree(dev_data);
	cudaFree(dev_X_plus);
	cudaFree(dev_X_minus);
	cudaFree(dev_debugBig);
	cudaFree(dev_debug);
	cudaFree(dev_Kappa);
	cudaFree(dev_Sigma);
	cudaFree(dev_dataParam);
	cudaFree(dev_H);

	// Free host memory.
	free(debug1);
	free(debug2);
	free(debugBig);
	free(H);
	free(Kappa);
	free(Sigma);
	free(dataParam);
    return;
}	// end CUDAAEPD_DistWrapper


/***********************************************************************************************************
 * kernelconstantCalculateXplusXminus will calculate X_minus[k] and X_plus[k] for each k = blockId,
 * and each alpha value, so it will calculate 500 x 80 = 40,000 values.
 *
 * The data is stored in constant memory since every thread needs to read the data only.
 *
 * The results will be stored back to global memories in dev_X_minus[] and dev_X_plus[]
 * each storing 40,000*8 bytes = 320,000 bytes
 *
 * Note that on the host side: X_minus[k] = (sum from 0 to k) X_minus[k]
 * So we need to launch another Kernel to do the final calculations of X_minus.
 *
 * The following code is from AEPD_Fit(), in our case, we change X_left = k to X_left = blockId
 *
 *	for(int k=EDGE-1 ; k <= N-EDGE-1; k=k+stepX){
 *		p1 = data[k];
 *		X_left = k;
 *		for( int q = 0; q< N; q++){
 *			if (q < X_left)
 *				x_minus_tmp[q] = abs(data[q] - p1);
 *			else
 *				x_plus_tmp[q - X_left] = data[q] - p1;
 *		}
 *
 *		for ( int q = 0; q < sizeA; q++){
 *			for (int t = 0; t < N; t++){
 *				if (t < X_left)
 *					X_minus[q] += pow(x_minus_tmp[t], alpha) / N;
 *				else
 *					X_plus[q] += pow(x_plus_tmp[t - X_left], alpha)/ N;
 *			}
 *		}
 *	}
 *
 *  EX: fix k; say k = 5
 *
 *  This gets to a set of 80 blocks
 *  these block needs to calculate x_minus_tmp and x_plus_tmp constant for k = 5
 *
 *  Then fix q, say q = 10
 *  Then we have a single block that needs to calculate pow(x_minus_tmp[], alpha)
 *
 *  Sum up that result and put it in dev_X_minus and dev_X_plus
 *
 *************************************************************************************************************/



__global__ void  kernelconstantCalculateXplusXminus (double *dev_alpha, double *dev_X_plus, double *dev_X_minus, double *dev_debugBig, double *dev_debug)
{

	/***********************************************************************
	 *  Constant-memory variant of kernelCalculateXplusXminus: identical
	 *  math, but the sorted data window is read from constantData[] instead
	 *  of a global-memory pointer.  constantData is presumably filled by a
	 *  host-side cudaMemcpyToSymbol before launch — not shown in this file;
	 *  TODO confirm against the caller.
	 *
	 *  Launch code for this kernel:
	 *
	 *  dim3 Grid(lengthDevData, lengthDevAlpha);    // (500, 80)
	 *  dim3 Block(16, 32);                          // 512 threads == threadsPerBlock
	 *
	 *  Unique block ID:
	 *  blockID = blockIdx.x*gridDim.y + blockIdx.y  // == blockIdx.x*80 + blockIdx.y
	 *
	 *  With gridDim.y == 80 that means:
	 *    dataIndex  = blockID / 80 == blockIdx.x    (k, indexes constantData[0..499])
	 *    alphaIndex = blockID % 80 == blockIdx.y    (q, indexes dev_alpha[0..79])
	 *
	 *  Results go to dev_X_plus[blockID] / dev_X_minus[blockID], i.e. the
	 *  row-major [k*80 + q] layout.  The terms are NOT divided by windowSize
	 *  here; the consumer kernel performs that division.
	 *
	 *  dev_debugBig and dev_debug are only touched by the commented-out
	 *  debugging section below.
	 ************************************************************************/

	int tid =threadIdx.x *blockDim.y + threadIdx.y;     // flat thread id, 0..511
	int blockID = blockIdx.x*gridDim.y + blockIdx.y;
	int dataIndex = blockID / 80;			// k — index of the pivot sample
	int alphaIndex = blockID % 80;			// q — index into the alpha grid
//	int threadsInBlock = blockDim.y*blockDim.x;
	double alpha = dev_alpha[alphaIndex];
	double p1 = constantData[dataIndex];                // pivot; data is sorted ascending by the host

	__shared__ double temp_X_plus [threadsPerBlock];
	__shared__ double temp_X_minus [threadsPerBlock];

	/*************************************************************************
	 * CPU reference this kernel parallelizes (from AEPD_Fit()):
	 *
	 *	for(int k=EDGE-1 ; k <= N-EDGE-1; k=k+stepX){
	 *		p1 = data[k];
	 *		X_left = k;
	 *		for ( int q = 0; q < sizeA; q++){
	 *			for (int t = 0; t < N; t++){
	 *				if (t < X_left)
	 *					X_minus[q] += pow(abs(data[t]-p1), alpha) / N;
	 *				else
	 *					X_plus[q] += pow(abs(data[t]-p1), alpha)/ N;
	 *			}
	 *		}
	 *	}
	 *
	 *  The (k, q) loop pair becomes the grid and the t loop becomes the 512
	 *  threads of one block: thread t computes one |data[t]-p1|^alpha term
	 *  and the tree reduction below sums the 512 terms.
	 **************************************************************************/

	// Each thread handles sample t == tid of the window, reading from
	// constant memory:
	//   t <= dataIndex            -> contributes to X_minus
	//   dataIndex < t < windowSize -> contributes to X_plus
	//   t >= windowSize (threads 500..511) -> zero padding for the reduction.
	if (tid <= dataIndex){
		// -1*(...) stands in for abs(): with the data sorted ascending,
		// constantData[tid] <= p1 here, so the difference is <= 0.
		temp_X_minus[tid] = pow(-1*(constantData[tid]-p1), alpha);
//		temp_X_minus[tid] = pow(abs(dev_data[tid]-p1), alpha);
//		temp_X_minus[tid] = pow(abs(dev_data[tid]-p1), alpha)/windowSize;
		temp_X_plus[tid] = 0;
	}
	else if (tid >dataIndex && tid < windowSize){
		temp_X_minus[tid] = 0;
		// no abs() needed: sorted data means constantData[tid] >= p1 here
		temp_X_plus[tid] = pow((constantData[tid]-p1), alpha);
//		temp_X_plus[tid] = pow(abs(dev_data[tid]-p1), alpha);
//		temp_X_plus[tid] = pow(abs(dev_data[tid]-p1), alpha)/windowSize;

	}
	else {
		temp_X_minus[tid] = 0;
		temp_X_plus[tid] = 0;
	}

	__syncthreads();   // all shared-memory writes must land before the reduction reads them

	/////////////////////////////////////////////////////////////////////////
	// Debugging for temp_X_minus (disabled; noted as non-working)
	/////////////////////////////////////////////////////////////////////////
/*
	if (dataIndex == 400 && alphaIndex == 40){
		//dev_debug[tid] = temp_X_minus[tid];
		//dev_debug[0] = p1;
		//dev_debug[1] = alpha;
		dev_debug[tid] = 1;
	}
	__syncthreads();
*/
	/////////////////////////////////////////////////////////////////////////
	// end debugging
	//////////////////////////////////////////////////////////////////////////

	// Tree reduction: sum the 512 per-thread terms down into slot 0.
	// Uses the threadsPerBlock macro directly (the launch uses 16x32 = 512
	// threads, matching the macro).
	int i = threadsPerBlock/2;
	while (i != 0) {
		if (tid < i){
			temp_X_minus[tid] += temp_X_minus[tid + i];
			temp_X_plus[tid] += temp_X_plus[tid + i];
		}
		__syncthreads();   // outside the divergent if, so every thread reaches it
		i /= 2;
	}

	// Thread 0 publishes this block's sums into the [500 x 80] result arrays,
	// indexed by the block's unique ID (k*80 + q).
	if (tid == 0){
		dev_X_plus[blockID] = temp_X_plus[0];
		dev_X_minus[blockID] = temp_X_minus[0];
	}
}// end kernelconstantCalculateXplusXminus


///////////////////////////////////////////////////////////////////////////////////////////////////////
// kernelConstantCalculateParameters() is a CUDA kernel
// it calculates Kappa[k][q], Sigma[k][q], and H[k][q]
// where k and q make up the index to dev_X_minus and dev_X_plus
//
// This Kernel uses the values of dev_X_minus[] and dev_X_plus[] that were calculated by
// the kernelCalculateXplusXminus() Kernel.
//
// currently launches with Blocks(16,32) Grid(10,8)
// This is done to be able to debug as each block simulates one alpha
// because of the fact that we are trying to find a minimum over all H[k][q]
// the launch could be varied
//
///////////////////////////////////////////////////////////////////////////////////////////////////////

__global__ void kernelConstantCalculateParameters (double *dev_X_minus, double *dev_X_plus, double *dev_H, double *dev_Kappa, double *dev_Sigma, double *dev_dataParam, double *dev_alpha, double *dev_lnGammaofAlpha, double *dev_debugBig)
{
	int tid =threadIdx.x *blockDim.y + threadIdx.y;		// flat thread id inside the block, 0..511 for a (16,32) block
	int blockID = blockIdx.x*gridDim.y + blockIdx.y;	// flat block id, 0..79 for a (10,8) grid; one block per alpha

	///////////////////////////////////////////////////////////////////////////////////////////
	// There are 80 blocks of 512 threads launched with this Kernel.
	//
	// dev_X_plus[] and dev_X_minus[] are 500 x 80 long, laid out so that
	// dev_X_plus[k*80 .. k*80+79] holds the X_plus values generated at data
	// index k for the 80 alpha values (alpha = start_A + q * inc_A):
	//
	//   Ex: k = 0, dev_X_plus[0-79]   is calculated with alpha[0-79]
	//       k = 1, dev_X_plus[80-159] is calculated with alpha[0-79]
	//
	// Because Kappa, Sigma and H depend only on X_plus and X_minus, each
	// thread computes one (data index, alpha) triple, then the block reduces
	// over its 512 threads (500 real entries + 12 dummies) to find the
	// minimum-H parameter set for its alpha.
	//
	// Shared variables are used exclusively for that minimum reduction.
	////////////////////////////////////////////////////////////////////////////////////////////

	double alpha = dev_alpha[blockID]; 							// map alpha[i] to block[i]
	double LnGamma = dev_lnGammaofAlpha[blockID];				// map log-gamma(1/alpha[i]) to block[i]
	double X_minus;
	double X_plus;

	///////////////////////////////////////////////////////////////////////////////////////////
	// Every thread loads the X pair for its data index (k = tid).
	//
	// Mapping scheme:  X_plus = dev_X_plus[tid*80 + blockID]
	//   blockID = 0, tid = 0 -> dev_X_plus[0]
	//   blockID = 0, tid = 1 -> dev_X_plus[80]
	//   blockID = 1, tid = 0 -> dev_X_plus[1]
	//
	// The division by windowSize converts the raw sums produced by the
	// previous kernel into the averages used by the parameter formulas.
	//////////////////////////////////////////////////////////////////////////////////////////

	if (tid < windowSize){
		X_minus = dev_X_minus[tid * 80 + blockID]/windowSize;
		X_plus = dev_X_plus[tid * 80 + blockID]/windowSize;
	}
	else{
		X_minus = 0;
		X_plus = 0;
	}

	////////////// IMPORTANT: requires blockDim.x*blockDim.y == threadsPerBlock ////////////////
	////////////// IMPORTANT: close to the limit of space for shared variables /////////////////

	__shared__ double tempKappa [threadsPerBlock];
	__shared__ double tempSigma [threadsPerBlock];
	__shared__ double tempH [threadsPerBlock];
	__shared__ int indexValue[threadsPerBlock];

	indexValue[tid] = tid;		// remember which data index produced each H entry

	/**********************************************************************************
	 * From AEPD_Fit()
	 *
	 * for (int k = EDGE-1 ; k < windowSize - EDGE - 1; k++){
	 *		Kappa[k] = pow( X_minus / X_plus, 1 / (2*(alpha +1)));
	 *		Sigma[k] = pow((alpha*  pow( X_minus*X_plus, alpha/ (2*(alpha+1)))\
	 *         * ( pow(X_plus, 1/(alpha+1)) + pow(X_minus, 1/(alpha+1)))), 1/alpha);
	 *		H[k] = log(Sigma[k]) + locallngamma(1/alpha, &sgngam) \
	 *	  		+ log(Kappa[k] + 1/Kappa[k]) + 1/alpha - log(alpha);
	 * }
 	 ***********************************************************************************/

	if (tid >=EDGE-1 && tid <= windowSize - EDGE - 1){
		tempKappa[tid] = pow(X_minus / X_plus, 1/(2*(alpha+1)));
		tempSigma[tid] = pow((alpha*  pow( X_minus*X_plus, alpha/ (2*(alpha+1)))\
			* ( pow(X_plus, 1/(alpha+1)) + pow(X_minus, 1/(alpha+1)))), 1/alpha);

		// Kappa == 0 stands in for a NaN/overflow check (older nvcc did not
		// support an explicit NaN test); give the entry a sentinel H so it can
		// never win the minimum reduction below.
		if(tempKappa[tid] == 0){
			tempH[tid] = 1e10;
		}

		else{
			tempH[tid] = log(tempSigma[tid]) + LnGamma\
					+ log(tempKappa[tid] + 1/tempKappa[tid]) + 1/alpha - log(alpha);
		}
	}
	else{
		tempKappa[tid] = 0;
		tempSigma[tid] = 0;
		// BUG FIX: this was previously 0.  A zero dummy entry is smaller than
		// many legitimate H values, so an edge/padding thread could win the
		// minimum reduction and publish Kappa = Sigma = 0.  Use the same
		// "invalid" sentinel as the Kappa == 0 case so dummies never win.
		tempH[tid] = 1e10;
	}

	// BUG FIX: barrier required here -- the reduction below reads shared
	// entries written by OTHER threads of the block.
	__syncthreads();

	////////////////////////////////////////////////////////////////////////////////////////
	// Tree reduction to the minimum H; the matching Kappa, Sigma and data
	// index are carried along with the winning H value.
	// (NOTE(review): a NaN H compares false and would survive at a low index;
	// assumed not to occur after the Kappa == 0 guard -- confirm.)
	////////////////////////////////////////////////////////////////////////////////////////

	int i = threadsPerBlock/2;  // half of the active span at each reduction step
	while (i != 0) {
		if (tid < i){
			if (tempH[tid] > tempH[tid+i]){
				tempH[tid] = tempH[tid+i];
				tempKappa[tid] = tempKappa[tid+i];
				tempSigma[tid] = tempSigma[tid+i];
				indexValue[tid] = indexValue[tid+i];
			}
		}
		__syncthreads();	// outside the divergent if: every thread reaches it
		i /= 2;
	}

	////////////////////////////////////////////////////////////////////////////////////////
	// Thread 0 publishes this block's (this alpha's) best parameter set.
	// dev_dataParam receives the data value (theta) at the winning index.
	////////////////////////////////////////////////////////////////////////////////////////
	if (tid == 0){
		dev_Kappa[blockID] = tempKappa[0];
		dev_Sigma[blockID] = tempSigma[0];
		dev_dataParam[blockID] = constantData[indexValue[0]];
		dev_H [blockID] = tempH[0];
	}

} // end kernelConstantCalculateParameters


/////////////////////////////////////////////////////////////////////////////////////
// kernelDebugConstant looks at how constant memory works.
////////////////////////////////////////////////////////////////////////////////////

__global__ void kernelDebugConstant(double *dev_debugConstant)
{
	// Mirror the read-only constantData[] array into global memory so the
	// host can copy it back and verify the cudaMemcpyToSymbol() upload.
	const int idx = threadIdx.x;
	if (idx >= windowSize)
		return;
	dev_debugConstant[idx] = constantData[idx];
} // end kernelDebugConstant


/*******************************************************************************************************************
 *
 * This is a wrapper function that will handle calls from C++ to Cuda
 * A function from another class will call this function and this function will
 * handle all the memory allocations, copies, and call the device kernels
 *
 * This wrapper uses constant read-only memory instead of global memory to improve performance.
 *
 ****************************************************************************************************************/

void constantAEPD_DistWrapper (double *data, double *param, double *dev_alpha, double *dev_lnGammaofAlpha){

	////////////////////////////////////////////////////////////////////////////
	// Sort the data, as localAEPD_Fit() does.
	//
	// BUG FIX: sort a local copy instead of sorting the caller's array in
	// place.  localAEPD_Fit() copies into a vector before sorting, so this
	// keeps the two implementations side-effect compatible for callers that
	// reuse data[] afterwards.
	// (std::sort would be preferable, but <algorithm> is not included here.)
	///////////////////////////////////////////////////////////////////////////

	vector<double> sortedData(data, data + windowSize);

	double tmp;
	for(int i = 0; i < windowSize-1 ;i++){
		for (int j = i + 1;j < windowSize;j++){
			if (sortedData[i] > sortedData[j]){
				tmp  = sortedData[i];
				sortedData[i] = sortedData[j];
				sortedData[j] = tmp;
			}
		}
	}

	// Copy the sorted data to constant memory; both kernels read it there.
	cudaMemcpyToSymbol(constantData, &sortedData[0], sizeof(double) * windowSize);

	/////////////////////////////////////////////////////////////////////////////////////
	// kernelDebugConstant looks at how constant memory works.
	////////////////////////////////////////////////////////////////////////////////////
/*    double *dev_debugConstant;
    double *debugConstant;
    debugConstant = (double *) malloc (windowSize*sizeof(double));
    cudaMalloc( (void **)&dev_debugConstant, sizeof(double) * windowSize );
    kernelDebugConstant <<<1, 512>>> (dev_debugConstant);

    cudaMemcpy(debugConstant , dev_debugConstant, sizeof(double) * windowSize, cudaMemcpyDeviceToHost );

    for (int i = 0; i < windowSize; i++){
    	cout << "debugConstant[" << i << "] = " << debugConstant[i];
    }
*/
    /////////////////////////////////////////////////////////////////////////////////////
    // end kernelDebugConstant
    ////////////////////////////////////////////////////////////////////////////////////


	/***************************************************************************
	 *
	 * kernelconstantCalculateXplusXminus is a CUDA kernel to calculate
	 * X_plus / X_minus for every (data index, alpha) pair.  It is based on
	 * kernelCalculateXplusXminus, just with constant memory instead of
	 * global memory for the sorted data.
	 *
	 * dev_X_plus and dev_X_minus will be [500 x 80]:
	 * row k holds the 80 per-alpha sums for data index k.
	 *
	 ***************************************************************************/

    double *dev_X_plus, *dev_X_minus, *dev_debugBig, *dev_debug;
    double *debug1, *debug2, *debugBig;		// host scratch; used only by the commented debug blocks below

    int lengthDevData = windowSize;
    int lengthDevAlpha = 80;	// must match the alpha grid built in main()

    // allocate host memory
    debug1 = (double *) malloc (lengthDevData*sizeof(double));
    debug2 = (double *) malloc (lengthDevData*sizeof(double));
    debugBig = (double *) malloc (lengthDevData*lengthDevAlpha*sizeof(double));

    // allocate device memory
    cudaMalloc( (void **)&dev_X_plus, sizeof(double) * lengthDevAlpha * lengthDevData );
    cudaMalloc( (void **)&dev_X_minus, sizeof(double) * lengthDevAlpha * lengthDevData );
    cudaMalloc( (void **)&dev_debugBig, sizeof(double) * lengthDevAlpha * lengthDevData );
    // BUG FIX: dev_debug was sized lengthDevAlpha (80), but the commented-out
    // debug code reads/writes up to lengthDevData (500) entries; size it for
    // the larger of the two so re-enabling that code cannot overflow.
    cudaMalloc( (void **)&dev_debug, sizeof(double) * lengthDevData );

    // One block per (data index, alpha) pair; 16x32 = 512 threads per block.
    dim3 Grid(lengthDevData, lengthDevAlpha);
    dim3 Block(16, 32);

    kernelconstantCalculateXplusXminus <<<Grid, Block>>> (dev_alpha, dev_X_plus, dev_X_minus, dev_debugBig, dev_debug);

	/////////////////////////////////////////////////////////////////////////
	// Debugging for kernelCalculateXplusXminus
	/////////////////////////////////////////////////////////////////////////
/*
    cudaMemcpy(debug1 , dev_debug, sizeof(double) * lengthDevData, cudaMemcpyDeviceToHost );
    cout << "debug threads for k = 250, q = 40" << endl;
    for (int i = 0; i < lengthDevData; i++){
    	cout << "device tmp_X_minus[" << i << "] = " << debug1[i] << endl;
    }

    cudaMemcpy(debugBig , dev_X_minus, sizeof(double) * lengthDevData * lengthDevAlpha, cudaMemcpyDeviceToHost );
    cout << "For k = 275" << endl;
    for (int i = 0; i < lengthDevAlpha; i++){
    	int offset = 275*80;
    	cout << "device X_minus[" << i << "] = " << debugBig[offset+i] << endl;
    }
*/
	/////////////////////////////////////////////////////////////////////////
	// End kernelCalculateXplusXminus
    /////////////////////////////////////////////////////////////////////////

    ///////////////////////////////////////////////////////////////////////////////////////////////////////
    // kernelConstantCalculateParameters() calculates Kappa[k][q], Sigma[k][q]
    // and H[k][q], where k and q index dev_X_minus / dev_X_plus, then reduces
    // each block to the minimum-H parameter set for its alpha.
    //
    // Launched with Block(16,32) Grid(10,8): one block per alpha, so each of
    // the 80 blocks reduces 512 threads to one (Kappa, Sigma, H, theta) set.
    ///////////////////////////////////////////////////////////////////////////////////////////////////////

	int KCP_devSize =  80; // one output slot per alpha; needs to be written dynamically later

    double *dev_Kappa, *dev_Sigma, *dev_dataParam, *dev_H;
    double *Kappa, *Sigma, *dataParam, *H;

    // Allocate host memory for the per-alpha minima
    Kappa = (double *) malloc(sizeof(double) * lengthDevAlpha);
    Sigma = (double *) malloc(sizeof(double) * lengthDevAlpha);
    dataParam = (double *) malloc(sizeof(double) * lengthDevAlpha);
    H = (double *) malloc(sizeof(double)*lengthDevAlpha);

    // Allocate device memory for the per-alpha minima
    cudaMalloc( (void **)&dev_H, sizeof(double) * KCP_devSize);
    cudaMalloc( (void **)&dev_Kappa, sizeof(double) * KCP_devSize);
    cudaMalloc( (void **)&dev_Sigma, sizeof(double) * KCP_devSize);
    cudaMalloc( (void **)&dev_dataParam, sizeof(double) * KCP_devSize);

    dim3 BlockKCP(16,32);
    dim3 GridKCP(10,8);

    kernelConstantCalculateParameters <<<GridKCP, BlockKCP>>> (dev_X_minus, dev_X_plus, dev_H, dev_Kappa, dev_Sigma, dev_dataParam, dev_alpha, dev_lnGammaofAlpha, dev_debugBig);
	// Blocking copies: they also synchronize with the kernel above.
	cudaMemcpy(Kappa , dev_Kappa, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );
	cudaMemcpy(Sigma , dev_Sigma, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );
	cudaMemcpy(dataParam , dev_dataParam, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );
	cudaMemcpy(H , dev_H, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );

	////////////////////////////////////////////////////////////////////////
	// Final Min (host side): H holds one minimum per alpha; pick the overall
	// minimum.  Matching the CPU version's condition
	//     q < sizeA-2 && q >= 1
	// indices 0, 78 and 79 are excluded so the optimum is interior to the
	// alpha grid.
	/////////////////////////////////////////////////////////////////////////

	double minH = 1e10;
	int indexAlpha = 0;
	for (int i = 1; i<lengthDevAlpha-2; i++){
		if (H[i] < minH){
			minH = H[i];
			indexAlpha = i;
		}
	}

	param[0] = dataParam[indexAlpha]; // theta
	param[1] = Sigma[indexAlpha]; // sigma
	param[2] = Kappa[indexAlpha]; // kappa
	param[3] = 0.7 + indexAlpha * 0.1; // alpha -- must match the grid built in main()

	// Free memories on host and device
	cudaFree(dev_X_plus);
	cudaFree(dev_X_minus);
	cudaFree(dev_debugBig);
	cudaFree(dev_debug);
	cudaFree(dev_Kappa);
	cudaFree(dev_Sigma);
	cudaFree(dev_dataParam);
	cudaFree(dev_H);

	free(debug1);
	free(debug2);
	free(debugBig);
	free(H);
	free(Kappa);
	free(Sigma);
	free(dataParam);
    return;
}	// end constantAEPD_DistWrapper









int main(void){

	  TimeVal startTime, endTime;	// used only by the commented-out timing runs below
	  long totalTime;
	  ///////////////////////////////////////////////////////////////////////////////
	  // alpha and log-gamma(1/alpha) values are constant, declare and copy them here.
	  // The grid is alpha[i] = 0.7 + i*0.1, i = 0..79; the wrappers assume this
	  // same grid when reconstructing the fitted alpha from an index.
	  ///////////////////////////////////////////////////////////////////////////////
	  int lengthDevAlpha = 80;
	  double *alpha, *logGammaofAlpha;
	  alpha = (double *) malloc (lengthDevAlpha*sizeof(double));
	  logGammaofAlpha = (double *) malloc (lengthDevAlpha*sizeof(double));

	  double sgngam = 0;

	  for (int i = 0; i < lengthDevAlpha; i++){
		  alpha[i] = 0.7 + i*0.1;
		  logGammaofAlpha[i] = locallngamma(1/alpha[i], &sgngam);
	  }
	  double *dev_alpha, *dev_lnGammaofAlpha;

	  cudaMalloc( (void **)&dev_alpha, sizeof(double) * lengthDevAlpha );
	  cudaMalloc( (void **)&dev_lnGammaofAlpha, sizeof(double) * lengthDevAlpha );

	  cudaMemcpy( dev_alpha, alpha, sizeof(double) * lengthDevAlpha, cudaMemcpyHostToDevice );
	  cudaMemcpy( dev_lnGammaofAlpha, logGammaofAlpha, sizeof(double)*lengthDevAlpha, cudaMemcpyHostToDevice );

	 //////////////////////////////////////////////////////////////////////
	 // Read in the data array (windowSize points) from gbpusd.txt and then
	 // pass it to the CUDA wrapper.
	 /////////////////////////////////////////////////////////////////////
		double *dataArray, *param;
		dataArray = (double *) malloc (windowSize*sizeof(double));
		param = (double *) malloc (4 * sizeof(double));
		fstream myfile("gbpusd.txt", ios::in);
		if (!myfile.is_open()){
			cout << "Error opening gbpusd text file " << endl;
			// BUG FIX: release everything allocated above before bailing out.
			free(dataArray);
			free(param);
			free(alpha);
			free(logGammaofAlpha);
			cudaFree(dev_alpha);
			cudaFree(dev_lnGammaofAlpha);
			return 1;
		}
		double d = 0;
		int index= 0;
		while (index < windowSize){
			// BUG FIX: check the extraction -- previously a short or garbled
			// file silently filled the tail of dataArray with the last value.
			if (!(myfile >> d)){
				cout << "Error: gbpusd.txt holds fewer than " << windowSize << " values" << endl;
				free(dataArray);
				free(param);
				free(alpha);
				free(logGammaofAlpha);
				cudaFree(dev_alpha);
				cudaFree(dev_lnGammaofAlpha);
				return 1;
			}
			dataArray[index++] = d;
		}
		// Finished reading and assigning data

		///////////////////////////////////////////////////////////////////////
		// Call Cuda version of AEPDFit();
		///////////////////////////////////////////////////////////////////////

		CUDAAEPD_DistWrapper (dataArray, param, dev_alpha, dev_lnGammaofAlpha);

		cout<< "Parameters from device Cuda AEPD_Fit" << endl;
		for (int i = 0; i<4; i++){
			cout << "Param[" << i << "] = " << param[i]<<endl;
		}


		/////////////////////////////////////////////////////////////////////////
		// Call C++ Version of AEPD_fit()
		/////////////////////////////////////////////////////////////////////////

		localAEPD_Fit(dataArray, windowSize, param);

		cout<< "Parameters from localAEPD_Fit" << endl;
		for (int i = 0; i<4; i++){
			cout << "Param[" << i << "] = " << param[i]<<endl;
		}


		////////////////////////////////////////////////////////////////////////
		// Call Cuda version of AEPD_fit() using constant instead of global
		// memories
		////////////////////////////////////////////////////////////////////////

/*		constantAEPD_DistWrapper (dataArray, param, dev_alpha, dev_lnGammaofAlpha);

		cout<< "Parameters from device Cuda AEPD_Fit using constant memory." << endl;
		for (int i = 0; i<4; i++){
			cout << "Param[" << i << "] = " << param[i]<<endl;
		}
*/

		////////////////////////////////////////////////////////////////////////
		// Code to test performance: first use local C++ clock methods
		//////////////////////////////////////////////////////////////////////

		// CUDA performance, global memory, local C++ clock
/*		assert(GetTime(startTime)==0);
		for (int i = 0; i < 100; i++){
			CUDAAEPD_DistWrapper (dataArray, param, dev_alpha, dev_lnGammaofAlpha);
		}

	    assert(GetTime(endTime)==0);
	    totalTime=GetTimeDifference(startTime, endTime);
	    cout << "Total time = " << totalTime << " microseconds." << endl;
	    cout << "Average time of global memory Cuda : " << totalTime/100 << " microseconds." << endl;

	    // Cuda performance, constant memory, local C++ clock
		assert(GetTime(startTime)==0);
		for (int i = 0; i < 100; i++){
			constantAEPD_DistWrapper(dataArray, param, dev_alpha, dev_lnGammaofAlpha);
		}

	    assert(GetTime(endTime)==0);
	    totalTime=GetTimeDifference(startTime, endTime);
	    cout << "Total time = " << totalTime << " microseconds." << endl;
	    cout << "Average time of constant memory CUDA : " << totalTime/100 << " microseconds." << endl;

	    // C++ code performance
		assert(GetTime(startTime)==0);
		for (int i = 0; i < 100; i++){
			localAEPD_Fit(dataArray, windowSize, param);
		}

	    assert(GetTime(endTime)==0);
	    totalTime=GetTimeDifference(startTime, endTime);
	    cout << "Total time = " << totalTime << " microseconds." << endl;
	    cout << "Average time of C++ : " << totalTime/100 << " microseconds." << endl;
*/
	    myfile.close();
		free (dataArray);
		free (param);
		// BUG FIX: these four allocations were leaked before.
		free (alpha);
		free (logGammaofAlpha);
		cudaFree (dev_alpha);
		cudaFree (dev_lnGammaofAlpha);
		return 0;

}





/*************************************************************************************
 * Log- Gamma function adapted from Alglib project
 *
 * Copyright and comments from the original:
 ***************************************************************************************/

double locallngamma(double x, double* sgngam)
{
	// Natural logarithm of |Gamma(x)|.  The sign of Gamma(x) is written to
	// *sgngam (+1 or -1).  Adapted from the ALGLIB project; the rational
	// approximations and their Horner evaluation order are kept exactly.
	const double logpi = 1.14472988584940017414;	// log(pi)
	const double ls2pi = 0.91893853320467274178;	// log(sqrt(2*pi))

	*sgngam = 1;

	if( x<-34.0 )
	{
		// Large negative argument: use the reflection formula on -x.
		double mirrored = -x;
		double ignored;
		double lgMirror = locallngamma(mirrored, &ignored);
		double ip = floor(mirrored);		// integer part of -x
		int parity = floor(ip+0.5);
		// The sign of Gamma alternates between consecutive negative integers.
		if( parity%2==0 )
		{
			*sgngam = -1;
		}
		else
		{
			*sgngam = 1;
		}
		double frac = mirrored-ip;			// fractional part, folded into [0, 0.5]
		if( frac>0.5 )
		{
			ip = ip+1;
			frac = ip-mirrored;
		}
		frac = mirrored*sin(3.1415926535897932384626433832795*frac);	// pi (ae_pi) inlined
		return logpi-log(frac)-lgMirror;
	}

	if( x<13 )
	{
		// Shift the argument with Gamma(x+1) = x*Gamma(x) until u = x+shift
		// lands in [2,3), accumulating the product of the shifts in prod.
		double prod = 1;
		double shift = 0;
		double u = x;
		while(u>=3)
		{
			shift = shift-1;
			u = x+shift;
			prod = prod*u;
		}
		while(u<2)
		{
			prod = prod/u;
			shift = shift+1;
			u = x+shift;
		}
		if( prod<0 )
		{
			*sgngam = -1;
			prod = -prod;
		}
		else
		{
			*sgngam = 1;
		}
		if( u==2 )
			return log(prod);
		// Rational approximation B(t)/C(t) for t = x+shift-2 in [0,1),
		// numerator and denominator evaluated in Horner form.
		shift = shift-2;
		x = x+shift;
		double num = -1378.25152569120859100;
		num = -38801.6315134637840924+x*num;
		num = -331612.992738871184744+x*num;
		num = -1162370.97492762307383+x*num;
		num = -1721737.00820839662146+x*num;
		num = -853555.664245765465627+x*num;
		double den = 1;
		den = -351.815701436523470549+x*den;
		den = -17064.2106651881159223+x*den;
		den = -220528.590553854454839+x*den;
		den = -1139334.44367982507207+x*den;
		den = -2532523.07177582951285+x*den;
		den = -2018891.41433532773231+x*den;
		return log(prod)+x*num/den;
	}

	// x >= 13: Stirling's expansion.
	double q = (x-0.5)*log(x)-x+ls2pi;
	if( x>100000000 )
		return q;			// correction terms are negligible at this size
	double p = 1/(x*x);
	if( x>=1000.0 )
	{
		// Short three-term correction suffices for very large x.
		q = q+((7.9365079365079365079365*0.0001*p-2.7777777777777777777778*0.001)*p+0.0833333333333333333333)/x;
	}
	else
	{
		// Five-term asymptotic series in 1/x^2, Horner form.
		double series = 8.11614167470508450300*0.0001;
		series = -5.95061904284301438324*0.0001+p*series;
		series = 7.93650340457716943945*0.0001+p*series;
		series = -2.77777777730099687205*0.001+p*series;
		series = 8.33333333333331927722*0.01+p*series;
		q = q+series/x;
	}
	return q;

} // end locallngamma

//////////////////////////////////////////////////////////////////////////////////////////////////////
//
// localAEPD_fit
// This function mimics AEPD_Fit(), has no external calls, this function will iterate over all alpha values
//
//////////////////////////////////////////////////////////////////////////////////////////////////////

void localAEPD_Fit(double * x, int N, double * params){
	//
	// CPU reference implementation of AEPD_Fit().
	//
	// Grid-searches every candidate location data[k] (k stepped so that about
	// NUM_STEPS locations are visited) against every alpha on the grid
	// alpha = 0.7 + q*0.1, q = 0..79; computes (Kappa, Sigma) and the entropy
	// H for each pair and returns the minimum-entropy parameter set:
	//   params[0] = theta, params[1] = sigma, params[2] = kappa, params[3] = alpha
	// x[] itself is not modified; the sort below works on a copy.
	//

	// Take the array in x[0] - x[N-1] and put that into the vector data.
	vector<double> data(x, x+N);

	/* Sort the copied data (simple O(N^2) exchange sort; <algorithm> is not
	 * included in this file, otherwise std::sort would be used). */
	double tmp;
	for(int i = 0;i < N-1 ;i++){
		for (int j = i + 1;j < N;j++){
			if (data[i] > data[j]){
				tmp  = data[i];
				data[i] = data[j];
				data[j] = tmp;
			}
		}
	}

	double INCREMENT_ALPHA = 0.1;
	int sizeALPHA = 80;
	int NUM_STEPS = 500;

	double inc_A = INCREMENT_ALPHA;	// Iteration Increment of Alpha Parameter
	double start_A = 0.7;			// Value to Start Alpha Iteration
	int sizeA = sizeALPHA;			// Number of alpha iterations to perform

	// Per-(alpha, location) parameter tables.
	double ** Kappa = new double*[sizeA];
	double ** Sigma = new double*[sizeA];
	double ** H = new double*[sizeA];
	for (int i = 0 ; i< sizeA ; i++){
		Kappa[i] = new double[N];
		Sigma[i] = new double[N];
		H[i] = new double[N];
	}

	double * X_minus = new double[sizeA];
	double * X_plus = new double[sizeA];

	int idx_k = 0;		// location index of the running minimum-entropy fit
	int idx_q = 0;		// alpha index of the running minimum-entropy fit
	double min_H = 10000000000;

	int stepX = N/NUM_STEPS;	// visit about NUM_STEPS candidate locations
	int X_right = 0;
	int X_left = 0;
	if (stepX <1)
		stepX=1;

	for(int k=EDGE-1 ; k <= N-EDGE-1; k=k+stepX)
	{

		double p1 = data[k];	// candidate location (theta)

		X_right = N- k;		// number of points at or above p1
		X_left = k ;		// number of points below p1

		double * x_minus_tmp = new double[X_left];
		double * x_plus_tmp = new double[X_right];

		/* data[] is already sorted, so for q < k the difference data[q] - p1
		 * is <= 0 and its magnitude goes into x_minus_tmp; for q >= k the
		 * difference is >= 0 and goes into x_plus_tmp directly. */
		for( int q = 0; q< N; q++){
			if (q < X_left)
				// BUG FIX: use fabs() -- a bare abs() can resolve to the
				// integer overload (this file never includes <cmath>), which
				// would truncate sub-1.0 differences to 0.
				x_minus_tmp[q] = fabs(data[q] - p1);
			else
				x_plus_tmp[q - X_left] = data[q] - p1;
		}

		/* For each alpha on the grid accumulate the normalized power sums:
		 *   X_minus[q] = sum(|x - p1|^alpha)/N over points below p1
		 *   X_plus[q]  = sum((x - p1)^alpha)/N over points at/above p1
		 * So the outer loop pins k, the inner loop pins q, and each nested
		 * iteration yields X_minus[k, q] and X_plus[k, q]. */
		for ( int q = 0; q < sizeA; q++){
			X_minus[q] = 0;
			X_plus[q] = 0;

			double alpha = start_A + q * inc_A;

			for (int t = 0; t < N; t++){
				if (t < X_left)
					X_minus[q] += pow(x_minus_tmp[t], alpha) / N;
				else
					X_plus[q] += pow(x_plus_tmp[t - X_left], alpha)/ N;
			}
		}

		// Produce potential parameter sets as a function of Alpha, Sigma,
		// Kappa which will be used for maximization.
		for (int q = 0; q < sizeA; q++){

			double alpha = start_A + q * inc_A;
			Kappa[q][k] = pow( X_minus[q] / X_plus[q], 1 / (2*(alpha +1)));

			Sigma[q][k] = pow((alpha*  pow( X_minus[q]*X_plus[q], alpha/ (2*(alpha+1)))\
				* ( pow(X_plus[q], 1/(alpha+1)) + pow(X_minus[q], 1/(alpha+1)))), 1/alpha);

			double sgngam = 0;
			H[q][k] = log(Sigma[q][k]) + locallngamma(1/alpha, &sgngam) \
				+ log(Kappa[q][k] + 1/Kappa[q][k]) + 1/alpha - log(alpha);

			// keep record of the minimum entropy index
			// (the first and last two alpha grid points are excluded so the
			// optimum stays interior to the alpha grid)
			if(q < sizeA -2 && q >=1 && H[q][k] < min_H ){
				min_H = H[q][k];
				idx_k = k;
				idx_q = q;
			}
		}

		delete [] x_minus_tmp;
		delete [] x_plus_tmp;
	}

	params[0] = data[idx_k]; // theta
	params[1] = Sigma[idx_q][idx_k]; // sigma
	params[2] = Kappa[idx_q][idx_k]; // kappa
	params[3] = start_A + idx_q * inc_A; // alpha

	for (int i = 0 ; i< sizeA ; i++){
		delete [] Kappa[i];
		delete [] Sigma[i];
		delete [] H[i];
	}

	delete [] Kappa;
	delete [] Sigma;
	delete [] H;
	delete [] X_minus;
	delete [] X_plus;
}
