/************************************************************************
 *
 * Compile for debugging: nvcc -arch=sm_13 -g -G fastAEPDfit.cu
 * Compile for performance: nvcc -arch=sm_20 -O fastAEPDfit.cu
 *
 * Best Performance: 14.2 ms per call.
 *
 * Also tried: nvcc -use_fast_math -arch=sm_20 -O fastAEPDfit.cu
 * -use_fast_math did not improve performance, so it is not recommended here.
 *
 * This file is made to write and test GPU Kernels launching different
 * alpha values.
 *
 * At the peak there will be 80 x 500 x 512 threads launched.
 *************************************************************************/

#include <sys/time.h>
#include <sys/resource.h>
#include <unistd.h>
#include <vector>
#include <fstream>
#include <iostream>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "math_functions.h"
#define windowSize 500
#define threadsPerBlock 512
#define EDGE 5
#define ae_pi 3.1415926535897932384626433832795
#define minDIV 0.0001
#define numDIV  600
#define FACTOR 10000
#define sizeAlpha 80
using namespace std;

double locallngamma(double x, double* sgngam);
//void localAEPD_Fit(double * x, int N, double * params);
void MakeDeviationMatrix(double **D);
//void fast_AEPD_Fit(double * x, int N, double * params, double *D);
/////////////////////////////////////////////////////////////////////////////
// Code imported from CPUTime.cpp:
// This code is used to check performance
//////////////////////////////////////////////////////////////////////////////

typedef struct timeval TimeVal, *TimeValPtr;
/* timeval is a struct with fields tv_sec for seconds and
                                   tv_usec for microseconds */

// Fetch the calling process's accumulated user-mode CPU time into pTime.
// Returns 0 on success, or the non-zero error code from getrusage() on failure
// (in which case pTime is left untouched).
int GetTime (TimeVal &pTime)
{
  struct rusage selfUsage;

  const int rc = getrusage(RUSAGE_SELF, &selfUsage);
  if (rc == 0)
    pTime = selfUsage.ru_utime; /* user time only, not system time */

  return rc;
}

// Return (pStop - pStart) in microseconds.
// Assumes pStop >= pStart overall; a negative tv_usec difference is folded in
// correctly by the sec*1e6 + usec arithmetic, so no explicit borrow is needed.
// NOTE: the stray debug print that used to live here has been removed — a
// timing helper must not do I/O, since that would perturb the measurement.
long GetTimeDifference (TimeVal &pStart, TimeVal &pStop)
{
  long usec, sec;
  sec=pStop.tv_sec-pStart.tv_sec;    /* difference in seconds */
  usec=pStop.tv_usec-pStart.tv_usec; /* difference in microseconds */

  return ( sec*1000000l +  usec); /* result in microseconds */
}

////////////////////////////////////////////////////////////////////////
// End of code to check time
////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////////////
//
// kernelFastCalculateXplusXminus computes X_minus and X_plus for every
// (k, alpha) pair: windowSize x sizeAlpha = 500 x 80 = 40,000 values.
//
// Launch configuration (set by the host wrapper):
//   dim3 Grid(windowSize, sizeAlpha);   // 500 x 80 = 40,000 blocks
//   dim3 Block(16, 32);                 // 512 threads == threadsPerBlock
//
// One block handles one (k, q) pair:
//   dataIndex  (k) = blockID / sizeAlpha   -> pivot index into dev_intData
//   alphaIndex (q) = blockID % sizeAlpha   -> column of dev_deviationMatrix
//
// dev_deviationMatrix is a host-precomputed lookup table: row d holds
// pow(d/FACTOR, alpha[q]) for every q, so the per-thread work reduces to a
// single table lookup instead of a pow() call (this replaces the
// pow(abs(data[t]-p1), alpha) inner loop of the CPU AEPD_Fit()).
//
// Each thread tid fills one slot of two shared arrays (X_minus side when
// tid <= k, X_plus side when k < tid < windowSize, zero otherwise), a shared
// memory tree reduction sums the 512 partials, and thread 0 writes the two
// block totals to dev_X_plus[blockID] and dev_X_minus[blockID]
// (each 40,000 doubles = 320,000 bytes of global memory).
//
// Note that on the host side X_minus[k] is a prefix sum over k, so a second
// kernel finishes the X_minus calculation.
//
// dev_alpha, dev_debugBig and dev_debug are unused here; they are kept in
// the signature for debugging builds and launch compatibility with the
// host wrapper.
//
//////////////////////////////////////////////////////////////////////////////

__global__ void  kernelFastCalculateXplusXminus (int * dev_intData, double *dev_alpha, double * dev_deviationMatrix, double *dev_X_plus, double *dev_X_minus, double *dev_debugBig, double *dev_debug)
{
	// Flatten the 2D thread/block coordinates into linear ids.
	int tid =threadIdx.x *blockDim.y + threadIdx.y;
	int blockID = blockIdx.x*gridDim.y + blockIdx.y;
	int dataIndex = blockID / sizeAlpha;		// k: which data point is the pivot
	int alphaIndex = blockID % sizeAlpha;		// q: which alpha column to use
	int threadsInBlock = blockDim.y*blockDim.x;
	int p1 = dev_intData[dataIndex];			// pivot value (already scaled by FACTOR on the host)

	__shared__ double temp_X_plus [threadsPerBlock];
	__shared__ double temp_X_minus [threadsPerBlock];

	// Every thread writes exactly one slot of each shared array, so the
	// reduction below never reads an uninitialized element.
	if (tid <= dataIndex){
		temp_X_minus[tid] = dev_deviationMatrix[(abs(dev_intData[tid]-p1))*sizeAlpha + alphaIndex];
		temp_X_plus[tid] = 0;
	}
	else if (tid >dataIndex && tid < windowSize){
		temp_X_minus[tid] = 0;
		// data is sorted on the host, so dev_intData[tid] >= p1 here and the
		// abs() matches the CPU code's (data[q] - p1).
		temp_X_plus[tid] = dev_deviationMatrix[(abs(dev_intData[tid]-p1))*sizeAlpha + alphaIndex];
	}
	else {
		// Threads beyond windowSize (tids 500..511) contribute nothing.
		temp_X_minus[tid] = 0;
		temp_X_plus[tid] = 0;
	}

	__syncthreads();	// all partials must be visible before the reduction reads them

	// Shared-memory tree reduction: halve the active thread count each pass
	// until the block sums sit in element 0 of each array.
	int i = threadsInBlock/2;
	while (i != 0) {
		if (tid < i){
			temp_X_minus[tid] += temp_X_minus[tid + i];
			temp_X_plus[tid] += temp_X_plus[tid + i];
		}
		__syncthreads();
		i /= 2;
	}

	///////////////////////////////////////////////////////////////////////////
	// Copy back based on blockID; each block produced a unique result.
	// dev_X_plus and dev_X_minus each hold windowSize x sizeAlpha values:
	// dev_X_plus[blockID] is the X_plus value for k = blockID / sizeAlpha and
	// q = blockID % sizeAlpha, so dev_X_plus[800 - 879] holds X_plus for
	// k = 10 and q = 0-79.
	//////////////////////////////////////////////////////////////////////////
	if (tid == 0){
		dev_X_plus[blockID] = temp_X_plus[0];
		dev_X_minus[blockID] = temp_X_minus[0];
	}

}// end kernelFastCalculateXplusXminus

//////////////////////////////////////////////////////////////////////////////
//
// kernelFastLowMemCalculateXplusXminus computes the same X_minus / X_plus
// values as kernelFastCalculateXplusXminus (one value per (k, alpha) pair,
// windowSize x sizeAlpha = 500 x 80 = 40,000 values in total), but replaces
// the log2-depth tree reduction with an 8 x 64 two-stage sum:
//   1. threads 0..7 each serially sum a contiguous 64-element slice of the
//      shared partials into an 8-element shared array;
//   2. thread 0 sums those 8 partial sums and writes the block result.
// Measured at 14.2 ms per call — the best configuration found.
//
// Launch configuration (set by the host wrapper):
//   dim3 Grid(windowSize, sizeAlpha);   // 500 x 80 = 40,000 blocks
//   dim3 Block(16, 32);                 // 512 threads == threadsPerBlock
//
// One block handles one (k, q) pair:
//   dataIndex  (k) = blockID / sizeAlpha   -> pivot index into dev_intData
//   alphaIndex (q) = blockID % sizeAlpha   -> column of dev_deviationMatrix
//
// dev_deviationMatrix is a host-precomputed lookup table: row d holds
// pow(d/FACTOR, alpha[q]) for every q, replacing the per-element pow() of
// the CPU AEPD_Fit() inner loop with a single table lookup.
//
// Results go to dev_X_plus[blockID] / dev_X_minus[blockID]
// (each 40,000 doubles = 320,000 bytes). On the host side X_minus[k] is a
// prefix sum over k, so a second kernel finishes the X_minus calculation.
//
// dev_alpha, dev_debugBig and dev_debug are unused here; kept in the
// signature for debugging builds and launch compatibility.
//
//////////////////////////////////////////////////////////////////////////////

__global__ void  kernelFastLowMemCalculateXplusXminus (int * dev_intData, double *dev_alpha, double * dev_deviationMatrix, double *dev_X_plus, double *dev_X_minus, double *dev_debugBig, double *dev_debug)
{
	// Flatten the 2D thread/block coordinates into linear ids.
	int tid =threadIdx.x *blockDim.y + threadIdx.y;
	int blockID = blockIdx.x*gridDim.y + blockIdx.y;
	int dataIndex = blockID / sizeAlpha;		// k: which data point is the pivot
	int alphaIndex = blockID % sizeAlpha;		// q: which alpha column to use
	int p1 = dev_intData[dataIndex];			// pivot value (already scaled by FACTOR on the host)

	__shared__ double temp_X_plus [threadsPerBlock];
	__shared__ double temp_X_minus [threadsPerBlock];

	// Every thread writes exactly one slot of each shared array, so the
	// summation below never reads an uninitialized element.
	if (tid <= dataIndex){
		temp_X_minus[tid] = dev_deviationMatrix[(abs(dev_intData[tid]-p1))*sizeAlpha + alphaIndex];
		temp_X_plus[tid] = 0;
	}
	else if (tid >dataIndex && tid < windowSize){
		temp_X_minus[tid] = 0;
		// data is sorted on the host, so dev_intData[tid] >= p1 here and the
		// abs() matches the CPU code's (data[q] - p1).
		temp_X_plus[tid] = dev_deviationMatrix[(abs(dev_intData[tid]-p1))*sizeAlpha + alphaIndex];
	}
	else {
		// Threads beyond windowSize (tids 500..511) contribute nothing.
		temp_X_minus[tid] = 0;
		temp_X_plus[tid] = 0;
	}

	__syncthreads();	// all partials must be visible before the sums read them

	////////////////////////////////////////////////////////////////////////
	// 8 x 64 summation (stage 1): threads 0..7 each accumulate their own
	// 64-element slice left-to-right — the same floating-point association
	// as the original fully unrolled 64-term expression, just written as a
	// loop instead of 128 source lines.
	// Timing: 14.2 ms whether stage 2 uses one or two threads.
	////////////////////////////////////////////////////////////////////////
	__shared__ double tempSumXplus [8];
	__shared__ double tempSumXminus [8];
	if (tid < 8){
		int offset = tid * 64;
		double sumPlus = 0;
		double sumMinus = 0;
		for (int j = 0; j < 64; j++){
			sumPlus  += temp_X_plus[offset + j];
			sumMinus += temp_X_minus[offset + j];
		}
		tempSumXplus[tid] = sumPlus;
		tempSumXminus[tid] = sumMinus;
	}
	__syncthreads();	// stage-1 partials must be visible to thread 0

	//////////////////////////////////////////////////////////////////////////////////////
	// Stage 2: one thread folds the 8 partial sums and publishes the result.
	// dev_X_plus[blockID] holds X_plus for k = blockID / sizeAlpha and
	// q = blockID % sizeAlpha.
	//////////////////////////////////////////////////////////////////////////////////////
	if (tid == 0){
		double totalPlus = 0;
		double totalMinus = 0;
		for (int j = 0; j < 8; j++){
			totalPlus  += tempSumXplus[j];
			totalMinus += tempSumXminus[j];
		}
		dev_X_plus[blockID] = totalPlus;
		dev_X_minus[blockID] = totalMinus;
	}

}// kernelFastLowMemCalculateXplusXminus





///////////////////////////////////////////////////////////////////////////////////////////////////////
// kernelCalculateParameters() is a CUDA kernel.
// It calculates Kappa[k][q], Sigma[k][q], and H[k][q], where k and q make up
// the index into dev_X_minus and dev_X_plus.
//
// This kernel uses the dev_X_minus[] and dev_X_plus[] values calculated by
// the kernelCalculateXplusXminus() kernel.
//
// Launch: Grid(10,8) = 80 blocks (one per alpha), Block(16,32) = 512 threads
// (one per k, with 12 dummy threads padding 500 up to 512). Each block
// computes the parameters for every k under its own alpha, then reduces over
// its 512 threads to the minimum-H entry; thread 0 writes that entry's
// Kappa, Sigma, H and data value for this alpha.
///////////////////////////////////////////////////////////////////////////////////////////////////////

__global__ void kernelCalculateParameters (double *dev_X_minus, double *dev_X_plus, double *dev_H, double *dev_Kappa, double *dev_Sigma, double *dev_dataParam, double *dev_data, double *dev_alpha, double *dev_lnGammaofAlpha, double *dev_debugBig)
{
	int tid =threadIdx.x *blockDim.y + threadIdx.y;
	int blockID = blockIdx.x*gridDim.y + blockIdx.y;
	int threadsInBlock = blockDim.y*blockDim.x;

	// Each of the 80 blocks handles one alpha value; lngamma(1/alpha) is
	// precomputed on the host and passed in via dev_lnGammaofAlpha.
	double alpha = dev_alpha[blockID]; 							// map alpha[i] to block[i]
	double LnGamma = dev_lnGammaofAlpha[blockID];				// map log-gamma(alpha[i]) to block[i]
	double X_minus;
	double X_plus;

	///////////////////////////////////////////////////////////////////////////////////////////
	// dev_X_plus / dev_X_minus are laid out [k][q] with q fastest:
	// dev_X_plus[k*sizeAlpha + q] was computed with alpha[q]. Thread tid of
	// block blockID therefore reads entry tid*sizeAlpha + blockID
	// (k = tid, q = blockID):
	//   blockID = 0, tid = 0 -> dev_X_plus[0]
	//   blockID = 0, tid = 1 -> dev_X_plus[80]
	//   blockID = 1, tid = 0 -> dev_X_plus[1]
	// Threads 500..511 get dummy zeros.
	///////////////////////////////////////////////////////////////////////////////////////////
	int offset = tid * sizeAlpha;
	if (tid < windowSize){
		X_minus = dev_X_minus[offset + blockID];
		X_plus = dev_X_plus[offset + blockID];
	}
	else{
		X_minus = 0;
		X_plus = 0;
	}

	////////////// IMPORTANT: threadsPerBlock == threadsInBlock ////////////////////////////////
	////////////// IMPORTANT: close to the limit of space for shared variables /////////////////

	__shared__ double tempKappa [threadsPerBlock];
	__shared__ double tempSigma [threadsPerBlock];
	__shared__ double tempH [threadsPerBlock];
	__shared__ int indexValue[threadsPerBlock];

	indexValue[tid] = tid;

	/**********************************************************************************
	 * From AEPD_Fit():
	 *
	 * for (int k = EDGE-1 ; k < windowSize - EDGE - 1; k++){
	 *		Kappa[k] = pow( X_minus / X_plus, 1 / (2*(alpha +1)));
	 *		Sigma[k] = pow((alpha*  pow( X_minus*X_plus, alpha/ (2*(alpha+1)))\
	 *         * ( pow(X_plus, 1/(alpha+1)) + pow(X_minus, 1/(alpha+1)))), 1/alpha);
	 *		H[k] = log(Sigma[k]) + locallngamma(1/alpha, &sgngam) \
	 *	  		+ log(Kappa[k] + 1/Kappa[k]) + 1/alpha - log(alpha);
	 * }
	 *
 	 ***********************************************************************************/

	if (tid >=EDGE-1 && tid <= windowSize - EDGE - 1){
		tempKappa[tid] = pow(X_minus / X_plus, 1/(2*(alpha+1)));
		tempSigma[tid] = pow((alpha*  pow( X_minus*X_plus, alpha/ (2*(alpha+1)))\
			* ( pow(X_plus, 1/(alpha+1)) + pow(X_minus, 1/(alpha+1)))), 1/alpha);

		// Degenerate/NaN guard: the usual (x != x) NaN test was not honoured
		// by nvcc here, so a zero Kappa is mapped to a huge H value that can
		// never win the minimum search below.
		if(tempKappa[tid] == 0){
			tempH[tid] = 1e10;
		}
		else{
			tempH[tid] = log(tempSigma[tid]) + LnGamma\
					+ log(tempKappa[tid] + 1/tempKappa[tid]) + 1/alpha - log(alpha);
		}
	}
	else{
		// BUG FIX: these dummy threads previously published tempH = 0, which
		// beats any positive H in the minimum reduction and would corrupt the
		// result (and could select an out-of-range index into dev_data).
		// Use the same "impossibly large" sentinel as the degenerate case so
		// out-of-range k values can never be selected.
		tempKappa[tid] = 0;
		tempSigma[tid] = 0;
		tempH[tid] = 1e10;
	}

	// BUG FIX: barrier added. The reduction below reads shared entries
	// written by threads in other warps, so all writes above must be visible
	// before any thread starts comparing.
	__syncthreads();

	////////////////////////////////////////////////////////////////////////////////////////
	// Tree reduction keeping the minimum H (and its Kappa/Sigma/data index),
	// mirroring the CPU search:
	//	if(q < sizeA -2 && q >=1 && H[q][k] < min_H ){
	//		min_H = H[q][k]; idx_k = k; idx_q = q;
	//	}
	// (the q-range restriction — blocks 0, 78, 79 excluded — is applied on
	// the host when it scans the 80 per-alpha results)
	////////////////////////////////////////////////////////////////////////////////////////
	int i = threadsInBlock/2;  // number of active threads this pass
	while (i != 0) {
		if (tid < i){
			if (tempH[tid] > tempH[tid+i]){
				tempH[tid] = tempH[tid+i];
				tempKappa[tid] = tempKappa[tid+i];
				tempSigma[tid] = tempSigma[tid+i];
				indexValue[tid] = indexValue[tid+i];
			}
		}
		__syncthreads();
		i /= 2;
	}

	////////////////////////////////////////////////////////////////////////////////////////
	// Thread 0 publishes this alpha's best parameters to Kappa[], Sigma[],
	// H[] and dataParam[] (the data value at the winning k).
	////////////////////////////////////////////////////////////////////////////////////////
	if (tid == 0){
		dev_Kappa[blockID] = tempKappa[0];
		dev_Sigma[blockID] = tempSigma[0];
		dev_dataParam[blockID] = dev_data[indexValue[0]];
		dev_H [blockID] = tempH[0];
	}

} // end kernelCalculateParameters


/*******************************************************************************************************************
 *
 * This is a wrapper function that will handle calls from C++ to Cuda
 * A function from another class will call this function and this function will
 * handle all the memory allocations, copies, and call the device kernels
 *
 * Specifically, this wrapper function will be used to implement and test the CUDA version of AEPD_Fit()
 * from AEPD_Dist.cpp
 *
 ****************************************************************************************************************/




void CUDAfastAEPD_DistWrapper (double *data, double *param, double *dev_alpha, double *dev_lnGammaofAlpha, double *dev_deviationMatrix){

	////////////////////////////////////////////////////////////////////////////
	// Sort the data as localAEPD_Fit() does
	///////////////////////////////////////////////////////////////////////////

	double tmp;
	for(int i = 0; i < windowSize-1 ;i++){
		for (int j = i + 1;j < windowSize;j++){
			if (data[i] > data[j]){
				tmp  = data[i];
				data[i] = data[j];
				data[j] = tmp;
			}
		}
	}
	/////////////////////////////////////////////////////////////////////////
	// Perform multiplication and rounding here
	/////////////////////////////////////////////////////////////////////////



	int * integerData;
	integerData = (int *) malloc (sizeof(int) * windowSize);

	for(int i = 0; i < windowSize; i++){
		integerData[i] = round(data[i] * FACTOR);
	}

	////////////////////////////////////////////////////////////////////////////////
	//
	// kernelFastCalculateXplusXminus is a CUDA kernel to calculate X_plus, X_minus
	//
	// dev_data will be [500], dev_alpha will be [80], dev_lnGammaofAlpha will be [80]
	// dev_X_plus and dev_X_minus will be [500x80]
	//
	////////////////////////////////////////////////////////////////////////////////
    int *dev_intData;
	double *dev_data, *dev_X_plus, *dev_X_minus, *dev_debugBig, *dev_debug;
    double *debug1, *debug2, *debugBig;

    int lengthDevData = windowSize;
    int lengthDevAlpha = 80;

    // allocate host memory
    debug1 = (double *) malloc (lengthDevData*sizeof(double));
    debug2 = (double *) malloc (lengthDevData*sizeof(double));
    debugBig = (double *) malloc (lengthDevData*lengthDevAlpha*sizeof(double));

    // allocate device memory
    cudaMalloc( (void **)&dev_data, sizeof(double) * lengthDevData );
    cudaMalloc( (void **)&dev_intData, sizeof(int) * lengthDevData );
    cudaMalloc( (void **)&dev_X_plus, sizeof(double) * lengthDevAlpha * lengthDevData );
    cudaMalloc( (void **)&dev_X_minus, sizeof(double) * lengthDevAlpha * lengthDevData );
    cudaMalloc( (void **)&dev_debugBig, sizeof(double) * lengthDevAlpha * lengthDevData );
    cudaMalloc( (void **)&dev_debug, sizeof(double) * lengthDevAlpha );

    // copy integerData to dev_intData.
    cudaMemcpy( dev_intData, integerData, sizeof(int) * lengthDevData, cudaMemcpyHostToDevice );
    cudaMemcpy( dev_data, data, sizeof(double) * lengthDevData, cudaMemcpyHostToDevice );

    dim3 Grid(lengthDevData, lengthDevAlpha);
    dim3 Block(16, 32);

    ////////////////////////////////////////////////////////////////////////////////////////////
    // Launch Kernel, dev_intData copied in this wrapper,
    // dev_deviationMatrix, dev_alpha copied once when program launched
    // dev_X_plus, dev_X_minus are written
    ////////////////////////////////////////////////////////////////////////////////////////////

//    kernelFastCalculateXplusXminus <<<Grid, Block>>> (dev_intData, dev_alpha, dev_deviationMatrix, dev_X_plus, dev_X_minus, dev_debugBig, dev_debug);
    kernelFastLowMemCalculateXplusXminus <<<Grid, Block>>> (dev_intData, dev_alpha, dev_deviationMatrix, dev_X_plus, dev_X_minus, dev_debugBig, dev_debug);


    /////////////////////////////////////////////////////////////////////////////////////////////
    // debugging for kernelFastCalculateXplusXminus
    /////////////////////////////////////////////////////////////////////////////////////////////
/*    cudaMemcpy(debugBig , dev_X_minus, sizeof(double) * lengthDevData * lengthDevAlpha, cudaMemcpyDeviceToHost );
    cout << "For k = 275" << endl;
    for (int i = 0; i < lengthDevAlpha; i++){
    	int offset = 275*80;
    	cout << "device X_minus[" << i << "] = " << debugBig[offset+i] << endl;
    }

    cout << "X_minus fast" << endl;
	for (int i = 0; i < lengthDevAlpha*lengthDevData; i++){
		cout << "device X_minus[" << i << "] = " << debugBig[i] << endl;
	}
*/
    /////////////////////////////////////////////////////////////////////////////////////////////
    // end debugging for kernelFastCalculateXplusXminus
    /////////////////////////////////////////////////////////////////////////////////////////////
	/////////////////////////////////////////////////////////////////////////
	// End kernelCalculateXplusXminus
    /////////////////////////////////////////////////////////////////////////

    ///////////////////////////////////////////////////////////////////////////////////////////////////////
    // kernelCalculateParameters() is a CUDA kernel
 	// it calculates Kappa[k][q], Sigma[k][q], and H[k][q]
    // where k and q make up the index to dev_X_minus and dev_X_plus
 	//
    // This Kernel uses the values of dev_X_minus and dev_X_plus that were calculated by
    // the kernelCalculateXplusXminus() Kernel.
    //
    // Right now, launch with Blocks(16,32) Grid(10,8)
 	// This is done to be able to debug as each block simulates one alpha
 	// because of the fact that we are trying to find a minimum over all H[k][q]
 	// the launch could be varied
    //
    ///////////////////////////////////////////////////////////////////////////////////////////////////////

    // Allocate global memories for Kappa[], Sigma[], dataParam[]
 	// Because initial launch is 512 threads per block, each block can calculate a min for 512 elements
 	// So Kappa[], Sigma[] and dataParam[] need to store the 80 mins (corresponding to a min for each alpha)

 	int KCP_threadsPerBlock = 512; //KCP_threadsPerBlock
 	int KCP_devSize =  80; // Need to fix this to be written dynamically later

     double *dev_Kappa, *dev_Sigma, *dev_dataParam, *dev_H;
     double *Kappa, *Sigma, *dataParam, *H;

     // Allocate host memory for Kappa, Sigma, dataParam
     Kappa = (double *) malloc(sizeof(double) * lengthDevAlpha);
     Sigma = (double *) malloc(sizeof(double) * lengthDevAlpha);
     dataParam = (double *) malloc(sizeof(double) * lengthDevAlpha);
     H = (double *) malloc(sizeof(double)*lengthDevAlpha);

     // Allocate device memory for dev_data, dev_X_minus, dev_X_plus
     cudaMalloc( (void **)&dev_H, sizeof(double) * KCP_devSize);
     cudaMalloc( (void **)&dev_Kappa, sizeof(double) * KCP_devSize);
     cudaMalloc( (void **)&dev_Sigma, sizeof(double) * KCP_devSize);
     cudaMalloc( (void **)&dev_dataParam, sizeof(double) * KCP_devSize);

     dim3 BlockKCP(16,32);
     dim3 GridKCP(10,8);

     kernelCalculateParameters <<<GridKCP, BlockKCP>>> (dev_X_minus, dev_X_plus, dev_H, dev_Kappa, dev_Sigma, dev_dataParam, dev_data, dev_alpha, dev_lnGammaofAlpha, dev_debugBig);
 	cudaMemcpy(Kappa , dev_Kappa, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );
 	cudaMemcpy(Sigma , dev_Sigma, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );
 	cudaMemcpy(dataParam , dev_dataParam, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );
 	cudaMemcpy(H , dev_H, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );



    /////////////////////////////////////////////////////////////////////////
	// debugging kernelcalculateParameters()
    /////////////////////////////////////////////////////////////////////////

/*	cout << "Debugging kernel calculate Parameters" << endl;
	for (int i = 0; i < lengthDevAlpha; i++){
		cout << "Kappa[" << i << "] = " << Kappa[i];
		cout << "     Sigma[" << i << "] = " << Sigma[i];
		cout << "     H[" << i << "] = " << H[i];
//		cout << "     dataParam[" << i << "] = " << dataParam[i];
//		cout << "     alpha[" << i << "] = " << alpha[i];
		cout << endl;
	}

*/
	/////////////////////////////////////////////////////////////////////////
	// End debugging kernelcalculateParameters()
    /////////////////////////////////////////////////////////////////////////


	/////////////////////////////////////////////////////////////////////////
	// End kernelCalculateParameters()
    /////////////////////////////////////////////////////////////////////////

	////////////////////////////////////////////////////////////////////////
	// Final Min: H holds 80 minimum values for each alpha value, find the min
	// index of H and exit.  This operation can be done on the host
	// or the device side, we can see which is faster.
	//
	//	if(q < sizeA -2 && q >=1 && H[q][k] < min_H ){
	//		min_H = H[q][k];
	//		idx_k = k;
	//		idx_q = q;
	//	}
	//
	//  This means that blocks 0, 79,78 cannot be the index.
	//
	//
	/////////////////////////////////////////////////////////////////////////

	// Host side code:
	double minH = 1e10;
	int indexAlpha = 0;
	for (int i = 1; i<lengthDevAlpha-2; i++){
		if (H[i] < minH){
			minH = H[i];
			indexAlpha = i;
		}
	}

	param[0] = dataParam[indexAlpha]; // theta
	param[1] = Sigma[indexAlpha]; // sigma
	param[2] = Kappa[indexAlpha]; // kappa
	param[3] = 0.7 + indexAlpha * 0.1; // alpha



    ////////////////////////////////////////////////////////////////////////////////////////////////
    // Clear memory
    ///////////////////////////////////////////////////////////////////////////////////////////////

    free (Kappa);
    free (Sigma);
    free (dataParam);
    free (H);
    free (debug1);
    free (debug2);
    free (debugBig);
    cudaFree (dev_data);
    cudaFree (dev_intData);
    cudaFree (dev_X_plus);
    cudaFree (dev_X_minus);
    cudaFree (dev_debugBig);
    cudaFree (dev_debug);
    cudaFree (dev_Kappa);
    cudaFree (dev_Sigma);
    cudaFree (dev_dataParam);
    cudaFree (dev_H);
    return;

} // end CUDAfastAEPD_DistWrapper

//////////////////////////////////////////////////////////////////////////////////////////////
//
// kernelLowMemCalculateXplusXminus calculates X_minus and X_plus for every window position k
// (looped over inside the kernel) and one alpha value per block, i.e. 500 x 80 = 40,000 values each.
//
// The results will be stored back to global memories in dev_X_minus[] and dev_X_plus[]
// each storing 40,000*8 bytes = 320,000 bytes
//
// The following code is from AEPD_Fit(), in our case, we change X_left = k to X_left = blockId
//
//	for(int k=EDGE-1 ; k <= N-EDGE-1; k=k+stepX){
//		p1 = data[k];
//		X_left = k;
//		for( int q = 0; q< N; q++){
//			if (q < X_left)
//				x_minus_tmp[q] = abs(data[q] - p1);
//			else
//				x_plus_tmp[q - X_left] = data[q] - p1;
//		}
//
//		for ( int q = 0; q < sizeA; q++){
//			for (int t = 0; t < N; t++){
//				if (t < X_left)
//					X_minus[q] += pow(x_minus_tmp[t], alpha) / N;
//				else
//					X_plus[q] += pow(x_plus_tmp[t - X_left], alpha)/ N;
//			}
//		}
//	}
//
//  EX: fix k; say k = 5
//
//  Launch 80 blocks of 500 threads (512)
//  these blocks need to calculate x_minus_tmp and x_plus_tmp, constant for k = 5
//
/////////////////////////////////////////////////////////////////////////////////////////////////////


__global__ void  kernelLowMemCalculateXplusXminus (int * dev_intData, double *dev_alpha, double * dev_deviationMatrix, double *dev_X_plus, double *dev_X_minus, double *dev_debugBig, double *dev_debug)
{

	/***********************************************************************
	 *
	 *  Launch Code for this Kernel:
	 *
	 *  dim3 Grid(10, 8);
     *	dim3 Block(16, 32);
	 *
	 *  In order for all blocks to achieve unique ID
	 *  blockID = blockIdx.x*gridDim.y + blockIdx.y
	 *  or
	 *  blockID = blockIdx.y*gridDim.x + blockIdx.x
	 *
	 *
	 *  Each block handles exactly one alpha value, so the block ID is
	 *  used directly as the alpha index (alphaIndex = blockID); with an
	 *  80-block launch, block b uses alpha[b].
	 *
	 ************************************************************************/

	int tid =threadIdx.x *blockDim.y + threadIdx.y;
	int alphaIndex = blockIdx.x*gridDim.y + blockIdx.y; 	// alphaIndex = blockID
	int threadsInBlock = blockDim.y*blockDim.x;
	int p1;


	__shared__ int data [windowSize];
	__shared__ double dev_Matrix [windowSize];
	__shared__ double temp_X_plus [threadsPerBlock];
	__shared__ double temp_X_minus [threadsPerBlock];

	if (tid < windowSize){
		data[tid] = dev_intData[tid];
		dev_Matrix [tid] =  dev_deviationMatrix[sizeAlpha * tid + alphaIndex];
	}
	__syncthreads();

	for (int k = 0; k < windowSize; k++){
		p1 = data[k];

		if (tid <= k){
//			temp_X_minus[tid] = 1;
			temp_X_minus[tid] = dev_Matrix[abs(data[tid]-p1)];
			temp_X_plus[tid] = 0;
		}
		else if (tid >k && tid < windowSize){
			temp_X_minus[tid] = 0;
//			temp_X_plus [tid] = 1;
			temp_X_plus[tid] = dev_Matrix[abs(data[tid]-p1)];
		}
		else {
			temp_X_minus[tid] = 0;
			temp_X_plus[tid] = 0;
		}


		////////////////////////////////////////////////////////////////////////
		// This code adds 3ms to time
		///////////////////////////////////////////////////////////////////////
/*		temp_X_minus[tid] = 0;
		temp_X_plus[tid] = 0;
		if (tid <= k){
				temp_X_minus[tid] = dev_Matrix[abs(data[tid]-p1)];
		}
		if (tid > k && tid < windowSize){
			temp_X_plus[tid] = dev_Matrix[abs(data[tid]-p1)];
		}
*/
		__syncthreads();

		////////////////////////////////////////////////////////////////
		// Sum Threads
		///////////////////////////////////////////////////////////////

		//////////////////////////////////////////////////////////////////////
		// Initial summing technique: log2(N) = log2(512) iterations,
		// Code from CUDA programming techniques.
		// Unexpectedly high latency from this block of code
		// Code takes 25 ms with this block, 7 ms without this block.
		/////////////////////////////////////////////////////////////////////
/*		int i = 256;  // i is the number of threads per block
		while (i != 0) {
			if (tid < i){
				temp_X_minus[tid] += temp_X_minus[tid + i];
				temp_X_plus[tid] += temp_X_plus[tid + i];
			}
			__syncthreads();
			i /= 2;
		}
		///////////////////////////////////////////////////////////////////////////
		// Copy back based on blockID,
		// Each block produced a unique result, data needs to go in the BIG array
		// dev_X_plus and dev_X_minus each hold 500x80 values
		// dev_X_plus[blockID] holds the X_plus value for k = blockID/ 80 and q = blockID % 80
		// So dev_X_plus [800 - 879] holds X_plus for k = 10 and q = 0-79.
		//////////////////////////////////////////////////////////////////////////
		if (tid == 0){
			dev_X_plus[k*sizeAlpha + alphaIndex] = temp_X_plus[0];
			dev_X_minus[k*sizeAlpha + alphaIndex] = temp_X_minus[0];
		}
		__syncthreads();
*/

		//////////////////////////////////////////////////////////////////////////
		// End 25 ms sum section
		//////////////////////////////////////////////////////////////////////////

		//////////////////////////////////////////////////////////////////////////
		// One iteration of code takes: 14 ms
		////////////////////////////////////////////////////////////////////////
/*		if (tid < 256){
			temp_X_minus[tid] += temp_X_minus[tid + 256];
			temp_X_plus[tid] += temp_X_plus[tid + 256];
		}

		__syncthreads();

		//////////////////////////////////////////////////////////////////////////
		// Two iteration of code takes: 18 ms
		////////////////////////////////////////////////////////////////////////

		if (tid < 128){
			temp_X_minus[tid] += temp_X_minus[tid + 128];
			temp_X_plus[tid] += temp_X_plus[tid + 128];
		}
		__syncthreads();
		//////////////////////////////////////////////////////////////////////////
		// Three iteration of code takes: 20 ms
		////////////////////////////////////////////////////////////////////////
		if (tid < 64){
			temp_X_minus[tid] += temp_X_minus[tid + 64];
			temp_X_plus[tid] += temp_X_plus[tid + 64];
		}
		__syncthreads();
		///////////////////////////////////////////////////////////////////////////
		// This style is still too slow
		///////////////////////////////////////////////////////////////////////////
*/


		////////////////////////////////////////////////////////////////////////
		// 8 x 64
		// Different way of summing:
		// 1. Declare two more shared arrays for sums
		// 2. Each array has 8 elements
		// 3. Sum 64 elements for each array element
		// Result: This code reduces the time to 14.2 ms for 1 threads
		//		   Code reduces to 14.2 ms for 2 threads
		////////////////////////////////////////////////////////////////////////
		__shared__ double tempSumXplus [8];
		__shared__ double tempSumXminus [8];
		int offset;
		if (tid < 8){
			offset = tid * 64;
			tempSumXplus [tid]=temp_X_plus[offset + 0] + temp_X_plus[offset + 1] + temp_X_plus[offset + 2] + temp_X_plus[offset + 3] + temp_X_plus[offset + 4] + temp_X_plus[offset + 5] + \
					temp_X_plus[offset + 6] + temp_X_plus[offset + 7] + temp_X_plus[offset + 8] + temp_X_plus[offset + 9] + temp_X_plus[offset + 10] + temp_X_plus[offset + 11] + \
					temp_X_plus[offset + 12] + temp_X_plus[offset + 13] + temp_X_plus[offset + 14] + temp_X_plus[offset + 15] + temp_X_plus[offset + 16] + temp_X_plus[offset + 17] + \
					temp_X_plus[offset + 18] + temp_X_plus[offset + 19] + temp_X_plus[offset + 20] + temp_X_plus[offset + 21] + temp_X_plus[offset + 22] + temp_X_plus[offset + 23] + \
					temp_X_plus[offset + 24] + temp_X_plus[offset + 25] + temp_X_plus[offset + 26] + temp_X_plus[offset + 27] + temp_X_plus[offset + 28] + temp_X_plus[offset + 29] + \
					temp_X_plus[offset + 30] + temp_X_plus[offset + 31] + temp_X_plus[offset + 32] + temp_X_plus[offset + 33] + temp_X_plus[offset + 34] + temp_X_plus[offset + 35] + \
					temp_X_plus[offset + 36] + temp_X_plus[offset + 37] + temp_X_plus[offset + 38] + temp_X_plus[offset + 39] + temp_X_plus[offset + 40] + temp_X_plus[offset + 41] + \
					temp_X_plus[offset + 42] + temp_X_plus[offset + 43] + temp_X_plus[offset + 44] + temp_X_plus[offset + 45] + temp_X_plus[offset + 46] + temp_X_plus[offset + 47] + \
					temp_X_plus[offset + 48] + temp_X_plus[offset + 49] + temp_X_plus[offset + 50] + temp_X_plus[offset + 51] + temp_X_plus[offset + 52] + temp_X_plus[offset + 53] + \
					temp_X_plus[offset + 54] + temp_X_plus[offset + 55] + temp_X_plus[offset + 56] + temp_X_plus[offset + 57] + temp_X_plus[offset + 58] + temp_X_plus[offset + 59] + \
					temp_X_plus[offset + 60] + temp_X_plus[offset + 61] + temp_X_plus[offset + 62] + temp_X_plus[offset + 63];

			tempSumXminus[tid] = temp_X_minus[offset + 0] + temp_X_minus[offset + 1] + temp_X_minus[offset + 2] + temp_X_minus[offset + 3] + temp_X_minus[offset + 4] + temp_X_minus[offset + 5] + \
				temp_X_minus[offset + 6] + temp_X_minus[offset + 7] + temp_X_minus[offset + 8] + temp_X_minus[offset + 9] + temp_X_minus[offset + 10] + temp_X_minus[offset + 11] + \
				temp_X_minus[offset + 12] + temp_X_minus[offset + 13] + temp_X_minus[offset + 14] + temp_X_minus[offset + 15] + temp_X_minus[offset + 16] + temp_X_minus[offset + 17] + \
				temp_X_minus[offset + 18] + temp_X_minus[offset + 19] + temp_X_minus[offset + 20] + temp_X_minus[offset + 21] + temp_X_minus[offset + 22] + temp_X_minus[offset + 23] + \
				temp_X_minus[offset + 24] + temp_X_minus[offset + 25] + temp_X_minus[offset + 26] + temp_X_minus[offset + 27] + temp_X_minus[offset + 28] + temp_X_minus[offset + 29] + \
				temp_X_minus[offset + 30] + temp_X_minus[offset + 31] + temp_X_minus[offset + 32] + temp_X_minus[offset + 33] + temp_X_minus[offset + 34] + temp_X_minus[offset + 35] + \
				temp_X_minus[offset + 36] + temp_X_minus[offset + 37] + temp_X_minus[offset + 38] + temp_X_minus[offset + 39] + temp_X_minus[offset + 40] + temp_X_minus[offset + 41] + \
				temp_X_minus[offset + 42] + temp_X_minus[offset + 43] + temp_X_minus[offset + 44] + temp_X_minus[offset + 45] + temp_X_minus[offset + 46] + temp_X_minus[offset + 47] + \
				temp_X_minus[offset + 48] + temp_X_minus[offset + 49] + temp_X_minus[offset + 50] + temp_X_minus[offset + 51] + temp_X_minus[offset + 52] + temp_X_minus[offset + 53] + \
				temp_X_minus[offset + 54] + temp_X_minus[offset + 55] + temp_X_minus[offset + 56] + temp_X_minus[offset + 57] + temp_X_minus[offset + 58] + temp_X_minus[offset + 59] + \
				temp_X_minus[offset + 60] + temp_X_minus[offset + 61] + temp_X_minus[offset + 62] + temp_X_minus[offset + 63];
		}
		__syncthreads();
		//////////////////////////////////////////////////////////////////////////////////////
		// One Thread
		//////////////////////////////////////////////////////////////////////////////////////
		if (tid == 0){
			dev_X_plus[k*sizeAlpha + alphaIndex] = tempSumXplus[0] + tempSumXplus[1] + tempSumXplus[2] + tempSumXplus[3] +tempSumXplus[4] + tempSumXplus[5] + tempSumXplus[6] + tempSumXplus[7] ;
			dev_X_minus[k*sizeAlpha + alphaIndex] = tempSumXminus[0]+ tempSumXminus[1]+tempSumXminus[2]+tempSumXminus[3] + tempSumXminus[4]+ tempSumXminus[5]+tempSumXminus[6]+tempSumXminus[7];
		}


		//////////////////////////////////////////////////////////////////////////////////////
		// Two Thread
		//////////////////////////////////////////////////////////////////////////////////////
//		if (tid == 0){
//			dev_X_plus[k*sizeAlpha + alphaIndex] = tempSumXplus[0] + tempSumXplus[1] + tempSumXplus[2] + tempSumXplus[3] +tempSumXplus[4] + tempSumXplus[5] + tempSumXplus[6] + tempSumXplus[7] ;
//		}
//		if (tid == 1){
//			dev_X_minus[k*sizeAlpha + alphaIndex] = tempSumXminus[0]+ tempSumXminus[1]+tempSumXminus[2]+tempSumXminus[3] + tempSumXminus[4]+ tempSumXminus[5]+tempSumXminus[6]+tempSumXminus[7];
//		}
		__syncthreads();

		////////////////////////////////////////////////////////////////////////////////////////////////////////////
		// End 8 x 64 architecture
		////////////////////////////////////////////////////////////////////////////////////////////////////////////


		////////////////////////////////////////////////////////////////////////
		//
		// Try 4 x 128 architecture
		// 1. Declare two more shared arrays for sums
		// 2. Each array has 4 elements
		// 3. Sum 128 elements for each array element
		// Result: This code reduces the time to 15.6 ms!!!
		//
		////////////////////////////////////////////////////////////////////////
/*		__shared__ double tempSumXplus [4];
		__shared__ double tempSumXminus [4];
		int offset;
		if (tid < 4){
			offset = tid * 128;
			tempSumXplus [tid]=temp_X_plus[offset + 0] + temp_X_plus[offset + 1] + temp_X_plus[offset + 2] + temp_X_plus[offset + 3] + temp_X_plus[offset + 4] + temp_X_plus[offset + 5] + \
					temp_X_plus[offset + 6] + temp_X_plus[offset + 7] + temp_X_plus[offset + 8] + temp_X_plus[offset + 9] + temp_X_plus[offset + 10] + temp_X_plus[offset + 11] + \
					temp_X_plus[offset + 12] + temp_X_plus[offset + 13] + temp_X_plus[offset + 14] + temp_X_plus[offset + 15] + temp_X_plus[offset + 16] + temp_X_plus[offset + 17] + \
					temp_X_plus[offset + 18] + temp_X_plus[offset + 19] + temp_X_plus[offset + 20] + temp_X_plus[offset + 21] + temp_X_plus[offset + 22] + temp_X_plus[offset + 23] + \
					temp_X_plus[offset + 24] + temp_X_plus[offset + 25] + temp_X_plus[offset + 26] + temp_X_plus[offset + 27] + temp_X_plus[offset + 28] + temp_X_plus[offset + 29] + \
					temp_X_plus[offset + 30] + temp_X_plus[offset + 31] + temp_X_plus[offset + 32] + temp_X_plus[offset + 33] + temp_X_plus[offset + 34] + temp_X_plus[offset + 35] + \
					temp_X_plus[offset + 36] + temp_X_plus[offset + 37] + temp_X_plus[offset + 38] + temp_X_plus[offset + 39] + temp_X_plus[offset + 40] + temp_X_plus[offset + 41] + \
					temp_X_plus[offset + 42] + temp_X_plus[offset + 43] + temp_X_plus[offset + 44] + temp_X_plus[offset + 45] + temp_X_plus[offset + 46] + temp_X_plus[offset + 47] + \
					temp_X_plus[offset + 48] + temp_X_plus[offset + 49] + temp_X_plus[offset + 50] + temp_X_plus[offset + 51] + temp_X_plus[offset + 52] + temp_X_plus[offset + 53] + \
					temp_X_plus[offset + 54] + temp_X_plus[offset + 55] + temp_X_plus[offset + 56] + temp_X_plus[offset + 57] + temp_X_plus[offset + 58] + temp_X_plus[offset + 59] + \
					temp_X_plus[offset + 60] + temp_X_plus[offset + 61] + temp_X_plus[offset + 62] + temp_X_plus[offset + 63] + temp_X_plus[offset + 64] + temp_X_plus[offset + 65] + \
					temp_X_plus[offset + 66] + temp_X_plus[offset + 67] + temp_X_plus[offset + 68] + temp_X_plus[offset + 69] + temp_X_plus[offset + 70] + temp_X_plus[offset + 71] + \
					temp_X_plus[offset + 72] + temp_X_plus[offset + 73] + temp_X_plus[offset + 74] + temp_X_plus[offset + 75] + temp_X_plus[offset + 76] + temp_X_plus[offset + 77] + \
					temp_X_plus[offset + 78] + temp_X_plus[offset + 79] + temp_X_plus[offset + 80] + temp_X_plus[offset + 81] + temp_X_plus[offset + 82] + temp_X_plus[offset + 83] + \
					temp_X_plus[offset + 84] + temp_X_plus[offset + 85] + temp_X_plus[offset + 86] + temp_X_plus[offset + 87] + temp_X_plus[offset + 88] + temp_X_plus[offset + 89] + \
					temp_X_plus[offset + 90] + temp_X_plus[offset + 91] + temp_X_plus[offset + 92] + temp_X_plus[offset + 93] + temp_X_plus[offset + 94] + temp_X_plus[offset + 95] + \
					temp_X_plus[offset + 96] + temp_X_plus[offset + 97] + temp_X_plus[offset + 98] + temp_X_plus[offset + 99] + temp_X_plus[offset + 100] + temp_X_plus[offset + 101] + \
					temp_X_plus[offset + 102] + temp_X_plus[offset + 103] + temp_X_plus[offset + 104] + temp_X_plus[offset + 105] + temp_X_plus[offset + 106] + temp_X_plus[offset + 107] + \
					temp_X_plus[offset + 108] + temp_X_plus[offset + 109] + temp_X_plus[offset + 110] + temp_X_plus[offset + 111] + temp_X_plus[offset + 112] + temp_X_plus[offset + 113] + \
					temp_X_plus[offset + 114] + temp_X_plus[offset + 115] + temp_X_plus[offset + 116] + temp_X_plus[offset + 117] + temp_X_plus[offset + 118] + temp_X_plus[offset + 119] + \
					temp_X_plus[offset + 120] + temp_X_plus[offset + 121] + temp_X_plus[offset + 122] + temp_X_plus[offset + 123] + temp_X_plus[offset + 124] + temp_X_plus[offset + 125] + \
					temp_X_plus[offset + 126] + temp_X_plus[offset + 127];

			tempSumXminus[tid] = temp_X_minus[offset + 0] + temp_X_minus[offset + 1] + temp_X_minus[offset + 2] + temp_X_minus[offset + 3] + temp_X_minus[offset + 4] + temp_X_minus[offset + 5] + \
					temp_X_minus[offset + 6] + temp_X_minus[offset + 7] + temp_X_minus[offset + 8] + temp_X_minus[offset + 9] + temp_X_minus[offset + 10] + temp_X_minus[offset + 11] + \
					temp_X_minus[offset + 12] + temp_X_minus[offset + 13] + temp_X_minus[offset + 14] + temp_X_minus[offset + 15] + temp_X_minus[offset + 16] + temp_X_minus[offset + 17] + \
					temp_X_minus[offset + 18] + temp_X_minus[offset + 19] + temp_X_minus[offset + 20] + temp_X_minus[offset + 21] + temp_X_minus[offset + 22] + temp_X_minus[offset + 23] + \
					temp_X_minus[offset + 24] + temp_X_minus[offset + 25] + temp_X_minus[offset + 26] + temp_X_minus[offset + 27] + temp_X_minus[offset + 28] + temp_X_minus[offset + 29] + \
					temp_X_minus[offset + 30] + temp_X_minus[offset + 31] + temp_X_minus[offset + 32] + temp_X_minus[offset + 33] + temp_X_minus[offset + 34] + temp_X_minus[offset + 35] + \
					temp_X_minus[offset + 36] + temp_X_minus[offset + 37] + temp_X_minus[offset + 38] + temp_X_minus[offset + 39] + temp_X_minus[offset + 40] + temp_X_minus[offset + 41] + \
					temp_X_minus[offset + 42] + temp_X_minus[offset + 43] + temp_X_minus[offset + 44] + temp_X_minus[offset + 45] + temp_X_minus[offset + 46] + temp_X_minus[offset + 47] + \
					temp_X_minus[offset + 48] + temp_X_minus[offset + 49] + temp_X_minus[offset + 50] + temp_X_minus[offset + 51] + temp_X_minus[offset + 52] + temp_X_minus[offset + 53] + \
					temp_X_minus[offset + 54] + temp_X_minus[offset + 55] + temp_X_minus[offset + 56] + temp_X_minus[offset + 57] + temp_X_minus[offset + 58] + temp_X_minus[offset + 59] + \
					temp_X_minus[offset + 60] + temp_X_minus[offset + 61] + temp_X_minus[offset + 62] + temp_X_minus[offset + 63] + temp_X_minus[offset + 64] + temp_X_minus[offset + 65] + \
					temp_X_minus[offset + 66] + temp_X_minus[offset + 67] + temp_X_minus[offset + 68] + temp_X_minus[offset + 69] + temp_X_minus[offset + 70] + temp_X_minus[offset + 71] + \
					temp_X_minus[offset + 72] + temp_X_minus[offset + 73] + temp_X_minus[offset + 74] + temp_X_minus[offset + 75] + temp_X_minus[offset + 76] + temp_X_minus[offset + 77] + \
					temp_X_minus[offset + 78] + temp_X_minus[offset + 79] + temp_X_minus[offset + 80] + temp_X_minus[offset + 81] + temp_X_minus[offset + 82] + temp_X_minus[offset + 83] + \
					temp_X_minus[offset + 84] + temp_X_minus[offset + 85] + temp_X_minus[offset + 86] + temp_X_minus[offset + 87] + temp_X_minus[offset + 88] + temp_X_minus[offset + 89] + \
					temp_X_minus[offset + 90] + temp_X_minus[offset + 91] + temp_X_minus[offset + 92] + temp_X_minus[offset + 93] + temp_X_minus[offset + 94] + temp_X_minus[offset + 95] + \
					temp_X_minus[offset + 96] + temp_X_minus[offset + 97] + temp_X_minus[offset + 98] + temp_X_minus[offset + 99] + temp_X_minus[offset + 100] + temp_X_minus[offset + 101] + \
					temp_X_minus[offset + 102] + temp_X_minus[offset + 103] + temp_X_minus[offset + 104] + temp_X_minus[offset + 105] + temp_X_minus[offset + 106] + temp_X_minus[offset + 107] + \
					temp_X_minus[offset + 108] + temp_X_minus[offset + 109] + temp_X_minus[offset + 110] + temp_X_minus[offset + 111] + temp_X_minus[offset + 112] + temp_X_minus[offset + 113] + \
					temp_X_minus[offset + 114] + temp_X_minus[offset + 115] + temp_X_minus[offset + 116] + temp_X_minus[offset + 117] + temp_X_minus[offset + 118] + temp_X_minus[offset + 119] + \
					temp_X_minus[offset + 120] + temp_X_minus[offset + 121] + temp_X_minus[offset + 122] + temp_X_minus[offset + 123] + temp_X_minus[offset + 124] + temp_X_minus[offset + 125] + \
					temp_X_minus[offset + 126] + temp_X_minus[offset + 127];
		}
		__syncthreads();

		if (tid == 0){
			dev_X_plus[k*sizeAlpha + alphaIndex] = tempSumXplus[0] + tempSumXplus[1] + tempSumXplus[2] + tempSumXplus[3];
			dev_X_minus[k*sizeAlpha + alphaIndex] = tempSumXminus[0]+ tempSumXminus[1]+tempSumXminus[2]+tempSumXminus[3];
		}

		__syncthreads();
*/
		////////////////////////////////////////////////////////////////////////////////////////////////////////////
		// End 4 x 128 architecture
		////////////////////////////////////////////////////////////////////////////////////////////////////////////

		////////////////////////////////////////////////////////////////////////
		//
		// Try 16 x 32 architecture
		// 1. Declare two more shared arrays for sums
		// 2. Each array has 16 elements
		// 3. Sum 32 elements for each array element
		// Result: This code reduces the time to 15.1 ms
		//
		////////////////////////////////////////////////////////////////////////
/*		__shared__ double tempSumXplus [16];
		__shared__ double tempSumXminus [16];
		int offset;
		if (tid < 16){
			offset = tid * 32;
			tempSumXplus [tid]=temp_X_plus[offset + 0] + temp_X_plus[offset + 1] + temp_X_plus[offset + 2] + temp_X_plus[offset + 3] + temp_X_plus[offset + 4] + temp_X_plus[offset + 5] + \
					temp_X_plus[offset + 6] + temp_X_plus[offset + 7] + temp_X_plus[offset + 8] + temp_X_plus[offset + 9] + temp_X_plus[offset + 10] + temp_X_plus[offset + 11] + \
					temp_X_plus[offset + 12] + temp_X_plus[offset + 13] + temp_X_plus[offset + 14] + temp_X_plus[offset + 15] + temp_X_plus[offset + 16] + temp_X_plus[offset + 17] + \
					temp_X_plus[offset + 18] + temp_X_plus[offset + 19] + temp_X_plus[offset + 20] + temp_X_plus[offset + 21] + temp_X_plus[offset + 22] + temp_X_plus[offset + 23] + \
					temp_X_plus[offset + 24] + temp_X_plus[offset + 25] + temp_X_plus[offset + 26] + temp_X_plus[offset + 27] + temp_X_plus[offset + 28] + temp_X_plus[offset + 29] + \
					temp_X_plus[offset + 30] + temp_X_plus[offset + 31] + temp_X_plus[offset + 32];

			tempSumXminus[tid] = temp_X_minus[offset + 0] + temp_X_minus[offset + 1] + temp_X_minus[offset + 2] + temp_X_minus[offset + 3] + temp_X_minus[offset + 4] + temp_X_minus[offset + 5] + \
					temp_X_minus[offset + 6] + temp_X_minus[offset + 7] + temp_X_minus[offset + 8] + temp_X_minus[offset + 9] + temp_X_minus[offset + 10] + temp_X_minus[offset + 11] + \
					temp_X_minus[offset + 12] + temp_X_minus[offset + 13] + temp_X_minus[offset + 14] + temp_X_minus[offset + 15] + temp_X_minus[offset + 16] + temp_X_minus[offset + 17] + \
					temp_X_minus[offset + 18] + temp_X_minus[offset + 19] + temp_X_minus[offset + 20] + temp_X_minus[offset + 21] + temp_X_minus[offset + 22] + temp_X_minus[offset + 23] + \
					temp_X_minus[offset + 24] + temp_X_minus[offset + 25] + temp_X_minus[offset + 26] + temp_X_minus[offset + 27] + temp_X_minus[offset + 28] + temp_X_minus[offset + 29] + \
					temp_X_minus[offset + 30] + temp_X_minus[offset + 31] + temp_X_minus[offset + 32];
		}
		__syncthreads();

		if (tid == 0){
			dev_X_plus[k*sizeAlpha + alphaIndex] = tempSumXplus[0] + tempSumXplus[1] + tempSumXplus[2] + tempSumXplus[3] +tempSumXplus[4] + tempSumXplus[5] + tempSumXplus[6] + tempSumXplus[7] + \
					tempSumXplus[8] + tempSumXplus[9] + tempSumXplus[10] + tempSumXplus[11] +tempSumXplus[12] + tempSumXplus[13] + tempSumXplus[14] + tempSumXplus[15];
			dev_X_minus[k*sizeAlpha + alphaIndex] = tempSumXminus[0]+ tempSumXminus[1]+tempSumXminus[2]+tempSumXminus[3] + tempSumXminus[4]+ tempSumXminus[5]+tempSumXminus[6]+tempSumXminus[7] + \
					tempSumXminus[8]+ tempSumXminus[9]+tempSumXminus[10]+tempSumXminus[11] + tempSumXminus[12]+ tempSumXminus[13]+tempSumXminus[14]+tempSumXminus[15];
		}

		__syncthreads();
*/
		////////////////////////////////////////////////////////////////////////////////////////////////////////////
		// End 16 x 32 architecture
		////////////////////////////////////////////////////////////////////////////////////////////////////////////

		////////////////////////////////////////////////////////////////////////
		//
		// Try 32 x 16 architecture
		// 1. Declare two more shared arrays for sums
		// 2. Each array has 32 elements
		// 3. Sum 16 elements for each array element
		// Result: This code reduces the time to 16.0 ms with 1 thread sum at end
		//		   15.5 ms with 2 thread sum at the end.
		////////////////////////////////////////////////////////////////////////
/*		__shared__ double tempSumXplus [32];
		__shared__ double tempSumXminus [32];
		int offset;
		if (tid < 32){
			offset = tid * 16;
			tempSumXplus [tid]=temp_X_plus[offset + 0] + temp_X_plus[offset + 1] + temp_X_plus[offset + 2] + temp_X_plus[offset + 3] + temp_X_plus[offset + 4] + temp_X_plus[offset + 5] + \
					temp_X_plus[offset + 6] + temp_X_plus[offset + 7] + temp_X_plus[offset + 8] + temp_X_plus[offset + 9] + temp_X_plus[offset + 10] + temp_X_plus[offset + 11] + \
					temp_X_plus[offset + 12] + temp_X_plus[offset + 13] + temp_X_plus[offset + 14] + temp_X_plus[offset + 15];

			tempSumXminus[tid] = temp_X_minus[offset + 0] + temp_X_minus[offset + 1] + temp_X_minus[offset + 2] + temp_X_minus[offset + 3] + temp_X_minus[offset + 4] + temp_X_minus[offset + 5] + \
					temp_X_minus[offset + 6] + temp_X_minus[offset + 7] + temp_X_minus[offset + 8] + temp_X_minus[offset + 9] + temp_X_minus[offset + 10] + temp_X_minus[offset + 11] + \
					temp_X_minus[offset + 12] + temp_X_minus[offset + 13] + temp_X_minus[offset + 14] + temp_X_minus[offset + 15];
		}
		__syncthreads();
		////////////////////////////////////////////////////
		// One thread sum
		////////////////////////////////////////////////////
//		if (tid == 0){
//			dev_X_plus[k*sizeAlpha + alphaIndex] = tempSumXplus[0] + tempSumXplus[1] + tempSumXplus[2] + tempSumXplus[3] + tempSumXplus[4] + tempSumXplus[5] + \
//					tempSumXplus[6] + tempSumXplus[7] + tempSumXplus[8] + tempSumXplus[9] + tempSumXplus[10] + tempSumXplus[11] + \
//					tempSumXplus[12] + tempSumXplus[13] + tempSumXplus[14] + tempSumXplus[15] + tempSumXplus[16] + tempSumXplus[17] + \
//					tempSumXplus[18] + tempSumXplus[19] + tempSumXplus[20] + tempSumXplus[21] + tempSumXplus[22] + tempSumXplus[23] + \
//					tempSumXplus[24] + tempSumXplus[25] + tempSumXplus[26] + tempSumXplus[27] + tempSumXplus[28] + tempSumXplus[29] + \
//					tempSumXplus[30] + tempSumXplus[31];
//			dev_X_minus[k*sizeAlpha + alphaIndex] = tempSumXminus[0] + tempSumXminus[1] + tempSumXminus[2] + tempSumXminus[3] + tempSumXminus[4] + tempSumXminus[5] + \
//					tempSumXminus[6] + tempSumXminus[7] + tempSumXminus[8] + tempSumXminus[9] + tempSumXminus[10] + tempSumXminus[11] + \
//					tempSumXminus[12] + tempSumXminus[13] + tempSumXminus[14] + tempSumXminus[15] + tempSumXminus[16] + tempSumXminus[17] + \
//					tempSumXminus[18] + tempSumXminus[19] + tempSumXminus[20] + tempSumXminus[21] + tempSumXminus[22] + tempSumXminus[23] + \
//					tempSumXminus[24] + tempSumXminus[25] + tempSumXminus[26] + tempSumXminus[27] + tempSumXminus[28] + tempSumXminus[29] + \
//					tempSumXminus[30] + tempSumXminus[31];
//
//		}
//
//		__syncthreads();

		////////////////////////////////////////////////////
		// End one thread sum
		////////////////////////////////////////////////////

		////////////////////////////////////////////////////
		// two thread sum
		////////////////////////////////////////////////////
		if (tid == 0){
			dev_X_plus[k*sizeAlpha + alphaIndex] = tempSumXplus[0] + tempSumXplus[1] + tempSumXplus[2] + tempSumXplus[3] + tempSumXplus[4] + tempSumXplus[5] + \
					tempSumXplus[6] + tempSumXplus[7] + tempSumXplus[8] + tempSumXplus[9] + tempSumXplus[10] + tempSumXplus[11] + \
					tempSumXplus[12] + tempSumXplus[13] + tempSumXplus[14] + tempSumXplus[15] + tempSumXplus[16] + tempSumXplus[17] + \
					tempSumXplus[18] + tempSumXplus[19] + tempSumXplus[20] + tempSumXplus[21] + tempSumXplus[22] + tempSumXplus[23] + \
					tempSumXplus[24] + tempSumXplus[25] + tempSumXplus[26] + tempSumXplus[27] + tempSumXplus[28] + tempSumXplus[29] + \
					tempSumXplus[30] + tempSumXplus[31];
		}
		if (tid == 1){
			dev_X_minus[k*sizeAlpha + alphaIndex] = tempSumXminus[0] + tempSumXminus[1] + tempSumXminus[2] + tempSumXminus[3] + tempSumXminus[4] + tempSumXminus[5] + \
					tempSumXminus[6] + tempSumXminus[7] + tempSumXminus[8] + tempSumXminus[9] + tempSumXminus[10] + tempSumXminus[11] + \
					tempSumXminus[12] + tempSumXminus[13] + tempSumXminus[14] + tempSumXminus[15] + tempSumXminus[16] + tempSumXminus[17] + \
					tempSumXminus[18] + tempSumXminus[19] + tempSumXminus[20] + tempSumXminus[21] + tempSumXminus[22] + tempSumXminus[23] + \
					tempSumXminus[24] + tempSumXminus[25] + tempSumXminus[26] + tempSumXminus[27] + tempSumXminus[28] + tempSumXminus[29] + \
					tempSumXminus[30] + tempSumXminus[31];
		}

		__syncthreads();
*/
		////////////////////////////////////////////////////
		// End two thread sum
		////////////////////////////////////////////////////

		////////////////////////////////////////////////////
		// End 32 x 16 architecture
		////////////////////////////////////////////////////

		////////////////////////////////////////////////////////////////////////
		//
		// Try 64 x 8 architecture
		// 1. Declare two more shared arrays for sums
		// 2. Each array has 64 elements
		// 3. Sum 8 elements for each array element
		// Result: This code reduces the time to >16.7 ms with 2 thread sum at end
		//
		////////////////////////////////////////////////////////////////////////
/*		__shared__ double tempSumXplus [64];
		__shared__ double tempSumXminus [64];
		int offset;
		if (tid < 64){
			offset = tid * 8;
			tempSumXplus [tid]=temp_X_plus[offset + 0] + temp_X_plus[offset + 1] + temp_X_plus[offset + 2] + temp_X_plus[offset + 3] + temp_X_plus[offset + 4] + temp_X_plus[offset + 5] + \
					temp_X_plus[offset + 6] + temp_X_plus[offset + 7];

			tempSumXminus[tid] = temp_X_minus[offset + 0] + temp_X_minus[offset + 1] + temp_X_minus[offset + 2] + temp_X_minus[offset + 3] + temp_X_minus[offset + 4] + temp_X_minus[offset + 5] + \
					temp_X_minus[offset + 6] + temp_X_minus[offset + 7];
		}
		__syncthreads();

		////////////////////////////////////////////////////
		// two thread sum
		////////////////////////////////////////////////////
		if (tid == 0){
			dev_X_plus[k*sizeAlpha + alphaIndex] = tempSumXplus[0] + tempSumXplus[1] + tempSumXplus[2] + tempSumXplus[3] + tempSumXplus[4] + tempSumXplus[5] + \
					tempSumXplus[6] + tempSumXplus[7] + tempSumXplus[8] + tempSumXplus[9] + tempSumXplus[10] + tempSumXplus[11] + \
					tempSumXplus[12] + tempSumXplus[13] + tempSumXplus[14] + tempSumXplus[15] + tempSumXplus[16] + tempSumXplus[17] + \
					tempSumXplus[18] + tempSumXplus[19] + tempSumXplus[20] + tempSumXplus[21] + tempSumXplus[22] + tempSumXplus[23] + \
					tempSumXplus[24] + tempSumXplus[25] + tempSumXplus[26] + tempSumXplus[27] + tempSumXplus[28] + tempSumXplus[29] + \
					tempSumXplus[30] + tempSumXplus[31] + tempSumXplus[32] + tempSumXplus[33] + tempSumXplus[34] + tempSumXplus[35] + \
					tempSumXplus[36] + tempSumXplus[37] + tempSumXplus[38] + tempSumXplus[39] + tempSumXplus[40] + tempSumXplus[41] + \
					tempSumXplus[42] + tempSumXplus[43] + tempSumXplus[44] + tempSumXplus[45] + tempSumXplus[46] + tempSumXplus[47] + \
					tempSumXplus[48] + tempSumXplus[49] + tempSumXplus[50] + tempSumXplus[51] + tempSumXplus[52] + tempSumXplus[53] + \
					tempSumXplus[54] + tempSumXplus[55] + tempSumXplus[56] + tempSumXplus[57] + tempSumXplus[58] + tempSumXplus[59] + \
					tempSumXplus[60] + tempSumXplus[61] + tempSumXplus[62] + tempSumXplus[63];

		}
		if (tid == 1){
			dev_X_minus[k*sizeAlpha + alphaIndex] = tempSumXplus[0] + tempSumXplus[1] + tempSumXplus[2] + tempSumXplus[3] + tempSumXplus[4] + tempSumXplus[5] + \
					tempSumXplus[6] + tempSumXplus[7] + tempSumXplus[8] + tempSumXplus[9] + tempSumXplus[10] + tempSumXplus[11] + \
					tempSumXplus[12] + tempSumXplus[13] + tempSumXplus[14] + tempSumXplus[15] + tempSumXplus[16] + tempSumXplus[17] + \
					tempSumXplus[18] + tempSumXplus[19] + tempSumXplus[20] + tempSumXplus[21] + tempSumXplus[22] + tempSumXplus[23] + \
					tempSumXplus[24] + tempSumXplus[25] + tempSumXplus[26] + tempSumXplus[27] + tempSumXplus[28] + tempSumXplus[29] + \
					tempSumXplus[30] + tempSumXplus[31] + tempSumXplus[32] + tempSumXplus[33] + tempSumXplus[34] + tempSumXplus[35] + \
					tempSumXplus[36] + tempSumXplus[37] + tempSumXplus[38] + tempSumXplus[39] + tempSumXplus[40] + tempSumXplus[41] + \
					tempSumXplus[42] + tempSumXplus[43] + tempSumXplus[44] + tempSumXplus[45] + tempSumXplus[46] + tempSumXplus[47] + \
					tempSumXplus[48] + tempSumXplus[49] + tempSumXplus[50] + tempSumXplus[51] + tempSumXplus[52] + tempSumXplus[53] + \
					tempSumXplus[54] + tempSumXplus[55] + tempSumXplus[56] + tempSumXplus[57] + tempSumXplus[58] + tempSumXplus[59] + \
					tempSumXplus[60] + tempSumXplus[61] + tempSumXplus[62] + tempSumXplus[63];

		}

		__syncthreads();
*/
		////////////////////////////////////////////////////
		// End two thread sum
		////////////////////////////////////////////////////

		////////////////////////////////////////////////////
		// End 64 x 8 architecture
		////////////////////////////////////////////////////


	} // end for loop


	__syncthreads();
	return;

}// end kernelLowMemCalculateXplusXminus

//////////////////////////////////////////////////////////////////////////////////////////////
//
// kernelVaryArchCalculateXplusXminus will calculate X_minus[k] and X_plus[k] for each k = blockId,
// and each alpha value, so it will calculate 500 x 80 = 40,000 values.
//
// This Kernel will vary the number of blocks called.  Each block will still hold 512 threads.
// The extremes are 80x500 blocks, where each block calculates one X_minus and X_plus,
// and 80x1 blocks, where each block calculates 500 X_minus and X_plus.
//
//
//	for(int k=EDGE-1 ; k <= N-EDGE-1; k=k+stepX){
//		p1 = data[k];
//		X_left = k;
//		for( int q = 0; q< N; q++){
//			if (q < X_left)
//				x_minus_tmp[q] = abs(data[q] - p1);
//			else
//				x_plus_tmp[q - X_left] = data[q] - p1;
//		}
//
//		for ( int q = 0; q < sizeA; q++){
//			for (int t = 0; t < N; t++){
//				if (t < X_left)
//					X_minus[q] += pow(x_minus_tmp[t], alpha) / N;
//				else
//					X_plus[q] += pow(x_plus_tmp[t - X_left], alpha)/ N;
//			}
//		}
//	}
//
//  Performance: 80x500 = 14.3 ms per run, 80x1 = 14.6 ms per run
//	80 x 20 = 14.7 ms per run
//  80 x 50 = 14.9 ms per run
//
//	EX: 80 x 20 blocks, each block calculates 20 X_minus, X_plus values.
//	int dataIndex = blockID/80: dataIndex ranges from [0-19]
//  for (int j = 0; j < 25; j++)
//  	k = dataIndex * 25 + j; 		// dataIndex = 0: k = [0- 24] dataIndex = 1: k = [25-49], etc.
// 		p1 = data[k];
//		if (tid < k), calculate temp_X_minus, temp_X_plus
//		sum up temp_X_minus, temp_X_plus.
//		dev_X_minus[k] = sum temp_X_minus
//
/////////////////////////////////////////////////////////////////////////////////////////////////////

__global__ void  kernelVaryArchCalculateXplusXminus (int * dev_intData, double *dev_alpha, double * dev_deviationMatrix, double *dev_X_plus, double *dev_X_minus, double *dev_debugBig, double *dev_debug)
{
	// Computes dev_X_plus[k][q] and dev_X_minus[k][q] for every k handled by this
	// block and the single alpha column q = alphaIndex assigned to this block.
	//
	// Expected launch: Grid(80,50), Block(16,32) -> 4000 blocks of 512 threads,
	// each block producing 10 (k, alpha) result pairs so all k in [0, 499] and
	// all q in [0, 79] are covered exactly once.
	//
	// dev_alpha, dev_debugBig and dev_debug are unused here but kept so the
	// signature matches the other X_plus/X_minus kernels in this file.

	int tid = threadIdx.x * blockDim.y + threadIdx.y;	// flat thread index, [0, 511] for Block(16,32)
	int blockID = blockIdx.x * gridDim.y + blockIdx.y;	// flat block index, [0, 3999] for Grid(80,50)
	int dataIndex = blockID / sizeAlpha;				// which group of 10 k values, [0, 49] for Grid(80,50)
	int alphaIndex = blockID % sizeAlpha;				// q: alpha column handled by this block, [0, 79]

	int k;
	int p1;
	__shared__ double temp_X_plus [threadsPerBlock];	// per-thread deviation terms, right of split point
	__shared__ double temp_X_minus [threadsPerBlock];	// per-thread deviation terms, left of split point
	__shared__ int data[windowSize];					// block-local copy of the (sorted, scaled) window
	__shared__ double tempSumXplus [8];					// first-stage partial sums (8 x 64 reduction)
	__shared__ double tempSumXminus [8];
	int offset;

	// Stage the window into shared memory once; every j iteration reuses it.
	if (tid < windowSize){
		data[tid] = dev_intData[tid];
	}
	__syncthreads();

	// Each block handles 10 consecutive split points: k = dataIndex*10 + j.
	for (int j = 0; j < 10; j++) {
		k = dataIndex * 10 + j;
		p1 = data[k];
		if (tid <= k){
			// Left side of the split point: look up |data[tid]-p1|^alpha in the
			// precomputed deviation matrix (row = scaled deviation, col = alpha).
			temp_X_minus[tid] = dev_deviationMatrix[(abs(data[tid]-p1))*sizeAlpha + alphaIndex];
			temp_X_plus[tid] = 0;
		}
		else if (tid < windowSize){
			// Right side of the split point.  tid > k is implied by the else;
			// the original extra "tid > dataIndex" test was redundant (k >= dataIndex).
			temp_X_minus[tid] = 0;
			temp_X_plus[tid] = dev_deviationMatrix[(abs(data[tid]-p1))*sizeAlpha + alphaIndex];
		}
		else {
			// Threads past the window contribute nothing to either sum.
			temp_X_minus[tid] = 0;
			temp_X_plus[tid] = 0;
		}
		__syncthreads();

		// Reduction stage 1: 8 threads each serially sum one 64-element slice.
		// (The 8 x 64 layout was measured fastest for 512 threads in this file.)
		// Accumulation is strictly left-to-right, matching the original fully
		// unrolled expression so floating-point results are unchanged.
		if (tid < 8){
			offset = tid * 64;
			double sumPlus = 0.0;
			double sumMinus = 0.0;
			for (int t = 0; t < 64; t++){
				sumPlus  += temp_X_plus[offset + t];
				sumMinus += temp_X_minus[offset + t];
			}
			tempSumXplus [tid] = sumPlus;
			tempSumXminus[tid] = sumMinus;
		}
		__syncthreads();

		// Reduction stage 2: one thread folds the 8 partials and writes the
		// result for this (k, alpha) pair (k-major, alpha-minor layout).
		if (tid == 0){
			double totalPlus = 0.0;
			double totalMinus = 0.0;
			for (int t = 0; t < 8; t++){
				totalPlus  += tempSumXplus[t];
				totalMinus += tempSumXminus[t];
			}
			dev_X_plus [k*sizeAlpha + alphaIndex] = totalPlus;
			dev_X_minus[k*sizeAlpha + alphaIndex] = totalMinus;
		}
		// Ensure the partial-sum arrays are fully consumed before the next j
		// iteration overwrites the shared buffers.
		__syncthreads();
	}

}// kernelVaryArchCalculateXplusXminus






/*******************************************************************************************************************
 *
 * This is a wrapper function that will handle calls from C++ to Cuda
 * A function from another class will call this function and this function will
 * handle all the memory allocations, copies, and call the device kernels
 *
 * Specifically, this wrapper function will be used to implement and test the CUDA version of AEPD_Fit()
 * from AEPD_Dist.cpp
 *
 ****************************************************************************************************************/

//////////////////////////////////////////////////////////////////////////////
// Report-only CUDA error check: this pipeline has no recovery path, so we
// log the failure and continue rather than abort, matching the file's
// existing best-effort style.
//////////////////////////////////////////////////////////////////////////////
static void reportCudaError (cudaError_t err, const char *what)
{
	if (err != cudaSuccess){
		cerr << "CUDA error (" << what << "): " << cudaGetErrorString(err) << endl;
	}
}

void calculateXplusXminusWrapper (double *data, double *param, double *dev_alpha, double *dev_lnGammaofAlpha, double *dev_deviationMatrix){

	////////////////////////////////////////////////////////////////////////////
	// Host-side driver for the "low memory" X_plus/X_minus pipeline:
	//   1. sort the window in place (as localAEPD_Fit() does),
	//   2. scale/round the sorted data to integers (row indices into
	//      dev_deviationMatrix),
	//   3. launch kernelLowMemCalculateXplusXminus to fill dev_X_plus/dev_X_minus,
	//   4. launch kernelCalculateParameters to get per-alpha Kappa/Sigma/theta/H,
	//   5. pick the alpha with minimal H and return its parameters.
	//
	// data  : [windowSize] input window; sorted ascending in place here.
	// param : [4] output -> {theta, sigma, kappa, alpha}.
	// dev_alpha, dev_lnGammaofAlpha, dev_deviationMatrix: device buffers
	// populated once when the program launched.
	///////////////////////////////////////////////////////////////////////////

	// Simple exchange sort, kept identical to localAEPD_Fit() so both code
	// paths order the window the same way.
	double tmp;
	for(int i = 0; i < windowSize-1 ;i++){
		for (int j = i + 1;j < windowSize;j++){
			if (data[i] > data[j]){
				tmp  = data[i];
				data[i] = data[j];
				data[j] = tmp;
			}
		}
	}

	/////////////////////////////////////////////////////////////////////////
	// Scale and round so deviations become integer row indices into the
	// precomputed deviation matrix (see MakeDeviationMatrix / FACTOR).
	/////////////////////////////////////////////////////////////////////////

	int * integerData;
	integerData = (int *) malloc (sizeof(int) * windowSize);

	for(int i = 0; i < windowSize; i++){
		integerData[i] = round(data[i] * FACTOR);
	}

	////////////////////////////////////////////////////////////////////////////////
	//
	// Device buffer layout: dev_data/dev_intData are [500], dev_X_plus and
	// dev_X_minus are [500 x 80] stored k-major, alpha-minor.
	//
	////////////////////////////////////////////////////////////////////////////////
    int *dev_intData;
	double *dev_data, *dev_X_plus, *dev_X_minus, *dev_debugBig, *dev_debug;
    double *debugBig;

    int lengthDevData = windowSize;
    int lengthDevAlpha = sizeAlpha;

    // Host debug buffer (only consumed when the debug dumps below are enabled).
    debugBig = (double *) malloc (lengthDevData*lengthDevAlpha*sizeof(double));

    // Allocate device memory; failures are logged but not fatal.
    reportCudaError(cudaMalloc( (void **)&dev_data, sizeof(double) * lengthDevData ), "cudaMalloc dev_data");
    reportCudaError(cudaMalloc( (void **)&dev_intData, sizeof(int) * lengthDevData ), "cudaMalloc dev_intData");
    reportCudaError(cudaMalloc( (void **)&dev_X_plus, sizeof(double) * lengthDevAlpha * lengthDevData ), "cudaMalloc dev_X_plus");
    reportCudaError(cudaMalloc( (void **)&dev_X_minus, sizeof(double) * lengthDevAlpha * lengthDevData ), "cudaMalloc dev_X_minus");
    reportCudaError(cudaMalloc( (void **)&dev_debugBig, sizeof(double) * lengthDevAlpha * lengthDevData ), "cudaMalloc dev_debugBig");
    reportCudaError(cudaMalloc( (void **)&dev_debug, sizeof(double) * lengthDevAlpha ), "cudaMalloc dev_debug");

    // Copy the scaled window (and the raw sorted window, used by the
    // parameter kernel) to the device.
    reportCudaError(cudaMemcpy( dev_intData, integerData, sizeof(int) * lengthDevData, cudaMemcpyHostToDevice ), "cudaMemcpy dev_intData");
    reportCudaError(cudaMemcpy( dev_data, data, sizeof(double) * lengthDevData, cudaMemcpyHostToDevice ), "cudaMemcpy dev_data");

    ////////////////////////////////////////////////////////////////////////////////////////////
    // Launch kernelLowMemCalculateXplusXminus: dev_intData copied in this wrapper,
    // dev_deviationMatrix, dev_alpha copied once when program launched,
    // dev_X_plus, dev_X_minus are written by the kernel.
    ////////////////////////////////////////////////////////////////////////////////////////////

    dim3 Grid3(10, 8);
    dim3 Block3(16, 32);

    kernelLowMemCalculateXplusXminus <<<Grid3, Block3>>> (dev_intData, dev_alpha, dev_deviationMatrix, dev_X_plus, dev_X_minus, dev_debugBig, dev_debug);
    // Kernel launches do not return a status; pick up configuration errors here.
    reportCudaError(cudaGetLastError(), "kernelLowMemCalculateXplusXminus launch");

    /////////////////////////////////////////////////////////////////////////////////////////////
    // debugging for kernelLowMemCalculateXplusXminus
    /////////////////////////////////////////////////////////////////////////////////////////////
/*    cudaMemcpy(debugBig , dev_X_minus, sizeof(double) * lengthDevData * lengthDevAlpha, cudaMemcpyDeviceToHost );
    cout << "Low Memory: X_minus fast" << endl;
	for (int i = 0; i < lengthDevAlpha*lengthDevData; i++){
		cout << "device X_minus[" << i << "] = " << debugBig[i] << endl;
	}
*/
    /////////////////////////////////////////////////////////////////////////////////////////////
    // end debugging for kernelLowMemCalculateXplusXminus
    /////////////////////////////////////////////////////////////////////////////////////////////

    ///////////////////////////////////////////////////////////////////////////////////////////////////////
    // kernelCalculateParameters() computes Kappa[k][q], Sigma[k][q], and H[k][q]
    // from the dev_X_minus / dev_X_plus values produced above, then reduces
    // each quantity to one minimum per alpha (80 values).
    //
    // Launched with Grid(10,8), Block(16,32): each block simulates one alpha,
    // which keeps the per-alpha minimum reduction easy to debug.
    ///////////////////////////////////////////////////////////////////////////////////////////////////////

     double *dev_Kappa, *dev_Sigma, *dev_dataParam, *dev_H;
     double *Kappa, *Sigma, *dataParam, *H;

     // Host copies of the 80 per-alpha minima.
     Kappa = (double *) malloc(sizeof(double) * lengthDevAlpha);
     Sigma = (double *) malloc(sizeof(double) * lengthDevAlpha);
     dataParam = (double *) malloc(sizeof(double) * lengthDevAlpha);
     H = (double *) malloc(sizeof(double)*lengthDevAlpha);

     // Device-side per-alpha result buffers.
     reportCudaError(cudaMalloc( (void **)&dev_H, sizeof(double) * sizeAlpha), "cudaMalloc dev_H");
     reportCudaError(cudaMalloc( (void **)&dev_Kappa, sizeof(double) * sizeAlpha), "cudaMalloc dev_Kappa");
     reportCudaError(cudaMalloc( (void **)&dev_Sigma, sizeof(double) * sizeAlpha), "cudaMalloc dev_Sigma");
     reportCudaError(cudaMalloc( (void **)&dev_dataParam, sizeof(double) * sizeAlpha), "cudaMalloc dev_dataParam");

     dim3 BlockKCP(16,32);
     dim3 GridKCP(10,8);

    kernelCalculateParameters <<<GridKCP, BlockKCP>>> (dev_X_minus, dev_X_plus, dev_H, dev_Kappa, dev_Sigma, dev_dataParam, dev_data, dev_alpha, dev_lnGammaofAlpha, dev_debugBig);
    reportCudaError(cudaGetLastError(), "kernelCalculateParameters launch");

    // Blocking copies; these also synchronize with the kernels above.
 	reportCudaError(cudaMemcpy(Kappa , dev_Kappa, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost ), "cudaMemcpy Kappa");
 	reportCudaError(cudaMemcpy(Sigma , dev_Sigma, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost ), "cudaMemcpy Sigma");
 	reportCudaError(cudaMemcpy(dataParam , dev_dataParam, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost ), "cudaMemcpy dataParam");
 	reportCudaError(cudaMemcpy(H , dev_H, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost ), "cudaMemcpy H");

	////////////////////////////////////////////////////////////////////////
	// Final Min: H holds 80 minimum values, one per alpha; find the index
	// of the smallest on the host.  Mirrors the CPU code
	//	if(q < sizeA -2 && q >=1 && H[q][k] < min_H ){ ... }
	// so alpha indices 0, 78 and 79 are excluded from the search.
	/////////////////////////////////////////////////////////////////////////

	double minH = 1e10;
	int indexAlpha = 0;
	for (int i = 1; i<lengthDevAlpha-2; i++){
		if (H[i] < minH){
			minH = H[i];
			indexAlpha = i;
		}
	}

	param[0] = dataParam[indexAlpha]; // theta
	param[1] = Sigma[indexAlpha]; // sigma
	param[2] = Kappa[indexAlpha]; // kappa
	param[3] = 0.7 + indexAlpha * 0.1; // alpha value corresponding to indexAlpha

	////////////////////////////////////////////////////////////////////////////////////////////////
    // Clean up host and device memory
    ///////////////////////////////////////////////////////////////////////////////////////////////
    free (Kappa);
    free (Sigma);
    free (dataParam);
    free (H);
    free (debugBig);
    free (integerData);
    cudaFree (dev_data);
    cudaFree (dev_intData);
    cudaFree (dev_X_plus);
    cudaFree (dev_X_minus);
    cudaFree (dev_debugBig);
    cudaFree (dev_debug);
    cudaFree (dev_Kappa);
    cudaFree (dev_Sigma);
    cudaFree (dev_dataParam);
    cudaFree (dev_H);

    return;
} //calculateXplusXminusWrapper


/*******************************************************************************************************************
 *
 * This is a wrapper function that will handle calls from C++ to Cuda
 * A function from another class will call this function and this function will
 * handle all the memory allocations, copies, and call the device kernels
 *
 * Specifically, this wrapper function will be used to implement and test the CUDA version of AEPD_Fit()
 * from AEPD_Dist.cpp
 *
 ****************************************************************************************************************/

void varyArchCalculateXplusXminusWrapper (double *data, double *param, double *dev_alpha, double *dev_lnGammaofAlpha, double *dev_deviationMatrix){

	////////////////////////////////////////////////////////////////////////////
	// Sort the data as localAEPD_Fit() does
	///////////////////////////////////////////////////////////////////////////

	double tmp;
	for(int i = 0; i < windowSize-1 ;i++){
		for (int j = i + 1;j < windowSize;j++){
			if (data[i] > data[j]){
				tmp  = data[i];
				data[i] = data[j];
				data[j] = tmp;
			}
		}
	}
	/////////////////////////////////////////////////////////////////////////
	// Perform multiplication and rounding here
	/////////////////////////////////////////////////////////////////////////



	int * integerData;
	integerData = (int *) malloc (sizeof(int) * windowSize);

	for(int i = 0; i < windowSize; i++){
		integerData[i] = round(data[i] * FACTOR);
	}
	////////////////////////////////////////////////////////////////////////////////
	//
	// kernelFastCalculateXplusXminus is a CUDA kernel to calculate X_plus, X_minus
	//
	// dev_data will be [500], dev_alpha will be [80], dev_lnGammaofAlpha will be [80]
	// dev_X_plus and dev_X_minus will be [500x80]
	//
	////////////////////////////////////////////////////////////////////////////////
    int *dev_intData;
	double *dev_data, *dev_X_plus, *dev_X_minus, *dev_debugBig, *dev_debug;
    double *debug1, *debug2, *debugBig;

    int lengthDevData = windowSize;
    int lengthDevAlpha = 80;

    // allocate host memory
    debug1 = (double *) malloc (lengthDevData*sizeof(double));
    debug2 = (double *) malloc (lengthDevData*sizeof(double));
    debugBig = (double *) malloc (lengthDevData*lengthDevAlpha*sizeof(double));

    // allocate device memory
    cudaMalloc( (void **)&dev_data, sizeof(double) * lengthDevData );
    cudaMalloc( (void **)&dev_intData, sizeof(int) * lengthDevData );
    cudaMalloc( (void **)&dev_X_plus, sizeof(double) * lengthDevAlpha * lengthDevData );
    cudaMalloc( (void **)&dev_X_minus, sizeof(double) * lengthDevAlpha * lengthDevData );
    cudaMalloc( (void **)&dev_debugBig, sizeof(double) * lengthDevAlpha * lengthDevData );
    cudaMalloc( (void **)&dev_debug, sizeof(double) * lengthDevAlpha );

    // copy integerData to dev_intData.
    cudaMemcpy( dev_intData, integerData, sizeof(int) * lengthDevData, cudaMemcpyHostToDevice );
    cudaMemcpy( dev_data, data, sizeof(double) * lengthDevData, cudaMemcpyHostToDevice );

    dim3 Grid(80, 50);
    dim3 Block(16, 32);

    ////////////////////////////////////////////////////////////////////////////////////////////
    // Launch Kernel, dev_intData copied in this wrapper,
    // dev_deviationMatrix, dev_alpha copied once when program launched
    // dev_X_plus, dev_X_minus are written
    ////////////////////////////////////////////////////////////////////////////////////////////

   kernelVaryArchCalculateXplusXminus <<<Grid, Block>>> (dev_intData, dev_alpha, dev_deviationMatrix, dev_X_plus, dev_X_minus, dev_debugBig, dev_debug);


    /////////////////////////////////////////////////////////////////////////////////////////////
    // debugging for kernelFastCalculateXplusXminus
    /////////////////////////////////////////////////////////////////////////////////////////////
//    cudaMemcpy(debugBig , dev_X_minus, sizeof(double) * lengthDevData * lengthDevAlpha, cudaMemcpyDeviceToHost );
////    cout << "For k = 275" << endl;
////    for (int i = 0; i < lengthDevAlpha; i++){
////    	int offset = 275*80;
////    	cout << "device X_minus[" << i << "] = " << debugBig[offset+i] << endl;
////    }
//
//    cout << "X_minus fast" << endl;
//	for (int i = 0; i < lengthDevAlpha*lengthDevData; i++){
//		cout << "device X_minus[" << i << "] = " << debugBig[i] << endl;
//	}

    /////////////////////////////////////////////////////////////////////////////////////////////
    // end debugging for kernelFastCalculateXplusXminus
    /////////////////////////////////////////////////////////////////////////////////////////////
	/////////////////////////////////////////////////////////////////////////
	// End kernelCalculateXplusXminus
    /////////////////////////////////////////////////////////////////////////



    ///////////////////////////////////////////////////////////////////////////////////////////////////////
    // kernelcalculateParameters() is a CUDA kernel
 	// it calculates Kappa[k][q], Sigma[k][q], and H[k][q]
    // where k and q make up the index to dev_X_minus and dev_X_plus
 	//
    // This Kernel uses the values of dev_X_minus and dev_X_plus that were calculated by
    // the kernelCalculateXplusXminus() Kernel.
    //
    // Right now, launch with Blocks(16,32) Grid(10,8)
 	// This is done to be able to debug as each block simulates one alpha
 	// because of the fact that we are trying to find a minimum over all H[k][q]
 	// the launch could be varied
    //
    ///////////////////////////////////////////////////////////////////////////////////////////////////////

    // Allocate global memories for Kappa[], Sigma[], dataParam[]
 	// Because initial launch is 512 threads per block, each block can calculate a min for 512 elements
 	// So Kappa[], Sigma[] and dataParam[] need to store the 80 mins (corresponding to a min for each alpha)

 	//int KCP_threadsPerBlock = 512; //KCP_threadsPerBlock
 	//int KCP_devSize =  80; // Need to fix this to be written dynamically later

     double *dev_Kappa, *dev_Sigma, *dev_dataParam, *dev_H;
     double *Kappa, *Sigma, *dataParam, *H;

     // Allocate host memory for Kappa, Sigma, dataParam
     Kappa = (double *) malloc(sizeof(double) * lengthDevAlpha);
     Sigma = (double *) malloc(sizeof(double) * lengthDevAlpha);
     dataParam = (double *) malloc(sizeof(double) * lengthDevAlpha);
     H = (double *) malloc(sizeof(double)*lengthDevAlpha);

     // Allocate device memory for dev_data, dev_X_minus, dev_X_plus
     cudaMalloc( (void **)&dev_H, sizeof(double) * sizeAlpha);
     cudaMalloc( (void **)&dev_Kappa, sizeof(double) * sizeAlpha);
     cudaMalloc( (void **)&dev_Sigma, sizeof(double) * sizeAlpha);
     cudaMalloc( (void **)&dev_dataParam, sizeof(double) * sizeAlpha);

     dim3 BlockKCP(16,32);
     dim3 GridKCP(10,8);

    kernelCalculateParameters <<<GridKCP, BlockKCP>>> (dev_X_minus, dev_X_plus, dev_H, dev_Kappa, dev_Sigma, dev_dataParam, dev_data, dev_alpha, dev_lnGammaofAlpha, dev_debugBig);
 	cudaMemcpy(Kappa , dev_Kappa, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );
 	cudaMemcpy(Sigma , dev_Sigma, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );
 	cudaMemcpy(dataParam , dev_dataParam, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );
 	cudaMemcpy(H , dev_H, sizeof(double) * lengthDevAlpha, cudaMemcpyDeviceToHost );



    /////////////////////////////////////////////////////////////////////////
	// debugging kernelcalculateParameters()
    /////////////////////////////////////////////////////////////////////////

/*	cout << "Debugging kernel calculate Parameters" << endl;
	for (int i = 0; i < lengthDevAlpha; i++){
		cout << "Kappa[" << i << "] = " << Kappa[i];
		cout << "     Sigma[" << i << "] = " << Sigma[i];
		cout << "     H[" << i << "] = " << H[i];
//		cout << "     dataParam[" << i << "] = " << dataParam[i];
//		cout << "     alpha[" << i << "] = " << alpha[i];
		cout << endl;
	}

*/
	/////////////////////////////////////////////////////////////////////////
	// End debugging kernelcalculateParameters()
    /////////////////////////////////////////////////////////////////////////


	/////////////////////////////////////////////////////////////////////////
	// End kernelcalculateParameters()
    /////////////////////////////////////////////////////////////////////////

	////////////////////////////////////////////////////////////////////////
	// Final Min: H holds 80 minimum values for each alpha value, find the min
	// index of H and exit.  This operation can be done on the host
	// or the device side, we can see which is faster.
	//
	//	if(q < sizeA -2 && q >=1 && H[q][k] < min_H ){
	//		min_H = H[q][k];
	//		idx_k = k;
	//		idx_q = q;
	//	}
	//
	//  This means that blocks 0, 79,78 cannot be the index.
	//
	//
	/////////////////////////////////////////////////////////////////////////

	// Host side code:
	double minH = 1e10;
	int indexAlpha = 0;
	for (int i = 1; i<lengthDevAlpha-2; i++){
		if (H[i] < minH){
			minH = H[i];
			indexAlpha = i;
		}
	}

	param[0] = dataParam[indexAlpha]; // theta
	param[1] = Sigma[indexAlpha]; // sigma
	param[2] = Kappa[indexAlpha]; // kappa
	param[3] = 0.7 + indexAlpha * 0.1; // alpha




	////////////////////////////////////////////////////////////////////////////////////////////////
    // Clear memory
    ///////////////////////////////////////////////////////////////////////////////////////////////
    free (Kappa);
    free (Sigma);
    free (dataParam);
    free (H);
    free (debug1);
    free (debug2);
    free (debugBig);
    free (integerData);
    cudaFree (dev_data);
    cudaFree (dev_intData);
    cudaFree (dev_X_plus);
    cudaFree (dev_X_minus);
    cudaFree (dev_debugBig);
    cudaFree (dev_debug);
    cudaFree (dev_Kappa);
    cudaFree (dev_Sigma);
    cudaFree (dev_dataParam);
    cudaFree (dev_H);

    return;
} //calculateXplusXminusWrapper



// Emits C++ source fragments (long "+"-chained sums with line continuations)
// that can be pasted into a kernel's manual reduction; debugging aid only.
// Output format per term: "<prefix><index>] + ", with a "\" + newline after
// every 6th term, and three blank lines separating the four lists.
void printSum(void){
	const char *prefixes[4] = { "temp_X_plus[offset + ",
	                            "temp_X_minus[offset + ",
	                            "tempSumXplus[",
	                            "tempSumXminus[" };
	const int counts[4] = { 128, 128, 64, 64 };

	for (int list = 0; list < 4; list++){
		for (int term = 0; term < counts[list]; term++){
			cout << prefixes[list] << term << "] + ";
			// Break the generated line after every 6 terms with a continuation.
			if ((term + 1) % 6 == 0)
				cout << "\\" << endl;
		}
		cout << endl << endl << endl;
	}
}


int main(void){
	///////////////////////////////////////////////////////////////////////////////
	// Driver: reads one windowSize-point data window from gbpusd.txt, fits it
	// with three CUDA AEPD fitting wrappers (printing the fitted parameters
	// param[] = {theta, sigma, kappa, alpha} after each), then times each
	// wrapper over 100 calls with the user-time clock from getrusage.
	// Returns 0 on success, 1 if the input file cannot be opened.
	///////////////////////////////////////////////////////////////////////////////

	// Uncomment to emit the unrolled-sum source fragments used by the kernels.
	// printSum();

	TimeVal startTime, endTime;
	long totalTime;

	///////////////////////////////////////////////////////////////////////////////
	// alpha and log-gamma(1/alpha) values are constant: compute them once on
	// the host, then keep a copy in device global memory for every launch.
	///////////////////////////////////////////////////////////////////////////////
	int lengthDevAlpha = 80;
	double *alpha = (double *) malloc (lengthDevAlpha*sizeof(double));
	double *logGammaofAlpha = (double *) malloc (lengthDevAlpha*sizeof(double));

	double sgngam = 0; // sign output of locallngamma(); unused since 1/alpha > 0

	for (int i = 0; i < lengthDevAlpha; i++){
		alpha[i] = 0.7 + i*0.1;
		logGammaofAlpha[i] = locallngamma(1/alpha[i], &sgngam);
	}

	double *dev_alpha, *dev_lnGammaofAlpha;
	cudaMalloc( (void **)&dev_alpha, sizeof(double) * lengthDevAlpha );
	cudaMalloc( (void **)&dev_lnGammaofAlpha, sizeof(double) * lengthDevAlpha );
	cudaMemcpy( dev_alpha, alpha, sizeof(double) * lengthDevAlpha, cudaMemcpyHostToDevice );
	cudaMemcpy( dev_lnGammaofAlpha, logGammaofAlpha, sizeof(double)*lengthDevAlpha, cudaMemcpyHostToDevice );

	//////////////////////////////////////////////////////////////////////
	// Read in the data array (windowSize points) and then pass it to the
	// CUDA wrappers.
	/////////////////////////////////////////////////////////////////////
	double *dataArray = (double *) malloc (windowSize*sizeof(double));
	double *param = (double *) malloc (4 * sizeof(double));
	fstream myfile("gbpusd.txt", ios::in);
	if (!myfile.is_open()){
		cout << "Error opening gbpusd text file " << endl;
		// Fix: the original leaked all four host buffers and both device
		// buffers on this early-exit path.
		free(alpha);
		free(logGammaofAlpha);
		free(dataArray);
		free(param);
		cudaFree(dev_alpha);
		cudaFree(dev_lnGammaofAlpha);
		return 1;
	}
	double d = 0;
	int index = 0;
	bool truncated = false;
	while (index < windowSize){
		// Fix: detect a short file.  On extraction failure d keeps its last
		// value (same fill behavior as before), but now we warn about it.
		if (!(myfile >> d))
			truncated = true;
		dataArray[index++] = d;
	}
	if (truncated)
		cout << "Warning: gbpusd.txt held fewer than " << windowSize
		     << " values; the last value read was repeated." << endl;
	// Finished reading and assigning data

	////////////////////////////////////////////////////////////////////////////
	// Generate the deviation matrix:
	// deviationMatrix[i + j*sizeAlpha] = DIV[j]^alpha[i] / windowSize,
	// with DIV[j] = j*minDIV.
	////////////////////////////////////////////////////////////////////////////
	double inc_A = 0.1;   // iteration increment of the alpha parameter
	double start_A = 0.7; // value at which the alpha iteration starts
	double *deviationMatrix = (double *) malloc(sizeAlpha*numDIV*sizeof(double));
	double *DIV = (double *) malloc(numDIV * sizeof(double));
	// NOTE: this rewrites alpha[] with the same values as the loop above
	// (start_A + i*inc_A == 0.7 + i*0.1); kept to make the matrix setup
	// self-contained.
	for (int i = 0; i < sizeAlpha; i++){
		alpha[i] = start_A + i*inc_A;
	}

	for (int i = 0; i < numDIV; i++){
		DIV[i] = minDIV*i;
	}

	for (int j = 0; j < numDIV; j++){
		for (int i = 0; i < sizeAlpha; i++){
			deviationMatrix[i + j*sizeAlpha] = pow(DIV[j],alpha[i])/windowSize;
		}
	}

	///////////////////////////////////////////////////////////////////////////////
	// Copy the deviation matrix to GPU global memory (once; reused by every
	// wrapper call below).
	///////////////////////////////////////////////////////////////////////////////
	double *dev_deviationMatrix;
	cudaMalloc( (void **)&dev_deviationMatrix, sizeof(double) * sizeAlpha*numDIV);
	cudaMemcpy( dev_deviationMatrix, deviationMatrix, sizeof(double) * sizeAlpha*numDIV, cudaMemcpyHostToDevice );

	calculateXplusXminusWrapper (dataArray, param, dev_alpha, dev_lnGammaofAlpha, dev_deviationMatrix);
	cout<< "Parameters from fast sum Cuda AEPD_Fit" << endl;
	for (int i = 0; i < 4; i++){
		cout << "Param[" << i << "] = " << param[i]<<endl;
	}

	CUDAfastAEPD_DistWrapper (dataArray, param, dev_alpha, dev_lnGammaofAlpha, dev_deviationMatrix);
	cout<< "Parameters from fast CUDA AEPD_Fit" << endl;
	for (int i = 0; i < 4; i++){
		cout << "Param[" << i << "] = " << param[i]<<endl;
	}

	varyArchCalculateXplusXminusWrapper (dataArray, param, dev_alpha, dev_lnGammaofAlpha, dev_deviationMatrix);
	// Fix: this label used to duplicate the previous wrapper's label, making
	// the three result sections indistinguishable in the program output.
	cout<< "Parameters from varyArch CUDA AEPD_Fit" << endl;
	for (int i = 0; i < 4; i++){
		cout << "Param[" << i << "] = " << param[i]<<endl;
	}

	////////////////////////////////////////////////////////////////////////
	// Code to test performance: first use local C++ clock methods
	// (user time from getrusage via GetTime/GetTimeDifference).
	//////////////////////////////////////////////////////////////////////

	// CUDAfastAEPD_DistWrapper, global memory, local C++ clock
	assert(GetTime(startTime)==0);
	for (int i = 0; i < 100; i++){
		CUDAfastAEPD_DistWrapper (dataArray, param, dev_alpha, dev_lnGammaofAlpha, dev_deviationMatrix);
	}
	assert(GetTime(endTime)==0);
	totalTime = GetTimeDifference(startTime, endTime);
	cout << "Total time = " << totalTime << " microseconds." << endl;
	cout << "Average time of global memory Cuda : " << totalTime/100 << " microseconds." << endl;

	// calculateXplusXminusWrapper
	assert(GetTime(startTime)==0);
	for (int i = 0; i < 100; i++){
		calculateXplusXminusWrapper (dataArray, param, dev_alpha, dev_lnGammaofAlpha, dev_deviationMatrix);
	}
	assert(GetTime(endTime)==0);
	totalTime = GetTimeDifference(startTime, endTime);
	cout << "X_plus, X_minus calculation time" << endl;
	cout << "Total time = " << totalTime << " microseconds." << endl;
	cout << "Average time of global memory Cuda : " << totalTime/100 << " microseconds." << endl;

	// varyArchCalculateXplusXminusWrapper
	assert(GetTime(startTime)==0);
	for (int i = 0; i < 100; i++){
		varyArchCalculateXplusXminusWrapper(dataArray, param, dev_alpha, dev_lnGammaofAlpha, dev_deviationMatrix);
	}
	assert(GetTime(endTime)==0);
	totalTime = GetTimeDifference(startTime, endTime);
	cout << "X_plus, X_minus calculation time" << endl;
	cout << "Total time = " << totalTime << " microseconds." << endl;
	cout << "Average time of global memory Cuda : " << totalTime/100 << " microseconds." << endl;

	////////////////////////////////////////////////////////////////////////
	// Cleanup.  Fix: the original never freed logGammaofAlpha, dataArray,
	// or param.
	////////////////////////////////////////////////////////////////////////
	myfile.close();
	cudaFree(dev_deviationMatrix);
	cudaFree(dev_alpha);
	cudaFree(dev_lnGammaofAlpha);
	free(deviationMatrix);
	free(alpha);
	free(logGammaofAlpha);
	free(dataArray);
	free(param);
	free(DIV);
	return 0;
}

/*************************************************************************************
 * Log-Gamma function adapted from the ALGLIB project.
 *
 * Returns ln|Gamma(x)| and writes the sign of Gamma(x) (+1 or -1) to *sgngam.
 * Three regimes (see the branches below):
 *   x < -34 : reflection via a recursive call on q = -x, using
 *             result = ln(pi) - ln(q*sin(pi*z)) - ln|Gamma(q)|;
 *   x < 13  : the argument is shifted into [2,3) by the Gamma recurrence,
 *             then a rational approximation is evaluated on the shifted value;
 *   x >= 13 : Stirling's formula plus an asymptotic correction series.
 *
 * Copyright and comments from the original ALGLIB source apply.
 * NOTE(review): the code below is kept byte-identical to the ported version;
 * evaluation order of the Horner chains matters for bit-exact results.
 ***************************************************************************************/

double locallngamma(double x, double* sgngam)
{

    double a;
    double b;
    double c;
    double p;
    double q;
    double u;
    double w;
    double z;
    int i;
    double logpi;
    double ls2pi;
    double tmp;
    double result;

    *sgngam = 0;

    // Default sign; overwritten in the x<-34 and x<13 branches below.
    *sgngam = 1;
    logpi = 1.14472988584940017414;   // ln(pi)
    ls2pi = 0.91893853320467274178;   // ln(sqrt(2*pi))
    if( x<-34.0 )
    {
        // Large negative argument: reflection formula.  Compute ln|Gamma(q)|
        // for q = -x recursively (the recursive call's sign output is
        // discarded via tmp), and derive the sign from the parity of floor(q).
        q = -x;
        w = locallngamma(q, &tmp);
        p = floor(q);
        i = floor(p+0.5);             // round p to the nearest integer
        if( i%2==0 )
        {
            *sgngam = -1;
        }
        else
        {
            *sgngam = 1;
        }
        // z = fractional part of q, folded into [0, 0.5] so that sin(pi*z)
        // is evaluated away from its zeros.
        z = q-p;
        if( z>0.5 )
        {
            p = p+1;
            z = p-q;
        }
        z = q*sin(ae_pi*z);
        result = logpi-log(z)-w;
        return result;
    }
    if( x<13 )
    {
        // Moderate argument: use Gamma(x+1) = x*Gamma(x) to walk u = x+p
        // into [2,3), accumulating the removed factors in z.
        z = 1;
        p = 0;
        u = x;
        while(u>=3)
        {
            p = p-1;
            u = x+p;
            z = z*u;
        }
        while(u<2)
        {
            z = z/u;
            p = p+1;
            u = x+p;
        }
        // The sign of the accumulated product z is the sign of Gamma(x).
        if( z<0 )
        {
            *sgngam = -1;
            z = -z;
        }
        else
        {
            *sgngam = 1;
        }
        if( u==2)
        {
            // Exact landing on 2: Gamma contribution of the shifted point is 1.
            result = log(z);
            return result;
        }
        // Shift x so the rational approximation below is evaluated on
        // x+p = u-2, which lies in [0,1).
        p = p-2;
        x = x+p;
        // Horner evaluation of the numerator (b) and denominator (c)
        // polynomials of the rational approximation.
        b = -1378.25152569120859100;
        b = -38801.6315134637840924+x*b;
        b = -331612.992738871184744+x*b;
        b = -1162370.97492762307383+x*b;
        b = -1721737.00820839662146+x*b;
        b = -853555.664245765465627+x*b;
        c = 1;
        c = -351.815701436523470549+x*c;
        c = -17064.2106651881159223+x*c;
        c = -220528.590553854454839+x*c;
        c = -1139334.44367982507207+x*c;
        c = -2532523.07177582951285+x*c;
        c = -2018891.41433532773231+x*c;
        p = x*b/c;
        result = log(z)+p;
        return result;
    }
    // Large argument: Stirling's formula, q = (x-1/2)ln(x) - x + ln(sqrt(2*pi)).
    q = (x-0.5)*log(x)-x+ls2pi;
    if(x>100000000 )
    {
        // Beyond 1e8 the correction series is below double precision; skip it.
        result = q;
        return result;
    }
    p = 1/(x*x);
    if( x>=1000.0 )
    {
        // Short asymptotic correction in powers of 1/x^2.
        q = q+((7.9365079365079365079365*0.0001*p-2.7777777777777777777778*0.001)*p+0.0833333333333333333333)/x;
    }
    else
    {
        // Longer correction series (Horner in p = 1/x^2) for 13 <= x < 1000.
        a = 8.11614167470508450300*0.0001;
        a = -5.95061904284301438324*0.0001+p*a;
        a = 7.93650340457716943945*0.0001+p*a;
        a = -2.77777777730099687205*0.001+p*a;
        a = 8.33333333333331927722*0.01+p*a;
        q = q+a/x;
    }
    result = q;
    return result;

} // end locallngamma
