/*
 * nvcc -arch=sm_13 -g -G TestGammaCuda.cu
 *
 *  Created on: Apr 17, 2011
 *      Author: skyang
 */
#include <vector>
#include <fstream>
#include <iostream>
#include <stdlib.h>
#include <string.h>
#include "math_functions.h"
#define windowSize 500
#define threadsPerBlock 512
#define EDGE 5
#define ae_pi 3.1415926535897932384626433832795
using namespace std;
//extern "C" void kernel_wrapper(double *a, double *c);



/********************************************************************************
 * copyData: copies data[] to manipulatedData[] while adding 0.1 to each
 * element, so the host can verify the kernel actually ran.
 *
 * Indexing is the global 1-D thread index, so the kernel is now correct for
 * any 1-D launch (the original used threadIdx.x only and would have written
 * the wrong elements under a multi-block launch).  For the actual
 * <<<1, 512>>> launch in testCudaCopyWrapper the behavior is identical.
 *
 * Precondition: total thread count == element count of both arrays
 * (no length parameter is available for a bounds check).
 ******************************************************************************/

__global__ void copyData(double *data, double *manipulatedData)
{
    // Global index across all blocks (blockIdx.x is 0 for the current launch).
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    manipulatedData[idx] = data[idx] + 0.1;
    // Exit and do nothing else; block completion is implicit at kernel end.
}
/*************************************************************************
 * multiplyDataby2: in-place doubles every element of manipulatedData[]
 * (used to show that global memory persists between kernel launches).
 *
 * Uses the global 1-D thread index so the kernel stays correct for
 * multi-block launches (the original used threadIdx.x only); identical
 * behavior for the actual <<<1, 512>>> launch.
 *
 * Precondition: total thread count == element count of the array.
 *************************************************************************/


__global__ void multiplyDataby2(double *manipulatedData)
{
    // Global index across all blocks (blockIdx.x is 0 for the current launch).
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    manipulatedData[idx] = manipulatedData[idx] * 2;
}

/********************************************************************
 * calculateSums: per-block partial sums of data[], one shared array.
 *
 * Block b computes:
 *   leftSum[b]  = sum of data[t] for t <= b
 *   rightSum[b] = sum of data[t] for b < t < threadsPerBlock
 *
 * Launch requirements: <<<N, threadsPerBlock>>>, blockDim.x a power
 * of two (the tree reduction assumes this), and data[] holding at
 * least threadsPerBlock elements.
 *********************************************************************/

__global__ void calculateSums (double * data, double * leftSum, double *rightSum)
{
	int tid = threadIdx.x;
	int blockid = blockIdx.x;
	__shared__ double temp [threadsPerBlock];

	/************************************************************
	 * Stage the "left" portion: threads at or below this block's
	 * id load data[], everyone else contributes zero.
	 * (threadsPerBlock replaces the magic constant 512 that was
	 * hard-coded here previously.)
	 ************************************************************/
	if (tid <= blockid  && tid < threadsPerBlock)
		temp[tid] = data[tid];
	else
		temp[tid] = 0;

	__syncthreads();

	/******************************************************************
	 * Power-of-two tree reduction: the sum ends up in temp[0].
	 ******************************************************************/

	int i = blockDim.x/2;  // half the threads participate each round
	while (i != 0) {
		if (tid < i){
			temp[tid] += temp[tid + i];
		}
		__syncthreads();
		i /= 2;
	}

	// Put the result in global variable leftSum
	if (tid == 0)
		leftSum[blockid] = temp[0];

	// Defensive barrier before temp[] is recycled: guarantees the
	// leftSum store above has read temp[0] before any thread reuses
	// the buffer (the original relied on each slot being rewritten
	// only by its owning thread).
	__syncthreads();

	/***********************************************************************
	 * Right-hand sum: threads strictly above the block id load data[],
	 * the rest contribute zero; then the identical reduction runs and
	 * the result lands in rightSum[blockid].
	 *********************************************************************/
	if (tid > blockid  && tid < threadsPerBlock)
		temp[tid] = data[tid];
	else
		temp[tid] = 0;

	__syncthreads();

	i = blockDim.x/2;
	while (i != 0) {
		if (tid < i){
			temp[tid] += temp[tid + i];
		}
		__syncthreads();
		i /= 2;
	}

	if (tid == 0)
		rightSum[blockid] = temp[0];

}

/********************************************************************
 * calculateSumsTwoSharedArray: same left/right running sums as
 * calculateSums, but the two halves are staged in two separate
 * shared arrays.
 *
 * Block b produces:
 *   leftSum[b]  = sum of data[t] for t <= b  (t < 512)
 *   rightSum[b] = sum of data[t] for b < t < 512
 *
 * Assumes blockDim.x is a power of two (tree reduction).
 *********************************************************************/

__global__ void calculateSumsTwoSharedArray (double * data, double * leftSum, double *rightSum)
{
	const int t = threadIdx.x;
	const int b = blockIdx.x;

	__shared__ double leftBuf [threadsPerBlock];
	__shared__ double rightBuf [threadsPerBlock];

	// Clear both staging buffers before use.
	leftBuf[t] = 0;
	rightBuf[t] = 0;
	__syncthreads();

	// Left half: threads with t <= b (and inside the data range) load data.
	leftBuf[t] = (t <= b && t < 512) ? data[t] : 0;
	__syncthreads();

	// Tree-reduce leftBuf; the block total lands in leftBuf[0].
	for (int stride = blockDim.x / 2; stride > 0; stride /= 2) {
		if (t < stride)
			leftBuf[t] += leftBuf[t + stride];
		__syncthreads();
	}

	// Publish this block's left-hand sum.
	if (t == 0)
		leftSum[b] = leftBuf[0];

	// Right half: threads strictly above b load data into the second buffer,
	// so the left-hand reduction result is never disturbed.
	rightBuf[t] = (t > b && t < 512) ? data[t] : 0;
	__syncthreads();

	// Tree-reduce rightBuf; total in rightBuf[0].
	for (int stride = blockDim.x / 2; stride > 0; stride /= 2) {
		if (t < stride)
			rightBuf[t] += rightBuf[t + stride];
		__syncthreads();
	}

	if (t == 0)
		rightSum[b] = rightBuf[0];

}	//end calculateSumsTwoSharedArray

/************************************************************************************************
 * calculateSumGrid: demonstration kernel for multidimensional grids and blocks.
 *
 * Intended launch:
 *     dim3 Block(16,32);
 *     dim3 Grid(16,32);
 *     calculateSumGrid <<<Grid, Block>>> (dev_gridData, dev_gridSum);
 *
 * Unique IDs are flattened as:
 *     blockID = blockIdx.x*gridDim.y  + blockIdx.y    (0 .. gridDim.x*gridDim.y-1)
 *     tid     = threadIdx.x*blockDim.y + threadIdx.y  (0 .. blockDim.x*blockDim.y-1)
 *
 * Each block counts how many of its threads satisfy tid < blockID, so the result
 * written to dev_gridSum[blockID] equals blockID itself.
 * Example: block (5,6) has blockID = 5*32+6 = 166, so dev_gridSum[166] = 166.
 *
 * NOTE: the shared buffer is statically sized threadsPerBlock, so the launch
 * must satisfy blockDim.x*blockDim.y == threadsPerBlock, and the reduction
 * assumes that product is a power of two.
 *
 * dev_gridData is currently unused; it remains in the signature for the earlier
 * 1-D-grid simple-copy experiment (previously kept here as commented-out code)
 * and for future variants.
 *************************************************************************************************/

__global__ void calculateSumGrid (double * dev_gridData, double * dev_gridSum)
{
	// Flatten block and thread coordinates into unique scalar IDs.
	const int blockID        = blockIdx.x * gridDim.y + blockIdx.y;
	const int tid            = blockDim.y * threadIdx.x + threadIdx.y;
	const int threadsInBlock = blockDim.x * blockDim.y;

	/*******IMPORTANT NEED TO MAKE SURE THAT threadsInBlock == threadsPerBlock********/
	__shared__ double temp [threadsPerBlock];

	// Each thread contributes 1.0 iff its id is below the block id,
	// so the block-wide total equals blockID.
	temp[tid] = (tid < blockID) ? 1.0 : 0;
	__syncthreads();

	// Power-of-two tree reduction into temp[0].
	for (int stride = threadsInBlock / 2; stride > 0; stride /= 2) {
		if (tid < stride)
			temp[tid] += temp[tid + stride];
		__syncthreads();
	}

	// One thread per block publishes the result.
	if (tid == 0)
		dev_gridSum[blockID] = temp[0];

}








/********************************************************************************
 * This is a wrapper function that will handle calls from C++ to Cuda.
 * A function from another class will call this function and this function will
 * handle all the memory allocations, copies, and call the device kernels.
 *
 * Exercises, in order: copyData, multiplyDataby2, calculateSums,
 * calculateSumsTwoSharedArray, and calculateSumGrid.
 *******************************************************************************/

// Report (but do not abort on) CUDA runtime failures.  The original code
// ignored every cudaMalloc/cudaMemcpy/launch return value, so failures could
// only be diagnosed from garbage output.
static void checkCuda(cudaError_t err, const char *what)
{
    if (err != cudaSuccess)
        cerr << "CUDA error (" << what << "): " << cudaGetErrorString(err) << endl;
}

void testCudaCopyWrapper(double *data, double *manipulatedData)
{
    const int N = 512;   // element count used throughout this test driver
                         // (matches threadsPerBlock and the callers' buffers)

    double *dev_a, *dev_c, *dev_leftSum, *dev_rightSum, *dev_easySum;
    double * leftSum, * rightSum, * easySum;

    // allocate host memory for easySum, leftSum, rightSum
    easySum = (double *) malloc (N*sizeof(double));
    leftSum = (double *) malloc (N*sizeof(double));
    rightSum = (double *) malloc (N*sizeof(double));

    // allocate device memory for dev_a, dev_c
    checkCuda( cudaMalloc( (void **)&dev_a, sizeof(double) * N ), "cudaMalloc dev_a" );
    checkCuda( cudaMalloc( (void **)&dev_c, sizeof(double) * N ), "cudaMalloc dev_c" );

    // copy data to dev_a
    checkCuda( cudaMemcpy( dev_a, data, sizeof(double) * N, cudaMemcpyHostToDevice ),
               "memcpy data -> dev_a" );

    /*************************************************************************
     * Test 1: add 0.1 to dev_a, result in dev_c.
     *******************************************************************************/
    copyData<<< 1, N >>>( dev_a, dev_c );
    checkCuda( cudaGetLastError(), "copyData launch" );

    checkCuda( cudaMemcpy( manipulatedData, dev_c, sizeof(double) * N, cudaMemcpyDeviceToHost ),
               "memcpy dev_c -> manipulatedData" );
    cout << "Original Data vs Original Data + 0.1 " << endl;
    for (int i =0; i< N; i++){
    	cout << "data[" << i << "] = " << data[i] << "              manipulatedData[" << i << "] = " << manipulatedData[i] << endl;
    }

    /**********************************************************************************************
     * Test 2: multiply dev_c in place.  No host<->device copy happens in between,
     * so a correct result shows that global memory persists between kernel launches.
     **********************************************************************************************/
    multiplyDataby2 <<< 1, N >>> (dev_c);
    checkCuda( cudaGetLastError(), "multiplyDataby2 launch" );

    checkCuda( cudaMemcpy( manipulatedData, dev_c, sizeof(double) * N, cudaMemcpyDeviceToHost ),
               "memcpy dev_c -> manipulatedData (x2)" );
    cout << "Multiply the manipulated data by 2 " << endl;
    for (int i =0; i< N; i++){
    	cout << "data[" << i << "] = " << data[i] << "              manipulatedData[" << i << "] = " << manipulatedData[i] << endl;
    }
    // Finished checking if globals stay in memory between Kernel calls.

    // Allocate device memory for easySum, leftSum, rightSum
    checkCuda( cudaMalloc( (void **)&dev_easySum, sizeof(double) * N ), "cudaMalloc dev_easySum" );
    checkCuda( cudaMalloc( (void **)&dev_leftSum, sizeof(double) * N ), "cudaMalloc dev_leftSum" );
    checkCuda( cudaMalloc( (void **)&dev_rightSum, sizeof(double) * N ), "cudaMalloc dev_rightSum" );

    // Fill easySum[] with 1.0 so the block sums are trivially checkable.
    for (int i = 0; i< N; i++){
    	easySum[i] = 1.0;
    }
    checkCuda( cudaMemcpy( dev_easySum, easySum, sizeof(double) * N, cudaMemcpyHostToDevice ),
               "memcpy easySum -> dev_easySum" );

    /****************************************************************************************************
     * Test 3: launch N blocks x N threads.  Block b sums easySum[0..b] into
     * dev_leftSum[b] and easySum[b+1..N-1] into dev_rightSum[b].
     *****************************************************************************************************/
    calculateSums <<< N, N >>> (dev_easySum, dev_leftSum, dev_rightSum);
    checkCuda( cudaGetLastError(), "calculateSums launch" );

    checkCuda( cudaMemcpy( leftSum, dev_leftSum, sizeof(double) * N, cudaMemcpyDeviceToHost ),
               "memcpy dev_leftSum -> leftSum" );
    checkCuda( cudaMemcpy( rightSum, dev_rightSum, sizeof(double) * N, cudaMemcpyDeviceToHost ),
               "memcpy dev_rightSum -> rightSum" );
    cout << "sum the data by block. " << endl;
    for (int i =0; i< N; i++){
    	cout << "leftSum[" << i << "] = " << leftSum[i] << "              rightSum[" << i << "] = " << rightSum[i] << endl;
    }
    // Finished Testing Checksums with one array

    /********************************************************************************************************************
     * Test 4: same sums with two __shared__ temp arrays.
     * easySum[] is changed to 2.0 to get a visibly different result from Test 3.
     *********************************************************************************************************************/
    for (int i = 0; i< N; i++){
    	easySum[i] = 2.0;
    }
    checkCuda( cudaMemcpy( dev_easySum, easySum, sizeof(double) * N, cudaMemcpyHostToDevice ),
               "memcpy easySum(2.0) -> dev_easySum" );

    calculateSumsTwoSharedArray <<< N, N >>> (dev_easySum, dev_leftSum, dev_rightSum);
    checkCuda( cudaGetLastError(), "calculateSumsTwoSharedArray launch" );

    checkCuda( cudaMemcpy( leftSum, dev_leftSum, sizeof(double) * N, cudaMemcpyDeviceToHost ),
               "memcpy dev_leftSum -> leftSum (2 arrays)" );
    checkCuda( cudaMemcpy( rightSum, dev_rightSum, sizeof(double) * N, cudaMemcpyDeviceToHost ),
               "memcpy dev_rightSum -> rightSum (2 arrays)" );
    cout << "sum the data by block: two shared arrays per block. " << endl;
    for (int i =0; i< N; i++){
    	cout << "leftSum[" << i << "] = " << leftSum[i] << "              rightSum[" << i << "] = " << rightSum[i] << endl;
    }

   /*****************************************************************************************************************
    * Test 5: calculateSumGrid computes a running sum using dim3 grids/blocks.
    * windowSize sums are read back from dev_gridSum; the purpose is to see how
    * 2-D grids and blocks map to unique indices.
    * (An earlier 1-D-grid simple-copy variant of this test was removed; see the
    * kernel's header comment for the launch-geometry details.)
    ***************************************************************************************************************/
    double * gridSum, *dev_gridSum, *gridData, *dev_gridData;
    checkCuda( cudaMalloc( (void **)&dev_gridSum, sizeof(double) * windowSize ), "cudaMalloc dev_gridSum" );
    gridSum = (double *) malloc (windowSize * sizeof(double));

    checkCuda( cudaMalloc( (void **)&dev_gridData, sizeof(double) * windowSize ), "cudaMalloc dev_gridData" );
    gridData = (double *) malloc (windowSize * sizeof(double));
    // Make this sum easy to read.
    for (int i = 0; i< windowSize; i++){
    	gridData[i] = 1.0;
    }

    checkCuda( cudaMemcpy( dev_gridData, gridData, sizeof(double) * windowSize, cudaMemcpyHostToDevice ),
               "memcpy gridData -> dev_gridData" );

    /***********************************************************************************
     * Launch a 2d grid of 16x32 blocks. Have the blocks do a running sum.
     * Note 16*32 == threadsPerBlock, which calculateSumGrid requires.
     **********************************************************************************/
    dim3 Block2(16,32);
    dim3 Grid2(16,32);

    calculateSumGrid <<<Grid2,Block2>>> (dev_gridData, dev_gridSum);
    checkCuda( cudaGetLastError(), "calculateSumGrid launch" );
    checkCuda( cudaMemcpy( gridSum, dev_gridSum, sizeof(double) * windowSize, cudaMemcpyDeviceToHost ),
               "memcpy dev_gridSum -> gridSum" );
    for (int i =0; i< windowSize; i++){
    	cout << "gridSum[" << i << "] = " << gridSum[i] << endl;
    }
    /***********************Finished 2d grid try **************************************/

    cout << "End of testCudaCopyWrapper, returning to main()" << endl;

    // Release all device and host allocations.
    cudaFree(dev_gridSum);
    cudaFree(dev_gridData);
    cudaFree(dev_a);
    cudaFree(dev_c);
    cudaFree(dev_easySum);
    cudaFree(dev_leftSum);
    cudaFree(dev_rightSum);
    free(easySum);
    free(leftSum);
    free(rightSum);
    free(gridSum);
    free(gridData);

} // End testCudaCopyWrapper





/***********************************************************************************************************
 * calculateXplusXminus: for each k = blockIdx.x, computes the per-window terms
 *
 *   dev_X_minus[k] = sum over t <= k             of |data[t]-data[k]|^alpha / windowSize
 *   dev_X_plus[k]  = sum over k < t < windowSize of |data[t]-data[k]|^alpha / windowSize
 *
 * Note that on the host side: X_minus[k] = (sum from 0 to k) X_minus[k],
 * so a separate kernel (calculateSumXplusXminus) finishes that accumulation.
 *
 * The following code is from AEPD_Fit(); in our case X_left = k becomes X_left = blockId:
 *	for( int q = 0; q< 512; q++){
 *		if (q < X_left)
 *			x_minus_tmp[q] = abs(data[q] - p1);
 *		else
 *			x_plus_tmp[q - X_left] = data[q] - p1;
 *	}
 *	for (int t = 0; t < 512; t++){
 *		if (t < X_left)
 *			X_minus += pow(x_minus_tmp[t], alpha) / 512;
 *		else
 *			X_plus += pow(x_plus_tmp[t - X_left], alpha)/ 512;
 *
 * Launch: <<<windowSize, threadsPerBlock>>>; blockDim.x must be a power of two.
 *************************************************************************************************************/

__global__ void calculateXplusXminus (double * data, double * dev_X_plus, double *dev_X_minus, double *dev_debug_tmpXplus, double *dev_debug_tmpXminus)
{
	int tid = threadIdx.x;
	int blockid = blockIdx.x;
	double alpha = 4.7; 	// This value will be dynamic as we launch more grids later.

	// (An unused __shared__ temp[] array that the original also zeroed has
	// been removed; only the two staging arrays below are needed.)
	__shared__ double temp_X_plus [threadsPerBlock];
	__shared__ double temp_X_minus [threadsPerBlock];
	temp_X_plus[tid] = 0;
	temp_X_minus[tid] = 0;

	double p1 = data[blockid]; 	// Code from AEPD_Fit(): double p1 = data[k];
	__syncthreads();

	// data[] only holds windowSize elements but there are threadsPerBlock
	// threads, so any tid beyond the window contributes zero.
	// fabs() replaces abs(): in device code abs() can resolve to the integer
	// overload and silently truncate the double argument.
	if (tid <= blockid){
		temp_X_minus[tid] = pow(fabs(data[tid]-p1), alpha)/windowSize;
		temp_X_plus[tid] = 0;
	}
	else if (tid >blockid && tid < windowSize){
		temp_X_minus[tid] = 0;
		temp_X_plus[tid] = pow(fabs(data[tid]-p1), alpha)/windowSize;
	}
	else {
		temp_X_minus[tid] = 0;
		temp_X_plus[tid] = 0;
	}

	__syncthreads();

	/************************************************************************
	 * Debug tap for one block id: export both staging arrays.
	 * BUG FIX: the original wrote temp_X_plus to dev_debug_tmpXplus twice;
	 * the second store now correctly fills dev_debug_tmpXminus.
	 ************************************************************************/
	if (blockid == 300 && tid < windowSize){
		dev_debug_tmpXplus [tid] = temp_X_plus[tid];
		dev_debug_tmpXminus [tid] = temp_X_minus[tid];
	}
	__syncthreads();

	/******************************************************************
	 * Tree-reduce both staging arrays; totals land in element 0 of each.
	 ******************************************************************/

	int i = blockDim.x/2;  // half the threads participate each round
	while (i != 0) {
		if (tid < i){
			temp_X_minus[tid] += temp_X_minus[tid + i];
			temp_X_plus[tid] += temp_X_plus[tid + i];
		}
		__syncthreads();
		i /= 2;
	}

	/*********************************************************************
	 * Publish this block's partial results to dev_X_plus/dev_X_minus;
	 * calculateSumXplusXminus turns them into the running sums that the
	 * host-side AEPD_Fit() expects.
	 *********************************************************************/
	if (tid == 0){
		dev_X_plus[blockid] = temp_X_plus[0];
		dev_X_minus[blockid] = temp_X_minus[0];
	}

} // end calculateXplusXminus



/***********************************************************************************************
 * calculateSumXplusXminus: accumulates the per-window values in global memory
 * dev_X_minus[] / dev_X_plus[] (windowSize elements each) into running sums.
 *
 * For block i (k = i): dev_sumXminus[i] = sum of dev_X_minus[EDGE-1 .. i]
 * and dev_sumXplus[i] the analogous sum, for blocks inside the EDGE margins.
 *
 * Purely a calculation kernel — no data needs to be copied in or out around it.
 *
 * Expected launch (see flattening below):
 *     dim3 Block (16,32);
 *     dim3 Grid (16,32);
 *     calculateSumXplusXminus <<< Grid, Block >>> (...);
 *
 * Unique IDs are flattened as:
 *     blockID = blockIdx.x*gridDim.y  + blockIdx.y
 *     tid     = threadIdx.x*blockDim.y + threadIdx.y
 *
 * Requires blockDim.x*blockDim.y == threadsPerBlock (shared buffer size) and
 * that product to be a power of two (tree reduction).
 **********************************************************************************************/



__global__ void calculateSumXplusXminus (double * dev_X_plus, double * dev_X_minus, double * dev_sumXplus, double * dev_sumXminus, double *dev_debug1)
{
	// Flatten block and thread coordinates into unique scalar IDs.
	const int blockID        = blockIdx.x * gridDim.y + blockIdx.y;
	const int tid            = blockDim.y * threadIdx.x + threadIdx.y;
	const int threadsInBlock = blockDim.x * blockDim.y;

	/*******IMPORTANT NEED TO MAKE SURE THAT threadsInBlock == threadsPerBlock********/
	__shared__ double minusBuf [threadsPerBlock];
	__shared__ double plusBuf [threadsPerBlock];

	// Zero both buffers; out-of-range threads keep these zeros.
	minusBuf[tid] = 0;
	plusBuf[tid] = 0;

	/**************************************************************************************************
	 * Edge handling mirrors AEPD_Fit():
	 * 		for(int k=EDGE-1 ; k <= N-EDGE-1; k=k+stepX)   with stepX = 1
	 *
	 * A thread contributes element tid to block blockID's running sum iff
	 * both the block and the thread lie inside the EDGE margins and
	 * tid <= blockID (running sum up to blockID).  Since the buffers were
	 * just zeroed by this same thread, only the loading branch is needed.
	 **************************************************************************************************/
	const bool blockInRange  = (blockID >= EDGE-1 && blockID <= windowSize-EDGE-1);
	const bool threadInRange = (tid <= blockID && tid >= EDGE-1 && tid <= windowSize-EDGE-1);

	if (blockInRange && threadInRange){
		minusBuf[tid] = dev_X_minus[tid];
		plusBuf[tid]  = dev_X_plus[tid];
	}
	__syncthreads();

	// Debug tap: export the staged X_plus values for blockID == 5.
	if (blockID == 5 && tid < windowSize)
		dev_debug1 [tid] = plusBuf[tid];
	__syncthreads();

	/***************************************************************************************
	 * Tree-reduce both buffers; each block's totals land in element 0.
	 **************************************************************************************/
	for (int stride = threadsInBlock / 2; stride > 0; stride /= 2) {
		if (tid < stride){
			minusBuf[tid] += minusBuf[tid + stride];
			plusBuf[tid]  += plusBuf[tid + stride];
		}
		__syncthreads();
	}

	// One thread per block publishes the running sums (guarded so blocks
	// beyond the windowSize output range write nothing).
	if (tid == 0 && blockID < windowSize){
		dev_sumXminus[blockID] = minusBuf[0];
		dev_sumXplus[blockID]  = plusBuf[0];
	}

} // end calculateSumXplusXminus


/******************************************************************************************************************
 * calculateParameters: computes Kappa, Sigma and H for every in-range index k
 * and min-reduces over H to find the best fit.  Outputs (all single-element):
 *   dev_Kappa[0], dev_Sigma[0]  — parameters at the minimal H
 *   dev_dataParam[0]            — dev_data[] value at the index of the minimal H
 *
 * Mirrors this AEPD_Fit() host loop:
 *   for (int k = EDGE-1 ; k < windowSize - EDGE - 1; k++){
 *       Kappa[k] = pow( X_minus / X_plus, 1 / (2*(alpha +1)));
 *       Sigma[k] = pow((alpha* pow( X_minus*X_plus, alpha/(2*(alpha+1)))
 *          * ( pow(X_plus, 1/(alpha+1)) + pow(X_minus, 1/(alpha+1)))), 1/alpha);
 *       H[k] = log(Sigma[k]) + locallngamma(1/alpha, &sgngam)
 *            + log(Kappa[k] + 1/Kappa[k]) + 1/alpha - log(alpha);
 *   }
 *
 * Launch: a single block with blockDim.x*blockDim.y == threadsPerBlock
 * (power of two); dev_sumXminus/dev_sumXplus hold windowSize elements.
 ******************************************************************************************************************/

__global__ void calculateParameters (double * dev_sumXminus, double * dev_sumXplus, double * dev_Kappa, double * dev_Sigma, double *dev_dataParam, double * dev_data, double * dev_lnGammaValue, double * dev_debug1, double * dev_debug2 )
{
	// Set unique ID's for all blocks, set unique ID's for all threads in blocks
	int blockID = blockIdx.x*gridDim.y+blockIdx.y;   // kept for launch-geometry parity; unused below
	int tid = blockDim.y*threadIdx.x + threadIdx.y;
	int threadsInBlock = blockDim.x*blockDim.y;

	double alpha = 4.7;
	double LnGamma = dev_lnGammaValue[0];   // locallngamma(1/alpha) precomputed on the host
	/*  IMPORTANT: need to make sure threadsInBlock = threadsPerBlock */
	__shared__ double tempKappa [threadsPerBlock];
	__shared__ double tempSigma [threadsPerBlock];
	__shared__ double tempH [threadsPerBlock];
	__shared__ int indexValue[threadsPerBlock];

	indexValue[tid] = tid;

	if (tid >=EDGE-1 && tid <= windowSize - EDGE - 1){
		// BUG FIX: dev_sumXminus/dev_sumXplus only hold windowSize entries,
		// so they are read only for in-range tids.  The original read them
		// unconditionally, so tids 500..511 accessed out of bounds.
		double X_minus = dev_sumXminus[tid];
		double X_plus = dev_sumXplus[tid];

		tempKappa[tid] = pow(X_minus / X_plus, 1/(2*(alpha+1)));
		tempSigma[tid] = pow((alpha*  pow( X_minus*X_plus, alpha/ (2*(alpha+1)))\
			* ( pow(X_plus, 1/(alpha+1)) + pow(X_minus, 1/(alpha+1)))), 1/alpha);
		if(tempKappa[tid] == 0){
			tempH[tid] = 1e10;   // degenerate fit: exclude from the minimum
		}
		else{
			tempH[tid] = log(tempSigma[tid]) + LnGamma\
					+ log(tempKappa[tid] + 1/tempKappa[tid]) + 1/alpha - log(alpha);
		}

	}
	else{
		tempKappa[tid] = 0;
		tempSigma[tid] = 0;
		// BUG FIX: the sentinel must be LARGE.  The reduction below keeps the
		// minimum H, and the original's 0 here would beat every genuine
		// (positive) H value, returning parameters from an out-of-range slot.
		tempH[tid] = 1e10;
	}

	// BUG FIX: barrier before threads read each other's shared entries; the
	// original started the reduction without it, racing with the writes above.
	__syncthreads();

	/**********************************************************************************************
	 * Min-reduction over tempH, carrying Kappa, Sigma and the winning index along.
	 * NOTE(review): NaN values of H compare false and are never replaced; the
	 * original's commented-out NaN check did not compile on this toolchain.
	 **********************************************************************************************/
	int i = threadsInBlock/2;  // half the threads participate each round
	while (i != 0) {
		if (tid < i){
			if (tempH[tid] > tempH[tid+i]){
				tempH[tid] = tempH[tid+i];
				tempKappa[tid] = tempKappa[tid+i];
				tempSigma[tid] = tempSigma[tid+i];
				indexValue[tid] = indexValue[tid+i];
			}
		}
		__syncthreads();
		i /= 2;
	}

	__syncthreads();
	// Publish the winning parameters and the data value at the winning index.
	if (tid == 0){
		dev_Kappa[0] = tempKappa[0];
		dev_Sigma[0] = tempSigma[0];
		dev_dataParam[0] = dev_data[indexValue[0]];
	}

} // end calculateParameters:

/*******************************************************************************************************************
 *
 * This is a wrapper function that will handle calls from C++ to Cuda
 * A function from another class will call this function and this function will
 * handle all the memory allocations, copies, and call the device kernels
 *
 * Specifically, this wrapper function will be used to implement and test the CUDA version of onealphaAEPD_Fit()
 * from apAEPD.cpp
 *
 ****************************************************************************************************************/
double locallngamma(double x, double* sgngam);

void testCudaAEPDWrapper (double *data, double *param){

    /****************************************************************************
     * Sort the data in ascending order, exactly as onealphaAEPD_Fit() does.
     * NOTE: the caller's array is sorted in place (unchanged behaviour).
     ****************************************************************************/
    double tmp;
    for (int i = 0; i < windowSize-1; i++){
        for (int j = i + 1; j < windowSize; j++){
            if (data[i] > data[j]){
                tmp = data[i];
                data[i] = data[j];
                data[j] = tmp;
            }
        }
    }
    // Finished Sorting.

    /****************************************************************************
     * Device buffers: the raw data plus the per-k X_minus / X_plus results.
     * All are windowSize doubles.
     ****************************************************************************/
    double *dev_data, *dev_X_minus, *dev_X_plus;
    cudaMalloc( (void **)&dev_data, sizeof(double) * windowSize );
    cudaMalloc( (void **)&dev_X_minus, sizeof(double) * windowSize );
    cudaMalloc( (void **)&dev_X_plus, sizeof(double) * windowSize );

    // Copy the (sorted) data to the device.
    cudaMemcpy( dev_data, data, sizeof(double) * windowSize, cudaMemcpyHostToDevice );

    // Per-point debug outputs of calculateXplusXminus.
    double *dev_debug_tmpXplus, *dev_debug_tmpXminus;
    cudaMalloc( (void **)&dev_debug_tmpXplus, sizeof(double) * windowSize);
    cudaMalloc( (void **)&dev_debug_tmpXminus, sizeof(double) * windowSize);

    /********************************************************************************************
     * Launch windowSize blocks of threadsPerBlock threads to calculate X_minus and X_plus.
     * For each of the windowSize blocks there are threadsPerBlock threads, but data[] is only
     * windowSize points long, so the device code must check these boundary conditions.
     ********************************************************************************************/
    calculateXplusXminus<<< windowSize, threadsPerBlock >>>( dev_data,  dev_X_plus, dev_X_minus, dev_debug_tmpXplus, dev_debug_tmpXminus);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        cout << "calculateXplusXminus launch failed: " << cudaGetErrorString(err) << endl;

    // Host copies of the per-k results, kept available for debugging printouts.
    double *debugXplus = (double *) malloc(windowSize * sizeof(double));
    double *debugXminus = (double *) malloc(windowSize * sizeof(double));
    cudaMemcpy( debugXplus, dev_X_plus, sizeof(double) * windowSize, cudaMemcpyDeviceToHost );
    cudaMemcpy( debugXminus, dev_X_minus, sizeof(double) * windowSize, cudaMemcpyDeviceToHost );

    /***********************************************************************************************
     * calculateSumXplusXminus: each unique block ID i represents one k and sums
     * dev_X_minus[0..i] / dev_X_plus[0..i] into dev_sumXminus[i] / dev_sumXplus[i].
     * Pure device-side computation launched as a 16x32 grid of 16x32 blocks;
     * no host copies are required.
     **********************************************************************************************/
    double *dev_debug1, *dev_debug2, *dev_sumXplus, *dev_sumXminus;
    cudaMalloc( (void **)&dev_debug1, sizeof(double) * windowSize );
    cudaMalloc( (void **)&dev_debug2, sizeof(double) * windowSize );
    cudaMalloc( (void **)&dev_sumXplus, sizeof(double) * windowSize );
    cudaMalloc( (void **)&dev_sumXminus, sizeof(double) * windowSize );

    dim3 Block (16,32);
    dim3 Grid (16,32);
    calculateSumXplusXminus <<<Grid, Block >>> (dev_X_plus, dev_X_minus, dev_sumXplus, dev_sumXminus, dev_debug1);
    err = cudaGetLastError();
    if (err != cudaSuccess)
        cout << "calculateSumXplusXminus launch failed: " << cudaGetErrorString(err) << endl;

    /******************************************************************************************************
     * calculateParameters: computes Kappa[k], Sigma[k], and H[k] per k from dev_sumXminus /
     * dev_sumXplus, reduces to the k that minimizes H, and writes the winning Kappa, Sigma,
     * and data point to single-value device buffers.
     * lnGamma(1/alpha) is loop-invariant, so it is computed once on the host and copied in.
     * Launched with Grid(1,1), Block(16,32).
     *****************************************************************************************************/
    double *dev_Kappa, *dev_Sigma, *dev_dataParam, *dev_lnGammaValue;
    cudaMalloc( (void **)&dev_Kappa, sizeof(double) );
    cudaMalloc( (void **)&dev_Sigma, sizeof(double));
    cudaMalloc( (void **)&dev_dataParam, sizeof(double));
    cudaMalloc( (void **)&dev_lnGammaValue, sizeof(double));

    double sgngam = 0;
    double alpha = 4.7;
    double lnGammaValue = locallngamma(1/alpha, &sgngam);
    cudaMemcpy( dev_lnGammaValue, &lnGammaValue, sizeof(double), cudaMemcpyHostToDevice );

    dim3 Block1(16,32);
    dim3 Grid1(1,1);
    calculateParameters <<<Grid1, Block1>>> (dev_sumXminus, dev_sumXplus, dev_Kappa, dev_Sigma, dev_dataParam, dev_data, dev_lnGammaValue, dev_debug1, dev_debug2 );
    err = cudaGetLastError();
    if (err != cudaSuccess)
        cout << "calculateParameters launch failed: " << cudaGetErrorString(err) << endl;

    // Debug copies of the per-k values produced by calculateParameters.
    double *debug1 = (double *) malloc(windowSize * sizeof(double));
    double *debug2 = (double *) malloc(windowSize * sizeof(double));
    cudaMemcpy( debug1, dev_debug1, sizeof(double) * windowSize, cudaMemcpyDeviceToHost );

    /*****************************************************
     * 	params[0] = data[idx_k]; // theta
     *  params[1] = Sigma[idx_k]; // sigma
     *  params[2] = Kappa[idx_k]; // kappa
     *  params[3] = alpha; // alpha
     ******************************************************/
    double dataParam = 0, sigmaParam = 0, kappaParam = 0;
    cudaMemcpy( &dataParam, dev_dataParam, sizeof(double), cudaMemcpyDeviceToHost );
    cudaMemcpy( &sigmaParam, dev_Sigma, sizeof(double), cudaMemcpyDeviceToHost );
    cudaMemcpy( &kappaParam, dev_Kappa, sizeof(double), cudaMemcpyDeviceToHost );

    cout << "Param 0 = " << dataParam << endl;
    cout << "Param 1 = " << sigmaParam << endl;
    cout << "Param 2 = " << kappaParam << endl;

    // BUGFIX: the caller's param[] output was never filled in; populate it in
    // the same layout used by onealphaAEPD_Fit().
    if (param != NULL){
        param[0] = dataParam;  // theta
        param[1] = sigmaParam; // sigma
        param[2] = kappaParam; // kappa
        param[3] = alpha;      // alpha
    }

    // Release all device and host memory (the debug host buffers and the
    // LogGamma buffer leaked in the previous version).
    cudaFree( dev_Kappa );
    cudaFree( dev_Sigma );
    cudaFree( dev_dataParam );
    cudaFree( dev_lnGammaValue );
    cudaFree( dev_debug1 );
    cudaFree( dev_debug2 );
    cudaFree( dev_data );
    cudaFree( dev_X_minus );
    cudaFree( dev_X_plus );
    cudaFree( dev_debug_tmpXplus );
    cudaFree( dev_debug_tmpXminus );
    cudaFree( dev_sumXplus );
    cudaFree( dev_sumXminus );
    free(debugXplus);
    free(debugXminus);
    free(debug1);
    free(debug2);
    return;
}

/***************************************************************
 * onealphaAEPD_Fit calculates the best parameters for one fixed value of alpha
 * It is written to aid in testing of the CUDA code.
 ***************************************************************/

void onealphaAEPD_Fit(double *x, int N, double *params, double alpha );


/***************************************************************
 * Test driver: runs the CUDA implementation and then the C++
 * reference implementation on the same data for comparison.
 * Returns 0 on success, 1 on input-file errors.
 ***************************************************************/
int main(void){
	/********************************************************************
	 * First build test data for the basic CUDA copy test
	 * (the testCudaCopyWrapper() call is currently disabled).
	 *******************************************************************/
	double *data = (double *) malloc(512 * sizeof(double));
	double *manipulatedData = (double *) malloc(512 * sizeof(double));
	for (int i = 0; i < 512; i++){
		data[i] = i * 0.01 + 0.1;
	}
	//testCudaCopyWrapper(data, manipulatedData);

	/******************************************************************
	 * Now test onealphaAEPD_Fit: read in the data array (windowSize
	 * points) and pass it to the CUDA wrapper.
	 *****************************************************************/
	double *dataArray = (double *) malloc(windowSize * sizeof(double));
	double *param = (double *) malloc(4 * sizeof(double));

	fstream myfile("gbpusd.txt", ios::in);
	if (!myfile.is_open()){
		cout << "Error opening gbpusd text file " << endl;
		// BUGFIX: release host memory on the error path (previously leaked).
		free(data);
		free(manipulatedData);
		free(dataArray);
		free(param);
		return 1;
	}

	double d = 0;
	int index = 0;
	// BUGFIX: stop on a failed extraction instead of silently filling the
	// remainder of dataArray with the last value read.
	while (index < windowSize && (myfile >> d)){
		dataArray[index++] = d;
	}
	myfile.close();
	if (index < windowSize){
		cout << "Error: gbpusd.txt contains fewer than " << windowSize << " values" << endl;
		free(data);
		free(manipulatedData);
		free(dataArray);
		free(param);
		return 1;
	}
	// Finished reading and assigning data.

	/*********************************************************************
	 * Call Cuda version of onealphaAEPDFit();
	 *********************************************************************/
	testCudaAEPDWrapper (dataArray, param);

	/*********************************************************************
	 * Call C++ version of onealphaAEPDFit(), which prints results in the
	 * same format as the Cuda version for comparison.  It is given the
	 * same constant alpha used inside the CUDA wrapper.
	 *******************************************************************/
	onealphaAEPD_Fit(dataArray, windowSize, param, 4.7);

	// BUGFIX: release all host memory (previously leaked).
	free(data);
	free(manipulatedData);
	free(dataArray);
	free(param);
	return 0;
}



/***************************************************************
 * onealphaAEPD_Fit calculates the best parameters for one fixed value of alpha
 * It is written to aid in testing of the CUDA code.
 ***************************************************************/


/***************************************************************
 * onealphaAEPD_Fit calculates the best parameters for one fixed value
 * of alpha.  It is written to aid in testing of the CUDA code.
 *
 *   x      : input samples (copied internally; caller's array untouched)
 *   N      : number of samples
 *   params : output — [0]=theta, [1]=sigma, [2]=kappa, [3]=alpha
 *   alpha  : fixed shape parameter
 ***************************************************************/
void onealphaAEPD_Fit(double *x, int N, double * params, double alpha){
	// Work on a sorted (ascending) copy of the input.
	vector<double> data(x, x+N);
	double tmp;
	for(int i = 0; i < N-1; i++){
		for (int j = i + 1; j < N; j++){
			if (data[i] > data[j]){
				tmp  = data[i];
				data[i] = data[j];
				data[j] = tmp;
			}
		}
	}

	int NUM_STEPS = 500;

	double * Kappa = new double[N];
	double * Sigma = new double[N];
	double * H = new double[N];

	// NOTE(review): X_minus / X_plus are never reset per k, so each iteration
	// adds onto the running totals.  This matches the cumulative-sum design
	// described for the CUDA calculateSumXplusXminus kernel, but confirm it is
	// the intended estimator (a per-k estimate would reset these inside the loop).
	double X_minus = 0;
	double X_plus = 0;

	int idx_k = 0;
	double min_H = 10000000000;
	int stepX = N/NUM_STEPS;
	if (stepX < 1)
		stepX = 1;

	// lnGamma(1/alpha) does not depend on k; compute it once outside the loop.
	double sgngam = 0;
	double lnGammaOverAlpha = locallngamma(1/alpha, &sgngam);

	for(int k=EDGE-1 ; k <= N-EDGE-1; k=k+stepX)
	{
		double p1 = data[k];

		/* data[] is sorted, so entries below index k lie at or below p1 and
		 * entries at or above k lie at or above p1.  Accumulate the alpha-th
		 * power of the distances on each side directly — the x_minus_tmp /
		 * x_plus_tmp temporaries (and their per-iteration heap allocations)
		 * were unnecessary; the summation order is unchanged. */
		for (int q = 0; q < N; q++){
			if (q < k)
				// BUGFIX: fabs instead of abs — abs can bind to the integer
				// overload and truncate the fractional distance to zero.
				X_minus += pow(fabs(data[q] - p1), alpha) / N;
			else
				X_plus += pow(data[q] - p1, alpha) / N;
		}

		Kappa[k] = pow( X_minus / X_plus, 1 / (2*(alpha +1)));

		Sigma[k] = pow((alpha*  pow( X_minus*X_plus, alpha/ (2*(alpha+1)))
			* ( pow(X_plus, 1/(alpha+1)) + pow(X_minus, 1/(alpha+1)))), 1/alpha);

		H[k] = log(Sigma[k]) + lnGammaOverAlpha
			+ log(Kappa[k] + 1/Kappa[k]) + 1/alpha - log(alpha);

		// Track the k that minimizes H.
		if (H[k] < min_H){
			min_H = H[k];
			idx_k = k;
		}
	}

	params[0] = data[idx_k]; // theta
	params[1] = Sigma[idx_k]; // sigma
	params[2] = Kappa[idx_k]; // kappa
	params[3] = alpha; // alpha
    cout << "Param 0 = " << params[0] << endl;
    cout << "Param 1 = " << params[1] << endl;
    cout << "Param 2 = " << params[2] << endl;
	delete [] Kappa;
	delete [] Sigma;
	delete [] H;
	return;
}
/*************************************************************************************
 * Log- Gamma function adapted from Alglib project
 *
 * Copyright and comments from original:
 ***************************************************************************************/

/*
 * Natural log of |Gamma(x)|, adapted from the Alglib project (Cephes-style
 * rational approximations).  On return *sgngam holds the sign (+1 or -1)
 * of Gamma(x).  Three regimes are handled:
 *   x < -34 : reflection formula, recursing on -x
 *   x < 13  : shift the argument into [2,3), then a rational approximation
 *   x >= 13 : Stirling-series expansion
 */
double locallngamma(double x, double* sgngam)
{
    const double logpi = 1.14472988584940017414;   // ln(pi)
    const double ls2pi = 0.91893853320467274178;   // ln(sqrt(2*pi))
    // Local pi constant so the routine is self-contained.
    const double localpi = 3.1415926535897932384626433832795;

    *sgngam = 1;

    if( x<-34.0 )
    {
        // Reflection formula: Gamma(x)*Gamma(1-x) = pi/sin(pi*x).
        double q = -x;
        double scratch;
        double w = locallngamma(q, &scratch);
        double p = floor(q);
        int parity = floor(p+0.5);
        *sgngam = (parity%2==0) ? -1 : 1;
        double z = q-p;
        if( z>0.5 )
        {
            p = p+1;
            z = p-q;
        }
        z = q*sin(localpi*z);
        return logpi-log(z)-w;
    }

    if( x<13 )
    {
        // Shift the argument into [2,3), tracking the product of the
        // factors moved across in z.
        double z = 1;
        double shift = 0;
        double u = x;
        while(u>=3)
        {
            shift = shift-1;
            u = x+shift;
            z = z*u;
        }
        while(u<2)
        {
            z = z/u;
            shift = shift+1;
            u = x+shift;
        }
        if( z<0 )
        {
            *sgngam = -1;
            z = -z;
        }
        else
        {
            *sgngam = 1;
        }
        if( u==2 )
            return log(z);
        shift = shift-2;
        x = x+shift;
        // Rational approximation on [2,3): numerator and denominator
        // polynomials evaluated by Horner's rule.
        double b = -1378.25152569120859100;
        b = -38801.6315134637840924+x*b;
        b = -331612.992738871184744+x*b;
        b = -1162370.97492762307383+x*b;
        b = -1721737.00820839662146+x*b;
        b = -853555.664245765465627+x*b;
        double c = 1;
        c = -351.815701436523470549+x*c;
        c = -17064.2106651881159223+x*c;
        c = -220528.590553854454839+x*c;
        c = -1139334.44367982507207+x*c;
        c = -2532523.07177582951285+x*c;
        c = -2018891.41433532773231+x*c;
        return log(z)+x*b/c;
    }

    // Stirling's approximation for x >= 13.
    double q = (x-0.5)*log(x)-x+ls2pi;
    if( x>100000000 )
        return q;
    double p = 1/(x*x);
    if( x>=1000.0 )
    {
        q = q+((7.9365079365079365079365*0.0001*p-2.7777777777777777777778*0.001)*p+0.0833333333333333333333)/x;
    }
    else
    {
        double a = 8.11614167470508450300*0.0001;
        a = -5.95061904284301438324*0.0001+p*a;
        a = 7.93650340457716943945*0.0001+p*a;
        a = -2.77777777730099687205*0.001+p*a;
        a = 8.33333333333331927722*0.01+p*a;
        q = q+a/x;
    }
    return q;
}
