#ifndef __FIRSTORDER_H__
#define __FIRSTORDER_H__

#include "reduce.h"
#include "framework.h"
#include "kernel.h"
#include <float.h>
#include "cuPrintf.cu"

// Integer ceiling division: smallest k with k*b >= a (for positive a, b).
// Arguments and the full expansion are parenthesized so the macro is safe
// to call with compound expressions, e.g. intRoundUp(n + 1, BLOCKSIZE).
#define intRoundUp(a, b) (((a) % (b) != 0) ? ((a) / (b) + 1) : ((a) / (b)))

// Phase one of a first-order SMO working-set selection step.
//
// Launch shape: one thread per training example; callers must launch
// enough blocks to cover n_obs (no grid-stride loop — the tail threads
// with globalIndex >= n_obs only contribute sentinel values below).
// Dynamic shared memory requirement: 2 * n_dim * sizeof(float)
// (xIHighLow[0..n_dim) = x_iHigh, xIHighLow[n_dim..2*n_dim) = x_iLow).
//
// Each in-range thread:
//   1. updates its optimality indicator f_i in devF using the alpha deltas
//      from the previous step and the kernel evaluations against x_iHigh
//      and x_iLow, then
//   2. joins a per-block argmax over the iLow-eligible set and a per-block
//      argmin over the iHigh-eligible set; block winners are written to
//      devLocalIndicesRL/devLocalFsRL and devLocalIndicesRH/devLocalFsRH
//      (one entry per block) for phaseTwo to reduce globally.
//
// NOTE(review): the iLowCompute/iHighCompute template flags gate only the
// shared-memory loads — the kernel-cache read/write paths are commented
// out and both kernel() calls below read BOTH shared vectors
// unconditionally. Only the <true,true> instantiation is therefore safe
// to launch; any other instantiation would read uninitialized shared
// memory. TODO confirm before re-enabling the cache paths.
template<bool iLowCompute, bool iHighCompute>
__global__ void	firstOrderPhaseOne(float* devData, int devDataPitchSize,  float* devLabels, int n_obs, int n_dim, float epsilon, float cost_epsilon, float* devAlpha, float* devF, float alpha1Diff, float alpha2Diff, int iLow, int iHigh, float* devCache, int devCachePitchInFloats, int iLowCacheIndex, int iHighCacheIndex, int* devLocalIndicesRL, int* devLocalIndicesRH, float* devLocalFsRL, float* devLocalFsRH) {

    //CHECK EVERYTHING IN HERE
    // Debug tracing via cuPrintf — serialized and very slow; every thread
    // prints, so this is for debugging runs only.
    cuPrintf("devData: %f %f %f %f\n", devData[0], devData[1], devData[2],
                                     devData[devDataPitchSize+1]);
    cuPrintf("devDataPitchSize: %i", devDataPitchSize);
    cuPrintf("devLabels: %f %f %f %f\n", devLabels[1], devLabels[2], devLabels[3],
                                         devLabels[4]);
    cuPrintf("devAlpha: %f %f %f\n", devAlpha[0], devAlpha[1], devAlpha[2]);
    cuPrintf("devF: %f %f %f\n", devF[0], devF[1], devF[2]);
    cuPrintf("alpha1Diff: %f\n", alpha1Diff);
    cuPrintf("alpha2Diff: %f\n", alpha2Diff);
    cuPrintf("iLow: %i\n", iLow);
    cuPrintf("iHigh: %i\n", iHigh);

    //xIHighLow size = 2*n_dim
    // Dynamic shared memory; size (2 * n_dim * sizeof(float)) is supplied
    // by the launcher as the third launch-configuration argument.
    extern __shared__ float xIHighLow[];
	__shared__ int tempLocalIndices[BLOCKSIZE];
	__shared__ float tempLocalFs[BLOCKSIZE];
  
	if (iHighCompute) {
		//Load xIHigh into shared memory
		// NOTE(review): single-thread sequential copy — correct (the barrier
		// below covers it) but serial; could be strided across the block.
		if(threadIdx.x==0)
		{
			for(int i =0; i< n_dim; i++)
				xIHighLow[i] = devData[iHigh*devDataPitchSize + i];
		}
	}

	if (iLowCompute) {
		//Load xILow into shared memory
		if(threadIdx.x==0)
		{
			for(int i=0; i<n_dim; i++)
				xIHighLow[n_dim+i]=devData[iLow*devDataPitchSize + i];
		}
	}
	
	// All threads wait for the shared vectors before any kernel() call.
	__syncthreads();

	int globalIndex = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;

	float alpha;
	float f;
	float label;
	// Which reductions this thread participates in (Keerthi-style index
	// sets): 0 -> iHigh candidate only, 1 -> iLow candidate only,
	// 2 -> both, -1 -> out of range (past n_obs), participates in neither.
	int reduceFlag; //ihigh =0, ilow=1, both=2, none=-1
  
	if (globalIndex < n_obs) {

		alpha = devAlpha[globalIndex];
		f = devF[globalIndex];
		label = devLabels[globalIndex];
    
    
		// Classify this example by its alpha value and label.
		// NOTE(review): epsilon / cost_epsilon appear to be the tolerance
		// and (C - tolerance) thresholds for "unbound" — confirm at caller.
		if (alpha > epsilon) {
			if (alpha < cost_epsilon) {
				reduceFlag = 2; //Unbound support vector (I0)
			} else {
				if (label > 0) {
					reduceFlag = 1; //Bound positive support vector (I3)
				} else {
					reduceFlag = 0; //Bound negative support vector (I2)
				}
			}
		} else {
			if (label > 0) {
				reduceFlag = 0; //Positive nonsupport vector (I1)
			} else {
				reduceFlag = 1; //Negative nonsupport vector (I4)
			}
		}
	} else {
		reduceFlag = -1;
	}

  
	float highKernel = 0;
	float lowKernel = 0;
	if (reduceFlag >= 0) {
//		if (!iHighCompute) {
//			highKernel = devCache[(devCachePitchInFloats * iHighCacheIndex) + globalIndex];
//		}
//		if (!iLowCompute) {
//			lowKernel = devCache[(devCachePitchInFloats * iLowCacheIndex) + globalIndex];
//		}
//		if (iHighCompute && iLowCompute) {
//			dualKernel(devData + globalIndex*devDataPitchSize, xIHighLow, xIHighLow + n_dim ,n_dim, highKernel, lowKernel);
//		} else if (iHighCompute) {
			// Both shared vectors are read unconditionally here — see the
			// header note about why only <true,true> is currently safe.
			highKernel = kernel(devData + globalIndex * devDataPitchSize, xIHighLow, n_dim);
//		} else if (iLowCompute) {
			lowKernel = kernel(devData + globalIndex * devDataPitchSize,  xIHighLow + n_dim, n_dim);
//		}

		//highKernel = phi(x_i, x_high);
		//lowKernel = phi(x_i, x_low);
		//Update the f_i
		// f_i += dAlpha_high * K(x_i, x_high) + dAlpha_low * K(x_i, x_low)
		f = f + alpha1Diff * highKernel;
		f = f + alpha2Diff * lowKernel;
		
		//update the cache
//		if (iLowCompute)  devCache[(devCachePitchInFloats * iLowCacheIndex) + globalIndex] = lowKernel;
//		if (iHighCompute) devCache[(devCachePitchInFloats * iHighCacheIndex) + globalIndex] = highKernel;
	
		devF[globalIndex] = f;
		
	}
	__syncthreads();

	//Ilow
	// Block-wide argmax of f over iLow-eligible threads. Ineligible
	// threads post -FLT_MAX so they never win; their tempLocalIndices
	// entry is left unwritten — presumably argmaxReduce only propagates an
	// index together with its winning f value (verify in reduce.h).
	if ((reduceFlag==1) || (reduceFlag==2)) {		
		tempLocalFs[threadIdx.x] = f;
		tempLocalIndices[threadIdx.x] = globalIndex;
	} else {
		tempLocalFs[threadIdx.x] = -FLT_MAX; //Ignore me
	}

	__syncthreads();
  
	argmaxReduce(tempLocalFs, tempLocalIndices);

	__syncthreads();
  
  
	// Thread 0 publishes this block's iLow candidate for phase two.
	if (threadIdx.x == 0) {
		devLocalIndicesRL[blockIdx.x] = tempLocalIndices[0];
		devLocalFsRL[blockIdx.x] = tempLocalFs[0];
	}
 
	// Barrier before the shared arrays are reused for the argmin pass.
	__syncthreads();

	//Ihigh
	// Block-wide argmin of f over iHigh-eligible threads (sentinel FLT_MAX
	// for the rest), mirroring the argmax pass above.
	if ((reduceFlag ==0)|| (reduceFlag ==2)) {
		tempLocalFs[threadIdx.x] = f;
		tempLocalIndices[threadIdx.x] = globalIndex;
	} else {
		tempLocalFs[threadIdx.x] = FLT_MAX; //Ignore me
	}
	__syncthreads();
   
	argminReduce(tempLocalFs, tempLocalIndices);
	// Thread 0 publishes this block's iHigh candidate for phase two.
	if (threadIdx.x == 0) {
		devLocalIndicesRH[blockIdx.x] = tempLocalIndices[0];
		devLocalFsRH[blockIdx.x] = tempLocalFs[0];
	}
}


// Phase two of a first-order SMO step. Must be launched as a SINGLE block
// so one block sees all inputSize per-block candidates from phase one.
//
// Reduces the per-block (index, f) candidates to the global iHigh
// (argmin over devLocalFsRH) and iLow (argmax over devLocalFsRL), then
// thread 0 performs the analytic two-variable update: computes
// eta = K(hh) + K(ll) - 2*K(hl), takes the Newton step on alpha[iLow],
// clamps both alphas to [0, cost], writes them back to devAlpha, and
// records the step in devResult:
//   [0] old alpha_low   [1] old alpha_high
//   [2] bLow            [3] bHigh
//   [4] new alpha_low   [5] new alpha_high
//   [6] iLow (as float) [7] iHigh (as float)
//
// NOTE(review): the shared arrays are sized BLOCKSIZE and the overflow
// loops start at threadIdx.x + BLOCKSIZE, so this assumes
// blockDim.x == BLOCKSIZE — confirm the launcher's globalThreads.
// NOTE(review): eta is used as a divisor with no zero guard; eta == 0
// (e.g. duplicate points) would produce inf/nan alphas — verify upstream.
__global__ void firstOrderPhaseTwo(float* devData, int devDataPitchSize, float* devLabels, float* devKernelDiag, float* devAlpha, float* devResult, float cost, int n_dim, int* devLocalIndicesRL, int* devLocalIndicesRH, float* devLocalFsRL, float* devLocalFsRH, int inputSize) {
  
  __shared__ int tempIndices[BLOCKSIZE];
  __shared__ float tempFs[BLOCKSIZE];
  
  // Per-thread registers; only thread 0's copies are ever assigned/read.
  // iHigh/bHigh survive in thread 0's registers across the reuse of the
  // shared arrays for the second reduction below.
  int iHigh;
  int iLow;
  float bHigh;
  float bLow;
  
  //Load elements
  // Threads past inputSize post a FLT_MAX sentinel (their tempIndices
  // entry stays unwritten and is only meaningful if all entries are
  // sentinels — degenerate case).
  if (threadIdx.x < inputSize) {
    tempIndices[threadIdx.x] = devLocalIndicesRH[threadIdx.x];
    tempFs[threadIdx.x] = devLocalFsRH[threadIdx.x];
  } else {
    tempFs[threadIdx.x] = FLT_MAX;
  }

  // Fold any candidates beyond one block's worth into the shared arrays
  // before the tree reduction.
  if (inputSize > BLOCKSIZE) {
    for (int i = threadIdx.x + BLOCKSIZE; i < inputSize; i += blockDim.x) {
      argMin(tempIndices[threadIdx.x], tempFs[threadIdx.x], devLocalIndicesRH[i], devLocalFsRH[i], tempIndices + threadIdx.x, tempFs + threadIdx.x);
    }
  }
  __syncthreads();

//  if (threadIdx.x==0) {
//    for (int i=0; i<inputSize; i++) {
//    cuPrintf("tempFs[%i] = %f\n", i, tempFs[i]);
//    }
//  }
  

  argminReduce(tempFs, tempIndices);

  __syncthreads();
  // Capture the global iHigh / bHigh in thread 0's registers before the
  // shared arrays are overwritten by the iLow pass.
  if(threadIdx.x==0){
	  iHigh = tempIndices[0];
	  bHigh = tempFs[0];
  }
  
  __syncthreads();

  //Load elements
  // Same pattern for the iLow side, with -FLT_MAX sentinels for argmax.
  if (threadIdx.x < inputSize) {
    tempIndices[threadIdx.x] = devLocalIndicesRL[threadIdx.x];
    tempFs[threadIdx.x] = devLocalFsRL[threadIdx.x];
  } else {
    tempFs[threadIdx.x] = -FLT_MAX;
  }


  //blockDim.x==number per block
  if (inputSize > BLOCKSIZE) {
    for (int i = threadIdx.x + BLOCKSIZE; i < inputSize; i += blockDim.x) {
      argMax(tempIndices[threadIdx.x], tempFs[threadIdx.x], devLocalIndicesRL[i], devLocalFsRL[i], tempIndices + threadIdx.x, tempFs + threadIdx.x);
    }
  }
  __syncthreads();
  
  argmaxReduce(tempFs, tempIndices);
  
  __syncthreads();
  
  // Thread 0 performs the serial SMO update for the chosen pair.
  if (threadIdx.x == 0) {
    
	iLow = tempIndices[0];
	bLow = tempFs[0];
  
    // eta = K(x_h,x_h) + K(x_l,x_l) - 2*K(x_h,x_l)
    float eta = devKernelDiag[iHigh] + devKernelDiag[iLow];
      
    float kernelEval = kernel(devData+iHigh*devDataPitchSize, devData + iLow *devDataPitchSize, n_dim);
    eta = eta - 2*kernelEval;
      
    float alpha1Old = devAlpha[iHigh];
    float alpha2Old = devAlpha[iLow];

	// Float labels truncated to int — assumes labels are exactly +/-1;
	// TODO confirm against the data-loading code.
	int ylow = devLabels[iLow];
	int yhigh = devLabels[iHigh];

    // Newton step on alpha_low, clamped to the box [0, cost].
    float alpha2New = alpha2Old + ylow*(bHigh - bLow) / eta;

	if(alpha2New<0) alpha2New =0;
	if(alpha2New>cost) alpha2New = cost;

	// Preserve the linear constraint: alpha_high moves by
	// y_low*y_high times the (clipped) change in alpha_low.
	float alpha1New = alpha1Old + ylow * yhigh * (alpha2Old - alpha2New);
	if(alpha1New<0) alpha1New =0;
	if(alpha1New>cost) alpha1New = cost;

    // Publish the step summary for the host (layout in header comment).
    *(devResult + 0) = alpha2Old;
    *(devResult + 1) = alpha1Old;
    *(devResult + 2) = bLow;
    *(devResult + 3) = bHigh;
    devAlpha[iLow] = alpha2New;
    devAlpha[iHigh] = alpha1New;
    *(devResult + 4) = alpha2New;
    *(devResult + 5) = alpha1New;
    *(devResult + 6) = float(iLow);
    *(devResult + 7) = float(iHigh);
  }
  
}


// Host-side driver for one first-order SMO optimization step.
//
// Phase one runs `blocks` x `threads` (one thread per training example),
// updates the optimality indicators devF, and emits one (index, f)
// candidate pair per block into devLocalIndicesRL/RH and devLocalFsRL/RH.
// Phase two runs as a SINGLE block of `globalThreads` threads so it can
// reduce all blocks.x candidates to the global (iLow, iHigh) pair, take
// the analytic two-variable step on devAlpha, and write the step summary
// (old/new alphas, bLow/bHigh, chosen indices) into devResult[0..7],
// which the host reads back for convergence checking.
//
// NOTE(review): iLowCompute / iHighCompute are accepted for interface
// compatibility but deliberately ignored — the kernel-cache paths inside
// firstOrderPhaseOne are commented out, so both kernel rows are always
// recomputed and only the <true,true> instantiation loads the two
// shared-memory vectors the kernel unconditionally reads. Dispatching on
// the flags here would launch kernels that read uninitialized shared
// memory. devCache and the cache indices are forwarded untouched.
void launchFirstOrder(bool iLowCompute, bool iHighCompute, int n_obs, int n_dim, dim3 blocks, dim3 threads, dim3 globalThreads, float* devData, int devDataPitchSize, float* devLabels, float epsilon, float cost_epsilon, float* devAlpha, float* devF, float sAlpha1Diff, float sAlpha2Diff, int iLow, int iHigh, float* devCache, int devCachePitchInFloats, int iLowCacheIndex, int iHighCacheIndex, int* devLocalIndicesRL, int* devLocalIndicesRH, float* devLocalFsRH, float* devLocalFsRL, float* devKernelDiag, float* devResult, float cost) {

  // Dynamic shared memory for phase one: x_iHigh followed by x_iLow.
  int phaseOneSize = 2 * n_dim * sizeof(float);

  firstOrderPhaseOne<true,true><<<blocks, threads, phaseOneSize>>>(devData, devDataPitchSize, devLabels, n_obs, n_dim, epsilon, cost_epsilon, devAlpha, devF, sAlpha1Diff, sAlpha2Diff, iLow, iHigh, devCache, devCachePitchInFloats, iLowCacheIndex, iHighCacheIndex, devLocalIndicesRL, devLocalIndicesRH, devLocalFsRL, devLocalFsRH);
  // Kernel launches fail silently; surface bad launch configurations now.
  CUDA_CHECK_ERROR(cudaGetLastError());

  // Phase two: one block reduces the blocks.x per-block candidates.
  firstOrderPhaseTwo<<<1, globalThreads>>>(devData, devDataPitchSize,
           devLabels, devKernelDiag, devAlpha, devResult,
          cost, n_dim, devLocalIndicesRL, devLocalIndicesRH, devLocalFsRL,
          devLocalFsRH, blocks.x);
  CUDA_CHECK_ERROR(cudaGetLastError());
}
#endif
