#ifndef _SVM_KERNELS_H_
#define _SVM_KERNELS_H_

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <float.h>
#include <math.h>
#include "inc/cuPrintf.cu"

#define BLOCK_SIZE 256
#define MB_LEAVE  100

///////////////////////DATA AND MODEL I/O ///////////////////////////////////////
// Shared line buffer used by read_line()/read_data().  read_data() must
// allocate it (and set maxLineLen) before the first read_line() call.
static char *line = NULL;
static int maxLineLen;

// Reads one '\n'-terminated line from 'input' into the global 'line'
// buffer, doubling the buffer until the whole line fits.
// Returns 'line' on success, NULL on EOF/error before any data was read.
static char* read_line(FILE *input) {
    int len;
    if(fgets(line, maxLineLen, input) == NULL)
        return NULL;
    while(strrchr(line,'\n') == NULL) {
        maxLineLen *= 2;
        char *grown = (char*)realloc(line, maxLineLen);
        if(grown == NULL) {
            // Out of memory: keep the old buffer (don't leak it), restore
            // its true size, and hand back the partial line we do have.
            maxLineLen /= 2;
            return line;
        }
        line = grown;
        len = (int)strlen(line);
        if(fgets(line+len, maxLineLen-len, input) == NULL)
            break;
    }
    return line;
}

// Loads a LIBSVM-format sparse training file where each line reads
//   <label> <index>:<value> <index>:<value> ...
// with 1-based feature indices.  Two passes: the first counts observations
// (nObs) and finds the largest feature index (nDim); the second fills a
// dense row-major nObs x nDim feature matrix and a label vector.
// On success, *p_features / *p_labels receive heap buffers (caller frees)
// and *p_nObs / *p_nDim the dimensions.  Returns 1 on success, 0 on failure.
int read_data(const char *filename, float **p_features, float **p_labels, int *p_nObs, int *p_nDim) {
    
    int nObs, nDim, instMaxIdx, index;
    float value;
    char *endptr;
    char *idx, *val, *label, *p;

    FILE *inputFile = fopen(filename, "r");
    if (inputFile == 0) {
        printf("File not found!\n");
        return 0;
    }
    
    nObs=0; nDim=0; instMaxIdx=0; maxLineLen=1024;
    line = (char*)malloc(maxLineLen*sizeof(char));
    if (line == NULL) {
        fclose(inputFile);
        return 0;
    }

    //pass 1: determine number of training obs and number of features
    while(read_line(inputFile)!=NULL) {
        p = strtok(line," \t");
        while(1)
        {
            idx = strtok(NULL,":");
            p = strtok(NULL," \t");
            if (p==NULL || *p=='\n') break;
            instMaxIdx = (int)strtol(idx, &endptr, 10);
            if (instMaxIdx > nDim) nDim = instMaxIdx;
        }
        ++nObs;
    }

    //pass 2: read the data into memory
    rewind(inputFile);
    *(p_nObs) = nObs;
    *(p_nDim) = nDim;
    //calloc, not malloc: the file format is sparse, so any feature absent
    //from a line must read back as 0 rather than uninitialized heap garbage
    float *features = (float*)calloc((size_t)nObs*nDim, sizeof(float));
    float *labels = (float*)malloc(sizeof(float)*nObs);
    if (features == NULL || labels == NULL) {
        free(features);
        free(labels);
        free(line);
        line = NULL;
        fclose(inputFile);
        return 0;
    }
    *(p_features) = features;
    *(p_labels) = labels;
    for (int i=0; i<nObs; i++) {
        read_line(inputFile);
        label = strtok(line, " \t\n");
        labels[i] = atof(label);
        while (1) {
            idx = strtok(NULL, ":");
            val = strtok(NULL, " \t");
            if (val == NULL) break;
            index = (int)strtol(idx, &endptr, 10);
            value = (float)strtod(val, &endptr);
            features[i*nDim+index-1] = value;  //row-major; file indices are 1-based
        }
    }

    //release the scratch line buffer (was leaked before)
    free(line);
    line = NULL;
    fclose(inputFile);
    return 1;
}

// Writes a human-readable model summary and all support vectors
// (those with alpha[i] > eps) to outFileName.  b is the bias estimate.
// Exits the process if the output file cannot be opened.
// NOTE(review): features are read column-major here (features[j*nObs+i])
// while read_data() stores row-major — presumably the caller passes a
// transposed/device-layout copy; confirm against the call site.
void print_model(const char *outFileName, float *alpha, float *labels, float *features, float b, int nObs, int nDim, float eps)
{
    int nSV = 0; int pSV = 0;

    printf("Output File: %s\n", outFileName);
    FILE *outFile = fopen(outFileName, "w");
    if (outFile == NULL) { 
        //bug fix: previously passed the FILE* itself to %s (undefined behavior)
        printf("Can't write to %s\n", outFileName); 
        exit(1); 
    }

    //tally number of support vectors by label sign
    for (int i=0; i<nObs; i++) {
        if (alpha[i] > eps) {
            if (labels[i] > 0) { pSV++; }
            else               { nSV++; }
        }
    }

    //produce model output
    fprintf(outFile, "SVM Model Results:\n");
    fprintf(outFile, "Total # of SVs: %d\n", pSV+nSV);
    fprintf(outFile, "Total +SVs: %d\n", pSV);
    fprintf(outFile, "Total -SVs: %d\n", nSV);
    fprintf(outFile, "B offset estimate: %f\n", b);

    //print support vectors: "[i] y_i*alpha_i  1:f1 2:f2 ..."
    fprintf(outFile, "\nSupport Vectors:\n");
    for (int i=0; i<nObs; i++) {
        if (alpha[i] > eps) {
            fprintf(outFile, "[%i] %f ", i, labels[i]*alpha[i]);
            for (int j=0; j<nDim; j++) {
                fprintf(outFile, "%d:%f ", j+1, features[j*nObs+i]);
            }
            fprintf(outFile, "\n");
        }
    }
    fclose(outFile);

}

///////////////////////////// CACHING //////////////////////////////////////////
//Maps iHigh/iLow to slots of a direct-mapped kernel-row cache.
//On exit, i{High,Low}Compute says whether that kernel row must be
//recomputed (true = cache miss) and i{High,Low}Cache is the slot to use.
//On a miss, the slot's tag is updated immediately.
//NOTE(review): if iHigh%cacheSize == iLow%cacheSize the two rows alias
//one slot and the iLow tag overwrites the iHigh tag — presumably callers
//tolerate the extra recompute next round; verify against usage.
void check_cache(int iHigh, int iLow, bool& iHighCompute, bool& iLowCompute, int& iHighCache, int& iLowCache, int *Cache, int cacheSize) {
    iHighCache = iHigh%cacheSize;
    iLowCache = iLow%cacheSize;
    
    if (Cache[iHighCache] == iHigh) iHighCompute = 0;
    else iHighCompute = 1;
    //bug fix: previously set iHighCompute here (copy-paste), leaving
    //iLowCompute uninitialized on an iLow cache hit
    if (Cache[iLowCache] == iLow) iLowCompute = 0;
    else iLowCompute = 1;

    if (iHighCompute) Cache[iHighCache] = iHigh;
    //bug fix: previously wrote Cache[iHighCache], clobbering the iHigh tag
    if (iLowCompute) Cache[iLowCache] = iLow;
}

///////////////////////////// LINEAR KERNEL FUNCTIONS ////////////////////////////////
//Linear kernel phi(A'B): plain dot product of two nDim-length vectors.
//Callable from both host and device code.
static __device__ __host__ float kernel(float *pA, float *pB, int nDim) {
    float dot = 0.0f;
    for (int d = 0; d < nDim; d++)
        dot += pA[d] * pB[d];
    return dot;
}
//Computes phi(a,b) and phi(a,c) in one pass over a, so each element of a
//is loaded once and reused for both dot products (b and c are expected to
//already sit in shared memory, a in global memory).  Results are returned
//through phi1 (a.b) and phi2 (a.c).
static __device__ __host__ 
void dual_kernel(float *pA, float *pB, float *pC, int nDim, float& phi1, float& phi2) {
    float dotAB = 0.0f;
    float dotAC = 0.0f;
    for (int d = 0; d < nDim; d++) {
        const float a = pA[d];   //single load of a[d] feeds both products
        dotAB += a * pB[d];
        dotAC += a * pC[d];
    }
    phi1 = dotAB;
    phi2 = dotAC;
}

////////////////////////////// REDUCTION KERNELS //////////////////////////////////////
//ala Mark Harris
//Writes the (index, value) pair with the smaller value to *idx/*val.
//Ties keep the A pair.
__device__ void argmin(int AIdx, float AVal, int BIdx, float BVal, int* idx, float* val) {
    const bool takeB = (BVal < AVal);
    *idx = takeB ? BIdx : AIdx;
    *val = takeB ? BVal : AVal;
}
//Writes the (index, value) pair with the larger value to *idx/*val.
//Ties keep the A pair.
__device__ void argmax(int AIdx, float AVal, int BIdx, float BVal, int* idx, float* val) {
    const bool takeB = (BVal > AVal);
    *idx = takeB ? BIdx : AIdx;
    *val = takeB ? BVal : AVal;
}

//Block-wide argmin tree reduction over shared arrays val[]/idx[] of
//BLOCK_SIZE entries; the winner lands in val[0]/idx[0].  Must be called
//by all BLOCK_SIZE threads of the block (contains __syncthreads()).
//The final warp phase stages the partner slot in registers and uses
//__syncwarp() (CUDA 9+) instead of relying on implicit warp-synchronous
//execution, which is no longer guaranteed under independent thread
//scheduling (Volta and later).
__device__ void reduce_min(float* val, int *idx) {
    int tid = threadIdx.x;
    if(BLOCK_SIZE >=512){if(tid<256){argmin(idx[tid], val[tid], idx[tid+256], val[tid+256], idx+tid, val+tid);} __syncthreads();}
    if(BLOCK_SIZE >=256){if(tid<128){argmin(idx[tid], val[tid], idx[tid+128], val[tid+128], idx+tid, val+tid);} __syncthreads();}
    if(BLOCK_SIZE >=128){if(tid< 64){argmin(idx[tid], val[tid], idx[tid+ 64], val[tid+ 64], idx+tid, val+tid);} __syncthreads();}

    if(tid<32){
        //all 32 lanes of warp 0 execute this loop, so full-mask
        //__syncwarp() is legal at every step
        for (int offset = 32; offset >= 1; offset >>= 1) {
            if (BLOCK_SIZE >= 2*offset) {
                //capture the partner slot before any lane overwrites its own
                int partnerIdx = idx[tid + offset];
                float partnerVal = val[tid + offset];
                __syncwarp();
                argmin(idx[tid], val[tid], partnerIdx, partnerVal, idx+tid, val+tid);
                __syncwarp();
            }
        }
    }
}

//Block-wide argmax tree reduction over shared arrays val[]/idx[] of
//BLOCK_SIZE entries; the winner lands in val[0]/idx[0].  Must be called
//by all BLOCK_SIZE threads of the block (contains __syncthreads()).
//The final warp phase stages the partner slot in registers and uses
//__syncwarp() (CUDA 9+) instead of relying on implicit warp-synchronous
//execution, which is no longer guaranteed under independent thread
//scheduling (Volta and later).
__device__ void reduce_max(float* val, int *idx) {
    int tid = threadIdx.x;
    if(BLOCK_SIZE >=512){if(tid<256){argmax(idx[tid], val[tid], idx[tid+256], val[tid+256], idx+tid, val+tid);} __syncthreads();}
    if(BLOCK_SIZE >=256){if(tid<128){argmax(idx[tid], val[tid], idx[tid+128], val[tid+128], idx+tid, val+tid);} __syncthreads();}
    if(BLOCK_SIZE >=128){if(tid< 64){argmax(idx[tid], val[tid], idx[tid+ 64], val[tid+ 64], idx+tid, val+tid);} __syncthreads();}

    if(tid<32){
        //all 32 lanes of warp 0 execute this loop, so full-mask
        //__syncwarp() is legal at every step
        for (int offset = 32; offset >= 1; offset >>= 1) {
            if (BLOCK_SIZE >= 2*offset) {
                //capture the partner slot before any lane overwrites its own
                int partnerIdx = idx[tid + offset];
                float partnerVal = val[tid + offset];
                __syncwarp();
                argmax(idx[tid], val[tid], partnerIdx, partnerVal, idx+tid, val+tid);
                __syncwarp();
            }
        }
    }
}

/////////////// SEQUENTIAL MINIMAL OPTIMIZATION KERNELS ///////////////////////////////
//One-time SMO setup: global thread i initializes observation i.
//Launch with any 1-D grid covering at least nObs threads.
//Per observation: kernel-matrix diagonal phi(x_i,x_i), F_i = -y_i,
//and alpha_i = 0.
__global__ void __init(float* dFeatures, int dPitchFloat, int nObs, int nDim, float* dKernelDiag, float* dAlpha, float* dF, float* dLabels) { 
	int tid = blockIdx.x*blockDim.x + threadIdx.x;
	if (tid >= nObs) return;  //grid tail past nObs does nothing

	float* row = dFeatures + tid*dPitchFloat;
	dKernelDiag[tid] = kernel(row, row, nDim);
	dF[tid] = -dLabels[tid];
	dAlpha[tid] = 0.0f;
}

//Performs the very first SMO step for the initial (iHigh, iLow) pair and
//publishes the bookkeeping values __smo_local/__smo_global expect in
//dResult: [0]=alpha2Old(0), [1]=alpha1Old(0), [2]=bLow(1), [3]=bHigh(-1),
//[6]/[7]=the new shared alpha.  Every launched thread performs the same
//writes, so a single-thread launch suffices.
__global__ void __smo_first_iter(float* dResult, float* dKernelDiag, float* dFeatures, int dPitchFloat, float* dAlpha, float c, int nDim, int iLow, int iHigh) { 

    float* xHigh = dFeatures + iHigh * dPitchFloat;
    float* xLow  = dFeatures + iLow  * dPitchFloat;
    //eta = K(hh) + K(ll) - 2*K(hl): curvature along the update direction
    float eta = dKernelDiag[iHigh] + dKernelDiag[iLow] - 2.0f*kernel(xHigh, xLow, nDim);

    //unconstrained Newton step is 2/eta here; clip into the box [0, c]
    float alpha2New = 2.0f/eta;
    if (alpha2New > c) alpha2New = c;
    if (alpha2New < 0.0f) alpha2New = 0.0f;
    dAlpha[iLow]  = alpha2New;
    dAlpha[iHigh] = alpha2New;

    //return results
    dResult[0] = 0.0f;
    dResult[1] = 0.0f;
    dResult[2] = 1.0f;
    dResult[3] = -1.0f;
    dResult[6] = alpha2New;
    dResult[7] = alpha2New;
}

//One SMO iteration, distributed map phase.
//Launch: 1-D grid, one thread per observation; blockDim.x must equal
//BLOCK_SIZE (the shared reduction buffers are sized by it).
//Dynamic shared memory: 2*nDim floats — x_iHigh in [0,nDim), x_iLow in
//[nDim, 2*nDim).
//Each in-range thread classifies its point into the working sets from
//(alpha, label), obtains phi(x_i,x_high)/phi(x_i,x_low) from dCache or
//recomputes them, updates f_i with the previous step's alpha deltas,
//then the block reduces f to a per-block argmax (iLow candidate, over
//flags 1/2) and argmin (iHigh candidate, over flags 0/2), written to
//devLocalIndicesRL/RH and devLocalFsRL/RH for __smo_global to finish.
__global__ void __smo_local(float* dFeatures, int dPitchFloat,  float* devLabels, int nObs, int nDim, float eps, float c_eps, float* dAlpha, float* dF, float alpha1Diff, float alpha2Diff, int iLow, int iHigh, float* dCache, int dCachePitchInFloats, int iLowCacheIndex, int iHighCacheIndex, bool iLowCompute, bool iHighCompute, int* devLocalIndicesRL, int* devLocalIndicesRH, float* devLocalFsRL, float* devLocalFsRH) {

    //init some vars
    extern __shared__ float xIHighLow[];
	__shared__ int tempLocalIndices[BLOCK_SIZE];
	__shared__ float tempLocalFs[BLOCK_SIZE];
    float alpha, f, label;
    int flag; //iHigh=0, iLow=1, both=2, none=-1
    int globalIdx = blockDim.x*blockIdx.x + threadIdx.x;
  
	//on a cache miss, thread 0 alone stages the needed row(s) into shared
	//memory (serial copy; NOTE(review): could be parallelized across the block)
	if (iHighCompute) {
		if(threadIdx.x==0)
		{
			for(int i =0; i< nDim; i++)
				xIHighLow[i] = dFeatures[iHigh*dPitchFloat+i];
		}
	}
	if (iLowCompute) {
		//Load xILow into shared memory
		if(threadIdx.x==0)
		{
			for(int i=0; i<nDim; i++)
				xIHighLow[nDim+i]=dFeatures[iLow*dPitchFloat+i];
		}
	}
	
	__syncthreads();

  
	//classify this thread's point into the SMO working sets
	if (globalIdx < nObs) {
		alpha = dAlpha[globalIdx];
		f = dF[globalIdx];
		label = devLabels[globalIdx];
    
		if (alpha > eps) {
			if (alpha < c_eps) {
				flag = 2; //Unbound support vector (I0)
			} else {
				if (label > 0) {
					flag = 1; //Bound positive support vector (I3)
				} else {
					flag = 0; //Bound negative support vector (I2)
				}
			}
		} else {
			if (label > 0) {
				flag = 0; //Positive nonsupport vector (I1)
			} else {
				flag = 1; //Negative nonsupport vector (I4)
			}
		}
	} else {
		flag = -1; //past the end of the data: excluded from both reductions
	}

  
	//fetch or compute the two kernel values this thread needs
	float highKernel = 0;
	float lowKernel = 0;
	if (flag >= 0) {
		if (!iHighCompute) {
			highKernel = dCache[(dCachePitchInFloats * iHighCacheIndex) + globalIdx];
		}
		if (!iLowCompute) {
			lowKernel = dCache[(dCachePitchInFloats * iLowCacheIndex) + globalIdx];
		}
		if (iHighCompute && iLowCompute) {
			//both rows missed: fused pass loads x_i once for both products
			dual_kernel(dFeatures + globalIdx*dPitchFloat, xIHighLow, xIHighLow + nDim ,nDim, highKernel, lowKernel);
		} else if (iHighCompute) {
			highKernel = kernel(dFeatures + globalIdx * dPitchFloat, xIHighLow, nDim);
		} else if (iLowCompute) {
			lowKernel = kernel(dFeatures + globalIdx * dPitchFloat,  xIHighLow + nDim, nDim);
		}

		//highKernel = phi(x_i, x_high);
		//lowKernel = phi(x_i, x_low);
		//Update the f_i
		f = f + alpha1Diff*highKernel + alpha2Diff*lowKernel;
		
		//update the cache with any freshly-computed rows
		if (iLowCompute)  dCache[(dCachePitchInFloats * iLowCacheIndex) + globalIdx] = lowKernel;
		if (iHighCompute) dCache[(dCachePitchInFloats * iHighCacheIndex) + globalIdx] = highKernel;
		dF[globalIdx] = f;
		
	}
	__syncthreads();

	//Ilow candidate: block argmax of f over flags 1/2
	//(non-participants contribute -FLT_MAX so they never win)
	if ((flag==1) || (flag==2)) {		
		tempLocalFs[threadIdx.x] = f;
		tempLocalIndices[threadIdx.x] = globalIdx;
	} else {
		tempLocalFs[threadIdx.x] = -FLT_MAX;
	}

	__syncthreads();
  
    reduce_max(tempLocalFs, tempLocalIndices);

	__syncthreads();
  
  
	//thread 0 publishes this block's iLow winner
	if (threadIdx.x == 0) {
		devLocalIndicesRL[blockIdx.x] = tempLocalIndices[0];
		devLocalFsRL[blockIdx.x] = tempLocalFs[0];
	}
 
	__syncthreads();

	//Ihigh candidate: block argmin of f over flags 0/2
	//(non-participants contribute FLT_MAX so they never win)
	if ((flag ==0)|| (flag ==2)) {
		tempLocalFs[threadIdx.x] = f;
		tempLocalIndices[threadIdx.x] = globalIdx;
	} else {
		tempLocalFs[threadIdx.x] = FLT_MAX;
	}
	__syncthreads();
   
	reduce_min(tempLocalFs, tempLocalIndices);
	//thread 0 publishes this block's iHigh winner
	if (threadIdx.x == 0) {
		devLocalIndicesRH[blockIdx.x] = tempLocalIndices[0];
		devLocalFsRH[blockIdx.x] = tempLocalFs[0];
	}
}


//One SMO iteration, final reduce + update phase.
//Launch: a SINGLE block of BLOCK_SIZE threads.  inputSize is the number
//of per-block candidates written by __smo_local (one per block of the
//map launch).  The block reduces those candidates to the global
//(iHigh, bHigh) = argmin f and (iLow, bLow) = argmax f, then thread 0
//performs the analytic two-alpha SMO update with box clipping and
//writes the step summary into dResult:
//  [0]=alpha2Old [1]=alpha1Old [2]=bLow [3]=bHigh
//  [4]=alpha2New [5]=alpha1New [6]=iLow [7]=iHigh (as floats)
__global__ void __smo_global(float* dFeatures, int dPitchFloat, float* dLabels, float* dKernelDiag, float* dAlpha, float* dResult, float c, int nDim, int* devLocalIndicesRL, int* devLocalIndicesRH, float* devLocalFsRL, float* devLocalFsRH, int inputSize) {
  
  __shared__ int tempIndices[BLOCK_SIZE];
  __shared__ float tempFs[BLOCK_SIZE];
  
  //NOTE: these registers are only assigned inside threadIdx.x==0 guards
  //below, so their values are meaningful in thread 0 only
  int iHigh;
  int iLow;
  float bHigh;
  float bLow;
  
  //Load elements (pad the tail with FLT_MAX so it never wins the argmin)
  if (threadIdx.x < inputSize) {
    tempIndices[threadIdx.x] = devLocalIndicesRH[threadIdx.x];
    tempFs[threadIdx.x] = devLocalFsRH[threadIdx.x];
  } else {
    tempFs[threadIdx.x] = FLT_MAX;
  }

  //fold any candidates beyond BLOCK_SIZE into each thread's slot first
  if (inputSize > BLOCK_SIZE) {
    for (int i = threadIdx.x + BLOCK_SIZE; i < inputSize; i += blockDim.x) {
      argmin(tempIndices[threadIdx.x], tempFs[threadIdx.x], devLocalIndicesRH[i], devLocalFsRH[i], tempIndices + threadIdx.x, tempFs + threadIdx.x);
    }
  }
  __syncthreads();

//  if (threadIdx.x==0) {
//    for (int i=0; i<inputSize; i++) {
//    cuPrintf("tempFs[%i] = %f\n", i, tempFs[i]);
//    }
//  }

  reduce_min(tempFs, tempIndices);

  __syncthreads();
  //capture the argmin winner before tempFs/tempIndices are reused below
  if(threadIdx.x==0){
	  iHigh = tempIndices[0];
	  bHigh = tempFs[0];
  }
  
  __syncthreads();

  //Load elements (pad the tail with -FLT_MAX so it never wins the argmax)
  if (threadIdx.x < inputSize) {
    tempIndices[threadIdx.x] = devLocalIndicesRL[threadIdx.x];
    tempFs[threadIdx.x] = devLocalFsRL[threadIdx.x];
  } else {
    tempFs[threadIdx.x] = -FLT_MAX;
  }


  //blockDim.x==number per block
  if (inputSize > BLOCK_SIZE) {
    for (int i = threadIdx.x + BLOCK_SIZE; i < inputSize; i += blockDim.x) {
      argmax(tempIndices[threadIdx.x], tempFs[threadIdx.x], devLocalIndicesRL[i], devLocalFsRL[i], tempIndices + threadIdx.x, tempFs + threadIdx.x);
    }
  }
  __syncthreads();
  
  reduce_max(tempFs, tempIndices);
  
  __syncthreads();
  
  //thread 0 does the serial SMO pair update
  if (threadIdx.x == 0) {
	iLow = tempIndices[0];
	bLow = tempFs[0];
    float kernelEval = kernel(dFeatures+iHigh*dPitchFloat, dFeatures + iLow *dPitchFloat, nDim);
    //eta: second derivative of the objective along the update direction
    float eta = dKernelDiag[iHigh] + dKernelDiag[iLow] - 2*kernelEval;
    
    //compute alpha within feasible region
    float alpha1Old = dAlpha[iHigh];
	float alpha2Old = dAlpha[iLow];
	float alphaDiff = alpha2Old - alpha1Old;
	float lowLabel = dLabels[iLow];
	float sign = dLabels[iHigh] * lowLabel;
	float alpha2UpperBound;
	float alpha2LowerBound;
	//box constraints on alpha2 depend on whether the labels agree
	if (sign < 0) {
		if (alphaDiff < 0) {
			alpha2LowerBound = 0;
			alpha2UpperBound = c + alphaDiff;
		} else {
			alpha2LowerBound = alphaDiff;
			alpha2UpperBound = c;
		}
	} else {
		float alphaSum = alpha2Old + alpha1Old;
		if (alphaSum < c) {
			alpha2UpperBound = alphaSum;
			alpha2LowerBound = 0;
		} else {
			alpha2LowerBound = alphaSum - c;
			alpha2UpperBound = c;
		}
	}
	float alpha2New;
	if (eta > 0) {
		//positive curvature: Newton step, clipped to the box
		alpha2New = alpha2Old + lowLabel*(bHigh - bLow)/eta;
		if (alpha2New < alpha2LowerBound) {
			alpha2New = alpha2LowerBound;
		} else if (alpha2New > alpha2UpperBound) {
			alpha2New = alpha2UpperBound;
		}
	} else {
		//non-positive curvature: objective is linear/concave along the
		//direction, so the optimum sits at whichever box end the slope favors
		float slope = lowLabel * (bHigh - bLow);
		float delta = slope * (alpha2UpperBound - alpha2LowerBound);
		if (delta > 0) {
			if (slope > 0) {
				alpha2New = alpha2UpperBound;
			} else {
				alpha2New = alpha2LowerBound;
			}
		} else {
			alpha2New = alpha2Old;
		}
	}
    float alpha2Diff = alpha2New - alpha2Old;
	float alpha1Diff = -sign*alpha2Diff;
	float alpha1New = alpha1Old + alpha1Diff;

    //store results
    *(dResult + 0) = alpha2Old;
    *(dResult + 1) = alpha1Old;
    *(dResult + 2) = bLow;
    *(dResult + 3) = bHigh;
    dAlpha[iLow] = alpha2New;
    dAlpha[iHigh] = alpha1New;
    *(dResult + 4) = alpha2New;
    *(dResult + 5) = alpha1New;
    *(dResult + 6) = float(iLow);
    *(dResult + 7) = float(iHigh);
  }
  
}

#endif
