#ifndef _SVM_KERNELS_H_
#define _SVM_KERNELS_H_

#include <float.h>
#include <math.h>
#include "util/cuPrintf.cu"

#define BLOCK_SIZE 128
#define MB_LEAVE  100

///////////////////////DATA AND MODEL I/O ///////////////////////////////////////
static char *line = NULL;
static int maxLineLen;

//Read one line from input into the file-global `line` buffer, doubling the
//buffer until a complete line (ending in '\n') or EOF has been consumed.
//Returns `line`, or NULL when the stream is exhausted.
static char* read_line(FILE *input) {
    if (fgets(line, maxLineLen, input) == NULL)
        return NULL;
    //no newline yet => the line did not fit; grow and keep reading
    while (strrchr(line, '\n') == NULL) {
        maxLineLen *= 2;
        line = (char*)realloc(line, maxLineLen);
        int used = (int)strlen(line);
        if (fgets(line + used, maxLineLen - used, input) == NULL)
            break;
    }
    return line;
}

//Read a libsvm-format training file ("<label> <idx>:<val> ..." per line).
//First pass counts observations (nObs) and the largest 1-based feature
//index (nDim); second pass fills a dense row-major nObs x nDim feature
//matrix and a label vector.  The arrays are allocated here and returned via
//*p_features / *p_labels (caller frees); sizes go to *p_nObs / *p_nDim.
//Returns 1 on success, 0 if the file cannot be opened.
int read_data(const char *filename, float **p_features, float **p_labels, int *p_nObs, int *p_nDim) {
    
    int nObs, nDim, instMaxIdx, index;
    float value;
    char *endptr;
    char *idx, *val, *label, *p;

    FILE *inputFile = fopen(filename, "r");
    if (inputFile == 0) {
        printf("File not found!\n");
        return 0;
    }
    
    nObs=0; nDim=0; instMaxIdx=0; maxLineLen=1024;
    line = (char*)malloc(maxLineLen*sizeof(char));

    //first pass: determine number of training obs and number of features
    while(read_line(inputFile)!=NULL) {
        p = strtok(line," \t");   //label token (unused in this pass)
        while(1)
        {
            idx = strtok(NULL,":");
            p = strtok(NULL," \t");
            if (p==NULL || *p=='\n') break;
            instMaxIdx = (int)strtol(idx, &endptr, 10);
            if (instMaxIdx > nDim) nDim = instMaxIdx;
        }
        ++nObs;
    }

    //second pass: read the data into memory
    rewind(inputFile);
    *(p_nObs) = nObs;
    *(p_nDim) = nDim;
    //calloc, not malloc: the file format is sparse, so features that never
    //appear on a line must read as 0 rather than uninitialized garbage
    float *features = (float*)calloc((size_t)nObs*nDim, sizeof(float));
    float *labels = (float*)malloc(sizeof(float)*nObs);
    *(p_features) = features;
    *(p_labels) = labels;
    for (int i=0; i<nObs; i++) {
        read_line(inputFile);
        label = strtok(line, " \t\n");
        labels[i] = atof(label);
        while (1) {
            idx = strtok(NULL, ":");
            val = strtok(NULL, " \t");
            if (val == NULL) break;
            index = (int)strtol(idx, &endptr, 10);
            value = (float)strtod(val, &endptr);
            //feature indices are 1-based; guard against corrupt indices to
            //avoid writing outside the matrix
            if (index >= 1 && index <= nDim)
                features[i*nDim+index-1] = value;
        }
    }

    free(line);   //was leaked: the grow-buffer is no longer needed
    line = NULL;
    fclose(inputFile);
    return 1;
}

//Write a summary of the trained SVM model to outFileName: support-vector
//counts (an alpha[i] > eps marks observation i as a support vector, split by
//the sign of labels[i]), the bias estimate b, and the iteration count.
//features/nDim are currently unused because the per-SV dump below is
//disabled.  Exits the process if the output file cannot be created.
void print_model(const char *outFileName, float *alpha, float *labels, float *features, float b, float iter, int nObs, int nDim, float eps)
{
    int nSV = 0; int pSV = 0;

    printf("Output File: %s\n", outFileName);
    FILE *outFile = fopen(outFileName, "w");
    if (outFile == NULL) { 
        //bug fix: previously passed the FILE* itself to %s (undefined behavior)
        printf("Can't write to %s\n", outFileName); 
        exit(1); 
    }

    //tally number of support vectors, split by class
    for (int i=0; i<nObs; i++) {
        if (alpha[i] > eps) {
            if (labels[i] > 0) { pSV++; }
            else               { nSV++; }
        }
    }

    //produce model output
    fprintf(outFile, "SVM Model Results:\n");
    fprintf(outFile, "# iterations: %f\n", iter);   //bug fix: format specifier was missing, iter was never printed
    fprintf(outFile, "Total # of SVs: %d\n", pSV+nSV);
    fprintf(outFile, "Total +SVs: %d\n", pSV);
    fprintf(outFile, "Total -SVs: %d\n", nSV);
    fprintf(outFile, "B offset estimate: %f\n", b);

    //print support vectors (disabled)
//    fprintf(outFile, "\nSupport Vectors:\n");
//    for (int i=0; i<nObs; i++) {
//        if (alpha[i] > eps) {
//            fprintf(outFile, "[%i] %f ", i, labels[i]*alpha[i]);
//            for (int j=0; j<nDim; j++) {
//                fprintf(outFile, "%d:%f ", j+1, features[j*nObs+i]);
//            }
//            fprintf(outFile, "\n");
//        }
//    }

    //bug fix: fclose was commented out along with the SV dump, leaking the
    //handle and risking loss of buffered output
    fclose(outFile);
}

///////////////////////////// CACHING //////////////////////////////////////////
//Map iHigh/iLow into a direct-mapped kernel-row cache and report whether each
//row must be (re)computed this iteration.  On return, iHighCache/iLowCache
//hold the cache slots, and iHighCompute/iLowCompute are true iff the
//corresponding row was NOT already resident.  The cache tag array is updated
//so that both rows are marked resident for subsequent iterations.
void check_cache(int iHigh, int iLow, bool& iHighCompute, bool& iLowCompute, int& iHighCache, int& iLowCache, int *Cache, int cacheSize) {
    //direct-mapped placement: slot = index mod cacheSize
    iHighCache = iHigh%cacheSize;
    iLowCache = iLow%cacheSize;

    //a tag hit means the kernel row is already cached and need not be recomputed
    iHighCompute = (Cache[iHighCache] != iHigh);
    //bug fix: the iLow hit previously cleared iHighCompute, leaving
    //iLowCompute uninitialized on a cache hit
    iLowCompute = (Cache[iLowCache] != iLow);

    //record that the rows for the current iHigh and iLow now occupy their slots
    if (iHighCompute) Cache[iHighCache] = iHigh;
    //bug fix: previously wrote iLow's tag into iHigh's slot
    if (iLowCompute) Cache[iLowCache] = iLow;
}

///////////////////////////// LINEAR KERNEL FUNCTIONS ////////////////////////////////
//compute phi(A'B)
//Linear kernel: dot product of the two nDim-length vectors at pA and pB.
static __device__ __host__ float kernel(float *pA, float *pB, int nDim) {
    float dot = 0.0f;
    for (int i = 0; i < nDim; ++i)
        dot += pA[i] * pB[i];
    return dot;
}
//compute phi(a,b) and phi(a,c), where b and c are already in shared memory
//and a is in global memory
//Evaluate two linear kernels that share a left operand: phi1 = <A,B> and
//phi2 = <A,C>.  A is loaded once per dimension, which is cheaper than two
//separate kernel() calls when A resides in global memory and B/C are staged
//in shared memory.
static __device__ __host__ 
void dual_kernel(float *pA, float *pB, float *pC, int nDim, float& phi1, float& phi2) {
    float dotAB = 0.0f;
    float dotAC = 0.0f;
    for (int i = 0; i < nDim; ++i) {
        const float a = pA[i];   //single read of the shared left operand
        dotAB += a * pB[i];
        dotAC += a * pC[i];
    }
    phi1 = dotAB;
    phi2 = dotAC;
}

////////////////////////////// REDUCTION KERNELS //////////////////////////////////////
//Write the (index, value) pair with the smaller value to *idx/*val; ties keep A.
__device__ void argmin(int AIdx, float AVal, int BIdx, float BVal, int* idx, float* val) {
    const bool takeB = (BVal < AVal);
    *idx = takeB ? BIdx : AIdx;
    *val = takeB ? BVal : AVal;
}
//Write the (index, value) pair with the larger value to *idx/*val; ties keep A.
__device__ void argmax(int AIdx, float AVal, int BIdx, float BVal, int* idx, float* val) {
    const bool takeB = (BVal > AVal);
    *idx = takeB ? BIdx : AIdx;
    *val = takeB ? BVal : AVal;
}
//Block-wide argmin over the BLOCK_SIZE-wide __shared__ arrays val/idx.
//All threads of the block must call this; on return val[0]/idx[0] hold the
//smallest value and its index (caller typically reads them from thread 0).
//Requires BLOCK_SIZE >= 32 (the tail is a full-warp reduction).
//Fix: the final-warp stages previously relied on implicit warp-synchronous
//execution, which is a data race under Volta+ independent thread scheduling;
//each stage now snapshots its partner and separates read/write with
//__syncwarp() (CUDA 9+).
__device__ void reduce_min(float* val, int *idx) {
    int tid = threadIdx.x;
    //tree reduction across warps; stages for sizes above BLOCK_SIZE compile out
    if(BLOCK_SIZE >=512){if(tid<256){argmin(idx[tid], val[tid], idx[tid+256], val[tid+256], idx+tid, val+tid);} __syncthreads();}
    if(BLOCK_SIZE >=256){if(tid<128){argmin(idx[tid], val[tid], idx[tid+128], val[tid+128], idx+tid, val+tid);} __syncthreads();}
    if(BLOCK_SIZE >=128){if(tid< 64){argmin(idx[tid], val[tid], idx[tid+ 64], val[tid+ 64], idx+tid, val+tid);} __syncthreads();}

    if(tid<32){
        //final warp: snapshot the partner entry, barrier so all reads complete,
        //write own slot, barrier again so the write is visible to the next stage
        #pragma unroll
        for (int stride = 32; stride >= 1; stride >>= 1) {
            if (BLOCK_SIZE >= 2*stride) {
                int   bIdx = idx[tid + stride];
                float bVal = val[tid + stride];
                __syncwarp();
                argmin(idx[tid], val[tid], bIdx, bVal, idx+tid, val+tid);
                __syncwarp();
            }
        }
    }
}
//Block-wide argmax over the BLOCK_SIZE-wide __shared__ arrays val/idx.
//All threads of the block must call this; on return val[0]/idx[0] hold the
//largest value and its index (caller typically reads them from thread 0).
//Requires BLOCK_SIZE >= 32 (the tail is a full-warp reduction).
//Fix: the final-warp stages previously relied on implicit warp-synchronous
//execution, which is a data race under Volta+ independent thread scheduling;
//each stage now snapshots its partner and separates read/write with
//__syncwarp() (CUDA 9+).
__device__ void reduce_max(float* val, int *idx) {
    int tid = threadIdx.x;
    //tree reduction across warps; stages for sizes above BLOCK_SIZE compile out
    if(BLOCK_SIZE >=512){if(tid<256){argmax(idx[tid], val[tid], idx[tid+256], val[tid+256], idx+tid, val+tid);} __syncthreads();}
    if(BLOCK_SIZE >=256){if(tid<128){argmax(idx[tid], val[tid], idx[tid+128], val[tid+128], idx+tid, val+tid);} __syncthreads();}
    if(BLOCK_SIZE >=128){if(tid< 64){argmax(idx[tid], val[tid], idx[tid+ 64], val[tid+ 64], idx+tid, val+tid);} __syncthreads();}

    if(tid<32){
        //final warp: snapshot the partner entry, barrier so all reads complete,
        //write own slot, barrier again so the write is visible to the next stage
        #pragma unroll
        for (int stride = 32; stride >= 1; stride >>= 1) {
            if (BLOCK_SIZE >= 2*stride) {
                int   bIdx = idx[tid + stride];
                float bVal = val[tid + stride];
                __syncwarp();
                argmax(idx[tid], val[tid], bIdx, bVal, idx+tid, val+tid);
                __syncwarp();
            }
        }
    }
}

///////////////SEQUENTIAL MINIMAL OPTIMIZATION KERNELS///////////////////////////////
//Initialization kernel, one thread per observation: precompute the kernel
//matrix diagonal K(x_i, x_i), start every alpha at 0, and set the initial
//optimality indicator F_i = -y_i.
__global__ void __init(float* dFeatures, int dPitchFloat, int nObs, int nDim, float* dKernelDiag, float* dAlpha, float* dF, float* dLabels) { 
    const int obs = blockIdx.x*blockDim.x + threadIdx.x;
    if (obs >= nObs) return;   //grid may overshoot nObs; guard the tail

    float* row = dFeatures + obs*dPitchFloat;
    dKernelDiag[obs] = kernel(row, row, nDim);   //self inner product
    dF[obs] = -dLabels[obs];
    dAlpha[obs] = 0;
}

//Special-cased first SMO iteration (intended as a single-thread launch):
//take one analytic step on the initial working pair (iHigh, iLow), clip the
//shared step size to the box [0, c], store it into both alphas, and report
//the iteration summary back to the host through dResult
//(old alphas = 0, bLow = 1, bHigh = -1, new alphas in slots 6/7).
__global__ void __smo_first_iter(float* dResult, float* dKernelDiag, float* dFeatures, int dPitchFloat, float* dAlpha, float c, int nDim, int iLow, int iHigh) { 

    float* xHigh = dFeatures + iHigh * dPitchFloat;
    float* xLow  = dFeatures + iLow * dPitchFloat;

    //second derivative of the objective along the pair:
    //eta = K(h,h) + K(l,l) - 2*K(h,l)
    float eta = dKernelDiag[iHigh] + dKernelDiag[iLow] - 2*kernel(xHigh, xLow, nDim);

    //unconstrained Newton step, then clip into [0, c]
    float alphaNew = 2/eta;
    if (alphaNew > c) alphaNew = c;
    if (alphaNew < 0) alphaNew = 0;

    dAlpha[iLow] = alphaNew;
    dAlpha[iHigh] = alphaNew;

    //report results back to the host
    dResult[0] = 0.0;
    dResult[1] = 0.0;
    dResult[2] = 1.0;
    dResult[3] = -1.0;
    dResult[6] = alphaNew;
    dResult[7] = alphaNew;
}

//One SMO iteration, distributed phase.  Each thread owns observation
//globalIdx.  Step 1: update its optimality indicator F_i using the alpha
//changes (alpha1Diff/alpha2Diff) from the previous iteration, reading the
//two needed kernel rows from dCache when resident or recomputing (and
//caching) them otherwise.  Step 2: block-local reductions over the updated
//F values nominate this block's iLow (max F over flags 1/2) and iHigh
//(min F over flags 0/2) candidates, written to dLocalLowIs/Fs[blockIdx.x]
//and dLocalHighIs/Fs[blockIdx.x] for the global reduction kernel.
//Launch requirements: blockDim.x == BLOCK_SIZE and 2*nDim floats of dynamic
//shared memory (x_iHigh in the first half, x_iLow in the second).
__global__ void __smo_local(float* dFeatures, int dPitchFloat,  float* devLabels, int nObs, int nDim, float eps, float c_eps, float* dAlpha, float* dF, float alpha1Diff, float alpha2Diff, int iLow, int iHigh, float* dCache, int dCachePitch, int iLowCacheIndex, int iHighCacheIndex, bool iLowCompute, bool iHighCompute, int* dLocalLowIs, int* dLocalHighIs, float* dLocalLowFs, float* dLocalHighFs) {

    //init some vars
    extern __shared__ float xIHighLow[];       //dynamic: [0,nDim) = x_iHigh, [nDim,2*nDim) = x_iLow
	__shared__ int tempIs[BLOCK_SIZE];         //reduction scratch: candidate indices
	__shared__ float tempLocalFs[BLOCK_SIZE];  //reduction scratch: candidate F values
    float alpha, f, label;
    int flag;                                  //working-set class of this observation (-1 = out of range)
    int globalIdx = blockDim.x*blockIdx.x + threadIdx.x;
	float highKernel = 0;
	float lowKernel = 0;
  
    //if we have to compute the kernel matrix for this iteration
    //then store x_IHigh and x_ILow in shared memory
    //NOTE(review): thread 0 stages the whole vector serially; a cooperative
    //load would coalesce better, but this preserves the existing behavior
	if (iHighCompute) {
		if(threadIdx.x==0)
		{
			for(int i =0; i< nDim; i++)
				xIHighLow[i] = dFeatures[iHigh*dPitchFloat+i];
		}
	}
	if (iLowCompute) {
		//Load xILow into shared memory
		if(threadIdx.x==0)
		{
			for(int i=0; i<nDim; i++)
				xIHighLow[nDim+i]=dFeatures[iLow*dPitchFloat+i];
		}
	}
	//all threads must see the staged vectors before using them below
	__syncthreads();

    //determine if thread's partition is part of working set
    //b_high = min{f_i | i in I_0&I_1&I_2}
    //b_low = max{f_i | i in I_0&I_3&I_4} 
    //if not in working set then not part of the reduction process 
	if (globalIdx < nObs) {
		alpha = dAlpha[globalIdx];
		f = dF[globalIdx];
		label = devLabels[globalIdx];
    
		//classify per Keerthi's index sets: flag 2 participates in both
		//reductions, flag 0 only in the iHigh (min) one, flag 1 only in
		//the iLow (max) one
		if (alpha > eps) {
			if (alpha < c_eps) {
				flag = 2; //Unbound support vector (I0)
			} else {
				if (label > 0) {
					flag = 1; //Bound positive support vector (I3)
				} else {
					flag = 0; //Bound negative support vector (I2)
				}
			}
		} else {
			if (label > 0) {
				flag = 0; //Positive nonsupport vector (I1)
			} else {
				flag = 1; //Negative nonsupport vector (I4)
			}
		}
	} else {
		flag = -1; //thread beyond nObs: excluded from both reductions
	}

    //compute the kernel if the thread is flagged as I1-I4
	if (flag >= 0) {
		//cached rows are read back; freshly computed rows are written below
		if (!iHighCompute) {
			highKernel = dCache[(dCachePitch * iHighCacheIndex) + globalIdx];
		}
		if (!iLowCompute) {
			lowKernel = dCache[(dCachePitch * iLowCacheIndex) + globalIdx];
		}
		if (iHighCompute && iLowCompute) {
			//both rows needed: fused evaluation reads x_i only once per dim
			dual_kernel(dFeatures + globalIdx*dPitchFloat, xIHighLow, xIHighLow + nDim ,nDim, highKernel, lowKernel);
		} else if (iHighCompute) {
			highKernel = kernel(dFeatures + globalIdx * dPitchFloat, xIHighLow, nDim);
		} else if (iLowCompute) {
			lowKernel = kernel(dFeatures + globalIdx * dPitchFloat,  xIHighLow + nDim, nDim);
		}

		//Update the f_i using the previous iteration's alpha changes
		f = f + alpha1Diff*highKernel + alpha2Diff*lowKernel;
		
		//update the cache so later iterations can skip the recomputation
		if (iLowCompute)  dCache[(dCachePitch * iLowCacheIndex) + globalIdx] = lowKernel;
		if (iHighCompute) dCache[(dCachePitch * iHighCacheIndex) + globalIdx] = highKernel;
		dF[globalIdx] = f;
		
	}
	__syncthreads();

	//Compute iLow via local reduction process (argmax of f over flags 1/2)
	if ((flag==1) || (flag==2)) {		
		tempLocalFs[threadIdx.x] = f;
		tempIs[threadIdx.x] = globalIdx;
	} else {
		//NOTE(review): tempIs[threadIdx.x] is left unset here; if every thread
		//in the block is excluded the reduced index is stale garbage — confirm
		//the host ignores it via the -FLT_MAX sentinel value
		tempLocalFs[threadIdx.x] = -FLT_MAX;
	}
	__syncthreads();
    reduce_max(tempLocalFs, tempIs);
	__syncthreads();
	if (threadIdx.x == 0) {
		dLocalLowIs[blockIdx.x] = tempIs[0];
		dLocalLowFs[blockIdx.x] = tempLocalFs[0];
	}
	__syncthreads();

	//Do the same for iHigh (argmin of f over flags 0/2)
	if ((flag ==0) || (flag ==2)) {
		tempLocalFs[threadIdx.x] = f;
		tempIs[threadIdx.x] = globalIdx;
	} else {
		tempLocalFs[threadIdx.x] = FLT_MAX; //excluded: sentinel so it never wins the min
	}
	__syncthreads();
	reduce_min(tempLocalFs, tempIs);
	//NOTE(review): no __syncthreads() after reduce_min before thread 0 reads
	//slot 0 — thread 0 wrote the final stage itself, but this relies on the
	//reduction's internal synchronization guarantees; verify on the target arch
	if (threadIdx.x == 0) {
		dLocalHighIs[blockIdx.x] = tempIs[0];
		dLocalHighFs[blockIdx.x] = tempLocalFs[0];
	}
}

//One SMO iteration, global phase (intended as a single-block launch of
//BLOCK_SIZE threads).  Reduces the per-block candidates produced by
//__smo_local (inputSize = number of blocks) down to the global iHigh
//(min F) and iLow (max F), then thread 0 alone takes the analytic SMO step:
//clip alpha_iLow to its feasible segment, derive alpha_iHigh from the
//equality constraint, write both back to dAlpha, and report the iteration
//summary through dResult[0..7] = {alpha2Old, alpha1Old, bLow, bHigh,
//alpha2New, alpha1New, iLow, iHigh}.
__global__ void __smo_global(float* dFeatures, int dPitchFloat, float* dLabels, float* dKernelDiag, float* dAlpha, float* dResult, float c, int nDim, int* dLocalLowIs, int* dLocalHighIs, float* dLocalLowFs, float* dLocalHighFs, int inputSize) {
  
    //init vars
    __shared__ int tempIndices[BLOCK_SIZE];
    __shared__ float tempFs[BLOCK_SIZE];
    //per-thread registers: only thread 0's copies are ever assigned and read
    int iHigh, iLow;
    float bHigh, bLow;
  
    //load each local F_high into shared memory for global reduction
    if (threadIdx.x < inputSize) {
        tempIndices[threadIdx.x] = dLocalHighIs[threadIdx.x];
        tempFs[threadIdx.x] = dLocalHighFs[threadIdx.x];
    } else {
        //NOTE(review): tempIndices is left unset for these slots; the FLT_MAX
        //sentinel keeps them from winning the min unless ties occur — confirm
        tempFs[threadIdx.x] = FLT_MAX;
    }
    //if we have more blocks than BLOCK_SIZE, we update each element of tempIndices and tempFs using
    //the correct thread ID (strided pre-reduction into the shared buffers)
    if (inputSize > BLOCK_SIZE) {
        for (int i = threadIdx.x + BLOCK_SIZE; i < inputSize; i += blockDim.x) {
        argmin(tempIndices[threadIdx.x], tempFs[threadIdx.x], dLocalHighIs[i], dLocalHighFs[i], tempIndices + threadIdx.x, tempFs + threadIdx.x);
        }
    }
//  if (threadIdx.x==0) {
//    for (int i=0; i<inputSize; i++) {
//    cuPrintf("tempFs[%i] = %f\n", i, tempFs[i]);
//    }
//  }
    //perform global reduction step...a lot of barriers, any way to cut down on these?
    __syncthreads();
    reduce_min(tempFs, tempIndices);
    __syncthreads();
    //save the winner in thread 0's registers before the buffers are reused below
    if(threadIdx.x==0){
        iHigh = tempIndices[0];
	    bHigh = tempFs[0];
    }
    __syncthreads();
    
    //Do the same thing for the low Fs (argmax with a -FLT_MAX sentinel)
    if (threadIdx.x < inputSize) {
        tempIndices[threadIdx.x] = dLocalLowIs[threadIdx.x];
        tempFs[threadIdx.x] = dLocalLowFs[threadIdx.x];
    } else {
        tempFs[threadIdx.x] = -FLT_MAX;
    }
    if (inputSize > BLOCK_SIZE) {
        for (int i = threadIdx.x + BLOCK_SIZE; i < inputSize; i += blockDim.x) {
            argmax(tempIndices[threadIdx.x], tempFs[threadIdx.x], dLocalLowIs[i], dLocalLowFs[i], tempIndices + threadIdx.x, tempFs + threadIdx.x);
        }
    }
    __syncthreads();
    reduce_max(tempFs, tempIndices);
    __syncthreads();
 
    //we now have our globally optimal Fs. update the relevant values of alpha 
    //(serial from here: only thread 0 runs the analytic update)
    if (threadIdx.x == 0) {
        iLow = tempIndices[0];
    	bLow = tempFs[0];

        //compute alpha within feasible region
        //init vars
    	float eta, range, upBd, lowBd, alpha1New, alpha2New;
        float alpha1Old = dAlpha[iHigh];
    	float alpha2Old = dAlpha[iLow];
    	float alphaDiff = alpha2Old-alpha1Old;
        float alphaSum = alpha2Old+alpha1Old;
        float bDiff = bHigh-bLow;
        float sign = dLabels[iHigh]*dLabels[iLow]; //+1 same class, -1 opposite
        
        //compute kernel matrix portion of update:
        //eta = K(h,h) + K(l,l) - 2*K(h,l), the curvature along the pair
        eta = dKernelDiag[iHigh] + dKernelDiag[iLow] - 2*kernel(dFeatures+iHigh*dPitchFloat, dFeatures+iLow*dPitchFloat, nDim);
    	
        //compute the lower and upper bound for alpha2New (the feasible segment
        //of the box [0,c]^2 cut by the equality constraint)
        if (sign < 0) {
    		if (alphaDiff < 0) {
    			lowBd = 0;
    			upBd = c + alphaDiff;
    		} else {
    			lowBd = alphaDiff;
    			upBd = c;
    		}
    	} else {
    		if (alphaSum < c) {
    			upBd = alphaSum;
    			lowBd = 0;
    		} else {
    			lowBd = alphaSum-c;
    			upBd = c;
    		}
    	}

        //update alpha2 and clip it to the feasible region based on the computed
        //bounds before
        range = upBd - lowBd;
    	if (eta > 0) {
            //update alpha_iLow via the Newton step along the pair
    		alpha2New = alpha2Old + dLabels[iLow]*bDiff/eta;
    		if (alpha2New < lowBd)     { alpha2New = lowBd; }
    		else if (alpha2New > upBd) { alpha2New = upBd; }
    	} else {
    		//non-positive curvature: the objective is linear (or concave) along
    		//the segment, so the optimum sits at an endpoint
    		float delta = dLabels[iLow]*bDiff*range;
    		if (delta > 0) {
    			if (dLabels[iLow]*bDiff > 0) { alpha2New = upBd; } 
                else                         { alpha2New = lowBd; }
    		} else { alpha2New = alpha2Old;	}
    	}

        //update alpha_iHigh
        //don't have to clip this - we're guaranteed to have alpha1New in [0,C]
    	alpha1New = alpha1Old-sign*(alpha2New-alpha2Old);
    
        //store in result vector and update global alpha vector
        *(dResult + 0) = alpha2Old;
        *(dResult + 1) = alpha1Old;
        *(dResult + 2) = bLow;
        *(dResult + 3) = bHigh;
        *(dResult + 4) = alpha2New;
        *(dResult + 5) = alpha1New;
        *(dResult + 6) = float(iLow);
        *(dResult + 7) = float(iHigh);
        dAlpha[iLow] = alpha2New;
        dAlpha[iHigh] = alpha1New;
  }
}

#endif
