#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <cuda.h>
#include "inc/cuda_utils.h"
#include "svm_kernels.h"

void svm_train(float*, float*, float**, float&, int, int, float, float, float);

//point of entry for command line application
//point of entry for command line application
//usage: <prog> <data_file>; writes the trained model to "<data_file>.our.model"
int main(int argc, char **argv) {
    cudaPrintfInit();

    //validate command line before touching argv[1]
    if (argc < 2) {
        fprintf(stderr, "Usage: %s <data_file>\n", argv[0]);
        return 1;
    }

    //build output model filename: "<input>.our.model"
    const char *dataFile = argv[1];
    size_t inputNameLength = strlen(dataFile);
    const char suffix[] = ".our.model";
    //sizeof(suffix) == 11: the 10 suffix chars plus the terminating NUL
    //(the previous allocation of inputNameLength+10 was one byte short)
    char *outFile = (char*)malloc(inputNameLength + sizeof(suffix));
    if (outFile == NULL) {
        fprintf(stderr, "Out of memory allocating output filename\n");
        return 1;
    }
    strcpy(outFile, dataFile);
    strcat(outFile, suffix);
    printf("Model results in: %s\n", outFile);

    //init some stuff
    int nObs, nDim;
    float c = 1.0f;      //SVM regularization parameter
    float tol = 2e-3f;   //convergence tolerance
    float eps = 1e-5f;   //numerical epsilon
    struct timeval start;
    struct timeval end;
    float *hFeatures;
    float *hLabels;
    float *hAlpha;
    float b;

    //read in file (allocates hFeatures/hLabels, sets nObs/nDim)
    read_data(dataFile, &hFeatures, &hLabels, &nObs, &nDim);
    printf("Input data found: %d points, %d dimensions\n", nObs, nDim);

    //time the main svm training call
    gettimeofday(&start, 0);
    svm_train(hFeatures, hLabels, &hAlpha, b, nObs, nDim, c, eps, tol);
    gettimeofday(&end, 0);

    //compute training time in seconds
    float sec = (float)(end.tv_sec - start.tv_sec);
    float usec = (float)(end.tv_usec - start.tv_usec) * 1e-6f;
    float ttime = sec + usec;
    printf("Training time : %f seconds\n", ttime);

    //print model results
    print_model(outFile, hAlpha, hLabels, hFeatures, b, nObs, nDim, eps);

    //free stuff
    cudaPrintfEnd();
    free(hFeatures);
    free(hLabels);
    free(hAlpha);
    free(outFile);
    return 0;
}

/*
//point of entry for matlab/octave MEX
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    //perform some checks
    if (mxIsClass(prhs[0],"single")+mxIsClass(prhs[1],"single")!=2)
    {
        mexErrMsgTxt("Feature and label vector not single precision floating point");
    }

    //init some vars
    const mwSize *dims;
    int nObs, nDim;
    float *features, *labels;

    //retrieve data from rhs
    features = (float*)mxGetData(prhs[0]);
    labels = (float*)mxGetData(prhs[1]);
    float c =  (float)*(float*)mxGetData(prhs[2]);
    float eps = (float)*(float*)mxGetData(prhs[3]);
    float tol = (float)*(float*)mxGetData(prhs[4]);
    float *tempAlpha; 

    //get dimensions
    dims = mxGetDimensions(prhs[0]);
    nObs = (int)dims[1]; nDim = (int)dims[0];

    //test inputs
    //first two rows of features
//    for (int i=0; i<nDim*5; i++) {
//        printf("features[%i]=%f\n",i,features[i]);
//    }
//    for (int i=0; i<5; i++) {
//        printf("labels[%i]=%f\n",i,labels[i]);
//    }
//    for (int i=0; i<nObs; i++) {
//        printf("alpha[%i}=%f\n",i,alpha[i]);
//    }

    //NOTE(review): this commented-out call is stale — the current svm_train
    //prototype (see top of file) takes a float& b argument between &tempAlpha
    //and nObs; add it before re-enabling this MEX entry point.
    svm_train(features, labels, &tempAlpha, nObs, nDim, c, eps, tol);

    //associate alpha w data
    plhs[0] = mxMalloc(nObs*sizeof(float));
    mxSetData(plhs[0], tempAlpha);

    //clear variables
//    mxFree((float*)features);
//    mxFree((float*)labels);
//    mxFree((float*)c);
//    mxFree((float*)eps);
//    mxFree((float*)tol);
}
*/

/*
 * Trains a binary SVM on the GPU using the SMO algorithm.
 *
 * features : host array, nObs x nDim feature matrix, row-major
 * labels   : host array of nObs labels (code branches on sign, so +1/-1 expected)
 * alpha    : out — receives a malloc'd host array of nObs Lagrange multipliers
 *            (ownership transfers to the caller, who must free it)
 * b        : out — bias term, (bLow - bHigh)/2 at convergence
 * nObs     : number of training points
 * nDim     : feature dimensionality
 * c        : SVM regularization parameter
 * eps      : numerical epsilon passed to the device kernels
 * tol      : convergence tolerance; loop stops when bLow <= bHigh + 2*tol
 */
void svm_train(float *features, float *labels, float **alpha, float &b, int nObs, int nDim, float c, float eps, float tol) {
    //init some vars
    float c_eps = c - eps;
    float *dFeatures, *dKernelDiag, *dLabels, *dAlpha, *dF, *dCache, *dResult, *hResult, *hFeatures, *hAlpha, *dLocalLowFs, *dLocalHighFs;
    int dPitchFloat, hPitchSize, nBlocks;
    int iter;
    size_t dPitch, remainingMemory, totalMemory;
    int *dLocalLowIs, *dLocalHighIs;
    float alpha1Old, alpha2Old, alpha1New, alpha2New, alpha1Diff, alpha2Diff;
    int iLow, iHigh, iLowCache, iHighCache;
    float bLow, bHigh;
    bool iLowCompute, iHighCompute;
    size_t floatSize = sizeof(float);
    size_t intSize = sizeof(int);

    ////////////// STEP 1: ALLOCATE VARS TO THE DEVICE ////////////////////////////////
    //one block per BLOCK_SIZE observations, rounded up for the tail
    (nObs % BLOCK_SIZE != 0) ? (nBlocks = nObs / BLOCK_SIZE + 1) : (nBlocks = nObs / BLOCK_SIZE);
    hAlpha = (float *)malloc(floatSize * nObs);
    *alpha = hAlpha;

    //alloc the training data (row-aligned via pitch), then populate a host
    //staging copy with the same pitched layout before a single bulk copy
    CUDA_CHECK_ERROR(cudaMallocPitch((void**)&dFeatures, &dPitch, nDim * floatSize, nObs));
    dPitchFloat = (int)(dPitch / floatSize);
    hFeatures = (float*)malloc(dPitch * nObs);
    hPitchSize = dPitch / floatSize;
    for (int i = 0; i < nObs; i++) {
        for (int j = 0; j < nDim; j++) hFeatures[i * hPitchSize + j] = features[i * nDim + j];
    }
    CUDA_CHECK_ERROR(cudaMemcpy(dFeatures, hFeatures, dPitch * nObs, cudaMemcpyHostToDevice));

    //allocate other device arrays
    CUDA_CHECK_ERROR(cudaMalloc((void**)&dKernelDiag, nObs * floatSize));
    CUDA_CHECK_ERROR(cudaMalloc((void**)&dLabels, nObs * floatSize));
    CUDA_CHECK_ERROR(cudaMalloc((void**)&dAlpha, nObs * floatSize));
    CUDA_CHECK_ERROR(cudaMalloc((void**)&dF, nObs * floatSize));
    CUDA_CHECK_ERROR(cudaMalloc((void**)&dResult, 8 * floatSize));
    CUDA_CHECK_ERROR(cudaMalloc((void**)&dLocalLowFs, nBlocks * floatSize));
    CUDA_CHECK_ERROR(cudaMalloc((void**)&dLocalHighFs, nBlocks * floatSize));
    CUDA_CHECK_ERROR(cudaMalloc((void**)&dLocalLowIs, nBlocks * intSize));
    CUDA_CHECK_ERROR(cudaMalloc((void**)&dLocalHighIs, nBlocks * intSize));
    CUDA_CHECK_ERROR(cudaMemcpy(dLabels, labels, nObs * floatSize, cudaMemcpyHostToDevice));
    hResult = (float*)malloc(8 * floatSize);

    //allocate and initialize the kernel-row cache: probe the pitch a cached
    //row would take, then size the cache to ~95% of the free device memory
    void* temp;
    size_t rowPitch;
    CUDA_CHECK_ERROR(cudaMallocPitch(&temp, &rowPitch, nObs * floatSize, 2));
    CUDA_CHECK_ERROR(cudaFree(temp));
    CUDA_CHECK_ERROR(cudaMemGetInfo(&remainingMemory, &totalMemory));
    size_t cacheSize = (remainingMemory * 0.95) / rowPitch;
    //(removed a no-op (int)((float)cacheSize) round-trip that could lose
    //precision for cache sizes above 2^24 rows)
    if (nObs < cacheSize) { cacheSize = nObs; }
    size_t cachePitch;
    CUDA_CHECK_ERROR(cudaMallocPitch((void**)&dCache, &cachePitch, nObs * floatSize, cacheSize));
    int dCachePitchFloat = (int)cachePitch / floatSize;
    int *Cache = (int*)malloc(cacheSize * sizeof(int));
    for (int i = 0; i < cacheSize; i++) Cache[i] = -1;   //-1 marks an empty slot
    iLowCompute = 1; iHighCompute = 1;
    iLowCache = 0; iHighCache = 0;

    //check for the last allocation error
    cudaError_t err = cudaGetLastError();
    if (err) { printf("Error: %s\n", cudaGetErrorString(err)); }
    printf("Allocated arrays on GPU\n");

    ///////////// STEP 2: INITIALIZE SMO ALGORITHM ////////////////////////////////////
    dim3 blocksLocal(nBlocks);
    dim3 threadsLocal(BLOCK_SIZE);
    __init<<<blocksLocal, threadsLocal>>>(dFeatures, dPitchFloat, nObs, nDim, dKernelDiag, dAlpha, dF, dLabels);
    CUDA_CHECK_ERROR(cudaGetLastError());   //catch launch-config errors

    //choose initial iterates: first negative-label and first positive-label
    //points; the i = nObs assignment exits the scan once both are found
    bLow = 1; bHigh = -1; iLow = -1; iHigh = -1;
    for (int i = 0; i < nObs; i++) {
        if (labels[i] < 0) {
            if (iLow == -1) {
                iLow = i; if (iHigh > -1) { i = nObs; }
            }
        } else {
            if (iHigh == -1) {
                iHigh = i; if (iLow > -1) { i = nObs; }
            }
        }
    }

    //unroll the first iteration and update results
    dim3 blockSingle(1);
    dim3 threadSingle(1);
    __smo_first_iter<<<blockSingle, threadSingle>>>(dResult, dKernelDiag, dFeatures, dPitchFloat, dAlpha, c, nDim, iLow, iHigh);
    CUDA_CHECK_ERROR(cudaMemcpy((void*)hResult, (void*)dResult, 8 * floatSize, cudaMemcpyDeviceToHost));
    //NOTE(review): __smo_first_iter appears to pack the new alphas at slots
    //6/7 while the main-loop kernel uses 4/5 for them and 6/7 for the
    //indices — layouts are set by the device kernels; confirm against
    //svm_kernels before changing
    alpha2Old  = *(hResult + 0);
    alpha1Old  = *(hResult + 1);
    bLow       = *(hResult + 2);
    bHigh      = *(hResult + 3);
    alpha2New  = *(hResult + 6);
    alpha1New  = *(hResult + 7);
    alpha1Diff = alpha1New - alpha1Old;
    alpha2Diff = alpha2New - alpha2Old;

    /////////////// STEP 3: RUN MAIN SMO ITERATION LOOP ///////////////////////////////
    // prepare for main iteration loop
    dim3 threadsGlobal(BLOCK_SIZE);
    int sharedMemSize = 2 * nDim * floatSize;   //two feature rows staged in shared memory
    printf("Starting iterations\n");
    for (iter = 1; 1; iter++) {
        //KKT-style stopping criterion on the bias bounds
        if (bLow <= bHigh + 2 * tol) {
            printf("Houston we have converged\n");
            break;
        }

        //find kernel cache data for iHigh and iLow indices
        check_cache(iHigh, iLow, iHighCompute, iLowCompute, iHighCache, iLowCache, Cache, cacheSize);

        //run the local (per-block reduction) and global (final selection) SMO kernels
        __smo_local<<<blocksLocal, threadsLocal, sharedMemSize>>>(dFeatures, dPitchFloat, dLabels, nObs, nDim, eps, c_eps, dAlpha, dF, alpha1Diff * labels[iHigh], alpha2Diff * labels[iLow], iLow, iHigh, dCache, dCachePitchFloat, iLowCache, iHighCache, iLowCompute, iHighCompute, dLocalLowIs, dLocalHighIs, dLocalLowFs, dLocalHighFs);
        __smo_global<<<1, threadsGlobal>>>(dFeatures, dPitchFloat, dLabels, dKernelDiag, dAlpha, dResult, c, nDim, dLocalLowIs, dLocalHighIs, dLocalLowFs, dLocalHighFs, blocksLocal.x);

        //return result and compute alpha deltas for the next iteration
        CUDA_CHECK_ERROR(cudaMemcpy((void*)hResult, (void*)dResult, 8 * floatSize, cudaMemcpyDeviceToHost));
        alpha2Old = *(hResult + 0);
        alpha1Old = *(hResult + 1);
        bLow      = *(hResult + 2);
        bHigh     = *(hResult + 3);
        alpha2New = *(hResult + 4);
        alpha1New = *(hResult + 5);
        iLow      = *(hResult + 6);   //indices come back as floats; truncated here
        iHigh     = *(hResult + 7);
        alpha1Diff = alpha1New - alpha1Old;
        alpha2Diff = alpha2New - alpha2Old;
    }

    //finalization: print some statistics and get lagrangian weights from device
    b = (bLow - bHigh) / 2;
    printf("%d iterations\n", iter);
    printf("bLow: %f, bHigh: %f\n", bLow, bHigh);
    CUDA_CHECK_ERROR(cudaMemcpy((void*)hAlpha, (void*)dAlpha, nObs * floatSize, cudaMemcpyDeviceToHost));

    //free vars (dCache and the host-side Cache were previously leaked)
    cudaFree(dFeatures);
    cudaFree(dKernelDiag);
    cudaFree(dLabels);
    cudaFree(dAlpha);
    cudaFree(dF);
    cudaFree(dResult);
    cudaFree(dLocalLowFs);
    cudaFree(dLocalHighFs);
    cudaFree(dLocalLowIs);
    cudaFree(dLocalHighIs);
    cudaFree(dCache);
    free(Cache);
    free(hResult);
    free(hFeatures);
}

