#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include "svm.cpp"
#include <cuda.h>
#include <sys/time.h> 
#include "cuda_utils.h"

// Ceiling integer division: number of size-b chunks needed to cover a items.
// Arguments and the whole expansion are fully parenthesized so the macro
// expands correctly when given expression arguments (the original form
// misparsed e.g. intRoundUp(x + y, z) because % and / bound to the
// sub-expressions).
#define intRoundUp(a, b) (((a) % (b) != 0) ? ((a) / (b) + 1) : ((a) / (b)))
#define BLOCKSIZE 256

__global__ void smo_init(float* dData, int dDataPitchSize, int nObs, int nDim, float* dKernelDiag, float* dAlpha, float* dF, float* dLabels);
__global__ void smo_first_iter(float* dResult, float* dKernelDiag, float* dData, int dDataPitchSize, float* dAlpha, float cost, int nDim, int iLow, int iHigh);

__global__ void smo_local(float* dData, int dDataPitchSize,  float* dLabels, int nObs, int nDim, float epsilon, float cost_epsilon, float* dAlpha, float* dF, float alpha1Diff, float alpha2Diff, int iLow, int iHigh, float* dCache, int dCachePitchSize, int iLowCacheIndex, int iHighCacheIndex, int* devLocalIndicesRL, int* devLocalIndicesRH, float* devLocalFsRL, float* devLocalFsRH, bool iHighCompute, bool iLowCompute);
__global__ void smo_global(float* dData, int dDataPitchSize, float* dLabels, float* dKernelDiag, float* dAlpha, float* dResult, float cost, int nDim, int* devLocalIndicesRL, int* devLocalIndicesRH, float* devLocalFsRL, float* devLocalFsRH, int inputSize, float epsilon);


int readSvm(const char* filename, float** p_data, float** p_labels, int* p_nObs, int* p_nDim);
void printModel(const char* outputFileName, float* alpha, float* labels, float* data, int nPoints, int nDimension, float epsilon, float rho);

void check_cache(int iHigh, int iLow, bool& iHighCompute, bool& iLowCompute, int& iHighCacheIndex, int& iLowCacheIndex, int* Cache, int cacheSize);

/*
 * Train a binary SVM via SMO, with the per-iteration work offloaded to the
 * CUDA kernels smo_init / smo_first_iter / smo_local / smo_global.
 *
 * data      host matrix, nObs rows x nDim columns, row-major, packed
 * labels    host vector of +/-1 class labels, length nObs
 * p_alpha   out: receives a malloc'd array of nObs Lagrange multipliers;
 *           ownership of that buffer passes to the caller
 * nObs      number of training observations
 * nDim      number of features per observation
 * cost      SVM box constraint C
 * epsilon   numerical slack used by the kernels
 * tolerance stopping tolerance: converged when bLow <= bHigh + 2*tolerance
 * rho       out: bias term, (bLow + bHigh) / 2
 */
void svm_training(float* data, float* labels, float** p_alpha, int nObs, int nDim, float cost, float epsilon, float tolerance, float& rho) {

    // C - eps, used by smo_local when clipping against the box constraint.
    float cost_epsilon = cost - epsilon;

    // Output buffer: ownership transfers to the caller through *p_alpha.
    float* hostAlpha = (float*)malloc(sizeof(float) * nObs);
    *p_alpha = hostAlpha;

    // Choose the initial working pair: first negative example (iLow) and
    // first positive example (iHigh). Done before any device allocation so
    // degenerate input can bail out cheaply, and so we never index
    // labels[-1] in the kernel launches below.
    float bLow = 1;
    float bHigh = -1;
    int iLow = -1;
    int iHigh = -1;
    for (int i = 0; i < nObs && (iLow < 0 || iHigh < 0); i++) {
        if (labels[i] < 0) {
            if (iLow == -1) iLow = i;
        } else {
            if (iHigh == -1) iHigh = i;
        }
    }
    if (iLow < 0 || iHigh < 0) {
        // SMO needs at least one example of each class.
        fprintf(stderr, "svm_training: labels contain only one class, cannot train\n");
        rho = 0.0f;
        return;
    }
    printf("iLow: %d\n", iLow);
    printf("iHigh: %d\n", iHigh);

    // Pitched device copy of the training matrix so every row is aligned.
    float* dData;
    size_t dDataPitch;
    CUDA_CHECK_ERROR(cudaMallocPitch((void**)&dData, &dDataPitch, nDim * sizeof(float), nObs));
    int dDataPitchSize = (int)(dDataPitch / sizeof(float)); // pitch in floats

    // If the pitch matches the packed row width we can upload `data`
    // directly; otherwise stage a re-aligned copy on the host first.
    float* hostData;
    if (dDataPitch == nDim * sizeof(float)) {
        printf("Training data is already aligned\n");
        hostData = data;
    } else {
        int hostPitchSize = (int)(dDataPitch / sizeof(float));
        hostData = (float*)malloc(dDataPitch * nObs);
        printf("Realigning data to a pitch of %i floats\n", hostPitchSize);
        for (int i = 0; i < nObs; i++) {
            for (int j = 0; j < nDim; j++)
                hostData[i * hostPitchSize + j] = data[i * nDim + j];
        }
    }
    CUDA_CHECK_ERROR(cudaMemcpy(dData, hostData, dDataPitch * nObs, cudaMemcpyHostToDevice));

    // Remaining device arrays.
    float* dKernelDiag; // K(i,i) per observation
    float* dLabels;     // +/-1 labels
    float* dAlpha;      // Lagrange multipliers
    float* dF;          // optimality indicator f_i per observation
    float* dResult;     // 8-float scratch the reduction kernels write into
    CUDA_CHECK_ERROR(cudaMalloc((void**)&dKernelDiag, nObs * sizeof(float)));
    CUDA_CHECK_ERROR(cudaMalloc((void**)&dLabels, nObs * sizeof(float)));
    CUDA_CHECK_ERROR(cudaMemcpy(dLabels, labels, nObs * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK_ERROR(cudaMalloc((void**)&dAlpha, nObs * sizeof(float)));
    CUDA_CHECK_ERROR(cudaMalloc((void**)&dF, nObs * sizeof(float)));
    CUDA_CHECK_ERROR(cudaMalloc((void**)&dResult, 8 * sizeof(float)));
    float* hostResult = (float*)malloc(8 * sizeof(float));

    // One block per BLOCKSIZE observations; each block emits one local
    // candidate (index, f value) for both the "low" and "high" reductions,
    // which smo_global then reduces across blocks.
    int blockWidth = intRoundUp(nObs, BLOCKSIZE);
    float* devLocalFsRL;
    float* devLocalFsRH;
    int* devLocalIndicesRL;
    int* devLocalIndicesRH;
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devLocalFsRL, blockWidth * sizeof(float)));
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devLocalFsRH, blockWidth * sizeof(float)));
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devLocalIndicesRL, blockWidth * sizeof(int)));
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devLocalIndicesRH, blockWidth * sizeof(int)));

    dim3 threadNum(BLOCKSIZE);
    dim3 blockNum(blockWidth);

    // Initialize alpha, f and the kernel diagonal on the device.
    smo_init<<<blockNum, threadNum>>>(dData, dDataPitchSize, nObs, nDim, dKernelDiag, dAlpha, dF, dLabels);
    cudaError_t err = cudaGetLastError();
    if (err) printf("Error: %s\n", cudaGetErrorString(err));
    printf("Initialization complete\n");

    // Unrolled first SMO step on a single thread; results come back in
    // dResult.
    dim3 singleThread(1);
    dim3 singleBlock(1);
    smo_first_iter<<<singleBlock, singleThread>>>(dResult, dKernelDiag, dData, dDataPitchSize, dAlpha, cost, nDim, iLow, iHigh);
    CUDA_CHECK_ERROR(cudaMemcpy((void*)hostResult, (void*)dResult, 8 * sizeof(float), cudaMemcpyDeviceToHost));
    // NOTE(review): smo_first_iter reports the new alphas in slots 6/7 while
    // smo_global reports them in 4/5 (with indices in 6/7) -- confirm this
    // layout difference against the kernel implementations.
    float alpha2Old = hostResult[0];
    float alpha1Old = hostResult[1];
    bLow  = hostResult[2];
    bHigh = hostResult[3];
    float alpha2New = hostResult[6];
    float alpha1New = hostResult[7];
    float alpha1Diff = alpha1New - alpha1Old;
    float alpha2Diff = alpha2New - alpha2Old;

    printf("alpha2Old: %f\n", alpha2Old);
    printf("alpha1Old: %f\n", alpha1Old);
    printf("bLow: %f\n", bLow);
    printf("bHigh: %f\n", bHigh);
    printf("alpha1Diff: %f\n", alpha1Diff);
    printf("alpha2Diff: %f\n", alpha2Diff);

    // Probe the pitch cudaMallocPitch uses for an nObs-wide float row so the
    // kernel-row cache below can be sized in whole rows.
    void* temp;
    size_t rowPitch;
    CUDA_CHECK_ERROR(cudaMallocPitch(&temp, &rowPitch, nObs * sizeof(float), 2));
    CUDA_CHECK_ERROR(cudaFree(temp));

    // Devote 95% of the remaining device memory to caching kernel rows.
    size_t remainingMemory;
    size_t totalMemory;
    cudaMemGetInfo(&remainingMemory, &totalMemory);
    size_t sizeOfCache = remainingMemory / rowPitch;
    int cacheSize = (int)((float)sizeOfCache * 0.95);

    bool iLowCompute = true, iHighCompute = true;
    int iLowCacheIndex = 0, iHighCacheIndex = 0;

    // Host-side table mapping cache slots to observation indices; -1 = empty.
    int* Cache = (int*)malloc(cacheSize * sizeof(int));
    for (int i = 0; i < cacheSize; i++) Cache[i] = -1;

    // Device cache: cacheSize pitched rows of nObs kernel values each.
    float* dCache;
    size_t cachePitch;
    CUDA_CHECK_ERROR(cudaMallocPitch((void**)&dCache, &cachePitch, nObs * sizeof(float), cacheSize));
    int dCachePitchSize = (int)(cachePitch / sizeof(float));

    dim3 reduceThreads(BLOCKSIZE);
    printf("Starting iterations\n");

    // Dynamic shared memory for smo_local: two kernel rows of nDim floats.
    // The original passed 2*nDim BYTES, which cannot hold 2*nDim floats --
    // sized in bytes here, as the launch configuration requires.
    size_t sharedMemSize = 2 * (size_t)nDim * sizeof(float);

    int iter;
    for (iter = 1; ; iter++) {
        // Converged when the low/high optimality gap closes.
        if (bLow <= bHigh + 2 * tolerance) {
            printf("Houston we have converged\n");
            break;
        }

        // Decide whether the kernel rows for iHigh/iLow must be recomputed
        // or can be reused from the cache.
        check_cache(iHigh, iLow, iHighCompute, iLowCompute, iHighCacheIndex, iLowCacheIndex, Cache, cacheSize);

        // Per-block f update plus local b_low/b_high candidate reductions...
        smo_local<<<blockNum, threadNum, sharedMemSize>>>(dData, dDataPitchSize, dLabels, nObs, nDim, epsilon, cost_epsilon, dAlpha, dF, alpha1Diff * labels[iHigh], alpha2Diff * labels[iLow], iLow, iHigh, dCache, dCachePitchSize, iLowCacheIndex, iHighCacheIndex, devLocalIndicesRL, devLocalIndicesRH, devLocalFsRL, devLocalFsRH, iHighCompute, iLowCompute);
        // ...then a single-block global reduction picks the next working
        // pair and takes the SMO step.
        smo_global<<<singleBlock, reduceThreads>>>(dData, dDataPitchSize, dLabels, dKernelDiag, dAlpha, dResult, cost, nDim, devLocalIndicesRL, devLocalIndicesRH, devLocalFsRL, devLocalFsRH, blockWidth, epsilon);

        // Pull back this iteration's results (blocking copy also surfaces
        // any asynchronous kernel error).
        CUDA_CHECK_ERROR(cudaMemcpy((void*)hostResult, (void*)dResult, 8 * sizeof(float), cudaMemcpyDeviceToHost));
        alpha2Old  = hostResult[0];
        alpha1Old  = hostResult[1];
        bLow       = hostResult[2];
        bHigh      = hostResult[3];
        alpha2New  = hostResult[4];
        alpha1New  = hostResult[5];
        iLow       = (int)hostResult[6];
        iHigh      = (int)hostResult[7];
        alpha1Diff = alpha1New - alpha1Old;
        alpha2Diff = alpha2New - alpha2Old;
    }

    // Report, fetch the trained multipliers, and compute the bias.
    printf("%d iterations\n", iter);
    printf("bLow: %f, bHigh: %f\n", bLow, bHigh);
    CUDA_CHECK_ERROR(cudaMemcpy((void*)hostAlpha, (void*)dAlpha, nObs * sizeof(float), cudaMemcpyDeviceToHost));
    rho = (bLow + bHigh) / 2;

    // Release everything except hostAlpha, which the caller now owns.
    cudaFree(dData);
    cudaFree(dKernelDiag);
    cudaFree(dLabels);
    cudaFree(dAlpha);
    cudaFree(dF);
    cudaFree(dResult);
    cudaFree(dCache);            // was leaked
    cudaFree(devLocalIndicesRL);
    cudaFree(devLocalIndicesRH);
    cudaFree(devLocalFsRL);
    cudaFree(devLocalFsRH);
    free(hostResult);            // was released with cudaFree (wrong allocator)
    free(Cache);                 // was leaked
    if (hostData != data)        // only free the staging copy, never the caller's buffer
        free(hostData);
}




/*
 * Entry point: read a training file, train the SVM on the GPU, and write the
 * resulting model to "<input>.model".
 */
int main(int argc, char** argv) {

    // The training-file path is mandatory; the original dereferenced
    // argv[1] unconditionally (UB when run with no arguments).
    if (argc < 2) {
        fprintf(stderr, "Usage: %s <training_file>\n", argv[0]);
        return 1;
    }
    const char* training_file = argv[1];

    // Build the output name "<input>.model"; +10 covers ".model" plus NUL.
    size_t inputNameLength = strlen(training_file);
    char* outputFile = (char*)malloc(inputNameLength + 10);
    strcpy(outputFile, training_file);
    strcat(outputFile, ".model");
    printf("%s\n", outputFile);

    // Training hyper-parameters.
    int nObs, nDim;
    float cost = 1.0f;       // SVM box constraint C
    float tolerance = 1e-3f; // stopping tolerance
    float epsilon = 1e-5f;   // numerical slack
    float rho;               // bias term, filled in by svm_training
    struct timeval start;
    struct timeval end;

    // Allocated by readSvm / svm_training respectively.
    float* data;
    float* labels;
    float* alpha;

    // NOTE(review): readSvm's int return value is ignored here -- confirm
    // whether it signals read failures that should abort the run.
    readSvm(training_file, &data, &labels, &nObs, &nDim);
    printf("Input data found: %d points, %d dimensions\n", nObs, nDim);

    // Time the training call alone.
    gettimeofday(&start, 0);
    svm_training(data, labels, &alpha, nObs, nDim, cost, epsilon, tolerance, rho);
    gettimeofday(&end, 0);
    float training_time = (float)(end.tv_sec - start.tv_sec)
                        + (float)(end.tv_usec - start.tv_usec) * 1e-6f;
    printf("Training time : %f seconds\n", training_time);

    // Persist the trained model.
    printModel(outputFile, alpha, labels, data, nObs, nDim, epsilon, rho);

    free(outputFile); // was leaked
    free(data);
    free(labels);
    free(alpha);
    return 0;
}






