#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <sys/time.h> 
#include "cuda_utils.h"
#include "svmIO.cpp"
#include "Cache.cpp"
#include "initialize.h"
#include "firstOrder.h"
#include "math.h"

//BLOCKSIZE: threads per block for the linear (one-thread-per-observation) kernels
#define BLOCKSIZE 256
// Integer ceiling division: number of blocks needed to cover `a` items with `b`
// threads per block. Arguments and the full expansion are parenthesized so the
// macro stays correct inside larger expressions (e.g. intRoundUp(x + y, b));
// the original unparenthesized form miscomputed such calls.
#define intRoundUp(a, b) (((a) % (b) != 0) ? ((a) / (b) + 1) : ((a) / (b)))


// GPU SMO trainer; fills *p_alpha with a malloc'd array of n_obs Lagrange
// multipliers (caller frees). See definition below for parameter details.
void svm_training(float* data, float* labels, float** p_alpha, int n_obs, int n_dim, float cost, float epsilon, float tolerance);

/*
 * Program entry point.
 *
 * Usage: <program> <training_file>
 *
 * Reads an SVM training set with readSvm(), runs GPU SMO training via
 * svm_training(), and prints the wall-clock time the training call took.
 */
int main(int argc, char** argv) {
    // Guard against a missing command-line argument before touching argv[1];
    // the original dereferenced argv[1] unconditionally and crashed when run
    // without arguments.
    if (argc < 2) {
        fprintf(stderr, "Usage: %s <training_file>\n", argv[0]);
        return 1;
    }

    cudaPrintfInit();

    //get training file path
    const char* training_file = argv[1];

    //training hyper-parameters
    int n_obs, n_dim;
    float cost = 5.0f;       // SVM regularization parameter C
    float tolerance = 1e-3f; // convergence tolerance on the duality gap
    float epsilon = 1e-5f;   // numerical epsilon for the alpha updates
    struct timeval start;
    struct timeval end;

    //host arrays: data/labels are allocated by readSvm, alpha by svm_training
    float* data;
    float* labels;
    float* alpha;

    //read in file
    readSvm(training_file, &data, &labels, &n_obs, &n_dim);
    printf("Input data found: %d points, %d dimensions\n", n_obs, n_dim);

    //time only the main svm training call
    gettimeofday(&start, 0);
    svm_training(data, labels, &alpha, n_obs, n_dim, cost, epsilon, tolerance);
    gettimeofday(&end, 0);
    //compute training time in seconds (float literal avoids a double round-trip)
    float training_time = (float)(end.tv_sec - start.tv_sec) + ((float)(end.tv_usec - start.tv_usec)) * 1e-6f;
    printf("Training time : %f seconds\n", training_time);

    cudaPrintfEnd();
    free(data);
    free(labels);
    free(alpha);
    return 0;
}



/*
 * Trains a binary SVM with the SMO algorithm on the GPU, using first-order
 * working-set selection (the b_high / b_low heuristic).
 *
 * data      host array, n_obs rows of n_dim floats (row-major)
 * labels    host array of n_obs labels (the init loop below expects a mix of
 *           negative and non-negative values, i.e. -1 / +1)
 * p_alpha   out: receives a malloc'd array of n_obs Lagrange multipliers;
 *           caller owns it and must free() it
 * cost      SVM regularization parameter C
 * epsilon   numerical epsilon used by the update kernels
 * tolerance convergence threshold: iterate until bLow <= bHigh + 2*tolerance
 */
void svm_training(float* data, float* labels, float** p_alpha, int n_obs, int n_dim, float cost, float epsilon, float tolerance) {

    //init some vars
    float cost_epsilon = cost - epsilon;
    float* devData;
    float* devKernelDiag;
    float* devLabels;
    float* devAlpha;
    float* devF;
    float* hostResult;
    float* devResult;
    float* hostData;
    float* hostAlpha;
    int iter;

    //the alpha array is returned to the caller through p_alpha
    hostAlpha = (float *)malloc(sizeof(float)*n_obs);
    *p_alpha = hostAlpha;

    //alloc the training data on the device with aligned (pitched) rows
    size_t devDataPitch;
    CUDA_CHECK_ERROR(cudaMallocPitch((void**)&devData, &devDataPitch, n_dim*sizeof(float), n_obs));

    int hostPitchSize = n_dim;
    bool hostDataAlloced = false;

    if (devDataPitch == n_dim * sizeof(float)) {
        printf("Training data is already aligned\n");
        hostData = data;
    } else {
        //repack the host data so each row starts on the device pitch boundary,
        //allowing a single flat cudaMemcpy below
        hostPitchSize = devDataPitch/sizeof(float);
        hostData = (float*)malloc(devDataPitch*n_obs);
        hostDataAlloced = true;
        printf("Realigning data to a pitch of %i floats\n", hostPitchSize);

        for(int i=0; i<n_obs; i++) {
            for(int j=0; j<n_dim; j++)
                hostData[i*hostPitchSize+j]=data[i*n_dim+j];
        }
    }

    CUDA_CHECK_ERROR(cudaMemcpy(devData, hostData, devDataPitch*n_obs, cudaMemcpyHostToDevice));

    int devDataPitchInFloats = (int) (devDataPitch / sizeof(float));

    //allocate other device arrays; devResult holds the 8-float per-iteration
    //summary (old/new alphas, bLow/bHigh, working-set indices)
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devKernelDiag, n_obs*sizeof(float)));
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devLabels, n_obs*sizeof(float)));
    CUDA_CHECK_ERROR(cudaMemcpy(devLabels, labels, n_obs*sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devAlpha, n_obs*sizeof(float)));
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devF, n_obs*sizeof(float)));
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devResult, 8*sizeof(float)));
    hostResult = (float*)malloc(8*sizeof(float));

    //BLOCKSIZE default 256
    int blockWidth = intRoundUp(n_obs, BLOCKSIZE);

    //blockWidth determines the number of blocks. In every block,
    //we find the b_high_i and b_low_i based on f_high_i and f_low_i
    float* devLocalFsRL;
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devLocalFsRL, blockWidth*sizeof(float)));
    float* devLocalFsRH;
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devLocalFsRH, blockWidth*sizeof(float)));
    int* devLocalIndicesRL;
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devLocalIndicesRL, blockWidth*sizeof(int)));
    int* devLocalIndicesRH;
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devLocalIndicesRH, blockWidth*sizeof(int)));

    //probe allocation to determine the pitch of an n_obs-wide row
    void* temp;
    size_t rowPitch;
    CUDA_CHECK_ERROR(cudaMallocPitch(&temp, &rowPitch, n_obs*sizeof(float), 2));
    CUDA_CHECK_ERROR(cudaFree(temp));

    //get device mem information
    size_t remainingMemory;
    size_t totalMemory;
    cudaMemGetInfo(&remainingMemory, &totalMemory);

    //compute size of kernel-row cache (95% of available memory), capped at
    //one row per observation
    size_t sizeOfCache = remainingMemory/rowPitch;
    sizeOfCache = (size_t)((float)sizeOfCache*0.95f);

    if ((size_t)n_obs < sizeOfCache) {
        sizeOfCache = n_obs;
    }
    //%zu is the C99 length modifier for size_t (the original %Zu is a
    //non-standard glibc extension)
    printf("%zu bytes of memory found on device, %zu bytes currently free\n",
            totalMemory, remainingMemory);
    printf("%zu rows of kernel matrix will be cached (%zu bytes per row)\n",
            sizeOfCache, rowPitch);

    //allocate cache
    float* devCache;
    size_t cachePitch;
    CUDA_CHECK_ERROR(cudaMallocPitch((void**)&devCache, &cachePitch, n_obs*sizeof(float), sizeOfCache));
    int devCachePitchInFloats = (int)(cachePitch/sizeof(float));

    //check for the last allocation error
    cudaError_t err = cudaGetLastError();
    if(err) { printf("Error: %s\n", cudaGetErrorString(err)); }
    printf("Allocated arrays on GPU\n");

    //launch configuration for the per-observation kernels
    dim3 threadsLinear(BLOCKSIZE);
    dim3 blocksLinear(blockWidth);

    //now that memory has been allocated, initialize device state
    launchInitialization(devData, devDataPitchInFloats, n_obs,
                         n_dim, devKernelDiag, devAlpha, devF, devLabels,
                         blocksLinear, threadsLinear);
    err = cudaGetLastError();
    if(err) printf("Error: %s\n", cudaGetErrorString(err));
    printf("Initialization complete\n");

    //Choose initial working set: the first negative-label point (iLow) and the
    //first non-negative-label point (iHigh); setting i = n_obs exits early
    //once both have been found.
    float bLow = 1;
    float bHigh = -1;
    int iLow = -1;
    int iHigh = -1;
    for (int i = 0; i < n_obs; i++) {

        printf("label: %f \n", labels[i]);

        if (labels[i] < 0) {
            if (iLow == -1) {
                iLow = i;
                if (iHigh > -1) { i = n_obs; }
        }} else {
            if (iHigh == -1) {
                iHigh = i;
                if (iLow > -1) { i = n_obs; }
    }}}

    printf("iLow: %d\n",iLow);
    printf("iHigh: %d\n",iHigh);

    //unroll the first step of the iteration on a single thread, copy the
    //result back, and derive the alpha deltas for the main loop
    dim3 singletonThreads(1);
    dim3 singletonBlocks(1);
    launchTakeFirstStep(devResult, devKernelDiag, devData, devDataPitchInFloats, devAlpha, cost, n_dim,
                        iLow, iHigh, singletonBlocks, singletonThreads);

    CUDA_CHECK_ERROR(cudaMemcpy((void*)hostResult, (void*)devResult, 8*sizeof(float), cudaMemcpyDeviceToHost));

    // NOTE(review): takeFirstStep appears to publish the new alphas in slots
    // 6/7, while the first-order kernel below uses 4/5 and returns indices in
    // 6/7 — confirm both layouts against the kernel sources.
    float alpha20ld = *(hostResult+0);
    float alpha10ld = *(hostResult+1);
    bLow = *(hostResult+2);
    bHigh = *(hostResult+3);
    float alpha2New = *(hostResult+6);
    float alpha1New = *(hostResult+7);
    float alpha1Diff = alpha1New - alpha10ld;
    float alpha2Diff = alpha2New - alpha20ld;

    //prepare for main iteration loop
    dim3 reduceThreads(BLOCKSIZE);

    // The cache-lookup calls below are disabled, so these indices would have
    // been passed to launchFirstOrder uninitialized (undefined behavior).
    // Zero them explicitly; re-enable kernelCache.findData to use the cache.
    int iLowCacheIndex = 0, iHighCacheIndex = 0;
    bool iLowCompute, iHighCompute;
    printf("Starting iterations\n");

    Cache kernelCache(n_obs, sizeOfCache);

    for (iter=1; 1; iter++) {

        //convergence test: the duality gap has closed to within 2*tolerance
        if (bLow <= bHigh+2*tolerance) {
            printf("Houston we have converged\n");
            break;
        }
//        printf("iteration: %d; gap: %f\n", iter, bLow - bHigh);

        //find kernel cache data for iHigh and iLow indices
//        kernelCache.findData(iHigh, iHighCacheIndex, iHighCompute);
//        kernelCache.findData(iLow, iLowCacheIndex, iLowCompute);

        //launch wrapper for first order update kernel
        launchFirstOrder(true, true, n_obs, n_dim, blocksLinear, threadsLinear, reduceThreads, devData, devDataPitchInFloats, devLabels, epsilon, cost_epsilon, devAlpha, devF, alpha1Diff*labels[iHigh],  alpha2Diff*labels[iLow], iLow, iHigh, devCache, devCachePitchInFloats, iLowCacheIndex,  iHighCacheIndex, devLocalIndicesRL, devLocalIndicesRH, devLocalFsRH, devLocalFsRL,  devKernelDiag, devResult, cost);

        //return result and compute the new alpha deltas / working set
        CUDA_CHECK_ERROR(cudaMemcpy((void*)hostResult, (void*)devResult, 8*sizeof(float), cudaMemcpyDeviceToHost));
        alpha20ld = *(hostResult+0);
        alpha10ld = *(hostResult+1);
        bLow      = *(hostResult+2);
        bHigh     = *(hostResult+3);
        alpha2New = *(hostResult+4);
        alpha1New = *(hostResult+5);
        iLow      = int(*(hostResult+6));
        iHigh     = int(*(hostResult+7));
        alpha1Diff = alpha1New - alpha10ld;
        alpha2Diff = alpha2New - alpha20ld;

        cudaPrintfDisplay(stdout,true);

    }

    //finalization: print some statistics and copy the lagrangian weights back
    printf("%d iterations\n", iter);
    printf("bLow: %f, bHigh: %f\n", bLow, bHigh);
    kernelCache.printStatistics();
    CUDA_CHECK_ERROR(cudaMemcpy((void*)hostAlpha, (void*)devAlpha, n_obs*sizeof(float), cudaMemcpyDeviceToHost));

    //free device resources (the original leaked devCache and the four
    //devLocal* reduction buffers)
    cudaFree(devData);
    cudaFree(devKernelDiag);
    cudaFree(devLabels);
    cudaFree(devAlpha);
    cudaFree(devF);
    cudaFree(devResult);
    cudaFree(devCache);
    cudaFree(devLocalFsRL);
    cudaFree(devLocalFsRH);
    cudaFree(devLocalIndicesRL);
    cudaFree(devLocalIndicesRH);
    //hostResult was malloc'd, so it must be released with free(), not cudaFree()
    free(hostResult);

    if (hostDataAlloced)
        free(hostData);
}




