#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <sys/time.h> 
#include "cuda_utils.h"
#include "cuPrintf.cu"
#include "svmIO.cpp"
#include "Cache.cpp"
#include "initialize.h"
#include "firstOrder.h"

// Integer ceiling division: number of size-b chunks needed to cover a items.
// BUGFIX: arguments and the full expansion are now parenthesized so calls like
// intRoundUp(x + 1, y) or blockWidth * intRoundUp(a, b) expand correctly.
#define intRoundUp(a, b) (((a) % (b) != 0) ? ((a) / (b) + 1) : ((a) / (b)))

/**
 * Trains a binary SVM on the GPU with an SMO-style first-order solver.
 *
 * data           - training data laid out as n_dim rows of n_obs floats
 *                  (data[dim*n_obs + obs]), copied to a pitched device array.
 * transposedData - optional transposed copy (n_obs rows of n_dim floats);
 *                  pass 0/NULL and it is built here from `data`.
 * labels         - n_obs class labels; the initial-point search below expects
 *                  them to be signed (+/-) values.
 * p_alpha        - out-parameter: receives a malloc'd array of n_obs Lagrange
 *                  multipliers; the CALLER must free() it.
 * n_obs, n_dim   - number of training points / feature dimensions.
 * cost           - SVM regularization parameter C.
 * epsilon        - numeric slack used when clipping alphas.
 * tolerance      - convergence threshold on the bLow/bHigh duality gap.
 */
void svm_training(float* data, float* transposedData, float* labels, float** p_alpha, int n_obs, int n_dim, float cost, float epsilon, float tolerance) {

    //init some vars
    float cost_epsilon = cost - epsilon;
    float* devData;
    float* devTransposedData;
    float* devKernelDiag;
    float* devLabels;
    float* devAlpha;
    float* devF;
    float* hostResult;
    void* devResult; //void* because slots are read back as both float and int
    size_t devDataPitch;
    size_t devTransposedDataPitch;
    int hostPitchSize = n_obs;
    bool hostDataAlloced = false;
    float* hostData;
    float* hostAlpha;
    //BUGFIX: iter was uninitialized; it is read by the final printf while the
    //main iteration loop below is commented out (undefined behavior before).
    int iter = 0;

    //set up pointer to alpha (caller owns and frees this buffer)
    hostAlpha = (float *)malloc(sizeof(float)*n_obs);
    *p_alpha = hostAlpha;

    //alloc the training data (aligned), then populate
    //BUGFIX: removed a second, never-used cudaMallocPitch (devData1) that
    //leaked device memory.
    CUDA_CHECK_ERROR(cudaMallocPitch((void**)&devData, &devDataPitch, n_obs*sizeof(float), n_dim));

    if (devDataPitch == n_obs * sizeof(float)) {
        printf("Training data is already aligned\n");
        hostData = data;
    } else {
        //repack the host copy so each row matches the device pitch, allowing
        //a single flat cudaMemcpy below
        hostPitchSize = devDataPitch/sizeof(float);
        hostData = (float*)malloc(devDataPitch*n_dim);
        hostDataAlloced = true;
        printf("Realigning data to a pitch of %i floats\n", hostPitchSize);

        for (int i=0; i<n_dim; i++) {
            for (int j=0; j<n_obs; j++)
                hostData[i*hostPitchSize+j] = data[i*n_obs+j];
        }
    }
    CUDA_CHECK_ERROR(cudaMemcpy(devData, hostData, devDataPitch*n_dim,
                                cudaMemcpyHostToDevice));

    //alloc transposed data (aligned), building it from hostData if absent
    bool transposedDataAlloced = false;
    if (transposedData == 0) {
        transposedData = (float *)malloc(n_obs*n_dim*sizeof(float));
        transposedDataAlloced = true;
        for (int i=0; i<n_obs; i++) {
            for (int j=0; j<n_dim; j++) {
                transposedData[i*n_dim+j] = hostData[j*hostPitchSize+i];
            }
        }
    }

    CUDA_CHECK_ERROR(cudaMallocPitch((void**)&devTransposedData, &devTransposedDataPitch,
                                     n_dim*sizeof(float), n_obs));
    CUDA_CHECK_ERROR(cudaMemcpy2D(devTransposedData, devTransposedDataPitch, transposedData,
                                  n_dim*sizeof(float), n_dim*sizeof(float), n_obs,
                                  cudaMemcpyHostToDevice));

    //allocate other device arrays
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devKernelDiag, n_obs*sizeof(float)));
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devLabels, n_obs*sizeof(float)));
    CUDA_CHECK_ERROR(cudaMemcpy(devLabels, labels, n_obs*sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devAlpha, n_obs*sizeof(float)));
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devF, n_obs*sizeof(float)));
    //8-slot scratch buffer the kernels use to return per-step results
    CUDA_CHECK_ERROR(cudaMalloc(&devResult, 8*sizeof(float)));
    hostResult = (float*)malloc(8*sizeof(float));

    //one block per BLOCKSIZE-sized chunk of observations
    int blockWidth = intRoundUp(n_obs, BLOCKSIZE);

    //blockWidth determines the number of blocks. In every block,
    //we find the b_high_i and b_low_i based on f_high_i and f_low_i
    float* devLocalFsRL;
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devLocalFsRL, blockWidth*sizeof(float)));
    float* devLocalFsRH;
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devLocalFsRH, blockWidth*sizeof(float)));
    int* devLocalIndicesRL;
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devLocalIndicesRL, blockWidth*sizeof(int)));
    int* devLocalIndicesRH;
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devLocalIndicesRH, blockWidth*sizeof(int)));

    //per-block scratch for second-order working-set selection
    //NOTE(review): these are allocated but not passed to any launch in this
    //file — confirm they are still needed.
    float* devLocalObjsMaxObj;
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devLocalObjsMaxObj, blockWidth*sizeof(float)));
    int* devLocalIndicesMaxObj;
    CUDA_CHECK_ERROR(cudaMalloc((void**)&devLocalIndicesMaxObj, blockWidth*sizeof(int)));

    //probe allocation: determine the pitch the cache rows will get
    void* temp;
    size_t rowPitch;
    CUDA_CHECK_ERROR(cudaMallocPitch(&temp, &rowPitch, n_obs*sizeof(float), 2));
    CUDA_CHECK_ERROR(cudaFree(temp));

    //get device mem information
    size_t remainingMemory;
    size_t totalMemory;
    cudaMemGetInfo(&remainingMemory, &totalMemory);

    //compute size of cache in rows (95% of available memory), capped at n_obs
    size_t sizeOfCache = remainingMemory/rowPitch;
    sizeOfCache = (int)((float)sizeOfCache*0.95);

    if (n_obs < sizeOfCache) {
        sizeOfCache = n_obs;
    }
    //BUGFIX: %Zu is a GNU extension; %zu is the standard size_t specifier
    printf("%zu bytes of memory found on device, %zu bytes currently free\n",
            totalMemory, remainingMemory);
    printf("%zu rows of kernel matrix will be cached (%zu bytes per row)\n",
            sizeOfCache, rowPitch);

    //allocate kernel-row cache
    float* devCache;
    size_t cachePitch;
    CUDA_CHECK_ERROR(cudaMallocPitch((void**)&devCache, &cachePitch,
                                    n_obs*sizeof(float), sizeOfCache));
    //cudaMemset2D(devCache, cachePitch, 0x00, n_obs*sizeof(float), sizeOfCache);
    Cache kernelCache(n_obs, sizeOfCache);
    int devCachePitchInFloats = (int)cachePitch/(sizeof(float));

    //check for the last allocation error
    cudaError_t err = cudaGetLastError();
    if(err) { printf("Error: %s\n", cudaGetErrorString(err)); }
    printf("Allocated arrays on GPU\n");

    //launch configuration: one 1-D block of BLOCKSIZE threads per chunk
    dim3 threadsLinear(BLOCKSIZE);
    dim3 blocksLinear(blockWidth);

    //convert byte pitches to float-element pitches for the kernels
    int devDataPitchInFloats = (int)(devDataPitch / sizeof(float));
    int devTransposedDataPitchInFloats = (int)(devTransposedDataPitch / sizeof(float));

    // now that memory has been allocated, move memory onto the device
    launchInitialization(devData, devDataPitchInFloats, n_obs,
                         n_dim, devKernelDiag, devAlpha, devF, devLabels,
                         blocksLinear, threadsLinear);
    err = cudaGetLastError();
    if(err) printf("Error: %s\n", cudaGetErrorString(err));
    printf("Initialization complete\n");

    //Choose initial points: first negative-label index (iLow) and first
    //positive-label index (iHigh); stop scanning once both are found
    float bLow = 1;
    float bHigh = -1;
    int iteration = 0;
    int iLow = -1;
    int iHigh = -1;
    for (int i = 0; i < n_obs; i++) {
        if (labels[i] < 0) {
            if (iLow == -1) {
                iLow = i;
                if (iHigh > -1) { i = n_obs; }
            }
        } else {
            if (iHigh == -1) {
                iHigh = i;
                if (iLow > -1) { i = n_obs; }
            }
        }
    }

    // unroll the first step in the iteration, move results back to the host, and compute results
    dim3 singletonThreads(1);
    dim3 singletonBlocks(1);
    launchTakeFirstStep(devResult, devKernelDiag, devData, devDataPitchInFloats, devAlpha, cost, n_dim,
                        iLow, iHigh, singletonBlocks, singletonThreads);

    CUDA_CHECK_ERROR(cudaMemcpy((void*)hostResult, (void*)devResult, 8*sizeof(float), cudaMemcpyDeviceToHost));
    //NOTE(review): the first-step kernel writes the new alphas in slots 6/7,
    //while the loop kernel below uses slots 4/5 — confirm both kernels agree
    //with these offsets.
    float alpha20ld = *(hostResult+0);
    float alpha10ld = *(hostResult+1);
    bLow = *(hostResult+2);
    bHigh = *(hostResult+3);
    float alpha2New = *(hostResult+6);
    float alpha1New = *(hostResult+7);
    float alpha1Diff = alpha1New - alpha10ld;
    float alpha2Diff = alpha2New - alpha20ld;

    printf("alpha20ld: %f\n",alpha20ld);
    printf("alpha10ld: %f\n",alpha10ld);
    printf("bLow: %f\n",bLow);
    printf("bHigh: %f\n",bHigh);
    printf("alpha1Diff: %f\n",alpha1Diff);
    printf("alpha2Diff: %f\n",alpha2Diff);

    // prepare for main iteration loop
    dim3 reduceThreads(BLOCKSIZE);
    int iLowCacheIndex, iHighCacheIndex;
    bool iLowCompute, iHighCompute;
    printf("Starting iterations\n");
 /*
    for (iter=1; 1; iter++) {

        //determine what needs to be done for this iteration
        if (bLow-bHigh <= 2*tolerance) {
            printf("Houston we have converged\n");
            break;
        }
        printf("iteration: %d; gap: %f\n", iter, bLow - bHigh);

        //find kernel cache data for iHigh and iLow indices
        kernelCache.findData(iHigh, iHighCacheIndex, iHighCompute);
        kernelCache.findData(iLow, iLowCacheIndex, iLowCompute);

        //launch wrapper for first order update kernel
        launchFirstOrder(iLowCompute, iHighCompute, n_obs, n_dim, blocksLinear, threadsLinear, reduceThreads, devData, devDataPitchInFloats, devTransposedData, devTransposedDataPitchInFloats, devLabels, epsilon, cost_epsilon, devAlpha, devF, alpha1Diff*labels[iHigh], alpha2Diff*labels[iLow], iLow, iHigh, devCache, devCachePitchInFloats, iLowCacheIndex, iHighCacheIndex, devLocalIndicesRL, devLocalIndicesRH, devLocalFsRH, devLocalFsRL, devKernelDiag, devResult, cost);

        //return result and compute alpha
        CUDA_CHECK_ERROR(cudaMemcpy((void*)hostResult, (void*)devResult, 8*sizeof(float), cudaMemcpyDeviceToHost));
        alpha20ld = *(hostResult+0);
        alpha10ld = *(hostResult+1);
        bLow      = *(hostResult+2);
        bHigh     = *(hostResult+3);
        alpha2New = *(hostResult+4);
        alpha1New = *(hostResult+5);
        iLow      = *((int*)hostResult+6);
        iHigh     = *((int*)hostResult+7);
        alpha1Diff = alpha1New - alpha10ld;
        alpha2Diff = alpha2New - alpha20ld;
    }
*/
    //finalization: print some statistics and get the lagrangian weights from the device
    printf("%d iterations\n", iter);
    printf("bLow: %f, bHigh: %f\n", bLow, bHigh);
    kernelCache.printStatistics();
    CUDA_CHECK_ERROR(cudaMemcpy((void*)hostAlpha, (void*)devAlpha, n_obs*sizeof(float), cudaMemcpyDeviceToHost));

    //free device buffers
    cudaFree(devData);
    cudaFree(devTransposedData);
    cudaFree(devKernelDiag);
    cudaFree(devLabels);
    cudaFree(devAlpha);
    cudaFree(devF);
    cudaFree(devResult);
    //BUGFIX: these buffers were previously leaked
    cudaFree(devLocalFsRL);
    cudaFree(devLocalFsRH);
    cudaFree(devLocalIndicesRL);
    cudaFree(devLocalIndicesRH);
    cudaFree(devLocalObjsMaxObj);
    cudaFree(devLocalIndicesMaxObj);
    cudaFree(devCache);

    //free host buffers
    //BUGFIX: hostResult is malloc'd, so it must be free'd (was cudaFree)
    free(hostResult);
    if (hostDataAlloced)
        free(hostData);
    //BUGFIX: the locally-built transpose was previously leaked
    if (transposedDataAlloced)
        free(transposedData);
}

/**
 * Entry point: reads a training file (path given as argv[1]), runs GPU SVM
 * training with fixed hyperparameters, and reports wall-clock training time.
 */
int main(int argc, char** argv) {
    //get file arg
    //BUGFIX: guard against a missing argument instead of dereferencing argv[1]
    if (argc < 2) {
        fprintf(stderr, "Usage: %s <training_file>\n", argv[0]);
        return 1;
    }
    const char* training_file = argv[1];

    //init device-side printf support and fixed hyperparameters
    cudaPrintfInit();
    int n_obs, n_dim;
    float cost = 10.0f;
    float tolerance = 1e-3f;
    float epsilon = 1e-5f;
    struct timeval start;
    struct timeval end;

    //host-side buffers; data/labels/transposedData are filled by readSvm,
    //alpha is allocated by svm_training
    float* data;
    float* labels;
    float* alpha;
    float* transposedData;

    //read in file
    //NOTE(review): data and transposedData are intentionally swapped relative
    //to the commented-out call below — confirm this matches readSvm's output
    //layout.
//		readSvm(training_file, &data, &labels, &n_obs, &n_dim, &transposedData);
    readSvm(training_file, &transposedData, &labels, &n_obs, &n_dim, &data);
/*		printf("Input data found: %d points, %d dimensions\n", n_obs, n_dim);

		for(int i =0; i<5; i++){
			for(int j=0; j<n_dim; j++){
				printf("%d: %f,  ", j, data[i*n_dim + j]);
			}
			printf("\n");
		}

		for(int i=0; i<5;i++){
			printf("label: %f", labels[i]);
    	}


	n_obs = 10;
	n_dim = 5;

    data = (float*)malloc(sizeof(float)*n_obs*n_dim);
    labels = (float*)malloc(sizeof(float)*n_obs);
    transposedData = (float*)malloc(sizeof(float)*n_obs*n_dim);
	alpha = (float*)malloc(sizeof(float)*n_obs);

	for(int i=0; i<n_obs; i++){
		for(int j=0; j<n_dim; j++){
			data[i*n_dim + j] =999;
	}}
	for(int i=0; i<5;i++){
		labels[i]=1;
	}
    for(int i=5; i<10;i++){
		labels[i]=-1;
	}
	for(int i=0; i<n_obs; i++){
		for(int j=0; j<n_dim; j++){
			transposedData[i*n_dim + j] =999;
	}}
*/
    //time the main svm training call
    gettimeofday(&start, 0);
    svm_training(data, transposedData, labels, &alpha, n_obs, n_dim, cost, epsilon, tolerance);
    gettimeofday(&end, 0);

    //display printed stuff from inside the kernel
    cudaPrintfDisplay(stdout, true);

    //compute training time
    float training_time = (float)(end.tv_sec - start.tv_sec) + ((float)(end.tv_usec - start.tv_usec)) * 1e-6;
    printf("Training time : %f seconds\n", training_time);

    // free vars and close shop
    cudaPrintfEnd();
/*
	for(int i=0; i<n_obs;i++){
		printf("%f", alpha[i]);
	}
*/
    free(data);
    free(labels);
    free(alpha);
    //BUGFIX: transposedData was previously leaked; assumes readSvm malloc's
    //it — TODO confirm against svmIO.cpp
    free(transposedData);
    return 0;
}
