/// Includes projects
#include "svmTrain.h"
#include "initialize.h"
#include "firstOrder.h"
/// Includes system
#include <windows.h>

/// Never executed for its effect: referencing every specialization here forces
/// the compiler to instantiate svmTrain::train<type>() for each kernel type in
/// this translation unit, so the template definitions below are available to
/// callers in other .cpp/.cu files. Do not reorder or remove these calls.
void svmTrain::dummy() {
	train<GAUSSIAN>();
	train<LINEAR>();
	train<POLYNOMIAL>();
	train<SIGMOID>();
}

/// Constructs the trainer around caller-owned configuration and training data.
/// Neither pointer is copied or freed here; the caller retains ownership.
svmTrain::svmTrain(svmTrainConfig * p_config, svmTrainData * p_trainData)
	: config(p_config)
	, trainingData(p_trainData) {
}

/// Releases the host-side results produced by a training run (the alpha matrix
/// and the per-task rho array allocated in allocate()).
/// Both pointers are reset to NULL so that a repeated call is a harmless no-op
/// (free(NULL) is defined) and no dangling pointer survives.
void svmTrain::cleanTrainingResult() {
	free(alpha);
	free(rho);
	alpha = NULL;
	rho = NULL;	// was left dangling before; nulling prevents a double-free on re-entry
}

/// Trains one-against-all SVM classifiers on the GPU using first-order SMO.
/// The template parameter selects the kernel function at compile time.
/// On return, the host arrays `alpha` (nTasks x nPoints) and `rho` (nTasks)
/// hold the trained multipliers and per-task bias ((bLow+bHigh)/2).
/// NOTE(review): for GAUSSIAN, config->gamma is negated in place here and
/// negated back in printModel(); calling train<GAUSSIAN>() twice without a
/// printModel() in between would flip the sign again — confirm call sequence.
template<KernelType type>
void svmTrain::train() {
	printf("> Cost: %f, Tolerance: %f\n", config->C, config->tau);

	// start timer
	cudaEvent_t start,stop;
	cudaEventCreate( &start );
	cudaEventCreate( &stop );
	cudaEventRecord( start, 0 );

	// allocating data
	// allocate() creates all host/device buffers (pitched 2D arrays, mapped
	// hostResult/devResult pair, ELLPACK texture bindings) used below.
	allocate<type>();
	gpuCache->init();
	gpuCache->evaluateCacheSize(trainingData->nPoints);

	// Store -gamma so device kernels can use it directly for the Gaussian
	// exponent — presumably avoids a per-element negation; verify against
	// the kernel implementations.
	if(type == GAUSSIAN)
		config->gamma = -config->gamma;

	blocks = dim3(blockWidth);
	threads = dim3(BLOCKSIZE);

	// Precompute the kernel-matrix diagonal (and self-dots for GAUSSIAN).
	preCompute<type>();

	// Per-task views into the pitched device label/alpha matrices; advanced
	// by one pitched row per task in the loop header below.
	devLabelsInUse = devLabels;
	devAlphaInUse = devAlphaPointer;
	for(int task = 0; task < trainingData->nTasks; task++, devAlphaInUse+=devAlphaPitchInFloats, devLabelsInUse += devLabelsPitchInFloats) {
		initTraining();

		// Seed the working pair: iLow = first point with a negative
		// one-against-all label, iHigh = first point with a non-negative one.
		int iLow = -1;
		int iHigh = -1;
		for (int i = 0; i < trainingData->nPoints; i++) {
			if (trainingData->oaLabels[task*trainingData->nPoints + i] < 0) { if (iLow == -1) { iLow = i; if (iHigh != -1) break; } }
			else {if (iHigh == -1) { iHigh = i; if (iLow != -1) break; } }
		}
		// hostResult is zero-copy mapped memory; the synchronize makes the
		// kernel's writes through devResult visible on the host.
		// NOTE(review): cudaThreadSynchronize() is deprecated in favor of
		// cudaDeviceSynchronize() in modern CUDA.
		firstStep(iLow, iHigh); cudaThreadSynchronize();

		float sAlpha2Diff = hostResult[0];
		float sAlpha1Diff = hostResult[1];
		float bLow = 1.0f;
		float bHigh = -1.0f;

		// Main SMO loop: iterate until the duality gap closes to within 2*tau.
		int iteration;
		for (iteration = 1 ; bLow > bHigh + 2.0f*config->tau/*converge*/; iteration++) {
			// Progress report every 2048 iterations.
			if ((iteration & 0x7ff) == 0) {
				printf("iteration: %d; gap: %f; 2*tol: %f\n",iteration, bLow - bHigh, 2.0f*config->tau);
			}

			// Ask the cache for the kernel rows of the working pair; the
			// *Compute flags tell the kernel whether it must recompute them.
			gpuCache->findData(iHigh, iHighCacheIndex, iHighCompute);
			gpuCache->findData(iLow, iLowCacheIndex, iLowCompute);
			
			// `type` is a compile-time constant, so only one case survives.
			switch(type) {
				case GAUSSIAN:
					firstOrderSMO<GAUSSIAN>	(sAlpha1Diff, iHigh, sAlpha2Diff, iLow); break;
				case LINEAR:
					firstOrderSMO<LINEAR>	(sAlpha1Diff, iHigh, sAlpha2Diff, iLow); break;
				case POLYNOMIAL: 
					firstOrderSMO<POLYNOMIAL>(sAlpha1Diff, iHigh, sAlpha2Diff, iLow); break;
				case SIGMOID:
					firstOrderSMO<SIGMOID>	(sAlpha1Diff, iHigh, sAlpha2Diff, iLow); break;
			}
			cudaThreadSynchronize();

			// Mapped-result layout (written by the firstOrder kernel):
			// [0]=alpha2 diff, [1]=alpha1 diff, [2]=bLow, [3]=bHigh,
			// [4]=next iLow (int), [5]=next iHigh (int).
			sAlpha2Diff = hostResult[0];
			sAlpha1Diff = hostResult[1];
			bLow = hostResult[2];
			bHigh = hostResult[3];
			iLow = *((int*)hostResult + 4);
			iHigh = *((int*)hostResult + 5);
		}
		// Bias for this task is the midpoint of the converged bounds.
		rho[task] = (bLow + bHigh) / 2;
		printf("+++++++++++++++++++++++++++++++++++++++++++++++++++\n"
			   "| Class %d: %d(iterations)\n"
			   "| bLow: %f, bHigh: %f\n",
				task+1, iteration, bLow, bHigh
				);
	} // for(int task = 0; task < trainingData->nTasks; task++)
	printf("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
	gpuCache->printStatistics();
	// Copy the trained alphas back, collapsing the device pitch to a tightly
	// packed nTasks x nPoints host matrix.
	//CUDA_SAFE_CALL(cudaMemcpy2D((void*)alpha, devAlphaPointer, trainingData->nPoints*sizeof(float), cudaMemcpyDeviceToHost));
	CUDA_SAFE_CALL(cudaMemcpy2D((void*)alpha, trainingData->nPoints*sizeof(float), devAlphaPointer, devAlphaPitch, trainingData->nPoints*sizeof(float),trainingData->nTasks, cudaMemcpyDeviceToHost));

	// release device memory
	release<type>();

	// stop timer
	cudaEventRecord( stop, 0 );
	cudaEventSynchronize( stop );
	float trainingTime;
	cudaEventElapsedTime( &trainingTime, start, stop);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	// Print the elapsed time highlighted in red on the Windows console,
	// then restore the default white text attribute.
	HANDLE hStdout=GetStdHandle(STD_OUTPUT_HANDLE);
	SetConsoleTextAttribute(hStdout,FOREGROUND_RED|FOREGROUND_INTENSITY);
	printf("Training time : %f seconds\n", trainingTime/1000.f);
	SetConsoleTextAttribute(hStdout,FOREGROUND_RED|FOREGROUND_GREEN|FOREGROUND_BLUE);
}

/// Launches initF (one block of BLOCKSIZE threads) to initialize the device
/// optimality vector devF for the current task, reading the labels selected
/// by devLabelsInUse. Asynchronous; callers synchronize before reading results.
void svmTrain::initTraining() {
	initF<<<1, BLOCKSIZE>>>(trainingData->nPoints, devF, devLabelsInUse);
}

/// Frees every device-side resource created by allocate<type>(), in an order
/// that mirrors allocation: cache first, then the mapped host buffer, then the
/// plain device buffers, and finally the texture bindings and their backing
/// arrays. Host-side `alpha`/`rho` are NOT freed here (see
/// cleanTrainingResult()).
template<KernelType type> 
void svmTrain::release() {
	/// clean cache
	gpuCache->cleanCache();
	delete gpuCache;

	/// free mapping data
	// hostResult was created with cudaHostAlloc(..., cudaHostAllocMapped);
	// freeing it also invalidates the devResult alias obtained via
	// cudaHostGetDevicePointer.
	cudaFreeHost(hostResult);

	/// free data allocation at device
	//cudaFree(devData);
	cudaFree(devTransposedData);
	cudaFree(devLabels);
	cudaFree(devKernelDiag);
	// devSelfDot is only allocated for the Gaussian kernel (see allocate()).
	if(type == GAUSSIAN)
		cudaFree(devSelfDot);
	cudaFree(devAlphaPointer);
	cudaFree(devF);
	cudaFree(devLocalFsRL);
	cudaFree(devLocalFsRH);
	cudaFree(devLocalIndicesRL);
	cudaFree(devLocalIndicesRH);
	cudaFree(devLocalIndicesMaxObj);
	cudaFree(devLocalObjsMaxObj);
	/// texture
	// Unbind each texture before freeing the memory it references.
	cudaUnbindTexture(ellDataTex);
	cudaFree(devEllData);
	cudaUnbindTexture(ellRowLenTex);
	cudaFree(devEllRowLen);
}

/// Allocates every host- and device-side buffer used by train<type>():
/// host result arrays (alpha, rho), the GPU kernel-row cache, a zero-copy
/// mapped result buffer, pitched 2D device arrays for the transposed data /
/// labels / alphas, per-point scratch vectors, block-level reduction buffers,
/// and the ELLPACK sparse representation bound to textures.
/// All of this is released again by release<type>() and cleanTrainingResult().
template<KernelType type> 
void svmTrain::allocate() {
	// Number of thread blocks needed to cover nPoints (ceil division).
	blockWidth = (trainingData->nPoints+(BLOCKSIZE-1))/BLOCKSIZE;

	/// h_side data
	alpha = (float*)malloc(sizeof(float) * trainingData->nPoints * trainingData->nTasks);
	rho = (float*)malloc(sizeof(float) * trainingData->nTasks);
	gpuCache = new Cache_C;

	/// d_result mapping h_result
	// 8-float zero-copy buffer: kernels write through devResult, the host
	// reads hostResult after a synchronize (see train()).
	CUDA_SAFE_CALL( cudaHostAlloc((void**)&hostResult, 8*sizeof(float),  cudaHostAllocMapped));
	CUDA_SAFE_CALL( cudaHostGetDevicePointer( (void**)&devResult, (void*)hostResult, 0));

	/// d_data, d_transposedData
	//CUDA_SAFE_CALL( cudaMallocPitch((void**)&devData, &devDataPitch, trainingData->nPoints*sizeof(float)/*in bytes*/, trainingData->nDimension));
	//CUDA_SAFE_CALL( cudaMemcpy2D(devData, devDataPitch, trainingData->data, trainingData->nPoints*sizeof(float)/*pitch of source mem*/, trainingData->nPoints*sizeof(float)/*width in bytes*/, trainingData->nDimension ,cudaMemcpyHostToDevice));
	//devDataPitchInFloats = ((int)devDataPitch)/(sizeof(float));
	// Transposed data: nPoints rows of nDimension floats, pitched for alignment.
	CUDA_SAFE_CALL( cudaMallocPitch((void**)&devTransposedData, &devTransposedDataPitch, trainingData->nDimension*sizeof(float), trainingData->nPoints));
	CUDA_SAFE_CALL( cudaMemcpy2D(devTransposedData, devTransposedDataPitch, trainingData->transposeData, trainingData->nDimension*sizeof(float), trainingData->nDimension*sizeof(float), trainingData->nPoints, cudaMemcpyHostToDevice));
	devTransposedDataPitchInFloats = ((int)devTransposedDataPitch)/(sizeof(float));
	/// d_labels
	// One-against-all labels: nTasks rows of nPoints floats.
	CUDA_SAFE_CALL( cudaMallocPitch((void**)&devLabels, &devLabelsPitch, trainingData->nPoints*sizeof(float),trainingData->nTasks));
	CUDA_SAFE_CALL( cudaMemcpy2D(devLabels, devLabelsPitch, trainingData->oaLabels, trainingData->nPoints*sizeof(float), trainingData->nPoints*sizeof(float), trainingData->nTasks, cudaMemcpyHostToDevice));
	devLabelsPitchInFloats = (int)devLabelsPitch/(sizeof(float));
	//CUDA_SAFE_CALL( cudaMalloc((void**)&devLabels, trainingData->nPoints*sizeof(float)));
	//CUDA_SAFE_CALL( cudaMemcpy(devLabels, trainingData->labels, trainingData->nPoints*sizeof(float), cudaMemcpyHostToDevice));
	/// d_diagonal K
	CUDA_SAFE_CALL( cudaMalloc((void**)&devKernelDiag, trainingData->nPoints*sizeof(float)));
	/// d_selfDots when kerneltype == GAUSSIAN
	// NOTE(review): devSelfDot stays unallocated for the other kernel types
	// yet is still passed to preComputeKernel/firstOrder — presumably those
	// kernels never dereference it unless type == GAUSSIAN; confirm.
	if(type == GAUSSIAN)
		CUDA_SAFE_CALL( cudaMalloc((void**)&devSelfDot, trainingData->nPoints*sizeof(float)));
	/// d_alpha
	CUDA_SAFE_CALL( cudaMallocPitch((void**)&devAlphaPointer,&devAlphaPitch, trainingData->nPoints*sizeof(float), trainingData->nTasks));
	// Width argument is the full pitch, so padding bytes are zeroed too
	// (valid: pitch >= row width in bytes).
	CUDA_SAFE_CALL( cudaMemset2D(devAlphaPointer, devAlphaPitch,0,devAlphaPitch,trainingData->nTasks));
	devAlphaPitchInFloats = (int)devAlphaPitch/(sizeof(float));
	//CUDA_SAFE_CALL( cudaMalloc((void**)&devAlpha, trainingData->nPoints*sizeof(float)));
	//CUDA_SAFE_CALL( cudaMemset((void*)devAlpha, 0, trainingData->nPoints*sizeof(float)));
	/// d_Fi
	CUDA_SAFE_CALL( cudaMalloc((void**)&devF, trainingData->nPoints*sizeof(float)));
	/// d_LocalFs d_LocalIndices
	// One slot per thread block for the per-block reduction partials.
	CUDA_SAFE_CALL(cudaMalloc((void**)&devLocalFsRL, blockWidth*sizeof(float)));
	CUDA_SAFE_CALL(cudaMalloc((void**)&devLocalFsRH, blockWidth*sizeof(float)));
	CUDA_SAFE_CALL(cudaMalloc((void**)&devLocalIndicesRL, blockWidth*sizeof(int)));
	CUDA_SAFE_CALL(cudaMalloc((void**)&devLocalIndicesRH, blockWidth*sizeof(int)));
	/// d_global Obj Indices
	CUDA_SAFE_CALL(cudaMalloc((void**)&devLocalIndicesMaxObj, blockWidth*sizeof(int)));
	CUDA_SAFE_CALL(cudaMalloc((void**)&devLocalObjsMaxObj, blockWidth*sizeof(float)));

	/// d_ELLPACK binding to texture
	// Sparse data in ELLPACK layout (int2 entries), read through a 2D texture.
	CUDA_SAFE_CALL(cudaMallocPitch((void**) &devEllData, &devEllDataPitch, trainingData->nPoints*sizeof(int2), trainingData->ellWidth));
	CUDA_SAFE_CALL(cudaMemcpy2D(devEllData, devEllDataPitch, trainingData->ellData, trainingData->nPoints*sizeof(int2), trainingData->nPoints*sizeof(int2), trainingData->ellWidth, cudaMemcpyHostToDevice));
	cudaChannelFormatDesc channelDescEllEntry = cudaCreateChannelDesc<int2>();
	ellDataTex.normalized = 0;
	ellDataTex.filterMode = cudaFilterModePoint;
	ellDataTex.addressMode[0] = cudaAddressModeClamp;
	ellDataTex.addressMode[1] = cudaAddressModeClamp;
	CUDA_SAFE_CALL(cudaBindTexture2D(NULL, ellDataTex, devEllData, channelDescEllEntry, trainingData->nPoints, trainingData->ellWidth, devEllDataPitch));

	// Per-row entry counts for the ELLPACK matrix, via a 1D texture.
	CUDA_SAFE_CALL(cudaMalloc((void**) &devEllRowLen, trainingData->nPoints*sizeof(int)));
	CUDA_SAFE_CALL(cudaMemcpy(devEllRowLen, trainingData->ellRowLen, trainingData->nPoints*sizeof(int), cudaMemcpyHostToDevice));
	CUDA_SAFE_CALL(cudaBindTexture(NULL, ellRowLenTex, devEllRowLen, trainingData->nPoints*sizeof(int)));
}

/// Launches the templated precomputation kernel that fills devKernelDiag with
/// the kernel-matrix diagonal entries K(i,i) for all nPoints points.
/// devSelfDot is also passed through; it is only allocated for the Gaussian
/// kernel (see allocate()), so presumably the kernel touches it only in that
/// case. Asynchronous launch; train() synchronizes later.
template<KernelType type> 
void svmTrain::preCompute() {
	preComputeKernel<type><<<blocks, threads>>>(
		trainingData->nPoints,
		config->gamma, config->coef, config->degree,
		devKernelDiag, devSelfDot);
}

/// Performs the very first SMO update for the seed pair (iLow, iHigh) on the
/// device with a single-thread launch. The kernel writes its outputs (alpha
/// diffs, etc.) through devResult into the mapped hostResult buffer; train()
/// synchronizes and reads them afterwards.
/// The four cases are identical apart from the kernel-functor tag passed as
/// the last argument.
/// NOTE(review): this dispatches on config->kType at runtime while train<type>()
/// is compile-time templated — the two are expected to agree; confirm they
/// are always kept in sync by the caller.
void svmTrain::firstStep(int iLow,int iHigh) {
	switch (config->kType) {
		case LINEAR:
			takeFirstStep<<<1, 1>>>(
				devResult, devKernelDiag,
				//devData, devDataPitchInFloats,
				//devTransposedData, devTransposedDataPitchInFloats,
				devTransposedData + iLow*devTransposedDataPitchInFloats,	// row iLow of the transposed data
				devAlphaInUse,
				config->C,
				iLow, iHigh,
				config->gamma,
				config->coef,
				config->degree,
				devSelfDot,
				Linear());
			break;
		case POLYNOMIAL:
			takeFirstStep<<<1, 1>>>(
				devResult, devKernelDiag,
				//devData, devDataPitchInFloats,
				//devTransposedData, devTransposedDataPitchInFloats,
				devTransposedData + iLow*devTransposedDataPitchInFloats,
				devAlphaInUse,
				config->C,
				iLow, iHigh,
				config->gamma,
				config->coef,
				config->degree,
				devSelfDot,
				Polynomial());
			break;
		case GAUSSIAN:
			takeFirstStep<<<1, 1>>>(
				devResult, devKernelDiag,
				//devData, devDataPitchInFloats,
				//devTransposedData, devTransposedDataPitchInFloats,
				devTransposedData + iLow*devTransposedDataPitchInFloats,
				devAlphaInUse,
				config->C,
				iLow, iHigh,
				config->gamma, 
				config->coef,
				config->degree,
				devSelfDot,
				Gaussian());
			break;  
		case SIGMOID:
			takeFirstStep<<<1, 1>>>(
				devResult, devKernelDiag,
				//devData, devDataPitchInFloats,
				//devTransposedData, devTransposedDataPitchInFloats,
				devTransposedData + iLow*devTransposedDataPitchInFloats,
				devAlphaInUse,
				config->C,
				iLow, iHigh,
				config->gamma,
				config->coef,
				config->degree,
				devSelfDot,
				Sigmoid());
			break;
	}
}

/// Computes the dynamic shared-memory byte count for a firstOrder launch:
/// one nDimension-float row for each working-pair sample (iHigh / iLow) that
/// missed the kernel-row cache and must be recomputed on the device.
inline int svmTrain::firstOrderShareSize() {
	const int rowBytes = sizeof(float) * trainingData->nDimension;
	return (iHighCompute ? rowBytes : 0) + (iLowCompute ? rowBytes : 0);
}

/// Launches one first-order SMO iteration: the firstOrder kernel updates the
/// optimality vector devF for every point, recomputes the kernel rows of
/// iLow/iHigh when they missed the cache, runs the per-block reductions into
/// the devLocal* buffers, and writes the next working pair and bLow/bHigh
/// bounds through devResult into the mapped hostResult buffer.
/// The compile-time flags <iLowCompute, iHighCompute> and the kernel-functor
/// tag must be template arguments / literals, which forces this 4-way-by-4-way
/// expansion of otherwise identical launches. The dynamic shared memory holds
/// up to two rows of nDimension floats (see firstOrderShareSize()).
/// Asynchronous; train() synchronizes before reading hostResult.
template<KernelType type>
void svmTrain::firstOrderSMO(float sAlpha1Diff, int iHigh, float sAlpha2Diff, int iLow) {
	int shareSize = firstOrderShareSize();

	// `type` is a compile-time constant, so only one of these branches is live.
	if(type == GAUSSIAN) {
		if(iLowCompute) {
			if(iHighCompute)
				//firstOrder <true, true><<<blocks, threads, shareSize>>>(devTransposedData, devTransposedDataPitchInFloats, trainingData->nPoints, trainingData->nDimension, devF, sAlpha1Diff, sAlpha2Diff, iLow, iHigh, config->gamma, config->coef, config->degree, gpuCache->devCache, gpuCache->cachePitchInFloat, iLowCacheIndex, iHighCacheIndex, Gaussian());
				firstOrder <true, true><<<blocks, threads, shareSize>>>(/*devData, devDataPitchInFloats, */devTransposedData, devTransposedDataPitchInFloats, devLabelsInUse, trainingData->nPoints, trainingData->nDimension, devAlphaInUse, devF, sAlpha1Diff, sAlpha2Diff, iLow, iHigh, config->gamma, config->coef, config->degree, gpuCache->devCache, gpuCache->cachePitchInFloat, iLowCacheIndex, iHighCacheIndex, devLocalIndicesRL, devLocalIndicesRH, devLocalFsRL, devLocalFsRH, devKernelDiag, devSelfDot, devResult, config->C, Gaussian());
			else
				//firstOrder <true, false><<<blocks, threads, shareSize>>>(devTransposedData, devTransposedDataPitchInFloats, trainingData->nPoints, trainingData->nDimension, devF, sAlpha1Diff, sAlpha2Diff, iLow, iHigh, config->gamma, config->coef, config->degree, gpuCache->devCache, gpuCache->cachePitchInFloat, iLowCacheIndex, iHighCacheIndex, Gaussian());
				firstOrder <true, false><<<blocks, threads, shareSize>>>(/*devData, devDataPitchInFloats, */devTransposedData, devTransposedDataPitchInFloats, devLabelsInUse, trainingData->nPoints, trainingData->nDimension, devAlphaInUse, devF, sAlpha1Diff, sAlpha2Diff, iLow, iHigh, config->gamma, config->coef, config->degree, gpuCache->devCache, gpuCache->cachePitchInFloat, iLowCacheIndex, iHighCacheIndex, devLocalIndicesRL, devLocalIndicesRH, devLocalFsRL, devLocalFsRH, devKernelDiag, devSelfDot, devResult, config->C, Gaussian());
		}
		else {
			if(iHighCompute)
				//firstOrder <false, true><<<blocks, threads, shareSize>>>(devTransposedData, devTransposedDataPitchInFloats, trainingData->nPoints, trainingData->nDimension, devF, sAlpha1Diff, sAlpha2Diff, iLow, iHigh, config->gamma, config->coef, config->degree, gpuCache->devCache, gpuCache->cachePitchInFloat, iLowCacheIndex, iHighCacheIndex, Gaussian());
				firstOrder <false, true><<<blocks, threads, shareSize>>>(/*devData, devDataPitchInFloats, */devTransposedData, devTransposedDataPitchInFloats, devLabelsInUse, trainingData->nPoints, trainingData->nDimension, devAlphaInUse, devF, sAlpha1Diff, sAlpha2Diff, iLow, iHigh, config->gamma, config->coef, config->degree, gpuCache->devCache, gpuCache->cachePitchInFloat, iLowCacheIndex, iHighCacheIndex, devLocalIndicesRL, devLocalIndicesRH, devLocalFsRL, devLocalFsRH, devKernelDiag, devSelfDot, devResult, config->C, Gaussian());
			else
				//firstOrder <false, false><<<blocks, threads, shareSize>>>(devTransposedData, devTransposedDataPitchInFloats, trainingData->nPoints, trainingData->nDimension, devF, sAlpha1Diff, sAlpha2Diff, iLow, iHigh, config->gamma, config->coef, config->degree, gpuCache->devCache, gpuCache->cachePitchInFloat, iLowCacheIndex, iHighCacheIndex, Gaussian());
				firstOrder <false, false><<<blocks, threads, shareSize>>>(/*devData, devDataPitchInFloats, */devTransposedData, devTransposedDataPitchInFloats, devLabelsInUse, trainingData->nPoints, trainingData->nDimension, devAlphaInUse, devF, sAlpha1Diff, sAlpha2Diff, iLow, iHigh, config->gamma, config->coef, config->degree, gpuCache->devCache, gpuCache->cachePitchInFloat, iLowCacheIndex, iHighCacheIndex, devLocalIndicesRL, devLocalIndicesRH, devLocalFsRL, devLocalFsRH, devKernelDiag, devSelfDot, devResult, config->C, Gaussian());
		}
	} else if(type == LINEAR) {
		if(iLowCompute) {
			if(iHighCompute)
				firstOrder <true, true><<<blocks, threads, shareSize>>>(/*devData, devDataPitchInFloats, */devTransposedData, devTransposedDataPitchInFloats, devLabelsInUse, trainingData->nPoints, trainingData->nDimension, devAlphaInUse, devF, sAlpha1Diff, sAlpha2Diff, iLow, iHigh, config->gamma, config->coef, config->degree, gpuCache->devCache, gpuCache->cachePitchInFloat, iLowCacheIndex, iHighCacheIndex, devLocalIndicesRL, devLocalIndicesRH, devLocalFsRL, devLocalFsRH, devKernelDiag, devSelfDot, devResult, config->C, Linear());
			else
				firstOrder <true, false><<<blocks, threads, shareSize>>>(/*devData, devDataPitchInFloats, */devTransposedData, devTransposedDataPitchInFloats, devLabelsInUse, trainingData->nPoints, trainingData->nDimension, devAlphaInUse, devF, sAlpha1Diff, sAlpha2Diff, iLow, iHigh, config->gamma, config->coef, config->degree, gpuCache->devCache, gpuCache->cachePitchInFloat, iLowCacheIndex, iHighCacheIndex, devLocalIndicesRL, devLocalIndicesRH, devLocalFsRL, devLocalFsRH, devKernelDiag, devSelfDot, devResult, config->C, Linear());
		}
		else {
			if(iHighCompute)
				firstOrder <false, true><<<blocks, threads, shareSize>>>(/*devData, devDataPitchInFloats, */devTransposedData, devTransposedDataPitchInFloats, devLabelsInUse, trainingData->nPoints, trainingData->nDimension, devAlphaInUse, devF, sAlpha1Diff, sAlpha2Diff, iLow, iHigh, config->gamma, config->coef, config->degree, gpuCache->devCache, gpuCache->cachePitchInFloat, iLowCacheIndex, iHighCacheIndex, devLocalIndicesRL, devLocalIndicesRH, devLocalFsRL, devLocalFsRH, devKernelDiag, devSelfDot, devResult, config->C, Linear());
			else
				firstOrder <false, false><<<blocks, threads, shareSize>>>(/*devData, devDataPitchInFloats, */devTransposedData, devTransposedDataPitchInFloats, devLabelsInUse, trainingData->nPoints, trainingData->nDimension, devAlphaInUse, devF, sAlpha1Diff, sAlpha2Diff, iLow, iHigh, config->gamma, config->coef, config->degree, gpuCache->devCache, gpuCache->cachePitchInFloat, iLowCacheIndex, iHighCacheIndex, devLocalIndicesRL, devLocalIndicesRH, devLocalFsRL, devLocalFsRH, devKernelDiag, devSelfDot, devResult, config->C, Linear());
		}
	} else if(type == POLYNOMIAL) {
		if(iLowCompute) {
			if(iHighCompute)
				firstOrder <true, true><<<blocks, threads, shareSize>>>(/*devData, devDataPitchInFloats, */devTransposedData, devTransposedDataPitchInFloats, devLabelsInUse, trainingData->nPoints, trainingData->nDimension, devAlphaInUse, devF, sAlpha1Diff, sAlpha2Diff, iLow, iHigh, config->gamma, config->coef, config->degree, gpuCache->devCache, gpuCache->cachePitchInFloat, iLowCacheIndex, iHighCacheIndex, devLocalIndicesRL, devLocalIndicesRH, devLocalFsRL, devLocalFsRH, devKernelDiag, devSelfDot, devResult, config->C, Polynomial());
			else
				firstOrder <true, false><<<blocks, threads, shareSize>>>(/*devData, devDataPitchInFloats, */devTransposedData, devTransposedDataPitchInFloats, devLabelsInUse, trainingData->nPoints, trainingData->nDimension, devAlphaInUse, devF, sAlpha1Diff, sAlpha2Diff, iLow, iHigh, config->gamma, config->coef, config->degree, gpuCache->devCache, gpuCache->cachePitchInFloat, iLowCacheIndex, iHighCacheIndex, devLocalIndicesRL, devLocalIndicesRH, devLocalFsRL, devLocalFsRH, devKernelDiag, devSelfDot, devResult, config->C, Polynomial());
		}
		else {
			if(iHighCompute)
				firstOrder <false, true><<<blocks, threads, shareSize>>>(/*devData, devDataPitchInFloats, */devTransposedData, devTransposedDataPitchInFloats, devLabelsInUse, trainingData->nPoints, trainingData->nDimension, devAlphaInUse, devF, sAlpha1Diff, sAlpha2Diff, iLow, iHigh, config->gamma, config->coef, config->degree, gpuCache->devCache, gpuCache->cachePitchInFloat, iLowCacheIndex, iHighCacheIndex, devLocalIndicesRL, devLocalIndicesRH, devLocalFsRL, devLocalFsRH, devKernelDiag, devSelfDot, devResult, config->C, Polynomial());
			else
				firstOrder <false, false><<<blocks, threads, shareSize>>>(/*devData, devDataPitchInFloats, */devTransposedData, devTransposedDataPitchInFloats, devLabelsInUse, trainingData->nPoints, trainingData->nDimension, devAlphaInUse, devF, sAlpha1Diff, sAlpha2Diff, iLow, iHigh, config->gamma, config->coef, config->degree, gpuCache->devCache, gpuCache->cachePitchInFloat, iLowCacheIndex, iHighCacheIndex, devLocalIndicesRL, devLocalIndicesRH, devLocalFsRL, devLocalFsRH, devKernelDiag, devSelfDot, devResult, config->C, Polynomial());
		}
	} else if(type == SIGMOID) {
		if(iLowCompute) {
			if(iHighCompute)
				firstOrder <true, true><<<blocks, threads, shareSize>>>(/*devData, devDataPitchInFloats, */devTransposedData, devTransposedDataPitchInFloats, devLabelsInUse, trainingData->nPoints, trainingData->nDimension, devAlphaInUse, devF, sAlpha1Diff, sAlpha2Diff, iLow, iHigh, config->gamma, config->coef, config->degree, gpuCache->devCache, gpuCache->cachePitchInFloat, iLowCacheIndex, iHighCacheIndex, devLocalIndicesRL, devLocalIndicesRH, devLocalFsRL, devLocalFsRH, devKernelDiag, devSelfDot, devResult, config->C, Sigmoid());
			else
				firstOrder <true, false><<<blocks, threads, shareSize>>>(/*devData, devDataPitchInFloats, */devTransposedData, devTransposedDataPitchInFloats, devLabelsInUse, trainingData->nPoints, trainingData->nDimension, devAlphaInUse, devF, sAlpha1Diff, sAlpha2Diff, iLow, iHigh, config->gamma, config->coef, config->degree, gpuCache->devCache, gpuCache->cachePitchInFloat, iLowCacheIndex, iHighCacheIndex, devLocalIndicesRL, devLocalIndicesRH, devLocalFsRL, devLocalFsRH, devKernelDiag, devSelfDot, devResult, config->C, Sigmoid());
		}
		else {
			if(iHighCompute)
				firstOrder <false, true><<<blocks, threads, shareSize>>>(/*devData, devDataPitchInFloats, */devTransposedData, devTransposedDataPitchInFloats, devLabelsInUse, trainingData->nPoints, trainingData->nDimension, devAlphaInUse, devF, sAlpha1Diff, sAlpha2Diff, iLow, iHigh, config->gamma, config->coef, config->degree, gpuCache->devCache, gpuCache->cachePitchInFloat, iLowCacheIndex, iHighCacheIndex, devLocalIndicesRL, devLocalIndicesRH, devLocalFsRL, devLocalFsRH, devKernelDiag, devSelfDot, devResult, config->C, Sigmoid());
			else
				firstOrder <false, false><<<blocks, threads, shareSize>>>(/*devData, devDataPitchInFloats, */devTransposedData, devTransposedDataPitchInFloats, devLabelsInUse, trainingData->nPoints, trainingData->nDimension, devAlphaInUse, devF, sAlpha1Diff, sAlpha2Diff, iLow, iHigh, config->gamma, config->coef, config->degree, gpuCache->devCache, gpuCache->cachePitchInFloat, iLowCacheIndex, iHighCacheIndex, devLocalIndicesRL, devLocalIndicesRH, devLocalFsRL, devLocalFsRH, devKernelDiag, devSelfDot, devResult, config->C, Sigmoid());
		}
	}
	
}

/// Writes the trained model to config->outputFilename in a LIBSVM-style text
/// format (header, per-task rho values, class labels, then one line per
/// support vector with its alpha coefficients and dense feature values).
/// A point counts as a support vector when its alpha is nonzero in at least
/// one one-against-all task. Exits the process if the file cannot be opened.
void svmTrain::printModel() {
	printf("--------------------[Saving Model]--------------------\n");
	printf("> Output File: %s\n", config->outputFilename);
	FILE* outputFilePointer = fopen(config->outputFilename, "w");
	if (outputFilePointer == NULL) {
		printf("Can't write %s\n", config->outputFilename);
		exit(1);
	}

	// noneZero[i] = number of tasks in which point i has a positive alpha.
	unsigned int * noneZero = (unsigned int *)malloc(trainingData->nPoints*sizeof(unsigned int));
	memset(noneZero, 0, trainingData->nPoints*sizeof(unsigned int));

	for(int i=0;i<trainingData->nTasks;i++) {
		for(int j=0;j<trainingData->nPoints;j++) {
			if(alpha[i*trainingData->nPoints+j] > 0.0f)
				noneZero[j]++;
		}
	}

	// Count the distinct support vectors across all tasks.
	int nSV = 0;
	for(int i=0;i<trainingData->nPoints;i++) {
		if(noneZero[i]!=0)
			nSV ++;
	}
	printf("> SV %d out of %d\n",nSV, trainingData->nPoints);

	// Undo the in-place negation done in train<GAUSSIAN>() so the model file
	// records the user-supplied gamma.
	if (config->kType == GAUSSIAN)
		config->gamma = -config->gamma;

	fprintf(outputFilePointer, "svm_type c_svc\n");
	fprintf(outputFilePointer, "kernel_type %s\n", kernelTypeString[config->kType]);
	if (config->kType == POLYNOMIAL)
		fprintf(outputFilePointer, "degree %i\n", (int)config->degree);

	if (config->kType == POLYNOMIAL || config->kType == GAUSSIAN || config->kType == SIGMOID)
		fprintf(outputFilePointer, "gamma %f\n", config->gamma);
	
	if (config->kType == POLYNOMIAL || config->kType == SIGMOID)
		fprintf(outputFilePointer, "coef0 %f\n", config->coef);
	
	fprintf(outputFilePointer, "nr_class %d\n", trainingData->nClass);
	//fprintf(outputFilePointer, "total_sv %d\n", nSV + pSV);
	fprintf(outputFilePointer, "total_sv %d\n", nSV);

	// One rho (bias) value per one-against-all task.
	{
		fprintf(outputFilePointer, "rho");
		for(int i=0;i<trainingData->nTasks;i++)
			fprintf(outputFilePointer, " %g",rho[i]);
		fprintf(outputFilePointer,"\n");
	}
	
	// Original class labels, iterated in the order stored in the class map.
	{
		fprintf(outputFilePointer, "label");
		for(trainingData->classIt = trainingData->classes.begin(); trainingData->classIt!= trainingData->classes.end();trainingData->classIt++)
			fprintf(outputFilePointer," %d",trainingData->classIt->first);
		fprintf(outputFilePointer,"\n");
	}

	//fprintf(outputFilePointer, "nr_sv %d %d\n", pSV, nSV);
	// Each SV line: per-task signed coefficients (label * alpha), then the
	// dense feature vector as 1-based index:value pairs.
	fprintf(outputFilePointer, "SV\n");
	for (int i = 0; i < trainingData->nPoints; i++) {
		if(noneZero[i]!=0) {
			for(int j=0;j<trainingData->nTasks;j++)
				fprintf(outputFilePointer, "%.10g ", trainingData->oaLabels[j*trainingData->nPoints+i]*alpha[j*trainingData->nPoints+i]);

			for (int j = 0; j < trainingData->nDimension; j++) {
				//if(trainingData->data[j*trainingData->nPoints + i] > 0.0f)
					fprintf(outputFilePointer, "%d:%.8g ", j+1, trainingData->data[j*trainingData->nPoints + i]);
			}
			fprintf(outputFilePointer, "\n");
		}
	}

	fclose(outputFilePointer);
	free(noneZero);
	printf("> DONE.");
}