/// Includes, system 
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <algorithm>
/// Includes, cuda
#include <cublas_v2.h>
#include "cutil.h"
#include <cuda.h>
/// Includes, project
#include "../common/framework.h"
#include "../common/deviceSelect.h"
#include "svmClassify.h"
#include "svmClassifyKernels.h"

/// Runs the full classification pipeline: queries the device pitch limit,
/// allocates buffers, processes the test set in GPU-sized slices
/// (copy -> kernel evaluation -> reduce -> argmax), then releases resources
/// and reports the elapsed time.
void svmClassify::classify() {
	/// get max memory pitch
	{ 
		int deviceID = chooseLargestGPU(true);
		cudaDeviceProp deviceProp;
		CUDA_SAFE_CALL(cudaGetDeviceProperties(&deviceProp, deviceID));
		maxPitch = deviceProp.memPitch;
	}
	/// start timer
	cudaEvent_t start, stop;
	CUDA_SAFE_CALL(cudaEventCreate(&start));
	CUDA_SAFE_CALL(cudaEventCreate(&stop));
	CUDA_SAFE_CALL(cudaEventRecord(start, 0));
	/// allocate device and host memory space
	allocate();
	/// for the RBF kernel, calculate ||sv_j||^2 in devNorms_(i, j) = -gamma*(||data_i||^2 + ||sv_j||^2 - 2* data_i (dot) sv_j)
	if(config->kType == GAUSSIAN) {
		makeSelfDots<<<(config->nSV+BLOCKSIZE-1)/BLOCKSIZE, BLOCKSIZE>>>
			(devSV, devSVPitchInFloats, devSVDots, config->nSV, config->SVDim);
		CUDA_SAFE_CALL(cudaGetLastError());	/// catch launch-configuration errors
	}
	/// divide testpoint into several slices to fit GPU memory
	for(int dataoffset=0; dataoffset < testData->nPoints; dataoffset += nPointsInSlice) {
		/// at last slice, fit the size for alignment
		if(dataoffset+nPointsInSlice > testData->nPoints)
			resetDevData(testData->nPoints - dataoffset);
		/// copy testpoint data from host to device
		if(testData->nPoints*sizeof(float) < maxPitch) {	
			CUDA_SAFE_CALL(cudaMemcpy2D(devData, devDataPitch, testData->data+dataoffset, testData->nPoints*sizeof(float), nPointsInSlice*sizeof(float), testData->nDimension, cudaMemcpyHostToDevice));
		} else {
			for(int nd=0 ; nd<testData->nDimension ; nd++) { /// copy data row by row (data matrix is row major)
				CUDA_SAFE_CALL(cudaMemcpy(devData+nd*devDataPitchInFloats, testData->data+nd*testData->nPoints+dataoffset, nPointsInSlice*sizeof(float), cudaMemcpyHostToDevice));	
			}
		}
		/// for the RBF kernel, devNorms_(i, j) = -gamma*(||data_i||^2 + ||sv_j||^2 - 2* data_i (dot) sv_j)
		if(config->kType == GAUSSIAN) {
			/// calculate ||data_i||^2
			makeSelfDots<<<(nPointsInSlice+BLOCKSIZE-1)/BLOCKSIZE, BLOCKSIZE>>>
				(devData, devDataPitchInFloats, devDataDots, nPointsInSlice, testData->nDimension);
			/// store ||data_i||^2 + ||sv_j||^2 int devDots
			makeDots<<<dim3((config->nSV+BLOCKSIZE-1)/BLOCKSIZE, (nPointsInSlice+BLOCKSIZE-1)/BLOCKSIZE), BLOCKSIZE, sizeof(float)*BLOCKSIZE>>>
				(devDots, devDotsPitchInFloats, devSVDots, devDataDots, config->nSV, nPointsInSlice);
			CUDA_SAFE_CALL(cudaGetLastError());
		}
		/// calculate data_i (dot) sv_j, stored in devDots; for rbf kernel calculate ||data_i||^2 + ||sv_j||^2 - 2* data_i (dot) sv_j; each kernel with several parameter
		//cublasSgemm('n', 't', config->nSV/*m*/, nPointsInSlice/*n*/, config->SVDim/*k*/, sgemmAlpha, devSV/*op(A)*/, devSVPitchInFloats, devData/*op(B)*/, devDataPitchInFloats, sgemmBeta, devDots/*op(C)*/, devDotsPitchInFloats);
		cublasStatus_t sgemmStatus = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, config->nSV/*m*/, nPointsInSlice/*n*/, config->SVDim/*k*/, &sgemmAlpha, devSV/*op(A)*/, devSVPitchInFloats, devData/*op(B)*/, devDataPitchInFloats, &sgemmBeta, devDots/*op(C)*/, devDotsPitchInFloats);
		if (sgemmStatus != CUBLAS_STATUS_SUCCESS) {
			fprintf(stderr, "cublasSgemm failed\n");
			exit(EXIT_FAILURE);
		}
		/// calculate kernel value
		/// NOTE(review): computeK is launched with a single block — presumably the
		/// kernel iterates internally over the whole devDots matrix; confirm.
		switch(config->kType) {
			case GAUSSIAN:
				computeK<GAUSSIAN><<<1,BLOCKSIZE>>>(devDots, devDotsPitchInFloats, config->nSV, 0.0f, 0.0f);
				break;
			case POLYNOMIAL:
				computeK<POLYNOMIAL><<<1,BLOCKSIZE>>>(devDots, devDotsPitchInFloats, config->nSV, config->coef, config->degree);
				break;
			case SIGMOID:
				computeK<SIGMOID><<<1,BLOCKSIZE>>>(devDots, devDotsPitchInFloats, config->nSV, config->coef, 0.0f);
				break;
		}
		CUDA_SAFE_CALL(cudaGetLastError());

		mapReduce();

		findClass(dataoffset);
	}
	/// free device memory
	release();
	/// stop timer
	CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
	CUDA_SAFE_CALL(cudaEventSynchronize(stop));
	float classifingTime;
	CUDA_SAFE_CALL(cudaEventElapsedTime(&classifingTime, start, stop));
	CUDA_SAFE_CALL(cudaEventDestroy(start));
	CUDA_SAFE_CALL(cudaEventDestroy(stop));
	printf("> Classification time : %f seconds\n", classifingTime/1000.f);
}

/// Launches the reduction kernel: one block per (test point, task) pair,
/// BLOCKSIZE threads reducing over the support vectors into devResult.
inline void svmClassify::mapReduce() {
	const dim3 grid(nPointsInSlice, config->nTasks);
	const int sharedBytes = sizeof(float) * BLOCKSIZE;
	computeKernelsReduce<BLOCKSIZE><<<grid, BLOCKSIZE, sharedBytes>>>(
		devDots, devDotsPitchInFloats,
		devAlpha, devAlphaPitchInFloats,
		config->nSV,
		devResult, devResultPitchInFloats,
		devRho);
}

/// Selects the winning class for each point in the current slice (binary
/// decision when nTasks == 1, argmax over tasks otherwise) and copies the
/// first result row back into the host buffer at the given offset.
inline void svmClassify::findClass(int dataoffset) {
	if(config->nTasks == 1)
		argmax<true><<<(nPointsInSlice+BLOCKSIZE-1)/BLOCKSIZE, BLOCKSIZE>>> (
			devResult,
			devResultPitchInFloats,
			nPointsInSlice,
			1);
	else
		argmax<false><<<(nPointsInSlice+BLOCKSIZE-1)/BLOCKSIZE, BLOCKSIZE>>> (
			devResult,
			devResultPitchInFloats,
			nPointsInSlice,
			config->nTasks);
	CUDA_SAFE_CALL(cudaGetLastError());	/// catch launch-configuration errors
	/// blocking copy: also surfaces any asynchronous kernel failure here
	CUDA_SAFE_CALL(cudaMemcpy(result+dataoffset, devResult+0*devResultPitchInFloats, nPointsInSlice*sizeof(float), cudaMemcpyDeviceToHost));
}

/// Shrinks the slice to _sliceSize points (used for the final, partial slice)
/// and reallocates the two buffers whose row width depends on the slice size.
inline void svmClassify::resetDevData(unsigned int _sliceSize) {
	nPointsInSlice = _sliceSize;
	const size_t rowBytes = nPointsInSlice * sizeof(float);

	/// devResult: nTasks rows of nPointsInSlice floats
	CUDA_SAFE_CALL(cudaFree(devResult));
	CUDA_SAFE_CALL(cudaMallocPitch((void**)&devResult, &devResultPitch, rowBytes, config->nTasks));
	devResultPitchInFloats = (int)(devResultPitch / sizeof(float));

	/// devData: nDimension rows of nPointsInSlice floats
	CUDA_SAFE_CALL(cudaFree(devData));
	CUDA_SAFE_CALL(cudaMallocPitch((void**)&devData, &devDataPitch, rowBytes, testData->nDimension));
	devDataPitchInFloats = (int)(devDataPitch / sizeof(float));
}

/// Frees the pinned host buffer holding the classification results
/// (allocated with cudaHostAlloc in allocate(), so cudaFreeHost is required).
void svmClassify::cleanResult() {
	//free(result);
	CUDA_SAFE_CALL( cudaFreeHost( result));
}

/// Allocates pinned host memory for the results, uploads the model (support
/// vectors, alphas, rho) to the device, sizes the per-slice working buffers
/// from the available GPU memory, and initializes cuBLAS.
inline void svmClassify::allocate() {
	/// result (pinned for fast device->host copies)
	//result = (float*)malloc(testData->nPoints*sizeof(float));
	CUDA_SAFE_CALL( cudaHostAlloc( (void**)&result,testData->nPoints*sizeof(float), cudaHostAllocDefault));
	/// devSV;
	CUDA_SAFE_CALL(cudaMallocPitch((void**)&devSV, &devSVPitch, config->nSV*sizeof(float), config->SVDim));
	CUDA_SAFE_CALL(cudaMemcpy2D(devSV, devSVPitch, config->supportVectors, config->nSV*sizeof(float), config->nSV*sizeof(float), config->SVDim, cudaMemcpyHostToDevice));
	devSVPitchInFloats = (int)(devSVPitch / sizeof(float));
	/// devAlpha
	CUDA_SAFE_CALL(cudaMallocPitch((void**)&devAlpha, &devAlphaPitch, config->nSV*sizeof(float), config->nTasks));
	CUDA_SAFE_CALL(cudaMemcpy2D(devAlpha, devAlphaPitch, config->alpha, config->nSV*sizeof(float), config->nSV*sizeof(float), config->nTasks, cudaMemcpyHostToDevice));
	devAlphaPitchInFloats = (int)(devAlphaPitch / sizeof(float));
	/// devSVDots
	if(config->kType == GAUSSIAN)
		CUDA_SAFE_CALL(cudaMalloc((void**)&devSVDots, sizeof(float)*config->nSV));
	/// devRho
	CUDA_SAFE_CALL(cudaMalloc((void**)&devRho, sizeof(float)*config->nTasks));
	CUDA_SAFE_CALL(cudaMemcpy(devRho, config->rho, sizeof(float)*config->nTasks, cudaMemcpyHostToDevice));
	
	/// evaluate slice size
	{
		size_t free_memory, total_memory;
		CUDA_SAFE_CALL(cudaMemGetInfo(&free_memory, &total_memory));
		/// keep the whole computation in size_t: casting free_memory to int
		/// overflows on devices with more than 2 GB free
		size_t free_memory_floats = free_memory / sizeof(float);
		free_memory_floats = (size_t)(0.9 * (double)free_memory_floats);	/// leave 10% headroom

		/// floats needed per test point across all per-slice buffers
		size_t floatsPerPoint = (size_t)testData->nDimension/*devData*/ + 1/*devDataDots*/ + config->nTasks/*devResult*/ + config->nSV/*devDots*/;
		size_t slice = free_memory_floats / floatsPerPoint;
		slice = (slice >> 7) << 7;	//for pitch limitations assigning to be a multiple of 128

		nPointsInSlice = (int)std::min(slice, (size_t)testData->nPoints);	/// for few points
		nPointsInSlice = std::min(nPointsInSlice, (int)(maxPitch/sizeof(float))-2);	/// for too many points
		/// guard: a zero slice would make classify()'s loop never advance
		if (nPointsInSlice <= 0) {
			fprintf(stderr, "Not enough GPU memory for even one slice of test data\n");
			exit(EXIT_FAILURE);
		}
	}

	/// devData
	CUDA_SAFE_CALL(cudaMallocPitch((void**)&devData, &devDataPitch, nPointsInSlice*sizeof(float), testData->nDimension));
	devDataPitchInFloats = (int)(devDataPitch / sizeof(float));
	/// devDataDots
	if(config->kType == GAUSSIAN)
		CUDA_SAFE_CALL(cudaMalloc((void**)&devDataDots, sizeof(float)*nPointsInSlice));
	/// devResult
	CUDA_SAFE_CALL(cudaMallocPitch((void**)&devResult, &devResultPitch, nPointsInSlice*sizeof(float), config->nTasks));
	devResultPitchInFloats = (int)(devResultPitch / sizeof(float));
	/// devDots
	CUDA_SAFE_CALL(cudaMallocPitch((void**)&devDots, &devDotsPitch, config->nSV*sizeof(float), nPointsInSlice));
	devDotsPitchInFloats = (int)(devDotsPitch / sizeof(float));
	/// initiate CUDA BLAS
	//cublasStatus status = cublasInit();
	cublasStatus_t status = cublasCreate(&handle);
	if (status != CUBLAS_STATUS_SUCCESS) {
		printf("CUBLAS_v2 initialization error\n");
		exit(EXIT_FAILURE);
	}
	/// set parameter for cublassgemm: the sgemm computes
	/// C = alpha*SV*data^T + beta*C, so for RBF beta folds in -gamma*(dots)
	if(config->kType == GAUSSIAN) {
		sgemmAlpha = 2.0f*config->gamma;
		sgemmBeta = -config->gamma;
	} else {
		sgemmAlpha = config->gamma;
		sgemmBeta = 0.0f;
	}

	//for(int i=0;i<config->nTasks;i++)
	//	cudaStreamCreate(stream+i);
}

/// Frees every device buffer allocated in allocate()/resetDevData() and
/// tears down the cuBLAS handle. Kernel-type-dependent buffers (devSVDots,
/// devDataDots) only exist for the Gaussian kernel.
inline void svmClassify::release() {
	/// model buffers
	CUDA_SAFE_CALL(cudaFree(devSV));
	CUDA_SAFE_CALL(cudaFree(devAlpha));
	CUDA_SAFE_CALL(cudaFree(devRho));
	/// per-slice working buffers
	CUDA_SAFE_CALL(cudaFree(devData));
	CUDA_SAFE_CALL(cudaFree(devResult));
	CUDA_SAFE_CALL(cudaFree(devDots));
	/// RBF-only buffers
	if (config->kType == GAUSSIAN) {
		CUDA_SAFE_CALL(cudaFree(devSVDots));
		CUDA_SAFE_CALL(cudaFree(devDataDots));
	}
	/// release CUDA BLAS
	//cublasStatus status = cublasShutdown();
	cublasStatus_t status = cublasDestroy(handle);
	if (status != CUBLAS_STATUS_SUCCESS) {
		fprintf (stderr, "!!!! shutdown error (BLAS)\n");
		exit(EXIT_FAILURE);
	}

	//for(int i=0;i<config->nTasks;i++)
	//	cudaStreamDestroy(stream[i]);
}

/// Rounds x up to the nearest power of two (returns x if it already is one).
/// Uses the standard bit-smearing trick: after decrementing, OR-propagate the
/// highest set bit into all lower positions, then increment.
inline unsigned int svmClassify::nextPow2( unsigned int x ) {
	x -= 1;
	for (unsigned int shift = 1; shift <= 16; shift <<= 1)
		x |= x >> shift;
	return x + 1;
}

/// Writes one predicted class label per test point to the configured output
/// file, one label per line.
void svmClassify::printClassification() {
	FILE* fp = fopen(config->outputFilename, "w");
	if (fp == NULL) {
		printf("Can't write %s\n", config->outputFilename);
		exit(1);
	}
	/// NOTE(review): result is declared float* but is read here as raw ints —
	/// presumably argmax stores class indices bit-wise into the buffer; confirm.
	const int* indices = reinterpret_cast<const int*>(result);
	for (int i = 0; i < testData->nPoints; i++) {
		fprintf(fp, "%d\n", config->classLabels[indices[i]]);
		//fprintf(outputFilePointer, "%g\n", result[i]);
	}
	fclose(fp);
}

/**
* Performs SVM classification.
* @param data the data to be classified, stored as a flat column major array.
* @param nData the number of data points being classified
* @param supportVectors the support vectors of the classifier, stored as a flat column major array.
* @param nSV the number of support vectors of the classifier
* @param nDimension the dimensionality of the data and support vectors
* @param kp a struct containing all the information about the kernel parameters
* @param p_result a pointer to a float pointer where the results will be placed.  The perform classification routine will allocate the output buffer.
*/
//void performClassification(float *data, int nData, float *supportVectors, int nSV, int nDimension, float* alpha, KernelParams kp, float** p_result)
//{
//	chooseLargestGPU(true);
//	int total_nPoints = nData;
//	int nPoints;	
//	float gamma,coef0,b;
//	float degree;
//
//	if(kp.kType == GAUSSIAN)
//	{
//		printf("Found RBF kernel\n");
//		gamma=kp.gamma;
//		b=kp.b;
//	}
//	else if(kp.kType == POLYNOMIAL)
//	{
//		printf("Found polynomial kernel\n");
//		gamma=kp.gamma;
//		degree=kp.degree;
//		coef0 = kp.coef0;
//		b=kp.b;
//	}
//	else if(kp.kType == LINEAR)
//	{
//		printf("Found linear kernel\n");
//		gamma = 1.0;
//		b=kp.b;
//	}
//	else if(kp.kType == SIGMOID)
//	{
//		printf("Found sigmoid kernel\n");
//		gamma = kp.gamma;
//		coef0 = kp.coef0;
//		b=kp.b;
//	}
//	else
//	{
//		printf("Error: Unknown kernel type id - %d\n",kp.kType);
//		exit(0);
//	}
//
//	//int nBlocksSV = intDivideRoundUp(nSV,BLOCKSIZE);
//	int nBlocksSV = (nSV+(BLOCKSIZE-1))/BLOCKSIZE;
//
//	cublasStatus status = cublasInit();
//	if (status != CUBLAS_STATUS_SUCCESS) {
//		printf("CUBLAS initialization error\n");
//		exit(1);
//	}
//
//
//	float* devSV;
//	size_t devSVPitch;
//	CUDA_SAFE_CALL(cudaMallocPitch((void**)&devSV, &devSVPitch, nSV*sizeof(float), nDimension));
//	CUDA_SAFE_CALL(cudaMemcpy2D(devSV, devSVPitch, supportVectors, nSV*sizeof(float), nSV*sizeof(float), nDimension, cudaMemcpyHostToDevice));
//	int devSVPitchInFloats = ((int)devSVPitch) / sizeof(float);
//
//
//	float* devAlpha;
//	CUDA_SAFE_CALL(cudaMalloc((void**)&devAlpha, nSV*sizeof(float)));
//	CUDA_SAFE_CALL(cudaMemcpy(devAlpha, alpha, nSV*sizeof(float), cudaMemcpyHostToDevice));
//
//	float* devLocalValue;
//
//	float* devResult;
//
//	float* result = (float*)malloc(total_nPoints*sizeof(float));
//	*(p_result) = result;
//
//	float* devSVDots;
//	CUDA_SAFE_CALL(cudaMalloc((void**)&devSVDots, sizeof(float)*nSV));
//
//
//	unsigned int free_memory,total_memory;
//	cuMemGetInfo(&free_memory,&total_memory);
//	//printf("\nChecking GPU Memory status...\n");
//	//printf("Total Memory=%d bytes   Available Memory=%d bytes\n",total_memory, free_memory);
//	int free_memory_floats = (long int)free_memory/sizeof(float);
//
//	free_memory_floats = (int)(0.9 * free_memory_floats); 
//
//	nPoints = ((free_memory_floats-devSVPitchInFloats*nDimension-nSV-nSV)/(nDimension+1+devSVPitchInFloats+1+nBlocksSV));
//	nPoints = (nPoints>>7)<<7;		//for pitch limitations assigning to be a multiple of 128
//
//	nPoints = min(nPoints, total_nPoints); //for few points
//	nPoints = min(nPoints, (int)MAX_POINTS); //for too many points	
//
//	//printf("Max points that can reside in GPU memory per call = %d\n\n", nPoints);
//
//	dim3 mapGrid((nSV+BLOCKSIZE-1)/BLOCKSIZE, nPoints);
//	dim3 mapBlock(BLOCKSIZE);
//
//
//	dim3 reduceGrid(1, nPoints);
//	dim3 reduceBlock(mapGrid.x, 1);
//
//
//	float* devData;
//	size_t devDataPitch;
//	CUDA_SAFE_CALL(cudaMallocPitch((void**)&devData, &devDataPitch, nPoints*sizeof(float), nDimension));
//
//	int devDataPitchInFloats = ((int)devDataPitch) / sizeof(float);
//
//	float* devDataDots;
//	CUDA_SAFE_CALL(cudaMalloc((void**)&devDataDots, sizeof(float)*nPoints));
//
//	CUDA_SAFE_CALL(cudaMalloc((void**)&devLocalValue, sizeof(float)*mapGrid.x*mapGrid.y));
//
//	CUDA_SAFE_CALL(cudaMalloc((void**)&devResult, sizeof(float)*mapGrid.y));
//
//	float* devDots;
//	size_t devDotsPitch;
//	CUDA_SAFE_CALL(cudaMallocPitch((void**)&devDots, &devDotsPitch, nSV*sizeof(float), nPoints));
//
//
//
//
//	dim3 threadsLinear(BLOCKSIZE);
//	if(kp.kType == GAUSSIAN)
//	{
//		dim3 blocksSVLinear((nSV+BLOCKSIZE-1)/BLOCKSIZE);
//		makeSelfDots<<<blocksSVLinear, threadsLinear>>>(devSV, devSVPitchInFloats, devSVDots, nSV, nDimension);
//	}
//
//	int iteration=1;
//
//	for(int dataoffset=0; dataoffset<total_nPoints; dataoffset += nPoints) 
//	{
//		// code for copying data
//		if(dataoffset+nPoints > total_nPoints)
//		{
//			nPoints = total_nPoints-dataoffset;
//			mapGrid=dim3((nSV+BLOCKSIZE-1)/BLOCKSIZE, nPoints);
//			mapBlock=dim3(BLOCKSIZE);
//
//			reduceGrid=dim3(1, nPoints);
//			reduceBlock=dim3(mapGrid.x, 1);
//
//			CUDA_SAFE_CALL(cudaFree(devLocalValue));
//			CUDA_SAFE_CALL(cudaMalloc((void**)&devLocalValue, sizeof(float)*mapGrid.x*mapGrid.y));
//
//			//resize & copy devdata, devdots,
//			CUDA_SAFE_CALL(cudaFree(devData));
//			CUDA_SAFE_CALL(cudaMallocPitch((void**)&devData, &devDataPitch, nPoints*sizeof(float), nDimension));
//			devDataPitchInFloats = devDataPitch/sizeof(float);
//		}
//
//		//printf("Number of Points in call #%d=%d \n",iteration, nPoints);
//
//		if(total_nPoints*sizeof(float) < MAX_PITCH)
//		{	
//			CUDA_SAFE_CALL(cudaMemcpy2D(devData, devDataPitch, data+dataoffset, total_nPoints*sizeof(float), nPoints*sizeof(float), nDimension, cudaMemcpyHostToDevice));
//		}
//		else
//		{
//			for(int nd=0;nd<nDimension;nd++)
//			{
//				CUDA_SAFE_CALL(cudaMemcpy(devData+nd*devDataPitchInFloats, data+nd*total_nPoints+dataoffset, nPoints*sizeof(float), cudaMemcpyHostToDevice));	
//			}
//		}
//
//		dim3 blocksDataLinear((nPoints+BLOCKSIZE-1)/BLOCKSIZE);
//		dim3 threadsDots(BLOCKSIZE, 1);
//		dim3 blocksDots((nSV+BLOCKSIZE-1)/BLOCKSIZE, (nPoints+BLOCKSIZE-1)/BLOCKSIZE);
//		int devDotsPitchInFloats = ((int)devDotsPitch)/ sizeof(float);
//
//		if(kp.kType == GAUSSIAN)
//		{
//			makeSelfDots<<<blocksDataLinear, threadsLinear>>>(devData, devDataPitchInFloats, devDataDots, nPoints, nDimension);
//
//			CUDA_SAFE_CALL(cudaMemset(devDots, 0, sizeof(float)*devDotsPitchInFloats*nPoints));
//
//			makeDots<<<blocksDots, threadsDots>>>(devDots, devDotsPitchInFloats, devSVDots, devDataDots, nSV, nPoints);
//
//			cudaDeviceSynchronize(); //unnecessary..onyl for timing..
//		}
//
//		float sgemmAlpha, sgemmBeta;
//		if(kp.kType == GAUSSIAN)
//		{
//			sgemmAlpha = 2*gamma;
//			sgemmBeta = -gamma;
//		}
//		else
//		{
//			sgemmAlpha = gamma;
//			sgemmBeta = 0.0f;
//		}
//
//		cublasSgemm('n', 't', nSV, nPoints, nDimension, sgemmAlpha, devSV, devSVPitchInFloats, devData, devDataPitchInFloats, sgemmBeta, devDots, devDotsPitchInFloats);
//
//		cudaDeviceSynchronize();
//
//		int reduceOffset = (int)pow(2, ceil(log2((float)BLOCKSIZE))-1);
//		//printf("size: %d -> reduceOffset: %d\n", BLOCKSIZE, reduceOffset);
//		int sharedSize = sizeof(float)*(BLOCKSIZE);
//
//
//		if(kp.kType == GAUSSIAN)
//		{
//			computeKernelsReduce<<<mapGrid, mapBlock, sharedSize>>>(devDots, devDotsPitchInFloats, devAlpha, nPoints, nSV, GAUSSIAN, 0.0f, 1.0f, devLocalValue, 1<<int(ceil(log2((float)BLOCKSIZE))-1));
//		}
//		else if(kp.kType == POLYNOMIAL)
//		{
//			computeKernelsReduce<<<mapGrid, mapBlock, sharedSize>>>(devDots, devDotsPitchInFloats, devAlpha, nPoints, nSV, POLYNOMIAL, coef0, degree, devLocalValue, 1<<int(ceil(log2((float)BLOCKSIZE))-1));
//		}
//		else if(kp.kType == LINEAR)
//		{
//			computeKernelsReduce<<<mapGrid, mapBlock, sharedSize>>>(devDots, devDotsPitchInFloats, devAlpha, nPoints, nSV, LINEAR, 0.0f ,1.0f , devLocalValue, 1<<int(ceil(log2((float)BLOCKSIZE))-1));
//		}
//		else if(kp.kType == SIGMOID)
//		{
//			computeKernelsReduce<<<mapGrid, mapBlock, sharedSize>>>(devDots, devDotsPitchInFloats, devAlpha, nPoints, nSV, SIGMOID, coef0, 1.0f, devLocalValue, 1<<int(ceil(log2((float)BLOCKSIZE))-1));
//		}
//
//
//		reduceOffset = (int)pow(2, ceil(log2((float)mapGrid.x))-1);
//		sharedSize = sizeof(float)*mapGrid.x;
//
//		doClassification<<<reduceGrid, reduceBlock, sharedSize>>>(devResult, b, devLocalValue, reduceOffset, mapGrid.x);
//
//		cudaDeviceSynchronize(); //unnecessary..onyl for timing..
//
//		//printf("rest of stuff = %f\n",blas1time+(float)((f.tv_sec-s.tv_sec)+(f.tv_usec-s.tv_usec)/1e6));
//
//		cudaMemcpy(result+dataoffset, devResult, nPoints*sizeof(float), cudaMemcpyDeviceToHost);
//
//
//		iteration++;
//	}
//
//
//	CUDA_SAFE_CALL(cudaFree(devResult));
//	CUDA_SAFE_CALL(cudaFree(devAlpha));
//	CUDA_SAFE_CALL(cudaFree(devData));
//	CUDA_SAFE_CALL(cudaFree(devLocalValue));
//	CUDA_SAFE_CALL(cudaFree(devDots));
//	CUDA_SAFE_CALL(cudaFree(devSV));
//	CUDA_SAFE_CALL(cudaFree(devSVDots));
//	CUDA_SAFE_CALL(cudaFree(devDataDots));
//
//}
