/*
 * parallelYoung.cpp
 *
 *  Created on: May 29, 2013
 *      Author: sjelic
 */

#include "parallelYoung.h"

#include <cmath>


namespace coveringPacking
{
	// Allocates host- and device-side working vectors for an R x C instance.
	//   R, C : dimensions (rows / columns) of the constraint matrix A
	//   eps  : approximation parameter; N = ceil(ln(r*c)/eps^2) below
	// NOTE(review): none of the malloc()/cudaMalloc() results are checked;
	// an allocation failure only surfaces later as a NULL dereference or a
	// CUDA error at first use.
	ComputingStructures::ComputingStructures(int R, int C, real eps)
	{
		r=R;
		c=C;
		sep='&';	// field separator -- not used in this file; presumably for log/output formatting (TODO confirm)
		x =(real *) malloc(c*sizeof(real));	// primal iterate, one entry per column
		x_hat =(real *) malloc(r*sizeof(real));	// dual iterate, one entry per row
		y = (real *) malloc(r*sizeof(real));	// accumulated row LHS values
		y_hat = (real *) malloc(c*sizeof(real));	// accumulated column LHS values
		p = (real *) malloc(r*sizeof(real));	// row weights
		p_hat = (real *) malloc(c*sizeof(real)); // q in Khaled's lecture notes

		// threshold used by the kernels' stopping tests; note r*c is an int
		// product -- could overflow for very large instances (TODO confirm range)
		N = ceil( log(r*c) / ((eps*eps)) );
		countITERATIONS=0;
		beta1=log(r*c);

		std::cout << "Start cudaMalloc() " << std::flush;
		cudaMalloc( (void **)&dev_x, c*sizeof(real) );
		cudaMalloc( (void **)&dev_x_hat, r*sizeof(real) );
		cudaMalloc( (void **)&dev_y, r*sizeof(real) );
		cudaMalloc( (void **)&dev_delta_y, r*sizeof(real) );
		cudaMalloc( (void **)&dev_y_hat, c*sizeof(real) );
		cudaMalloc( (void **)&dev_delta_y_hat, c*sizeof(real) );
		cudaMalloc( (void **)&dev_p, r*sizeof(real) );
		cudaMalloc( (void **)&dev_p_hat, c*sizeof(real) );
		cudaMalloc( (void **)&dev_alpha_y, sizeof(real) );
		cudaMalloc( (void **)&dev_alpha_y_hat, sizeof(real) );

		std::cout << "Done!" << std::endl;


	}

	/*
	 * Resets the host-side vectors (x, x_hat, y, y_hat, p, p_hat) to their
	 * starting values (zeros / uniform weights), allocates the mapped
	 * (zero-copy) host flags the kernels use to signal termination, and
	 * copies all vectors to the device.
	 *
	 *   cublas : when true, additionally pushes p and y_hat through
	 *            cublasSetVector() with verbose status reporting; when
	 *            false the plain cudaMemcpy() transfers already cover
	 *            everything.
	 *
	 * Fixes w.r.t. the previous revision:
	 *   - *condition8 was never initialized and dev_condition8 was mapped
	 *     to keepOnRunning instead of condition8 (copy/paste bug);
	 *   - removed an unused local `seed`.
	 */
	void ComputingStructures::initializeComputingStrucuters(bool cublas)
	{
		// CPU initializing/reseting
		for (int j=0;j<c;j++)
		{
			x[j]=0;
			y_hat[j]=0;
			p_hat[j] = 1.0;	// uniform initial column weights
		}
		for(int i=0; i<r; i++)
		{
			x_hat[i]=0;
			y[i]=0;
			p[i] = 1.0;	// uniform initial row weights
		}
		countITERATIONS=0;

		// GPU initializing: mapped host flags so kernels can flip them and
		// the host loop observes the change without an explicit copy.
		cudaHostAlloc((void**) &keepOnRunning,sizeof(bool),cudaHostAllocMapped);
		*keepOnRunning = true;
		cudaHostGetDevicePointer(&dev_keepOnRunning,keepOnRunning,0);

		cudaHostAlloc((void**) &condition8,sizeof(bool),cudaHostAllocMapped);
		*condition8 = true;                                    // was: *keepOnRunning = true;
		cudaHostGetDevicePointer(&dev_condition8,condition8,0); // was mapped to keepOnRunning

		std::cout << "Start cudaMemcpy() ... " << std::flush;
		cudaMemcpy(dev_x, x, c*sizeof(real), cudaMemcpyHostToDevice);
		cudaMemcpy(dev_x_hat, x_hat, r*sizeof(real), cudaMemcpyHostToDevice);
		cudaMemcpy(dev_y, y, r*sizeof(real), cudaMemcpyHostToDevice);
		cudaMemcpy(dev_y_hat, y_hat, c*sizeof(real), cudaMemcpyHostToDevice);
		cudaMemcpy(dev_p, p, r*sizeof(real), cudaMemcpyHostToDevice);
		cudaMemcpy(dev_p_hat, p_hat, c*sizeof(real), cudaMemcpyHostToDevice);
		std::cout <<  "Done!" << std::endl;

		if(cublas)
		{
			// push p to dev_p through cuBLAS, reporting the status verbosely
			switch (cublasSetVector(r,sizeof(real),(const void *)p,1,(void *)dev_p,1))
			{
				case CUBLAS_STATUS_SUCCESS:
					std::cout<<"The operation completed successfully."<<std::endl;
					break;
				case CUBLAS_STATUS_NOT_INITIALIZED:
					std::cout<<"The library was not initialized."<<std::endl;
					break;
				case CUBLAS_STATUS_INVALID_VALUE:
					std::cout<<"The parameters incx, incy, elemSize <=0"<<std::endl;
					break;
				case CUBLAS_STATUS_MAPPING_ERROR:
					std::cout<<"There was an error accessing GPU memory"<<std::endl;
					break;
				default:
					std::cout<<"Unknown error."<<std::endl;
					break;
			}

			// seed dev_delta_y_hat with the (zeroed) y_hat vector
			switch (cublasSetVector(c,sizeof(real),(const void *)y_hat,1,(void *)dev_delta_y_hat,1))
			{
				case CUBLAS_STATUS_SUCCESS:
					std::cout<<"The operation completed successfully."<<std::endl;
					break;
				case CUBLAS_STATUS_NOT_INITIALIZED:
					std::cout<<"The library was not initialized."<<std::endl;
					break;
				case CUBLAS_STATUS_INVALID_VALUE:
					std::cout<<"The parameters incx, incy, elemSize <=0"<<std::endl;
					break;
				case CUBLAS_STATUS_MAPPING_ERROR:
					std::cout<<"There was an error accessing GPU memory"<<std::endl;
					break;
				default:
					std::cout<<"Unknown error."<<std::endl;
					break;
			}
		}
		// else: nothing extra to do -- the cudaMemcpy() transfers above
		// already initialized every device vector.
	}

	// Releases all host vectors and their device counterparts.
	// NOTE(review): the mapped host flags keepOnRunning / condition8
	// (cudaHostAlloc'd in initializeComputingStrucuters) are never
	// cudaFreeHost'd here -> leaked.  They cannot simply be freed blindly
	// because the constructor leaves those pointers uninitialized when
	// initializeComputingStrucuters() was never called; TODO initialize
	// them to NULL in the constructor and cudaFreeHost() them here.
	ComputingStructures::~ComputingStructures()
	{
		free( x );
		free( x_hat );
		free( y );
		free( p );
		free( y_hat );
		free( p_hat );

		cudaFree(dev_x);
		cudaFree(dev_x_hat);
		//cudaFree(dev_x_old);
		//cudaFree(dev_x_hat_old);
		cudaFree(dev_y);
		cudaFree(dev_y_hat);
		//cudaFree(dev_y_old);
		//cudaFree(dev_y_hat_old);
		cudaFree(dev_p);
		cudaFree(dev_p_hat);
		//cudaFree(dev_p_old);
		//cudaFree(dev_p_hat_old);
		cudaFree(dev_delta_y);
		cudaFree(dev_delta_y_hat);
		cudaFree(dev_alpha_y);
		cudaFree(dev_alpha_y_hat);

	}

	// Zero all objective values and per-phase timing counters.
	PerformanceStructures::PerformanceStructures()
		: prim_obj(0),
		  dual_obj(0),
		  normalizationTime(0),
		  changeOfLHSTime(0),
		  alphaComputationTime(0),
		  updateOfPrimalDualTime(0),
		  scalePrimalDualTime(0)
	{
	}
	// Ratio of the primal to the dual objective (the achieved gap).
	// Falls back to the raw primal value while the dual objective is
	// still zero, avoiding a division by zero.
	real PerformanceStructures::getPrimalDualGap()
	{
		return (dual_obj == 0) ? prim_obj : prim_obj / dual_obj;
	}

	// Total GPU time across all measured phases, in seconds
	// (summed in the same left-to-right order as before).
	float PerformanceStructures::totalComputationTime()
	{
		float total = normalizationTime;
		total += changeOfLHSTime;
		total += alphaComputationTime;
		total += updateOfPrimalDualTime;
		total += scalePrimalDualTime;
		return total;
	}

	/*
	 * Loads the constraint matrix from `filePath` and uploads it to the GPU.
	 *   filePath   : instance file (normalized or original format)
	 *   D          : matrix density, stored as-is in `d`
	 *   NORMALIZED : if false, also reads w and b and rescales A
	 *   CUBLAS     : if true, A goes into a plain device buffer for cuBLAS;
	 *                otherwise into a 2D cudaArray bound to texture `tex`.
	 * NOTE(review): cublasSetMatrix below is told lda=r (column-major)
	 * while the readers fill A row-major with row width c -- the copy
	 * moves the same contiguous bytes, so cuBLAS effectively sees A^T;
	 * verify this against how cublasDgemv is called later.
	 */
	PackingLinearProgram::PackingLinearProgram(string filePath, float D, bool NORMALIZED, bool CUBLAS)
	{
		//reading matrix A from file
		normalized=NORMALIZED;
		if(normalized)
		{
			readFromFileNormalized(filePath.c_str(),&A,r,c,numberOfNonzero);
		}
		else
		{
			readFromFileOriginal(filePath.c_str(),&A,&w,&b,r,c,numberOfNonzero);
			transformeOriginalToNormalized();
		}

		d=D;


		if(CUBLAS)
		{
			 cudaMalloc( (void **)&dev_A_cublas, c*r*sizeof(matrix_type) );
			 switch (cublasSetMatrix(r,c,sizeof(matrix_type),(const void *)A,r,(void *)dev_A_cublas,r))
			 {
			 	case CUBLAS_STATUS_SUCCESS:
			 		std::cout<<"The operation completed successfully."<<std::endl;
			 		break;
				case CUBLAS_STATUS_NOT_INITIALIZED:
					std::cout<<"The library was not initialized."<<std::endl;
					break;
				case CUBLAS_STATUS_INVALID_VALUE:
					std::cout<<"The parameters rows, cols<0 or elemSize,lda, ldb<=0"<<std::endl;
					break;
				case CUBLAS_STATUS_MAPPING_ERROR:
					std::cout<<"There was an error accessing GPU memory"<<std::endl;
					break;
				default:
					std::cout<<"Unknown error."<<std::endl;
					break;
			 }

		}
		else
		{
			// GPU initializing of matrix A
			cudaChannelFormatDesc description = cudaCreateChannelDesc<matrix_type>();
			cudaMallocArray(&dev_A, &description, c, r);
			// Copy image data to array
			cudaMemcpyToArray(dev_A, 0, 0, A, c*r*sizeof(matrix_type), cudaMemcpyHostToDevice);
			// Bind the array to the texture

			cudaBindTextureToArray(tex, dev_A);
			// Set texture parameters (default)
			// NOTE(review): parameters are configured AFTER binding; the
			// usual order is to set them before cudaBindTextureToArray --
			// confirm the clamp/point settings actually take effect.
			tex.addressMode[0] = cudaAddressModeClamp;
			tex.addressMode[1] = cudaAddressModeClamp;
			tex.filterMode = cudaFilterModePoint;
			tex.normalized = false; // do not normalize coordinates

		}

	}
	/*
	 * Reads a "rail covering" instance:
	 *   line 1: r c
	 *   then one line per column: weight, number of covered rows, and the
	 *   (1-based) list of covered row indices.
	 * Builds A with A[col*r + row] = 1/w[col] and uploads it to the 2D
	 * texture `tex`.
	 *
	 * Fixes w.r.t. the previous revision:
	 *   - the input stream was opened with std::ios::out; now std::ios::in,
	 *     and a missing file is reported instead of silently ignored;
	 *   - density `d` was computed with integer division (always 0 for any
	 *     sparse instance); now computed in floating point;
	 *   - `w` was over-allocated to r*c entries although only c are used;
	 *   - `b` and `normalized` were left uninitialized, making the
	 *     destructor's `if(!normalized){free(w);free(b);}` undefined;
	 *     both are now set explicitly (free(NULL) is a no-op);
	 *   - dropped a stray leftover read from the header line stream.
	 *
	 * NOTE(review): A is filled as A[col*r + row] (column-major) while the
	 * texture copy below treats the buffer as row-major with width c --
	 * confirm the kernels' tex2D(tex, col, row) indexing matches the
	 * intended layout.
	 */
	PackingLinearProgram::PackingLinearProgram(string inputFilePath)
	{
		std::ifstream file;
		file.open(inputFilePath.c_str(),std::ios::in);	// was std::ios::out
		if(!file)
		{
			std::cout<<"Cannot open input file: "<<inputFilePath<<std::endl;
			exit(0);
		}

		std::string   line;
		int count=0;	// columns consumed so far
		int countNonzeroElements=0;
		int value;

		// header line: r and c
		std::getline(file, line);
		std::stringstream  linestream(line);
		linestream >> value;
		r=(int)(value);
		linestream >> value;
		c=(int)(value);
		std::cout<<"r = "<<r<<"; c = "<<c<<std::endl;
		if(r>0&&c>0)
		{
			A=(matrix_type *)calloc(r*c,sizeof(matrix_type));
			w=(real *)malloc(c*sizeof(real));	// one weight per column (was r*c entries)
		}
		else
		{
			std::cout<<"Number of rows and columns must be positive!"<<std::endl;
			exit(0);
		}
		b=NULL;	// not used in this format; keeps the destructor's free(b) safe
		normalized=false;	// so the destructor releases w

		// one line per column
		while(std::getline(file, line))
		{
				std::stringstream  linestream(line);
				linestream >> value;
				w[count]=((real)value)/2.0;
				std::cout<<w[count];
				linestream >> value;
				int numOfRows=(int)value;
				int countRows=0;
				std::cout<<" "<<numOfRows<<" ";
				while(linestream >> value)
				{
					int row = (int)value-1;	// file uses 1-based indices
					std::cout<<row<<" ";
					if(row>=0&&row<r)
					{
						if(w[count]<=0)
						{
							std::cout<<"Negative or zero weight!!! Exit!"<<std::endl;
							exit(0);
						}
						else
						{
							A[count*r+row]=1.0/w[count];
							std::cout<<A[count*r+row]<<" ";
							countNonzeroElements++;
						}

					}
					else
					{
						std::cout<<"Index: Out of the range!!!"<<std::endl;
					}
					countRows++;
				}
				std::cout<<std::endl;
				if(countRows!=numOfRows)
				{
					std::cout<<"Number of rows that are covered by column "<<count+1<<" doesn't match reporter number!!!"<<std::endl;
					exit(0);
				}
				count++;
		}
		if(count!=c)
		{
			std::cout<<"Real number of columns and reported number of columns don't match!!!"<<std::endl;
			exit(0);
		}
		// density: force floating-point division (was integer -> always 0)
		d=((float)countNonzeroElements)/((float)r*(float)c);
		std::cout<<"Rail Covering Instance created. Density of the matrix A: "<<d<<"."<<std::endl;
		file.close();

		// upload A into a 2D cudaArray and bind it to the texture `tex`
		cudaChannelFormatDesc description = cudaCreateChannelDesc<matrix_type>();
		cudaMallocArray(&dev_A, &description, c, r);
		cudaMemcpyToArray(dev_A, 0, 0, A, c*r*sizeof(matrix_type), cudaMemcpyHostToDevice);

		cudaBindTextureToArray(tex, dev_A);
		// texture sampling: clamp addressing, nearest-point lookup,
		// unnormalized integer coordinates
		tex.addressMode[0] = cudaAddressModeClamp;
		tex.addressMode[1] = cudaAddressModeClamp;
		tex.filterMode = cudaFilterModePoint;
		tex.normalized = false; // do not normalize coordinates
	}

	/*
	 * Releases the host matrix and the device copy of A.
	 * Fix: dev_A is a cudaArray allocated with cudaMallocArray(), so it
	 * must be released with cudaFreeArray() -- cudaFree() on a cudaArray
	 * is invalid and was silently failing before.
	 * NOTE(review): dev_A_cublas (allocated only in the CUBLAS branch of
	 * the file/D constructor) is still never released; and when that
	 * branch was taken, dev_A is uninitialized here -- TODO track which
	 * device buffers were actually allocated.
	 */
	PackingLinearProgram::~PackingLinearProgram()
	{
		//cudaUnbindTexture(tex);
		cudaFreeArray(dev_A);
		free(A);
		if(!normalized)
		{
			free(w);
			free(b);
		}
	}

	/*
	 * Reads a normalized instance:
	 *   line 1: m n nonZeros
	 *   then nonZeros records "row col value".
	 * The matrix is stored row-major: (*A)[row*n + col].
	 *   fileName : path to the instance file
	 *   A        : out -- freshly calloc'ed m*n dense matrix
	 *   m, n     : out -- dimensions
	 *   nonZeros : out -- number of entries announced by the file
	 *
	 * Fixes w.r.t. the previous revision:
	 *   - abs() (the int overload) truncated fractional magnitudes to
	 *     whole numbers; replaced by fabsf();
	 *   - a malformed header or truncated file now stops reading instead
	 *     of storing stale garbage.
	 * NOTE(review): row/col have project type `index` but are scanned
	 * with %d -- assumes index is int; confirm in parallelYoung.h.
	 */
	void PackingLinearProgram::readFromFileNormalized(const char *fileName,matrix_type** A, int &m, int &n, long &nonZeros){

		FILE *filePt;
		filePt = std::fopen(fileName,"r");

		if(!filePt){ printf("Input file missing ..."); exit(0);}
		if (std::fscanf(filePt,"%d %d %ld",&m,&n,&nonZeros) != 3)
		{
			printf("Malformed header in input file ...");
			std::fclose(filePt);
			exit(0);
		}

		*A = (matrix_type *) calloc(m*n,sizeof(matrix_type));
		index row=0,col=0;
		integer readings=0;
		while(readings++<nonZeros)
		{
			float value;
			if (std::fscanf(filePt,"%d %d %f",&row,&col,&value) != 3) break; // truncated file
			(*A)[col+row*n] = (matrix_type) fabsf(value); // row-wise, magnitude only
		}

		std::fclose(filePt);
	};

	/*
	 * Reads an original (non-normalized) instance:
	 *   line 1: m n nonZeros
	 *   then nonZeros records "row col value" for A (stored row-major,
	 *   (*A)[row*n + col]), then n values for c, then m values for b.
	 *
	 * Fixes w.r.t. the previous revision:
	 *   - abs() (the int overload) truncated fractional magnitudes;
	 *     replaced by fabsf();
	 *   - a malformed header or truncated file now stops reading instead
	 *     of storing stale garbage.
	 * NOTE(review): row/col have project type `index` but are scanned
	 * with %d -- assumes index is int; confirm in parallelYoung.h.
	 */
	void PackingLinearProgram::readFromFileOriginal(const char *fileName,matrix_type** A, real **c, real **b, int &m, int &n, long &nonZeros)
	{
		FILE *filePt;
		filePt = std::fopen(fileName,"r");

		if(!filePt){ printf("Input file missing ...\n"); exit(0);}
		if (std::fscanf(filePt,"%d %d %ld",&m,&n,&nonZeros) != 3)
		{
			printf("Malformed header in input file ...\n");
			std::fclose(filePt);
			exit(0);
		}

		// memory alocation for arrays
		*A = (matrix_type *) calloc(m*n,sizeof(matrix_type)); // matrix A
		*b = (real *) calloc(m,sizeof(real));
		*c = (real *) calloc(n,sizeof(real));

		index row = 0,col = 0;
		integer readings = 0;

		// read matrix A
		while(readings++<nonZeros)
		{
			float value;
			if (std::fscanf(filePt,"%d %d %f",&row,&col,&value) != 3) break; // truncated file
			(*A)[col+row*n] = (matrix_type) fabsf(value); // row-wise storing, magnitude only
		}
		// read array c
		readings=0;
		while(readings<n)
		{
			float value;
			if (std::fscanf(filePt,"%f",&value) != 1) break; // truncated file
			(*c)[readings] = (real) value;
			readings++;
		}
		// read array b
		readings=0;
		while(readings<m)
		{
			float value;
			if (std::fscanf(filePt,"%f",&value) != 1) break; // truncated file
			(*b)[readings] = (real) value;
			readings++;
		}

		std::fclose(filePt);
	}

	// Converts the original LP data to normalized form in place:
	// each entry A[i][j] is divided by w[j]*b[i].
	void PackingLinearProgram::transformeOriginalToNormalized()
	{
		for (int row = 0; row < r; ++row)
		{
			matrix_type *rowPtr = A + row * c;	// start of row `row` (row-major, width c)
			for (int col = 0; col < c; ++col)
			{
				rowPtr[col] /= w[col] * b[row];
			}
		}

	}

	// Scales a normalized primal/dual solution back to original units:
	// x[j] *= w[j] for every column, y[i] *= b[i] for every row.
	void PackingLinearProgram::transformNormalizedToOriginal(real *x, real *y)
	{
		int idx = 0;
		while (idx < c)
		{
			x[idx] *= w[idx];
			++idx;
		}
		idx = 0;
		while (idx < r)
		{
			y[idx] *= b[idx];
			++idx;
		}
	}

	/*bool PackingLinearProgram::isNormalized()
	{
		bool isNormalized=true;
		for(int i=0; i<r; i++)
		{
			if(b[i]!=1.0) isNormalized=false;
		}
		for(int i=0; i<c; i++)
		{
			if(w[i]!=1.0) isNormalized=false;
		}
		return isNormalized;
	}*/


	// Default constructor: intentionally empty -- the member pointers are
	// wired up by the derived class (see ParallelYoungDerandomized).
	// NOTE(review): that leaves them uninitialized until then.
	ParallelYoung::ParallelYoung()
	{

	}

	/*
	 * Copies the current iterates x and x_hat back to the host, computes
	 * the scaling factors that make them feasible, and prints the scaled
	 * primal/dual objective values and the gap.
	 *
	 *   scale_prim = max_i (A x)_i     -- dividing x by it enforces Ax <= 1
	 *   scale_dual = min_j (A^T x_hat)_j -- dividing x_hat by it enforces
	 *                                      A^T x_hat >= 1
	 *
	 * NOTE(review): no guard against scale_prim == 0 (x identically zero)
	 * or scale_dual staying INFTY (c == 0) -- the divisions below would
	 * then be undefined.  A is indexed row-major (i*c + j) here; confirm
	 * this matches how the active constructor filled it.
	 */
	void ParallelYoung::cpuScalingSolutions()
	{
		cudaMemcpy(myComputingStructures->x, myComputingStructures->dev_x, myPackingLinearProgram->c*sizeof(real), cudaMemcpyDeviceToHost);
		cudaMemcpy(myComputingStructures->x_hat, myComputingStructures->dev_x_hat, myPackingLinearProgram->r*sizeof(real), cudaMemcpyDeviceToHost);
		double scale_prim = -1.0;
		double scale_temp;
		double scale_dual = INFTY;

		// scale_prim = largest row value of A*x
		for (int i=0;i<myPackingLinearProgram->r;i++)
		{
			scale_temp = 0.0;
			for (int j=0;j<myPackingLinearProgram->c;j++)
			{
				scale_temp += myPackingLinearProgram->A[i*myPackingLinearProgram->c+j]*myComputingStructures->x[j];
			}
			if (scale_temp > scale_prim) scale_prim = scale_temp;
		}
		// scale_dual = smallest column value of A^T*x_hat
		for (int j=0;j<myPackingLinearProgram->c;j++)
		{
			scale_temp = 0.0;
			for (int i=0;i<myPackingLinearProgram->r;i++)
			{
				scale_temp += myPackingLinearProgram->A[i*myPackingLinearProgram->c+j]*myComputingStructures->x_hat[i];
			}
			if (scale_temp < scale_dual) scale_dual = scale_temp;
		}

		// scaled primal objective (prim_tmp, the unscaled sum, is unused)
		double prim = 0.0;
		double prim_tmp = 0.0;
		for (int j=0;j<myPackingLinearProgram->c; j++)
		{
			//cout << x[j]/scale_prim << " , " ;
			prim_tmp += myComputingStructures->x[j];
			prim += myComputingStructures->x[j]/scale_prim;
		}
		std::cout << std::endl;
		myPerformanceStructures->prim_obj=prim;
		std::cout <<">>> Primal value : " << prim << std::endl;

		// scaled dual objective (dual_tmp likewise unused)
		double dual = 0.0;
		double dual_tmp = 0.0;
		for (int i=0;i<myPackingLinearProgram->r; i++)
		{
			dual_tmp += myComputingStructures->x_hat[i];
			dual += myComputingStructures->x_hat[i]/scale_dual;
		}
		std::cout << std::endl;
		myPerformanceStructures->dual_obj=dual;
		std::cout <<">>> Dual value : " << dual << std::endl;

		std::cout <<"Primal/(1-eps) > Dual : "<< prim / (1-eps) << " > " << dual << std::endl;
		std::cout <<" Gap is: " << myPerformanceStructures->getPrimalDualGap() << std::endl;
	}


	// NOTE(review): myComputingStructures, myPackingLinearProgram and
	// myPerformanceStructures are new'ed in ParallelYoungDerandomized's
	// constructor but never deleted -> leaked at shutdown.  They cannot be
	// deleted blindly here because the base constructor leaves the
	// pointers uninitialized; TODO initialize them to NULL there and
	// delete them here.
	ParallelYoung::~ParallelYoung()
	{

		//myComputingStructures->~ComputingStructures();//????
	}

	/*
	 * Sets up a derandomized parallel-Young solver: reads the instance,
	 * allocates the computing structures sized by the instance and eps,
	 * and resets the performance counters.
	 *   EPS        : approximation parameter (stored in `eps`)
	 *   filePath   : instance file
	 *   D          : matrix density forwarded to PackingLinearProgram
	 *   normalized : whether the file is already in normalized form
	 * NOTE(review): CUBLAS here is presumably a compile-time macro or
	 * global flag (it is not a parameter) -- confirm in parallelYoung.h.
	 */
	ParallelYoungDerandomized::ParallelYoungDerandomized(real EPS, string filePath, float D, bool normalized)
	{
		eps=EPS;
		// initializing of computing structure

		myPackingLinearProgram = new PackingLinearProgram(filePath,D,normalized,CUBLAS);
		myComputingStructures = new ComputingStructures(myPackingLinearProgram->r, myPackingLinearProgram->c,eps);
		myPerformanceStructures = new PerformanceStructures();
		std::cout << "N = " << myComputingStructures->N << std::endl;


	}

	/*
	 * Final warp-level stage of a shared-memory sum reduction: folds
	 * sdata[0..63] into sdata[0].  Call only from threads with tid < 32,
	 * after a __syncthreads() that makes sdata[32..63] visible.
	 *
	 * Fix: the old volatile-only version relied on implicit warp lockstep,
	 * which no longer holds on Volta+ (independent thread scheduling).
	 * Each step now separates the read from the write with __syncwarp()
	 * so no lane can overwrite a slot another lane is still reading.
	 */
	template <class myType>
	__device__ void warpReduce(volatile myType* sdata, int tid) {
		myType v = sdata[tid];
		v += sdata[tid + 32]; __syncwarp(); sdata[tid] = v; __syncwarp();
		v += sdata[tid + 16]; __syncwarp(); sdata[tid] = v; __syncwarp();
		v += sdata[tid +  8]; __syncwarp(); sdata[tid] = v; __syncwarp();
		v += sdata[tid +  4]; __syncwarp(); sdata[tid] = v; __syncwarp();
		v += sdata[tid +  2]; __syncwarp(); sdata[tid] = v; __syncwarp();
		v += sdata[tid +  1]; sdata[tid] = v;
	}
	/*
	 * Final warp-level stage of a shared-memory max reduction: folds
	 * sdata[0..63] into sdata[0].  Call only from threads with tid < 32,
	 * after a __syncthreads().
	 * Fix: added __syncwarp() with read/write separation -- required on
	 * Volta+ where warps no longer execute in implicit lockstep.
	 */
	template<class myType>
	__device__ void warpReduceMax(volatile myType* sdata, int tid) {
		myType v = sdata[tid];
		myType o;
		o = sdata[tid + 32]; if (v < o) v = o; __syncwarp(); sdata[tid] = v; __syncwarp();
		o = sdata[tid + 16]; if (v < o) v = o; __syncwarp(); sdata[tid] = v; __syncwarp();
		o = sdata[tid +  8]; if (v < o) v = o; __syncwarp(); sdata[tid] = v; __syncwarp();
		o = sdata[tid +  4]; if (v < o) v = o; __syncwarp(); sdata[tid] = v; __syncwarp();
		o = sdata[tid +  2]; if (v < o) v = o; __syncwarp(); sdata[tid] = v; __syncwarp();
		o = sdata[tid +  1]; if (v < o) v = o; sdata[tid] = v;
	}

	/*
	 * Final warp-level stage of a shared-memory min reduction: folds
	 * sdata[0..63] into sdata[0].  Call only from threads with tid < 32,
	 * after a __syncthreads().
	 * Fix: added __syncwarp() with read/write separation -- required on
	 * Volta+ where warps no longer execute in implicit lockstep.
	 */
	template<class myType>
	__device__ void warpReduceMin(volatile myType* sdata, int tid) {
		myType v = sdata[tid];
		myType o;
		o = sdata[tid + 32]; if (v > o) v = o; __syncwarp(); sdata[tid] = v; __syncwarp();
		o = sdata[tid + 16]; if (v > o) v = o; __syncwarp(); sdata[tid] = v; __syncwarp();
		o = sdata[tid +  8]; if (v > o) v = o; __syncwarp(); sdata[tid] = v; __syncwarp();
		o = sdata[tid +  4]; if (v > o) v = o; __syncwarp(); sdata[tid] = v; __syncwarp();
		o = sdata[tid +  2]; if (v > o) v = o; __syncwarp(); sdata[tid] = v; __syncwarp();
		o = sdata[tid +  1]; if (v > o) v = o; sdata[tid] = v;
	}


	/*
	 * Block-wide maximum of y[0..r-1], written to *y_max by thread 0.
	 * Every thread of the block must call it (contains __syncthreads());
	 * assumes blockDim.x == nrOfThreads (the shared cache is sized by it).
	 * NOTE(review): cache starts at 0, so the result is only correct for
	 * non-negative inputs -- appears to hold for the delta_y / y vectors
	 * used in this file, but confirm.
	 */
	template<class myType>
	__device__ void compute_max(myType *y, int r, myType *y_max,  int tx)
	{

	 __shared__ myType cache[nrOfThreads];
	 int idx = tx;
	 cache[idx] = 0;
	 __syncthreads();

	 // each thread strides over the vector keeping its running maximum
	 while ( (idx< r ))
	 {
		if (cache[tx] < y[idx]) cache[tx] = y[idx];
		//printf("tx:%d  cache = %f\n ",tx, cache[tx]);
		idx +=nrOfThreads;
	 }

	 // sync threads within a block
	 __syncthreads();

	  // reduction!
	 if (nrOfThreads >=1024) {if (tx < 512) { if (cache[tx] < cache[tx + 512]) cache[tx] = cache[tx+512]; }__syncthreads(); }
	 if (nrOfThreads >= 512) {if (tx < 256) { if (cache[tx] < cache[tx + 256]) cache[tx] = cache[tx+256]; }__syncthreads(); }
	 if (nrOfThreads >= 256) {if (tx < 128) { if (cache[tx] < cache[tx + 128]) cache[tx] = cache[tx+128]; } __syncthreads();}
	 if (nrOfThreads >= 128) {if (tx < 64) {  if (cache[tx] < cache[tx + 64] ) cache[tx] = cache[tx+64]; } __syncthreads(); }
	 if (tx < 32) warpReduceMax<myType>(cache, tx);


	 if (tx ==0) {(*y_max) = cache[tx]; }

	}

	/*
	 * Block-wide minimum of y[0..c-1], written to *y_min by thread 0.
	 * Every thread of the block must call it (contains __syncthreads());
	 * assumes blockDim.x == nrOfThreads.  cache is seeded with INFTY so
	 * unused slots never win the minimum.
	 */
	template<class myType>
	__device__ void compute_min(myType *y, int c, myType *y_min, int tx)
	{

		__shared__ myType cache[nrOfThreads];
		int idx = tx;
		cache[idx] = INFTY;
		__syncthreads();
		// each thread strides over the vector keeping its running minimum
		while ( (idx< c ))
		{
			if (cache[tx] > y[idx]) cache[tx] = y[idx];
			idx +=nrOfThreads;
		}
		__syncthreads();

		// reduction!
		if (nrOfThreads >=1024) {if (tx < 512) { if (cache[tx] > cache[tx + 512]) cache[tx] = cache[tx+512]; }__syncthreads(); }
		if (nrOfThreads >= 512) {if (tx < 256) { if (cache[tx] > cache[tx + 256]) cache[tx] = cache[tx+256]; }__syncthreads(); }
		if (nrOfThreads >= 256) {if (tx < 128) { if (cache[tx] > cache[tx + 128]) cache[tx] = cache[tx+128]; } __syncthreads();}
		if (nrOfThreads >= 128) {if (tx < 64) {  if (cache[tx] > cache[tx + 64] ) cache[tx] = cache[tx+64]; } __syncthreads(); }
		if (tx < 32) warpReduceMin<myType>(cache, tx);
		if (tx ==0) (*y_min) = cache[tx];
	}

	/*
	 * Block-wide sum of y[s..e] (inclusive), written to *sum by thread 0.
	 * Every thread of the block must call it (contains __syncthreads()).
	 * cache[] is sized for the maximum block size (1024); slots above
	 * blockDim.x are cleared and stay zero, so they are harmless in the
	 * reduction.  Assumes blockDim.x is a power of two.
	 *
	 * Fix: the accumulation stride was the hard-coded 1024, which
	 * silently skipped every element with offset in [blockDim.x, 1024)
	 * whenever blockDim.x < 1024.  It now strides by the actual number of
	 * threads (identical behavior when blockDim.x == 1024).
	 */
	template <class myType>
	__device__ void sumVectorElements(myType *y, int tx, int s, int e, myType *sum)
	{
		int threads_per_block = blockDim.x;
		__shared__ myType cache[1024];

		// clear the whole cache, including slots no thread accumulates into
		for (int i = tx; i < 1024; i += threads_per_block)
			cache[i] = 0;
		__syncthreads();

		// each thread accumulates its strided share of [s, e]
		for (int i = tx; i + s <= e; i += threads_per_block)
			cache[tx] += y[i + s];
		__syncthreads();

		if (threads_per_block >=1024) {if (tx < 512) { cache[tx] += cache[tx + 512]; }__syncthreads(); }
		if (threads_per_block >= 512) {if (tx < 256) { cache[tx] += cache[tx + 256]; }__syncthreads(); }
		if (threads_per_block >= 256) {if (tx < 128) { cache[tx] += cache[tx + 128]; } __syncthreads(); }
		if (threads_per_block >= 128) {if (tx < 64) { cache[tx] += cache[tx + 64]; } __syncthreads(); }
		if (tx < 32) warpReduce<myType>(cache, tx);

		if (tx ==0)	(*sum) = cache[tx];

	}

	/*
	 * Reduces cache[0..dim-1] (shared memory already populated and
	 * synchronized by the caller; dim = blockDim.x, power of two) into
	 * *sum, written by thread 0.  Every thread of the block must call it.
	 * NOTE(review): callers must __syncthreads() after this returns
	 * before reusing cache[] -- threads >= 32 leave while the first warp
	 * is still reading cache[32..63] in warpReduce.
	 *
	 * Fix: the warp stage was instantiated as warpReduce<real> regardless
	 * of myType, which is wrong whenever myType != real.
	 */
	template<class myType>
	__device__ void sumCache(volatile myType* cache, int dim, int tx, myType *sum)
	{
		if (dim >= 1024) { if (tx < 512) { cache[tx] += cache[tx + 512]; } __syncthreads(); }
		if (dim >=  512) { if (tx < 256) { cache[tx] += cache[tx + 256]; } __syncthreads(); }
		if (dim >=  256) { if (tx < 128) { cache[tx] += cache[tx + 128]; } __syncthreads(); }
		if (dim >=  128) { if (tx <  64) { cache[tx] += cache[tx +  64]; } __syncthreads(); }
		if (tx < 32) warpReduce<myType>(cache, tx);

		if (tx == 0) (*sum) = cache[tx];
	}

	/*
	 * Launch with exactly 2 blocks of nrOfThreads threads:
	 * block 0 rescales p (length r) to sum 1, block 1 rescales p_hat
	 * (length c) to sum 1.
	 * NOTE(review): no guard against a zero sum (all weights driven to 0)
	 * -- that would produce NaN/Inf weights.
	 */
	__global__ void normalizeWeightVectors(real *p, real *p_hat, int r, int c)
	{
	 // 2 blocks and nrOfThreads threads

		int bx = blockIdx.x;
		int tx = threadIdx.x;
		int idx = tx;
		__shared__ real sum_p, sum_p_hat;
		if (bx == 0)
		{

			sumVectorElements<real>(p, tx, 0, r-1, &sum_p);
			__syncthreads();
			while(idx<r)
			{
				p[idx] /= sum_p;
				//printf("%.3f",p[idx]);
				idx += nrOfThreads;

			}
		}
		if (bx == 1)
		{
			sumVectorElements<real>(p_hat, tx, 0, c-1, &sum_p_hat);
			__syncthreads();

			while(idx<c)
			{
				p_hat[idx] /= sum_p_hat;
				//printf("%.3f",p_hat[idx]);
				idx += nrOfThreads;
			}
		}
	}

	/*
	 * delta_y_hat = A^T * p.  Launch with up to c blocks and at most 256
	 * threads per block (shared cache size); r+c <= 65536 per the
	 * original launch comment.  Each block grid-strides over columns; for
	 * column bx the threads cooperatively accumulate
	 * sum_i tex2D(tex, bx, i) * p[i] and thread 0 stores the result to
	 * delta_y_hat[bx].
	 *
	 * Fix: added the __syncthreads() at the bottom of the column loop.
	 * Without it, threads >= 32 could loop around and zero cache[] for
	 * the next column while the first warp was still reading
	 * cache[32..63] inside sumCache's warpReduce stage (data race).
	 */
	__global__ void computeChangeInDualLHS(real *p, real *delta_y_hat, int r, int c)
	{
		int tx = threadIdx.x;
		int bx = blockIdx.x;

		int idx;
		int threads_per_block = blockDim.x;
		int blocks_per_grid = gridDim.x;
		__shared__ real cache[256];

		while(bx<c)
		{
			cache[tx] = 0;
			idx=tx;
			while (idx < r)
			{
				// texture lookup: x = column bx, y = row idx
				cache[tx] += tex2D(tex, bx, idx)* p[idx];
				idx += threads_per_block;
			}
			__syncthreads();

			sumCache<real>(cache, threads_per_block, tx, delta_y_hat+bx);
			__syncthreads();	// protect cache[] before the next iteration resets it
			bx+=blocks_per_grid;
		}
	}

	/*
	 * delta_y = A * p_hat.  Launch with up to r blocks and at most 256
	 * threads per block (shared cache size).  Each block grid-strides
	 * over rows; for row bx the threads cooperatively accumulate
	 * sum_j tex2D(tex, j, bx) * p_hat[j] and thread 0 stores the result
	 * to delta_y[bx].
	 *
	 * Fix: added the __syncthreads() at the bottom of the row loop --
	 * same cache[] reuse race as in computeChangeInDualLHS.
	 */
	__global__ void computeChangeInPrimalLHS(real *p_hat, real *delta_y, int r, int c)
	{
		int tx = threadIdx.x;
		int bx = blockIdx.x;
		int idx;
		int threads_per_block = blockDim.x;
		int blocks_per_grid = gridDim.x;
		__shared__ real cache[256];

		while(bx < r)
		{
			cache[tx] = 0;
			idx=tx;
			while (idx < c)
			{
				// texture lookup: x = column idx, y = row bx
				cache[tx] += tex2D(tex, idx, bx)* p_hat[idx];
				idx += threads_per_block;
			}
			__syncthreads();
			sumCache<real>(cache, threads_per_block, tx, delta_y+bx);
			__syncthreads();	// protect cache[] before the next iteration resets it
			bx+=blocks_per_grid;
		}
	}

	/*
	 * Launch with 2 blocks of nrOfThreads threads:
	 * block 0 writes max_i delta_y[i] to *alpha_y,
	 * block 1 writes max_j delta_y_hat[j] to *alpha_y_hat.
	 */
	__global__ void computeAlpha(real *delta_y, real *delta_y_hat, real *alpha_y, real *alpha_y_hat, int r, int c)
		{
			int tx = threadIdx.x;

			// blockIdx.x is uniform within a block, so the __syncthreads()
			// inside compute_max is reached by all threads of the block
			switch (blockIdx.x)
			{
				case 0: compute_max<real>(delta_y, r, alpha_y, tx); break;
				case 1: compute_max<real>(delta_y_hat, c, alpha_y_hat, tx); break;
				default: break;
			}
		}

		/*
		 * Launch with 2 blocks of nrOfThreads threads.
		 * Thread 0 of each block derives the common step
		 *   alpha = beta / max(*alpha_y, *alpha_y_hat).
		 * Block 0 (primal/column side): x += alpha*p_hat,
		 * y_hat += alpha*delta_y_hat, zeroes p_hat[j] permanently once
		 * y_hat[j] exceeds N, otherwise decays it by (1-eps)^(alpha*delta).
		 * Block 1 (dual/row side): x_hat += alpha*p, y += alpha*delta_y,
		 * grows p by (1+eps)^(alpha*delta), and clears *keepOnRunning as
		 * soon as some y[i] exceeds N (outer stopping rule).
		 * NOTE(review): powf() is single precision -- if `real` is double
		 * this silently loses precision; confirm.
		 */
		__global__ void updatePrimDual(real *x, real *x_hat, real *p, real *p_hat, real *alpha_y, real *alpha_y_hat, \
									real *y, real *y_hat, real *delta_y, real *delta_y_hat, int r, int c, real eps, \
									bool* keepOnRunning, const int N, real beta)
		{
			// 2 blocks and nrOfThreads threads
			int bx = blockIdx.x;
			int tx = threadIdx.x;
			int idx = tx ;
			__shared__ real alpha;
			// alpha = beta divided by the larger of the two maxima
			if (tx == 0) alpha = (*alpha_y) < (*alpha_y_hat) ? beta/(*alpha_y_hat): beta/(*alpha_y);
			__syncthreads();

			if (bx == 0)
			{
				while(idx < c)
				{
					x[idx] += alpha*p_hat[idx];
					y_hat[idx] += alpha*delta_y_hat[idx];
					if (y_hat[idx] > N) p_hat[idx] = 0.0;
					if (p_hat[idx] != 0.0) p_hat[idx] *= powf((1-eps), alpha*delta_y_hat[idx]);
					idx +=nrOfThreads;
				}
			}
			if (bx == 1)
			{
				while(idx < r)
				{
					x_hat[idx] += alpha*p[idx];
					y[idx] += alpha*delta_y[idx];
					if (y[idx] > N) (*keepOnRunning) = false;
					p[idx] *= powf((1+eps), alpha*delta_y[idx]);
					idx +=nrOfThreads;
				}
			}
		}

		/*
		 * Launch with a single block of nrOfThreads threads.
		 * Computes M = max_i y[i] and m = min_j y_hat[j]; thread 0 clears
		 * *keepOnRunning once m/(1-eps) > M (termination test).
		 * NOTE(review): parameters x, x_hat and count are currently
		 * unused (kept for interface stability / the commented-out
		 * alternative test below).
		 */
		__global__ void scalePrimDual(real *x, real *x_hat, real *y, real *y_hat, int r, int c, real eps, bool *keepOnRunning, int count)
		{
			// 1 blocks and nrOfThreads threads
			int tx = threadIdx.x;

			__shared__ real M;
			__shared__ real m;

			compute_max<real>(y, r, &M,  tx);
			__syncthreads();
			compute_min<real>(y_hat, c, &m,  tx);
			__syncthreads();

			if (tx ==0)
			{
				//(*beta1)=logf(r*c)-(m*logf(1-eps)+M*logf(1+eps));
				/*if(logf(r*c)<(m*logf(1-eps)+M*logf(1+eps)))
				{
					printf("%d.  uvjet : FALSE\n",count);
					(*keepOnRunning) = false;
				}*/
				//printf("%.3f --- %.3f\n",m,M);
				if (m/(1-eps) >  M) (*keepOnRunning) = false;

			}
		}

	// Debug kernel: thread 0 dumps the first three entries of A, x and y
	// (m and n are accepted but not used).
	__global__ void test(const matrix_type *A, const double *x, double *y, int m, int n)
	{
		if (threadIdx.x == 0)
		{
			for (int k = 0; k < 3; ++k) printf("\n %.3f\n", A[k]);
			for (int k = 0; k < 3; ++k) printf("\n %.3f\n", x[k]);
			for (int k = 0; k < 3; ++k) printf("\n %.3f\n", y[k]);
		}

	}

	/*
	 * Debug/verification path: computes delta_y_hat = A^T * p with cuBLAS
	 * (double precision) and prints A, p and the result so they can be
	 * checked by hand.
	 *
	 * cuBLAS is column-major while A is stored row-major: the Dgemv call
	 * passes the dimensions swapped (m=c, n=r, lda=c) with CUBLAS_OP_N,
	 * so the row-major r x c buffer is consumed as the column-major c x r
	 * matrix A^T and the product is A^T * p as intended.  NOTE(review):
	 * dev_A_cublas was uploaded with lda=r in the constructor -- verify
	 * the two layout interpretations actually agree.
	 *
	 * Fix: cudaThreadSynchronize() is deprecated (and removed from recent
	 * CUDA toolkits); replaced by the equivalent cudaDeviceSynchronize().
	 */
	void ParallelYoungDerandomized::cublasChangeInLHS()
	{
		cublasHandle_t handle;
		myComputingStructures->initializeComputingStrucuters(true);
		const double alpha = 1.0;	// gemv: y = alpha*op(A)*x + beta*y
		const double beta = 0.0;

		// create the cuBLAS context, reporting the status verbosely
		switch(cublasCreate_v2(&handle))
		{
			case CUBLAS_STATUS_SUCCESS:
				std::cout<<"The operation completed successfully."<<std::endl;
				break;
			case CUBLAS_STATUS_NOT_INITIALIZED:
				std::cout<<"The CUBLAS library was not initialized."<<std::endl;
				break;
			case CUBLAS_STATUS_ALLOC_FAILED:
				std::cout<<"Resource allocation failed inside the CUBLAS library. This is usually caused by a cudaMalloc() failure."<<std::endl;
				break;
			case CUBLAS_STATUS_INVALID_VALUE:
				std::cout<<"An unsupported value or parameter was passed to the function."<<std::endl;
				break;
			case CUBLAS_STATUS_ARCH_MISMATCH:
				std::cout<<"The function requires a feature absent from the device architecture; usually caused by the lack of support for double precision."<<std::endl;
				break;
			case CUBLAS_STATUS_MAPPING_ERROR:
				std::cout<<"An access to GPU memory space failed, which is usually caused by a failure to bind a texture."<<std::endl;
				break;
			case CUBLAS_STATUS_EXECUTION_FAILED:
				std::cout<<"The GPU program failed to execute. This is often caused by a launch failure of the kernel on the GPU, which can be caused by multiple reasons."<<std::endl;
				break;
			case CUBLAS_STATUS_INTERNAL_ERROR:
				std::cout<<"An internal CUBLAS operation failed. This error is usually caused by a cudaMemcpyAsync() failure."<<std::endl;
				break;
			default:
				std::cout<<"Unknown error."<<std::endl;
				break;

		}
		cudaDeviceSynchronize();
		// dump the first entries before the product
		test<<<1,1,32>>>(myPackingLinearProgram->dev_A_cublas,myComputingStructures->dev_p,myComputingStructures->dev_delta_y_hat,myPackingLinearProgram->r,myPackingLinearProgram->c);
		cudaDeviceSynchronize();
		switch(cublasDgemv_v2(handle,CUBLAS_OP_N,myPackingLinearProgram->c,myPackingLinearProgram->r,&alpha,(const double *)myPackingLinearProgram->dev_A_cublas,myPackingLinearProgram->c,myComputingStructures->dev_p,1,&beta,myComputingStructures->dev_delta_y_hat,1))
		{
			case CUBLAS_STATUS_SUCCESS:
				std::cout<<"The operation completed successfully."<<std::endl;
				break;
			case CUBLAS_STATUS_NOT_INITIALIZED:
				std::cout<<"The library was not initialized."<<std::endl;
				break;
			case CUBLAS_STATUS_INVALID_VALUE:
				std::cout<<"The parameters m,n<9 or incx,incy=0."<<std::endl;
				break;
			case CUBLAS_STATUS_ARCH_MISMATCH:
				std::cout<<"The device does not support double-precision."<<std::endl;
				break;
			case CUBLAS_STATUS_EXECUTION_FAILED:
				std::cout<<"The function failed to launch on the GPU."<<std::endl;
				break;
			default:
				std::cout<<"Unknown error."<<std::endl;
				break;
		}
		cudaDeviceSynchronize();
		// dump the first entries after the product
		test<<<1,1,32>>>(myPackingLinearProgram->dev_A_cublas,myComputingStructures->dev_p,myComputingStructures->dev_delta_y_hat,myPackingLinearProgram->r,myPackingLinearProgram->c);
		cudaDeviceSynchronize();

		// pull the result back into the host-side y_hat buffer
		switch (cublasGetVector(myPackingLinearProgram->c,sizeof(real),(const void *)myComputingStructures->dev_delta_y_hat,1,(void *)myComputingStructures->y_hat,1))
		{
			case CUBLAS_STATUS_SUCCESS:
				std::cout<<"The operation completed successfully."<<std::endl;
				break;
			case CUBLAS_STATUS_NOT_INITIALIZED:
				std::cout<<"The library was not initialized."<<std::endl;
				break;
			case CUBLAS_STATUS_INVALID_VALUE:
				std::cout<<"The parameters incx, incy, elemSize <=0"<<std::endl;
				break;
			case CUBLAS_STATUS_MAPPING_ERROR:
				std::cout<<"There was an error accessing GPU memory"<<std::endl;
				break;
			default:
				std::cout<<"Unknown error."<<std::endl;
				break;
		}
		cudaDeviceSynchronize();
		// print A, p and the product for manual verification
		std::cout<<std::endl;
		std::cout<<std::endl;
		std::cout<<"A = [";
		for(unsigned int i=0; i<myPackingLinearProgram->r; i++)
		{
			for(unsigned int j=0; j<myPackingLinearProgram->c; j++)
			{
				std::cout<<"\t"<<myPackingLinearProgram->A[i*(myPackingLinearProgram->c)+j];
			}
			std::cout<<std::endl;
		}
		std::cout<<"]";
		std::cout<<std::endl;
		std::cout<<std::endl;

		std::cout<<"p = [";
		for(unsigned int i=0; i<myPackingLinearProgram->r; i++)
		{
			std::cout<<"\t"<<myComputingStructures->p[i];
		}
		std::cout<<"]";
		std::cout<<std::endl;
		std::cout<<std::endl;
		std::cout<<"A^T x p = [";
		for(unsigned int i=0; i<myPackingLinearProgram->c; i++)
		{
			std::cout<<"\t"<<myComputingStructures->y_hat[i];
		}
		std::cout<<"]";
		std::cout<<std::endl;



		cublasDestroy_v2(handle);
	}

	void ParallelYoungDerandomized::optimizeInstance(real EPS, real BETA)

	{
		myComputingStructures->initializeComputingStrucuters(false);
		std::cout << "Kernels launched ... " << std::endl;
		real beta=BETA;
		//real beta = 12*ceil(log(0.34*(myPackingLinearProgram->r)*(myPackingLinearProgram->c)));
		//std::ofstream outputFile;
		//outputFile.open(logFile.c_str(),std::ios_base::app);

		/*if(!outputFile.is_open())
		{
			std::cout<<"ERROR: Log file can not be created for writing! Exit!"<<std::cout;
			exit(0);
		}*/

		//outputFile<<"Iteration &  Primal  &   Dual   &    M    &    m    &    alpha    &   |p|   &    |p_hat|   &    Phi     &   M-m   &   m/M   "<<std::endl;
		//outputFile<<std::endl<<std::endl;
		//outputFile<<"      0     &      0     &   0   &    0    &    0   &    0    &    "<<r<<"   &    "<<c<<"   &     "<<r*c<<"     &   0   &   "<<1.0*r/c<<"   "<<std::endl;

		Miscellaneous::Timer timeNormalize; timeNormalize.start(); timeNormalize.stop();
		Miscellaneous::Timer timeChangeLHS; timeChangeLHS.start(); timeChangeLHS.stop();
		Miscellaneous::Timer timeComputeAlpha; timeComputeAlpha.start(); timeComputeAlpha.stop();
		Miscellaneous::Timer timeUpdatePrimDual; timeUpdatePrimDual.start(); timeUpdatePrimDual.stop();
		Miscellaneous::Timer timeScalePrimDual; timeScalePrimDual.start(); timeScalePrimDual.stop();

		cudaThreadSynchronize();
		while(*(myComputingStructures->keepOnRunning))
		{
			myComputingStructures->countITERATIONS++;
			timeNormalize.cont();
			normalizeWeightVectors<<<2, nrOfThreads>>>(myComputingStructures->dev_p, myComputingStructures->dev_p_hat, myPackingLinearProgram->r, myPackingLinearProgram->c);
			cudaThreadSynchronize();
			timeNormalize.stop();


			timeChangeLHS.cont();
			//computeChangeInLHS<<<max(r,c), 128>>>(dev_p, dev_p_hat, dev_delta_y, dev_delta_y_hat, r, c); // compute expected (!) y = Ax  and expected y_hat = A^Tx_hat
			computeChangeInDualLHS<<<myPackingLinearProgram->c, 128>>>(myComputingStructures->dev_p, myComputingStructures->dev_delta_y_hat, myPackingLinearProgram->r, myPackingLinearProgram->c);
			cudaThreadSynchronize();
			computeChangeInPrimalLHS<<<myPackingLinearProgram->r, 128>>>(myComputingStructures->dev_p_hat, myComputingStructures->dev_delta_y, myPackingLinearProgram->r, myPackingLinearProgram->c);
			cudaThreadSynchronize();
			timeChangeLHS.stop();

			timeComputeAlpha.cont();
			computeAlpha<<<2, nrOfThreads>>>(myComputingStructures->dev_delta_y, myComputingStructures->dev_delta_y_hat, myComputingStructures->dev_alpha_y, myComputingStructures->dev_alpha_y_hat, myPackingLinearProgram->r, myPackingLinearProgram->c);
			cudaThreadSynchronize();
			timeComputeAlpha.stop();



			timeUpdatePrimDual.cont();


			updatePrimDual<<<2, nrOfThreads>>>(myComputingStructures->dev_x, myComputingStructures->dev_x_hat, myComputingStructures->dev_p, myComputingStructures->dev_p_hat, myComputingStructures->dev_alpha_y, myComputingStructures->dev_alpha_y_hat, \
					myComputingStructures->dev_y, myComputingStructures->dev_y_hat, myComputingStructures->dev_delta_y, myComputingStructures->dev_delta_y_hat, myPackingLinearProgram->r, myPackingLinearProgram->c, eps+EPS, myComputingStructures->dev_keepOnRunning, myComputingStructures->N,beta);

			cudaThreadSynchronize();
			timeUpdatePrimDual.stop();


			timeScalePrimDual.cont();

			scalePrimDual<<<1, nrOfThreads>>>(myComputingStructures->dev_x, myComputingStructures->dev_x_hat, myComputingStructures->dev_y, myComputingStructures->dev_y_hat, myPackingLinearProgram->r, myPackingLinearProgram->c, eps, myComputingStructures->dev_keepOnRunning,myComputingStructures->countITERATIONS);
			cudaThreadSynchronize();
			//std::cout<<myComputingStructures->countITERATIONS<<". ---- "<<*(myComputingStructures->condition8)<<std::endl;
			timeScalePrimDual.stop();
			//if(myComputingStructures->countITERATIONS%1000==0)std::cout<<myComputingStructures->countITERATIONS<<std::endl;

			//cout <<"Kernel says: " <<  	cudaGetErrorString (cudaGetLastError () )  << endl;

		}

		//	normalizeWeightVectors<<<2, nrOfThreads>>>(dev_p, dev_p_hat, r, c);

		std::cout << "Number of iterations of WHILE: " << myComputingStructures->countITERATIONS << std::endl;



		std::cout <<"Kernel says: " <<  	cudaGetErrorString (cudaGetLastError () )  << std::endl;


		myPerformanceStructures->normalizationTime = timeNormalize.secs();
		myPerformanceStructures->changeOfLHSTime = timeChangeLHS.secs();
		myPerformanceStructures->alphaComputationTime = timeComputeAlpha.secs();
		myPerformanceStructures->updateOfPrimalDualTime = timeUpdatePrimDual.secs();
		myPerformanceStructures->scalePrimalDualTime = timeScalePrimDual.secs();

		std::cout << "Time for normalization        : " << myPerformanceStructures->normalizationTime << "seconds" << std::endl;
		std::cout << "Time for change in LHS        : " << myPerformanceStructures->changeOfLHSTime << "seconds" << std::endl;
		std::cout << "Time for determining alpha    : " << myPerformanceStructures->alphaComputationTime << "seconds" << std::endl;
		std::cout << "Time for prim-dual updates    : " << myPerformanceStructures->updateOfPrimalDualTime << "seconds" << std::endl;
		std::cout << "Time for scale prim-dual      : " << myPerformanceStructures->scalePrimalDualTime << "seconds" << std::endl;
		std::cout << "TOTAL CUDA Computation time   : " << myPerformanceStructures->totalComputationTime() << " seconds" << std::endl;
	}

	// Appends one row of benchmark results to a separator-delimited text file.
	// The row is written in two passes across separate calls:
	//   mode == "newLine": writes the row prefix — r, c, d, the expected matrix
	//     density r*c*0.5^d, eps, and the total CUDA computation time — each
	//     followed by the separator, WITHOUT ending the line;
	//   mode == "endLine": writes the two trailing values ('something' and
	//     'second' — presumably objective value / CPU reference time, TODO
	//     confirm against callers) and terminates the line with std::endl.
	// Any other mode string writes nothing. The file is opened in append mode,
	// so repeated runs accumulate rows.
	//
	// Parameters:
	//   pathToFileNameOfResults - path of the results file to append to.
	//   mode                    - "newLine" or "endLine" (see above).
	//   something, second       - the two values emitted in "endLine" mode.
	//   s                       - if non-zero ('\0' means "keep current"),
	//                             overrides the column separator stored in
	//                             myComputingStructures->sep.
	void ParallelYoungDerandomized::writingResultsToFileOnlyTimes(string pathToFileNameOfResults, string mode, float something, float second, char s)
	{
		if(s) myComputingStructures->sep=s;  // '\0' acts as the "no override" sentinel
		//int error;
		//void *cbdata;
		std::ofstream resultFile;
		// std::ios::app — always append; failure to open is silently ignored below.
		resultFile.open(pathToFileNameOfResults.c_str(),std::ios::app);

		if(resultFile.is_open())
		{
			if(mode=="newLine")
			{
				// width() is one-shot and must be re-armed before each field.
				resultFile.width(7);
				// NOTE(review): single-argument setf() ORs the flag without
				// clearing the adjustfield group; the robust form is
				// setf(std::ios_base::right, std::ios_base::adjustfield).
				// Works here only because no conflicting flag is set yet.
				resultFile.setf(std::ios_base::right);
				resultFile<<myComputingStructures->r<<" "<<myComputingStructures->sep;

				resultFile.width(7);
				//resultFile.setf(std::ios_base::right);
				resultFile<<myComputingStructures->c<<" "<<myComputingStructures->sep;

				resultFile.width(6);
				//resultFile.setf(std::ios_base::right);
				resultFile<<myPackingLinearProgram->d<<" "<<myComputingStructures->sep;

				resultFile.width(10);
				//resultFile.setf(std::ios_base::right);
				// Expected number of nonzeros/density proxy: r*c*(1/2)^d.
				resultFile<<(myComputingStructures->r)*(myComputingStructures->c)*pow(0.5,myPackingLinearProgram->d)<<" "<<myComputingStructures->sep;


				// Switch alignment to left for the floating-point columns.
				resultFile.unsetf(std::ios_base::right);
				resultFile.setf(std::ios_base::left);
				resultFile<<" ";
				resultFile.width(6);
				resultFile<<eps<<myComputingStructures->sep;

				resultFile.precision(5);
				resultFile<<" ";
				resultFile.width(13);
				resultFile<<myPerformanceStructures->totalComputationTime()<<myComputingStructures->sep;
			}
			else if(mode=="endLine")
			{
				/*resultFile.precision(5);
				resultFile<<" ";
				resultFile.width(13);
				resultFile<<myPerformanceStructures->totalComputationTime()<<myComputingStructures->sep;*/

				resultFile.precision(7);
				resultFile<<" ";
				resultFile.width(13);
				resultFile<<something<<" "<<myComputingStructures->sep;

				resultFile.width(6);
				//resultFile.setf(std::ios_base::right);
				// std::endl terminates the row started by a prior "newLine" call.
				resultFile<<second<<std::endl;

			}
		}
		resultFile.close();
		return;
	}


	// Destructor: intentionally empty — no resources are released here.
	// NOTE(review): the buffers allocated in ComputingStructures' constructor
	// (malloc / cudaMalloc / cudaHostAlloc, see HEAD) are not freed in this
	// destructor; presumably they are released elsewhere (e.g. by a
	// ComputingStructures destructor not visible in this chunk) or leaked at
	// process exit — verify ownership before relying on cleanup.
	ParallelYoungDerandomized::~ParallelYoungDerandomized()
	{

	}

}




