#include <cuGK_PC_solver.h>

// auxiliary functions

// ********************************** DEVICE error checking ***********************************
// Reports the CUDA status string associated with the call tagged `name`.
// Fix: failures now go to stderr so they remain visible when stdout is
// redirected; success traces keep the original stdout line. (Signature kept
// as `char *` to match the declaration in cuGK_PC_solver.h.)
void CHECK_CUDA_ERROR(cudaError_t error, char *name)
{
    if (error != cudaSuccess)
        std::cerr << name << "->" << "DEVICE status: " << cudaGetErrorString(error) << std::endl;
    else
        std::cout << name << "->" << "DEVICE status: " << cudaGetErrorString(error) << std::endl;
}



template <class myType>
__device__ void warpReduce(volatile myType* data) 
{	
	// Final warp-level stage of a power-of-two shared-memory tree sum.
	// Precondition: caller guards with (threadIdx.x < 32) and blockDim.x >= 64
	// (the first step reads data[tx + 32]).
	// Fix: on Volta+ (independent thread scheduling) the lanes of a warp are no
	// longer implicitly lockstep, so `volatile` alone does not order one lane's
	// read of a neighbour slot after the neighbour's write. Each step now
	// reads, barriers with __syncwarp(), writes, and barriers again.
	index tx = threadIdx.x;
	myType v;
	v = data[tx] + data[tx + 32]; __syncwarp(); data[tx] = v; __syncwarp();
	v = data[tx] + data[tx + 16]; __syncwarp(); data[tx] = v; __syncwarp();
	v = data[tx] + data[tx + 8];  __syncwarp(); data[tx] = v; __syncwarp();
	v = data[tx] + data[tx + 4];  __syncwarp(); data[tx] = v; __syncwarp();
	v = data[tx] + data[tx + 2];  __syncwarp(); data[tx] = v; __syncwarp();
	v = data[tx] + data[tx + 1];  __syncwarp(); data[tx] = v;
};

template <class myType>
__device__ void warpReduceMin(volatile myType* data, volatile  index* data_idx, index tid) {
	// Final warp-level stage of a min-reduction that also tracks the index of
	// the minimum in data_idx. Precondition: caller guards with (tid < 32) and
	// blockDim.x >= 64 (the first step reads data[tid + 32]).
	// Fix: on Volta+ lanes are not implicitly lockstep, so each step snapshots
	// the neighbour's value/index, barriers with __syncwarp(), then performs
	// the compare-and-update, then barriers again before the next offset.
	myType v;
	index  vi;
	v = data[tid + 32]; vi = data_idx[tid + 32]; __syncwarp();
	if (data[tid] > v) { data[tid] = v; data_idx[tid] = vi; } __syncwarp();
	v = data[tid + 16]; vi = data_idx[tid + 16]; __syncwarp();
	if (data[tid] > v) { data[tid] = v; data_idx[tid] = vi; } __syncwarp();
	v = data[tid + 8];  vi = data_idx[tid + 8];  __syncwarp();
	if (data[tid] > v) { data[tid] = v; data_idx[tid] = vi; } __syncwarp();
	v = data[tid + 4];  vi = data_idx[tid + 4];  __syncwarp();
	if (data[tid] > v) { data[tid] = v; data_idx[tid] = vi; } __syncwarp();
	v = data[tid + 2];  vi = data_idx[tid + 2];  __syncwarp();
	if (data[tid] > v) { data[tid] = v; data_idx[tid] = vi; } __syncwarp();
	v = data[tid + 1];  vi = data_idx[tid + 1];  __syncwarp();
	if (data[tid] > v) { data[tid] = v; data_idx[tid] = vi; }
};

// Block-wide tree sum over the shared array `data` (one element per thread).
// Preconditions: blockDim.x is a power of two and >= 64 (the warp stage reads
// data[tx+32]); the caller must __syncthreads() after the last write to `data`
// and before calling this (cuCalculateR does).
// Returns the block sum ONLY on thread 0; every other thread gets the INFTY
// sentinel, so callers must read the result under an (tx == 0) guard.
template <class myType>
__device__ myType reduceSum(volatile myType* data)
{
     index tx   = threadIdx.x;
	 myType sum = INFTY;	// sentinel returned by all threads except thread 0
	 
	 	 
	// halve the active range each step; the blockDim guards let one template
	// serve 128/256/512/1024-thread blocks without out-of-range reads
	if (blockDim.x >=1024) {if (tx < 512) data[tx] += data[tx+512]; __syncthreads();}	
	if (blockDim.x >= 512) {if (tx < 256) data[tx] += data[tx+256]; __syncthreads();}
	if (blockDim.x >= 256) {if (tx < 128) data[tx] += data[tx+128]; __syncthreads();}
	if (blockDim.x >= 128) {if (tx < 64)  data[tx] += data[tx+64]; __syncthreads();}
	// last 64 -> 1 elements handled inside a single warp
	if (tx < 32) warpReduce<myType>(data);
	
	if (tx==0)
	{
		sum = data[0];
	}	
	return sum;
};

// Block-wide min-reduction over the shared array `data`, tracking the original
// index of each element in the parallel shared array `data_idx`.
// Preconditions: blockDim.x is a power of two and >= 64; the caller must
// __syncthreads() between the last write to data/data_idx and this call.
// Thread 0 writes the block minimum to *min and its index to *min_idx; other
// threads leave the output pointers untouched.
template <class myType>
__device__ void reduceMin(volatile myType* data,myType *min,volatile index *data_idx,index *min_idx)
{
     index tx = threadIdx.x;
	 //printf("Dimenzija bloka je %d\n",blockDim.x);
	 	 
	// tree reduction: whenever a smaller value is pulled forward, its index
	// moves with it so the argmin survives to slot 0
	if (blockDim.x >=1024) {if (tx < 512) { if (data[tx] > data[tx + 512]) {data[tx] = data[tx+512]; data_idx[tx] = data_idx[tx+512];} } __syncthreads();}	
	if (blockDim.x >= 512) {if (tx < 256) { if (data[tx] > data[tx + 256]) {data[tx] = data[tx+256]; data_idx[tx] = data_idx[tx+256];} } __syncthreads();}
	if (blockDim.x >= 256) {if (tx < 128) { if (data[tx] > data[tx + 128]) {data[tx] = data[tx+128]; data_idx[tx] = data_idx[tx+128];} } __syncthreads();}
	if (blockDim.x >= 128) {if (tx < 64)  { if (data[tx] > data[tx + 64] ) {data[tx] = data[tx+64];  data_idx[tx] = data_idx[tx+64]; } } __syncthreads();}
	if (tx < 32) warpReduceMin<myType>(data,data_idx, tx);
	
	if (tx==0)
	{
		*min_idx = data_idx[0];
		*min     = data[0];
	}
};





// ********************************** DATA STRUCTURES *****************************************


/* ********************************** device functions ******************************** */

// Atomic exchange for double-width reals, emulated with the 64-bit atomicCAS:
// stores the bit pattern of `val` at *address and returns the previous value.
// Fix: the previous CAS payload was
//   __double_as_longlong(0.0f + __double_as_longlong(assumed))
// which ignored `val` entirely and round-tripped the OLD bits through an
// integer->double conversion — the "exchange" never stored the new value.
__device__ dreal atomicExch(dreal* address, dreal val)
{
	unsigned long long int* addr = (unsigned long long int*) address;
	unsigned long long int old = *addr, assumed;
	
	do
	{
		assumed = old;
		// try to replace the current contents with the bit pattern of val;
		// retry if another thread changed *addr since we read it
		old = atomicCAS(addr, assumed, __double_as_longlong(val));
	} 
	while (assumed != old);
	return __longlong_as_double(old);
}




// definitions

__global__ void cuCalculateP(matrix_type *A,real *b,dreal *P,dreal *x, dreal *y, dreal delta, integer M, integer N)
{

/*
	Preprocessing kernel. Computes P[q] = min_i b[i]/|A[i,q]| for every column
	q = 0..N-1 (A stored column-major with M rows), and initialises the
	iterates x[0..N-1] = 0 and y[i] = delta/b[i] (assumes b[i] != 0).
	call: <<< numBlocks,threadsPerBlock >>> with threadsPerBlock == 512
	      (must equal the shared `cache` size) and a power-of-two block size
	      (required by the reduction loop below).
	parallelization:  process matrix A: -> assign a block per column,
					                    -> for row elements assign thread in the block
*/	
	index bx,tx;
	index gID = threadIdx.x+blockIdx.x*blockDim.x;
	__shared__ dreal cache[512];	// per-block scratch for the min reduction; size must match blockDim.x
	dreal val;

	// grid-stride init of the primal iterate
	for(index tID=gID;tID<N;tID+=blockDim.x*gridDim.x)
	{
		x[tID] = 0.0;
	}

	// grid-stride init of the dual iterate
	for(index tID=gID;tID<M;tID+=blockDim.x*gridDim.x)
	{
		y[tID] = delta/b[tID];	
   		
	}
		
	for(bx = blockIdx.x; bx<N; bx += gridDim.x)
	{  // make sure the blocks scan all the columns
		cache[threadIdx.x] = INFTY;	// init. cache
		__syncthreads();
		for(tx=threadIdx.x;tx<M;tx += blockDim.x)
		{
			//val = dev_A[tx+bx*M]>0 ? tex1D(tex_b,tx)/dev_A[tx+bx*M] : INFTY;
			// A[i,q] == 0 yields +inf, which the running min simply ignores.
			// NOTE(review): fabsf is the float overload — if matrix_type is a
			// double-width type this truncates precision; confirm whether fabs
			// was intended.
			val = b[tx]/fabsf(A[tx+bx*M]);
			if(cache[threadIdx.x]>val) cache[threadIdx.x] = val;
			//printf("TH-%d: %f \n",tx,val);
		}
	   	
		__syncthreads();
		tx = threadIdx.x;	
		// logarithmic reduction to find min per column (block);
		// assumes blockDim.x is a power of two
		for (index i=blockDim.x>>1;i>0;i>>=1)
		{ 			
			if(tx < i)
			{
				if (cache[tx] > cache[tx+i])
				{
					cache[tx] = cache[tx+i];				
				}
			}
			__syncthreads();
		}
	
		// one thread per block assigns min to corresponding index of P	
		if (threadIdx.x==0) 
		{
			P[bx] = cache[0];
            // printf("bx-%d: %f\n",bx,P[bx]);
		}
	}
	
}; 

__global__ void cuCalculateR(matrix_type *A, real *c, dreal *y,dreal *R,index *R_idx, dreal *rho, index *q, integer M,integer N)
{
/*     
	Kernel computes the column "lengths" R[j] = (sum_i |A[i,j]| y[i]) / c[j],
	j = 0..N-1 (A column-major, M rows; assumes c[j] > 0). It also resets
	R_idx[j] = j and seeds *rho = R[0], *q = 0 for the subsequent min search.
	call: <<< numBlocks,numThreads >>> with numThreads == 256 (must equal the
	      shared `cache` size; reduceSum additionally needs a power of two).
	parallelization: -> assign a block per column
	                 -> for every row element assign a thread per block	

*/
    __shared__ dreal cache[256]; // per-block partial sums; size must match blockDim.x
	
	index tx,bx,tID;
	index gID = threadIdx.x+blockIdx.x*blockDim.x;	// global index
       		
	for (bx = blockIdx.x; bx<N; bx += gridDim.x)
	{	// use a grid to scan all the elements - shift blocks
	        cache[threadIdx.x] = 0.0; 
		__syncthreads();
		
		// each thread accumulates a strided partial sum of |A[i,bx]|*y[i]
		// NOTE(review): fabsf is the float overload — confirm matrix_type
		// before relying on full precision here.
		for (tx = threadIdx.x; tx < M; tx += blockDim.x) // loop until all the elements of the vector are processed
		{
			cache[threadIdx.x] += fabsf(A[tx+bx*M])*y[tx];			
			
		}
		// synchronize threads	
		__syncthreads();   
		// logarithmic reduction to find sum of elements		
		dreal sum = reduceSum(cache); // DEVICE function - logarithmic sum; valid result on thread 0 only		
		
		//printf("\nbl-%d: cache[%d]: %e",blockIdx.x,threadIdx.x,cache[threadIdx.x]);
		tx = threadIdx.x;
		if (tx == 0) 
		{
		    // printf("tex_c[%d]=%f\n",bx,tex1D(tex_c,bx));
			R[bx] = sum/c[bx];// assert(tex_c[bx]>0)
            // printf("R[%d]=%e\n",bx,R[bx]);			
		}	
		
		__syncthreads();
		
	} // end grid
	
	// set indices for finding min (identity permutation; cuFindRho reads these)
	for(tID = gID;tID<N;tID += blockDim.x*gridDim.x) 
	{
		  R_idx[tID] = tID;			  
	}	
	
	// only one thread per grid assigns intial values to rho,q.		
	if (gID==0)
	{
		(*rho) = R[0];
		(*q) = 0; 
		//printf("\ninit: q: %d  rho: %e",(*q),(*rho));
	}
	
	
// OK !	
}; // END ... 

// --------------------------------------------------------------------------------

__global__ void cuFindRho(dreal *R, index *R_idx,index *q,dreal *rho, integer N)
{
/*
	Finds rho = min_j R[j] and its index q = R_idx[argmin].
	parallelization: exactly ONE block of 1024 threads (shared arrays are
	sized 1024; reduceMin requires a power-of-two block).
	Fix: the previous version called __syncthreads() INSIDE the strided
	accumulation loop — when N is not a multiple of blockDim.x the threads
	execute the barrier a different number of times, which is undefined
	behaviour. Each thread only touches its own cache slot in that loop, so
	no barrier is needed there; a single barrier after the loop publishes the
	per-thread minima before the cross-thread reduction.
*/
	index tx = threadIdx.x;
	index gID;
	
	__shared__ dreal mins[1024];
	__shared__ index mins_idx[1024];
	
	// per-thread running minimum over a strided slice of R;
	// INFTY sentinel covers threads with no elements (N < blockDim.x)
	mins[tx]     = INFTY;
	mins_idx[tx] = tx;
	
	for(gID=tx;gID<N;gID+=blockDim.x)
	{
		if(mins[tx]>R[gID]) 
		{
			mins[tx] = R[gID];
			mins_idx[tx] = R_idx[gID];
		}
	}
	__syncthreads();	// publish per-thread minima before the block-wide reduction
	
	// thread 0 writes the result to *rho, *q
	reduceMin<dreal>(mins,rho,mins_idx,q);	
};


 __global__ void cuUpdatePrimalDual(matrix_type *A, real *b, real *c, dreal *x,dreal *y, dreal *P,dreal *R,  
                         index *min_idx, dreal *primal,dreal *dual, dreal *zcDual,dreal *rho,dreal eps, dreal delta, integer M)
{
/*
    primal-dual update: for the chosen column q = *min_idx the kernel updates
		y[i] <- y[i] * (1 + eps * P[q] * A[i,q] / b[i]),  i = 0..M-1
		x[q] <- x[q] + P[q]
		primal <- primal + c[q]*P[q]
		dual   <- dual   + eps*c[q]*P[q]*rho
	and mirrors the dual value into the zero-copy variable *zcDual once it
	reaches 1.0, which is the host's loop-stop signal.

	NOTE(review): the y-update uses A[i,q] with its sign, while P and R were
	built from fabsf(A) — confirm this asymmetry is intended for signed
	matrices. Likewise the original header formula divided by rho but the
	code multiplies by *rho and scales by eps; the code is taken as the
	source of truth here. Parameters R and delta are currently unused.

	call: <<< numBlocks, numThreads >>> (grid-stride over the M rows; the
	single-thread primal update runs on global thread 0).
*/	
    //   if(*dualValue<=1.0)
	{
	index q = *min_idx; 
	
	index gID = threadIdx.x + blockIdx.x * blockDim.x;  
	index tID;
  
	// dual update
	for (tID=gID;tID<M;tID+=gridDim.x*blockDim.x)
	{ // shift grid to cover all the elements of vector
		// id = tID+q*N;
		y[tID] *= ( 1.0 + eps*P[q]*(A[tID+q*M])/b[tID]); 
	}	
	// primal update (single writer: global thread 0)
	if(gID == 0)
	{
		//printf("\nPRIJE \nq: %d rho: %e \n-> primal: %e, dual: %e\n",q,(*rho),(*primalValue),(*dualValue));
		x[q]  += P[q];				
		(*primal) += (dreal)c[q]*P[q];		
		(*dual) += eps*(dreal)c[q]*P[q]*(*rho);
		if((*dual)>=1.0) *zcDual = *dual;	// signal the host loop to stop
		//printf("\nPOSLIJE \nq: %d rho: %e \n-> primal: %e, dual: %e\n",q,(*rho),(*primalValue),(*dualValue));
              
	}
     }
 } ; // END ...




// Scales x[0..N-1] by primalScale and y[0..N-1] by dualScale (grid-stride).
// NOTE(review): BOTH loops run over N elements, but at the call site x has n
// entries while y has only m — when n != m this either under-scales y or
// writes past the end of the device allocation (n > m). Callers must
// guarantee y holds at least N elements, or the kernel needs a separate
// length parameter for y.
__global__ void cuScalePrimalDual(dreal *x, dreal *y, dreal primalScale, dreal dualScale, integer N)
{
   index gID = threadIdx.x + blockIdx.x*blockDim.x;
   index tID;
   
   for(tID=gID; tID<N;tID+=gridDim.x*blockDim.x) x[tID] *= primalScale;
   for(tID=gID; tID<N;tID+=gridDim.x*blockDim.x) y[tID] *= dualScale;   
 
}; // END ...


// ******************************** DEVICE, HOST routines ****************************************


/*
	Host driver for the Garg-Koenemann packing-covering LP solver.
	A : m x n constraint matrix, column-major; b : m right-hand sides (> 0);
	c : n costs (> 0); x (length n) / y (length m) : on entry the initial
	iterates, on return the scaled primal/dual solution.
	Returns LP_FEAS when the achieved ratio dual/primal <= 1+omega, LP_INF
	otherwise.

	Fixes vs. the previous version:
	 - dev_c upload copied m elements instead of n (garbage costs / host OOB
	   read whenever m != n);
	 - dropped cudaHostAllocWriteCombined on the zero-copy stop flag: the host
	   READS *h_zcDual every iteration and WC memory is uncached for host reads;
	 - the final scaling is done on the host with the correct lengths (the old
	   cuScalePrimalDual launch scaled n entries of the m-length y — device
	   OOB write when n > m);
	 - dev_primal, dev_dual, rho, q and the pinned host buffer are now freed;
	 - the data-set report printed "cols: m" instead of n.
*/
LP_status cuGKPCSol(matrix_type *A, real *b, real *c, dreal *x, dreal *y, integer m, integer n, dreal omega)
{
	if(VERBOSE)
	{
		std::cout << std::endl << std::endl;
		std::cout << "--------------------------------------------------------------------------" << std::endl;
		std::cout << "              CUDA Garg-Koenemann packing-covering LP solver              " << std::endl;
		std::cout << "--------------------------------------------------------------------------" << std::endl;	
	}
	
	// CUDA prep: reset DEVICE, then force lazy context creation
	cudaDeviceReset();
	cudaFree(NULL);
	
	dreal *h_zcDual; // host side of the zero-copy dual value (loop-stop flag)
	
	Timer compute_time;
	compute_time.start();
	
	// GK parameters derived from the requested approximation ratio omega
	dreal eps   = 3.0/2.0 - sqrt(1.0+8.0/(omega+1.0))/2.0;		
	dreal delta = (1+eps)*pow(((1+eps)*m),(-1.0/eps));
	
	// initial primal-dual value
	dreal primal = 0.0;
	dreal dual   = m*delta;
	
	// scale factors (dualScale becomes 1/rho after the final rho computation)
	dreal primalScale = 1.0/(1.0-(log(delta)/log(1+eps)));
	dreal dualScale   = 1.0;
	
	// DEVICE pointers 
	matrix_type *dev_A;
	real *dev_b, *dev_c;
	dreal *dev_x, *dev_y, *P, *R;	// iterates and auxiliary arrays
	dreal *dev_primal, *dev_dual;	// primal and dual value on DEVICE
	dreal *rho;			// min of R
	index *R_idx;			// index tracking for the min search
	dreal *zcDual;			// DEVICE alias of the zero-copy variable
	index *q;			// index of min element on DEVICE
	
	// DEVICE memory allocation
	cudaMalloc((void **)&dev_A,m*n*sizeof(matrix_type));
	cudaMalloc((void **)&dev_b,m*sizeof(real));
	cudaMalloc((void **)&dev_c,n*sizeof(real));
	cudaMalloc((void **)&dev_x, n*sizeof(dreal) );
	cudaMalloc((void **)&dev_y, m*sizeof(dreal) );
	
	cudaMalloc((void **)&P, n*sizeof(dreal) );
	cudaMalloc((void **)&R, n*sizeof(dreal) ); 
	cudaMalloc((void **)&R_idx, n*sizeof(index)); 
	
	cudaMalloc((void **)&dev_primal,sizeof(dreal));
	cudaMalloc((void **)&dev_dual,sizeof(dreal));
	cudaMalloc((void **)&rho,sizeof(dreal));
	cudaMalloc((void **)&q,sizeof(index));
	
	// zero-copy stop flag: mapped only — the host reads it every iteration,
	// so write-combined memory (uncached host reads) would be a pessimisation
	cudaHostAlloc((void **)&h_zcDual,sizeof(dreal),cudaHostAllocMapped);
	*h_zcDual = dual;
	cudaHostGetDevicePointer(&zcDual,h_zcDual,0);
	
	// copy input from HOST to DEVICE (c has n entries, not m)
	cudaMemcpy(dev_A, A, m*n*sizeof(matrix_type), cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, m*sizeof(real), cudaMemcpyHostToDevice);
	cudaMemcpy(dev_c, c, n*sizeof(real), cudaMemcpyHostToDevice);
	
	// copy initial primal-dual solution
	cudaMemcpy(dev_x, x, n*sizeof(dreal), cudaMemcpyHostToDevice);
	cudaMemcpy(dev_y, y, m*sizeof(dreal), cudaMemcpyHostToDevice);
	
	// primal-dual value
	cudaMemcpy(dev_primal,&primal,sizeof(dreal),cudaMemcpyHostToDevice); 
	cudaMemcpy(dev_dual,&dual,sizeof(dreal),cudaMemcpyHostToDevice); 
	
	if(VERBOSE)
	{
		std::cout << "----------------------- parameters -------------------------------------" << std::endl;
		std::cout << "omega: " << omega << "\t eps: " << eps << "\t delta: " << delta << std::endl; 
	}
	
	// KERNEL preprocessing -- calculate vector P, initialise x and y
	if(VERBOSE) std::cout << "preprocessing step: KERNEL 01 ...";
	
	cuCalculateP<<< 2000,512 >>> (dev_A,dev_b,P,dev_x,dev_y,delta,m,n); 
	cudaDeviceSynchronize();
	
	if(VERBOSE) std::cout << "done." << std::endl;
	
	integer iter = 0;
	
	// iterative procedure
	if(VERBOSE) std::cout << "iterative procedure: kernel 2,3,4 ..." << std::endl;
	
	while (*h_zcDual<1.0) // stop condition: dualValue >= 1.0
	{ 	
		iter ++;
		// kernel 2: calculate vector R 
		cuCalculateR<<< 2000,256 >>> (dev_A,dev_c, dev_y,R,R_idx, rho,q,m,n);
		cudaDeviceSynchronize();
		
		// kernel 3: find pair (q,rho)
		cuFindRho<<< 1, 1024 >>>(R,R_idx,q,rho,n);
		cudaDeviceSynchronize();
		
		// kernel 4: update primal and dual solution; the sync below also makes
		// the zero-copy write to *h_zcDual visible to the host test above
		cuUpdatePrimalDual <<< 10, 512 >>> (dev_A, dev_b, dev_c, dev_x,dev_y,P,R,  
                         q, dev_primal,dev_dual, zcDual,rho,eps,delta,m);
		cudaDeviceSynchronize();
	}
	
	// find dual scaling factor: one more (R, rho) computation
	cuCalculateR<<< 2000,256 >>> (dev_A,dev_c, dev_y,R,R_idx, rho,q,m,n);
	cudaDeviceSynchronize();
	
	cuFindRho<<< 1, 1024 >>>(R,R_idx,q,rho,n);
	cudaDeviceSynchronize();
	
	if(VERBOSE) std::cout << "done." << std::endl;
	if(VERBOSE) std::cout << std::endl << "DEVICE status: " << cudaGetErrorString(cudaGetLastError()) << std::endl;
	
	if(VERBOSE) std::cout << "copying data from DEVICE to HOST and scaling ..." << std::endl;
	
	// copy results from DEVICE to HOST
	cudaMemcpy(&dualScale,rho,sizeof(dreal),cudaMemcpyDeviceToHost);
	cudaMemcpy(&primal,dev_primal,sizeof(dreal),cudaMemcpyDeviceToHost);
	cudaMemcpy(&dual,dev_dual,sizeof(dreal),cudaMemcpyDeviceToHost);
	cudaMemcpy(x,dev_x,n*sizeof(dreal),cudaMemcpyDeviceToHost);
	cudaMemcpy(y,dev_y,m*sizeof(dreal),cudaMemcpyDeviceToHost);
	
	// scale the primal-dual solution on the HOST with the correct lengths:
	// x has n entries, y has m entries
	dualScale = 1.0/dualScale;
	for (integer i=0;i<n;i++) x[i] *= primalScale;
	for (integer i=0;i<m;i++) y[i] *= dualScale;
	
	primal *= primalScale;
	dual   *= dualScale;
	
	if(VERBOSE) std::cout << "done." << std::endl;
	
	compute_time.stop();
	
	integer nz = countNZ(A,m*n);
	
	if(VERBOSE)
	{
		std::cout << "\nDATA SET: rows: " << m << ", cols: " << n << ", density: " << nz/(double)(n*m)*100<< "%" << std::endl;
		std::cout << "---------------------------------------------------------------" << std::endl;
		
		std::cout << "Number of iterations:                           | " << iter << std::endl;
		std::cout << "----------------------------------------------------------------" << std::endl;
		std::cout << "*** Compute time:                               | " << compute_time.secs() << std::endl;
		std::cout << "----------------------------------------------------------------" << std::endl;
		
		printf(" primal value: %f       \n\r dual value: %f     \n\r approx. ratio: %f \n",primal,dual,dual/primal);
		std::cout << "---------------------------------------------------------------------------------------" <<std::endl;
	}
	
	// release ALL device memory and the pinned host buffer
	cudaFree(dev_A);
	cudaFree(dev_b);
	cudaFree(dev_c);
	cudaFree(dev_x);
	cudaFree(dev_y);
	cudaFree(P);
	cudaFree(R);
	cudaFree(R_idx);
	cudaFree(dev_primal);
	cudaFree(dev_dual);
	cudaFree(rho);
	cudaFree(q);
	cudaFreeHost(h_zcDual);
	
	if(dual/primal<=1+omega) return LP_FEAS;
	else return LP_INF;
}
