#include "cudanumpy.h"
#include <math.h>
#include <stdlib.h>

__global__
void _row_major_to_column_major_dbl(double *in, double *out, const int x_dim, int y_dim)
{
	// Transpose copy: element tid of the column-major output is gathered
	// from the row-major input. One thread per element; launch with at
	// least x_dim*y_dim threads.
	const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if(tid < (x_dim*y_dim)){
		// C/CUDA integer division already truncates toward zero for
		// non-negative operands, so the original if/else emulation of
		// math.floor(tid/x_dim) was redundant — both branches computed
		// the same quotient.
		const int col = tid % x_dim; // column index in the input layout
		const int row = tid / x_dim; // row index in the input layout

		out[tid] = in[col * y_dim + row];
	}
}

__global__
void _column_major_to_row_major_dbl(double *in, double *out, const int x_dim, int y_dim)
{
	// Transpose copy (scatter form): element tid of the column-major input
	// is written to its row-major position in the output. One thread per
	// element; launch with at least x_dim*y_dim threads.
	const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if(tid < (x_dim*y_dim)){
		// Integer division truncates toward zero for non-negative
		// operands, so the original if/else floor() emulation was
		// redundant — both branches computed the same quotient.
		const int col = tid % x_dim;
		const int row = tid / x_dim;

		out[col * y_dim + row] = in[tid];
	}
}

__global__
void _pad_after_fft(cufftDoubleComplex *in, const int nelements, const int guard){
	// Mirror the first `guard` bins (excluding bin 0) into the tail of the
	// buffer as their complex conjugates: in[nelements-k] = conj(in[k]).
	// Deliberate off-by-one: thread 0 handles bin 1, so bin 0 is untouched.
	const int tid = blockDim.x * blockIdx.x + threadIdx.x + 1;
	if(tid <= guard){
		cufftDoubleComplex src = in[tid];
		src.y = -src.y; // conjugate: negate the imaginary part
		in[nelements - tid] = src;
	}
}

/* DOUBLE */
/**********/

// np.op.reduce
__global__
void _device_reduce_dbl(double *in, double *out, const int range, const int stride, const int nelements, const cuda_ops op)
{
	// Serial per-thread reduction: thread tid folds `range` logical elements
	// (spaced `stride` apart in memory) into one partial result, written to
	// out[tid]. out must hold ceil(nelements/range) entries.
	const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	// Integer ceil-division replaces ceil((float)n/(float)range), which can
	// round incorrectly for large element counts.
	if(tid < (nelements + range - 1) / range){
		double tmp = in[tid*range*stride]; //Initial value
		// For subtraction, every chunk after the first must negate its
		// leading element so partials combine as a - b - c - ...
		if(op == cuda_sub && tid > 0) //Slightly inefficient but necessary
			tmp *= -1;

		int i, start = tid*range+1, end = tid*range+range;

		for(i = start ; i < end && i < nelements ; i++){
			//As op is const, the compiler should be able to optimize the switch
			switch(op){
				case cuda_add:
					tmp += in[i*stride];
					break;
				case cuda_sub:
					tmp -= in[i*stride];
					break;
				case cuda_div:
					// Reduction over division is intentionally a no-op here.
					break;
				case cuda_mult:
					tmp *= in[i*stride];
					break;
				case cuda_max:
					if(in[i*stride] > tmp)
						tmp = in[i*stride];
					break;
				case cuda_min:
					if(in[i*stride] < tmp)
						tmp = in[i*stride];
					break;
			}
		}
		// The original called __syncthreads() here, inside this divergent
		// branch — undefined behaviour when not all threads of the block
		// reach it. It is also unnecessary: no shared memory is used and
		// out[tid] depends only on this thread's private accumulator.
		out[tid] = tmp;
	}
}

__global__
void _device_ufunc_m_dbl(double *out, double *A, const int range, 
			         const int nelements, const cuda_ops op)
{
	// Elementwise unary op on doubles: each thread processes `range`
	// consecutive elements starting at tid*range.
	const int tid = blockDim.x * blockIdx.x + threadIdx.x,
			  start = tid*range, 
			  end = tid*range+range;
	int i;
	for(i=start ; i<end && i<nelements ; i++){
		switch(op){ 
			case cuda_sq:
				out[i] = A[i] * A[i];
				break;
			case cuda_sqrt:
				out[i] = sqrt(A[i]);
				break;
			case cuda_abs:
				// fabs, not abs: C's abs() takes an int, which would
				// truncate the double argument before negating.
				out[i] = fabs(A[i]);
				break;
		}
	}
}

__global__
void _device_ufunc_ms_dbl(double *out, double *A, const double B, 
			   const int range, const int nelements, const cuda_ops op)
{
	// matrix-op-scalar on doubles: out[i] = A[i] <op> B for this thread's
	// chunk of `range` consecutive elements starting at tid*range.
	const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	const int first = tid * range;
	const int last = first + range;

	for(int idx = first; idx < last && idx < nelements; idx++){
		const double a = A[idx];
		switch(op){
			case cuda_add:  out[idx] = a + B;    break;
			case cuda_sub:  out[idx] = a - B;    break;
			case cuda_div:  out[idx] = a / B;    break;
			case cuda_mult: out[idx] = a * B;    break;
			case cuda_leq:  out[idx] = (a <= B); break; // 1.0 or 0.0
		}
	}
}

__global__
void _device_ufunc_sm_dbl(double *out, double *A, const double B, 
			   const int range, const int nelements, const cuda_ops op)
{
	// scalar-op-matrix on doubles: out[i] = B <op> A[i] for this thread's
	// chunk of `range` consecutive elements starting at tid*range.
	const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	const int first = tid * range;
	const int last = first + range;

	for(int idx = first; idx < last && idx < nelements; idx++){
		const double a = A[idx];
		switch(op){
			case cuda_add:  out[idx] = B + a; break;
			case cuda_sub:  out[idx] = B - a; break;
			case cuda_div:  out[idx] = B / a; break;
			case cuda_mult: out[idx] = B * a; break;
			case cuda_leq:
				// Preserved as-is: B >= a, i.e. a <= B, matching the
				// original's own comment.
				// NOTE(review): _device_ufunc_equality_sm_dbl evaluates
				// (B <= A[i]) for cuda_leq instead — confirm which operand
				// order the callers actually expect.
				out[idx] = (B >= a);
				break;
		}
	}
}

__global__
void _device_ufunc_mm_dbl(double *out, double *A, double *B, 
			   const int range, const int nelements, const cuda_ops op)
{
	// matrix-op-matrix on doubles: out[i] = A[i] <op> B[i] for this
	// thread's chunk of `range` consecutive elements starting at tid*range.
	const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	const int first = tid * range;
	const int last = first + range;

	for(int idx = first; idx < last && idx < nelements; idx++){
		switch(op){
			case cuda_add:  out[idx] = A[idx] + B[idx];    break;
			case cuda_sub:  out[idx] = A[idx] - B[idx];    break;
			case cuda_div:  out[idx] = A[idx] / B[idx];    break;
			case cuda_mult: out[idx] = A[idx] * B[idx];    break;
			case cuda_leq:  out[idx] = (A[idx] <= B[idx]); break; // 1.0 or 0.0
		}
	}
}


/* LONG */
/********/

// np.op.reduce
__global__
void _device_reduce_lng(long *in, long *out, const int range, const int stride, const int nelements, const cuda_ops op)
{
	// Serial per-thread reduction over longs: thread tid folds `range`
	// logical elements (spaced `stride` apart in memory) into one partial
	// result in out[tid]. out must hold ceil(nelements/range) entries.
	const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	// Integer ceil-division replaces ceil((float)n/(float)range), which can
	// round incorrectly for large element counts.
	if(tid < (nelements + range - 1) / range){
		long tmp = in[tid*range*stride]; //Initial value
		// For subtraction, every chunk after the first must negate its
		// leading element so partials combine as a - b - c - ...
		if(op == cuda_sub && tid > 0) //Slightly inefficient but necessary
			tmp *= -1;

		int i, start = tid*range+1, end = tid*range+range;

		for(i = start ; i < end && i < nelements ; i++){
			switch(op){
				case cuda_add:
					tmp += in[i*stride];
					break;
				case cuda_sub:
					tmp -= in[i*stride];
					break;
				case cuda_div:
					// Reduction over division is intentionally a no-op here.
					break;
				case cuda_mult:
					tmp *= in[i*stride];
					break;
				case cuda_max:
					if(in[i*stride] > tmp)
						tmp = in[i*stride];
					break;
				case cuda_min:
					if(in[i*stride] < tmp)
						tmp = in[i*stride];
					break;
			}
		}
		// The original called __syncthreads() here, inside this divergent
		// branch — undefined behaviour when not all threads of the block
		// reach it. It is also unnecessary: no shared memory is used and
		// out[tid] depends only on this thread's private accumulator.
		out[tid] = tmp;
	}
}

__global__
void _device_ufunc_m_lng(long *out, long *A, const int range, 
			         const int nelements, const cuda_ops op)
{
	// Elementwise unary op on longs: each thread processes `range`
	// consecutive elements starting at tid*range.
	const int tid = blockDim.x * blockIdx.x + threadIdx.x,
			  start = tid*range, 
			  end = tid*range+range;
	int i;
	for(i=start ; i<end && i<nelements ; i++){
		switch(op){ 
			case cuda_sq:
				out[i] = A[i] * A[i];
				break;
			case cuda_sqrt:
				// Truncating integer square root via double sqrt.
				out[i] = (long)sqrt((double)A[i]);
				break;
			case cuda_abs:
				// labs, not abs: C's abs() takes an int and would truncate
				// 64-bit long values before taking the absolute value.
				out[i] = labs(A[i]);
				break;
		}
	}
}

__global__
void _device_ufunc_ms_lng(long *out, long *A, const long B, 
			   const int range, const int nelements, const cuda_ops op)
{
	// matrix-op-scalar on longs: out[i] = A[i] <op> B for this thread's
	// chunk of `range` consecutive elements starting at tid*range.
	const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	const int first = tid * range;
	const int last = first + range;

	for(int idx = first; idx < last && idx < nelements; idx++){
		const long a = A[idx];
		switch(op){
			case cuda_add:  out[idx] = a + B;   break;
			case cuda_sub:  out[idx] = a - B;   break;
			case cuda_div:  out[idx] = a / B;   break;
			case cuda_mult: out[idx] = a * B;   break;
			case cuda_band: out[idx] = (a & B); break;
			case cuda_bor:  out[idx] = (a | B); break;
		}
	}
}

__global__
void _device_ufunc_sm_lng(long *out, long *A, const long B, 
			   const int range, const int nelements, const cuda_ops op)
{
	// scalar-op-matrix on longs: out[i] = B <op> A[i] for this thread's
	// chunk of `range` consecutive elements starting at tid*range.
	// (Bitwise and/or are commutative, so operand order is irrelevant there.)
	const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	const int first = tid * range;
	const int last = first + range;

	for(int idx = first; idx < last && idx < nelements; idx++){
		const long a = A[idx];
		switch(op){
			case cuda_add:  out[idx] = B + a;   break;
			case cuda_sub:  out[idx] = B - a;   break;
			case cuda_div:  out[idx] = B / a;   break;
			case cuda_mult: out[idx] = B * a;   break;
			case cuda_band: out[idx] = (a & B); break;
			case cuda_bor:  out[idx] = (a | B); break;
		}
	}
}

__global__
void _device_ufunc_mm_lng(long *out, long *A, long *B, 
			   const int range, const int nelements, const cuda_ops op)
{
	// matrix-op-matrix on longs: out[i] = A[i] <op> B[i] for this thread's
	// chunk of `range` consecutive elements starting at tid*range.
	const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	const int first = tid * range;
	const int last = first + range;

	for(int idx = first; idx < last && idx < nelements; idx++){
		switch(op){
			case cuda_add:  out[idx] = A[idx] + B[idx];   break;
			case cuda_sub:  out[idx] = A[idx] - B[idx];   break;
			case cuda_div:  out[idx] = A[idx] / B[idx];   break;
			case cuda_mult: out[idx] = A[idx] * B[idx];   break;
			case cuda_band: out[idx] = (A[idx] & B[idx]); break;
			case cuda_bor:  out[idx] = (A[idx] | B[idx]); break;
		}
	}
}

/* EQUALITY - LONG */
/*******************/
__global__
void _device_ufunc_equality_ms_lng(char *out, long *A, const long B, 
			   const int range, const int nelements, const cuda_ops op)
{
	// Elementwise comparison of long array A against scalar B; writes 0/1
	// per element. Each thread covers `range` consecutive elements.
	const int tid = blockDim.x * blockIdx.x + threadIdx.x,
			  start = tid*range, 
			  end = tid*range+range;
	int i;
	for(i=start ; i<end && i<nelements ; i++){
		switch(op){ 
			case cuda_eq:
				// Parenthesized: the original "(char) A[i] == B" bound the
				// cast to A[i], truncating it to char before comparing —
				// wrong for any value outside char range.
				out[i] = (char) (A[i] == B);
				break;
			case cuda_leq:
				out[i] = (char) (A[i] <= B);
				break;
		}
	}
}

__global__
void _device_ufunc_equality_sm_lng(char *out, long *A, const long B, 
			   const int range, const int nelements, const cuda_ops op)
{
	// Elementwise comparison with scalar B on the left-hand side; writes
	// 0/1 per element. Each thread covers `range` consecutive elements.
	const int tid = blockDim.x * blockIdx.x + threadIdx.x,
			  start = tid*range, 
			  end = tid*range+range;
	int i;
	for(i=start ; i<end && i<nelements ; i++){
		switch(op){ 
			case cuda_eq:
				// Parenthesized: the original "(char) A[i] == B" bound the
				// cast to A[i], truncating it to char before comparing —
				// wrong for any value outside char range.
				out[i] = (char) (A[i] == B);
				break;
			case cuda_leq:
				out[i] = (char) (B <= A[i]);
				break;
		}
	}
}

__global__
void _device_ufunc_equality_mm_lng(char *out, long *A, long *B, 
			   const int range, const int nelements, const cuda_ops op)
{
	// Elementwise comparison of long arrays A and B; writes 0/1 per
	// element. Each thread covers `range` consecutive elements.
	const int tid = blockDim.x * blockIdx.x + threadIdx.x,
			  start = tid*range, 
			  end = tid*range+range;
	int i;
	for(i=start ; i<end && i<nelements ; i++){
		switch(op){
			// Both cases parenthesized: a cast binds tighter than == / <=,
			// so the originals truncated A[i] to char before comparing.
			case cuda_eq:
				out[i] = (char) (A[i] == B[i]);
				break;
			case cuda_leq:
				out[i] = (char) (A[i] <= B[i]);
				break;
		}
	}
}

/* EQUALITY - DOUBLE */
/*********************/
__global__
void _device_ufunc_equality_ms_dbl(char *out, double *A, const double B, 
			   const int range, const int nelements, const cuda_ops op)
{
	// Elementwise comparison of double array A against scalar B; writes
	// 0/1 per element. Each thread covers `range` consecutive elements.
	const int tid = blockDim.x * blockIdx.x + threadIdx.x,
			  start = tid*range, 
			  end = tid*range+range;
	int i;
	for(i=start ; i<end && i<nelements ; i++){
		switch(op){ 
			case cuda_eq:
				// Parenthesized: the original "(char) A[i] == B" cast the
				// double A[i] to char before comparing, destroying the
				// value entirely.
				out[i] = (char) (A[i] == B);
				break;
			case cuda_leq:
				out[i] = (char) (A[i] <= B);
				break;
		}
	}
}

__global__
void _device_ufunc_equality_sm_dbl(char *out, double *A, const double B, 
			   const int range, const int nelements, const cuda_ops op)
{
	// Elementwise comparison with scalar B on the left-hand side; writes
	// 0/1 per element. Each thread covers `range` consecutive elements.
	const int tid = blockDim.x * blockIdx.x + threadIdx.x,
			  start = tid*range, 
			  end = tid*range+range;
	int i;
	for(i=start ; i<end && i<nelements ; i++){
		switch(op){ 
			case cuda_eq:
				// Parenthesized: the original "(char) A[i] == B" cast the
				// double A[i] to char before comparing, destroying the
				// value entirely.
				out[i] = (char) (A[i] == B);
				break;
			case cuda_leq:
				out[i] = (char) (B <= A[i]);
				break;
		}
	}
}

__global__
void _device_ufunc_equality_mm_dbl(char *out, double *A, double *B, 
			   const int range, const int nelements, const cuda_ops op)
{
	// Elementwise comparison of double arrays A and B; writes 0/1 per
	// element. Each thread covers `range` consecutive elements.
	const int tid = blockDim.x * blockIdx.x + threadIdx.x,
			  start = tid*range, 
			  end = tid*range+range;
	int i;
	for(i=start ; i<end && i<nelements ; i++){
		switch(op){
			// Both cases parenthesized: a cast binds tighter than == / <=,
			// so the originals cast the double A[i] to char before
			// comparing, destroying the value entirely.
			case cuda_eq:
				out[i] = (char) (A[i] == B[i]);
				break;
			case cuda_leq:
				out[i] = (char) (A[i] <= B[i]);
				break;
		}
	}
}

/* FLOAT */
/*********/
/* DEPRECATED
// Floats
__global__
void _device_reduce(float *in, float *out, const int range, const int stride, const int nelements, const cuda_ops op)
{
	const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	
	float tmp = in[tid*range*stride]; //Initial value
	if(op == cuda_sub && tid > 0) //Slightly inefficient but necessary
		tmp *= -1;
	
	int i, start = tid*range+1, end = tid*range+range;
		
	for(i = start ; i < end && i < nelements ; i++){
		//As op is const, the compiler should be able to optimize the switch
		switch(op){ 
			case cuda_add:
				tmp += in[i*stride];
				break;
			case cuda_sub:
				tmp -= in[i*stride];
				break;
			case cuda_div:
				break;
			case cuda_mult:
				tmp *= in[i*stride];
				break;
			case cuda_max:
				if(in[i*stride] > tmp)
					tmp = in[i*stride];
				break;
			case cuda_min:
				if(in[i*stride] < tmp)
					tmp = in[i*stride];
				break;
		}
	}
	
	out[tid] = tmp;
}

__global__
void _device_ufunc_m(float *out, float *A, const int range, 
			         const int nelements, const cuda_ops op)
{
	const int tid = blockDim.x * blockIdx.x + threadIdx.x,
			  start = tid*range, 
			  end = tid*range+range;
	int i;
	for(i=start ; i<end && i<nelements ; i++){
		switch(op){ 
			case cuda_sq:
				out[i] = A[i] * A[i];
				break;
		}
	}
}

__global__
void _device_ufunc_ms(float *out, float *A, const float B, 
			   const int range, const int nelements, const cuda_ops op)
{
	const int tid = blockDim.x * blockIdx.x + threadIdx.x,
			  start = tid*range, 
			  end = tid*range+range;
	int i;
	for(i=start ; i<end && i<nelements ; i++){
		switch(op){ 
			case cuda_add:
				out[i] = A[i] + B;
				break;
			case cuda_sub:
				out[i] = A[i] - B;
				break;
			case cuda_div:
				out[i] = A[i] / B;
				break;
			case cuda_mult:
				out[i] = A[i] * B;
				break;
			case cuda_leq:
				out[i] = A[i] <= B;
				break;
		}
	}
}

__global__
void _device_ufunc_sm(float *out, float *A, const float B, 
			   const int range, const int nelements, const cuda_ops op)
{
	const int tid = blockDim.x * blockIdx.x + threadIdx.x,
			  start = tid*range, 
			  end = tid*range+range;
	int i;
	for(i=start ; i<end && i<nelements ; i++){
		switch(op){ 
			case cuda_add:
				out[i] = B + A[i];
				break;
			case cuda_sub:
				out[i] = B - A[i];
				break;
			case cuda_div:
				out[i] = B / A[i];
				break;
			case cuda_mult:
				out[i] = B * A[i];
				break;
			case cuda_leq:
				out[i] = B >= A[i]; // A[i] <= B
				break;
		}
	}
}

__global__
void _device_ufunc_mm(float *out, float *A, float *B, 
			   const int range, const int nelements, const cuda_ops op)
{
	const int tid = blockDim.x * blockIdx.x + threadIdx.x,
			  start = tid*range, 
			  end = tid*range+range;
	int i;
	for(i=start ; i<end && i<nelements ; i++){
		switch(op){ 
			case cuda_add:
				out[i] = A[i] + B[i];
				break;
			case cuda_sub:
				out[i] = A[i] - B[i];
				break;
			case cuda_div:
				out[i] = A[i] / B[i];
				break;
			case cuda_mult:
				out[i] = A[i] * B[i];
				break;
			case cuda_leq:
				out[i] = A[i] <= B[i];
				break;
		}
	}
}
*/
