#define CUDAFUNC

#include <cutil_inline.h>
#include <cublas.h>
#include <cufft.h>
#include <math.h>
#include "cudanumpy.h"

//Including all the kernels
#include "cudafunc_kernel.cu"

int multiprocessorcount;
int warpsize;

// Abort the process if the CUDA runtime has a pending error.
// msg is a caller-supplied context string included in the diagnostic.
void checkCUDAError(const char *msg)
{
	cudaError_t status = cudaGetLastError();
	if (status != cudaSuccess) {
		fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
		exit(-1);
	}
}

// Translate a numpy ufunc name (e.g. "add", "multiply") into the
// matching cuda_ops value.  Unknown names are reported on stderr and
// mapped to cuda_unknown.
cuda_ops maptocudaop(char *npy_op)
{
	static const struct { const char *name; cuda_ops op; } table[] = {
		{"add",         cuda_add},
		{"subtract",    cuda_sub},
		{"multiply",    cuda_mult},
		{"divide",      cuda_div},
		{"square",      cuda_sq},
		{"sqrt",        cuda_sqrt},
		{"absolute",    cuda_abs},
		{"maximum",     cuda_max},
		{"minimum",     cuda_min},
		//Equality
		{"less_equal",  cuda_leq},
		{"equal",       cuda_eq},
		//Bit fiddling
		{"bitwise_and", cuda_band},
		{"bitwise_or",  cuda_bor},
	};
	
	for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (strcmp(npy_op, table[i].name) == 0)
			return table[i].op;
	
	fprintf(stderr, "Unsupported command: %s\n", npy_op); 
	return cuda_unknown;
}

 
/*
 * Print a human-readable description of a CUBLAS status code.
 *
 * Fix: diagnostics now go to stderr (the rest of this file reports
 * errors on stderr; these went to stdout) and an unrecognized code is
 * reported with its numeric value instead of being silently ignored.
 * CUBLAS_STATUS_SUCCESS produces no output, as before.
 */
void checkcublasstatus(cublasStatus status)
{
	switch(status){
		case CUBLAS_STATUS_SUCCESS:
			break; // nothing to report
		case CUBLAS_STATUS_NOT_INITIALIZED:
			fprintf(stderr, "CUBLAS library not initialized\n");
			break;
		case CUBLAS_STATUS_ALLOC_FAILED:
			fprintf(stderr, "resource allocation failed\n");
			break;
		case CUBLAS_STATUS_INVALID_VALUE:
			fprintf(stderr, "unsupported numerical value was passed to function\n");
			break;
		case CUBLAS_STATUS_ARCH_MISMATCH:
			fprintf(stderr, "function requires an architectural feature absent from the architecture of the device\n");
			break;
		case CUBLAS_STATUS_MAPPING_ERROR:
			fprintf(stderr, "access to GPU memory space failed\n");
			break;
		case CUBLAS_STATUS_EXECUTION_FAILED: 
			fprintf(stderr, "GPU program failed to execute\n");
			break;
		case CUBLAS_STATUS_INTERNAL_ERROR:
			fprintf(stderr, "an internal CUBLAS operation failed\n");
			break;
		default:
			fprintf(stderr, "unknown CUBLAS status code: %d\n", (int)status);
			break;
	}
}

/*
 * Check that at least one CUDA capable device is present and print the
 * name and compute capability of the fastest one.
 *
 * Returns 1 on success, -1 when no device is available or the runtime
 * query itself fails (previously the cudaGetDeviceCount return code
 * was ignored, so a broken driver looked like "deviceCount devices").
 */
extern "C"
int verify_device()
{
	int deviceCount = 0;
	cudaError_t err = cudaGetDeviceCount(&deviceCount);
	if( err != cudaSuccess )
	{
		fprintf(stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
		return -1;
	}
	if( deviceCount == 0 )
	{
		printf("No CUDA enabled devices available.\n");
		return -1;
	}
	
	cudaDeviceProp deviceProp;
	cudaGetDeviceProperties(&deviceProp, cutGetMaxGflopsDeviceId());
	printf ("Device \"%s\" found, compute capability %d.%d\n", deviceProp.name, deviceProp.major, deviceProp.minor);
	
	//multiprocessorcount = deviceProp.multiProcessorCount;
	//warpsize = deviceProp.warpSize;
	
	return 1;
}

// Block the host until all previously issued device work has finished.
// Fix: asynchronous kernel errors surface at this synchronization
// point, so they are now checked here (same pattern as free_host).
extern "C"
void sync_device(){
	cudaThreadSynchronize();
	checkCUDAError("sync_device");
}

// Allocate `size` bytes of page-locked (pinned) host memory, needed
// for fast/async host<->device transfers.
// Fix: on failure *ptr is cleared (callers can test for NULL) and the
// CUDA error string is included in the diagnostic.
extern "C"
void malloc_host(void **ptr, int size){	
	cudaError_t e = cudaMallocHost(ptr, size);
	if(e != cudaSuccess){
		*ptr = NULL; // do not leave a stale/garbage pointer behind
		fprintf(stderr, "malloc_host failed. size:%d (%s)\n", size, cudaGetErrorString(e));
	}
}

// Release memory obtained from malloc_host.  Pinned memory must be
// freed with cudaFreeHost, not free().
extern "C"
void free_host(void *ptr){
	cudaFreeHost(ptr);
	checkCUDAError("free_host");
}

// Create a CUDA stream, returning its handle through *s.
// NOTE(review): the handle is stored in an int.  This assumes a toolkit
// where cudaStream_t is integer-sized; on toolkits where cudaStream_t
// is a pointer type this truncates on 64-bit — confirm against the
// CUDA version this builds with.  Fixing it would change the extern "C"
// interface, so it is only flagged here.
extern "C"
void create_stream(int *s)
{
	cutilSafeCall(cudaStreamCreate(s));
}

// Destroy a stream previously created by create_stream.
// NOTE(review): same int-vs-cudaStream_t size assumption as
// create_stream — confirm for the toolkit in use.
extern "C"
void destroy_stream(int *s)
{
	cutilSafeCall(cudaStreamDestroy(*s));
}

/*
 * Allocate size*itemsize bytes of device memory for co, unless the
 * object already holds a device allocation.
 *
 * Fix: the status is only set to DEVICE_ALLOC when cudaMalloc actually
 * succeeded.  Previously a failed allocation still marked the object
 * as allocated, so later copies/kernels used an invalid pointer.
 * The plain assignment (not |=) is kept — it matches the original
 * status protocol used by the rest of the file.
 */
extern "C"
void alloc_ondevice(CudaObject *co, int size, int itemsize)
{
	if(!CudaObject_CHECKSTATUS(co, DEVICE_ALLOC)){
		cudaError_t e = cudaMalloc((void**) &(CudaObject_DEVICEDATA(co)), size*itemsize);
		
		if(e != cudaSuccess){
			fprintf(stderr, "Allocation error uid:%d size:%d itemsize:%d \n", 
					CudaObject_UID(co), size, itemsize);
			return; // leave status untouched on failure
		}
		
		CudaObject_STATUS(co) = DEVICE_ALLOC;
	}	
}

// Free co's device-side buffer (if one is allocated) and clear every
// status flag except DATA_ON_HOST, so a host copy stays usable.
extern "C"
void freedevicedata(CudaObject *co)
{
	if(!CudaObject_CHECKSTATUS(co, DEVICE_ALLOC))
		return;
	
	cutilSafeCall(cudaFree(CudaObject_DEVICEDATA(co)));
	CudaObject_STATUS(co) &= DATA_ON_HOST;
}

//Notice: Memory on the device has to be allocated previously
// Copy size*itemsize bytes from host buffer `data` into co's device
// buffer, starting at element `offset` on both sides.  Only runs when
// the device buffer is allocated and not already marked as current;
// the DATA_ON_DEVICE flag is intentionally NOT set here (callers
// manage it once all partial copies have been issued).
// NOTE(review): offset is applied through a double* cast, i.e. it is
// an element offset that assumes itemsize == sizeof(double) — confirm
// callers never pass a different itemsize.
// Single-element transfers fall back to a synchronous cudaMemcpy.
extern "C" 
void copy_todevice_async(char *data, CudaObject *co, int size, int itemsize, 
						 int offset, cudaStream_t stream=0)
{	
	if(CudaObject_CHECKSTATUS(co, DEVICE_ALLOC) && !CudaObject_CHECKSTATUS(co, DATA_ON_DEVICE))
	{
		if(size >1)
			cutilSafeCall(cudaMemcpyAsync(&(((double*)CudaObject_DEVICEDATA(co))[offset]), 
					&(((double*)data)[offset]), size*itemsize, 
					cudaMemcpyHostToDevice, stream));
		else
			cutilSafeCall(cudaMemcpy(&(((double*)CudaObject_DEVICEDATA(co))[offset]), 
					&(((double*)data)[offset]), size*itemsize, cudaMemcpyHostToDevice));
	}	
}

// Synchronously upload size*itemsize bytes from host buffer `data`
// into co's device buffer, allocating it first when needed.  Marks the
// object as holding valid device data; a no-op when the device copy is
// already current or the allocation did not happen.
extern "C" 
void copy_todevice(char *data, CudaObject *co, int size, int itemsize)
{	
	alloc_ondevice(co, size, itemsize);
	
	if(CudaObject_CHECKSTATUS(co, DATA_ON_DEVICE) ||
	   !CudaObject_CHECKSTATUS(co, DEVICE_ALLOC))
		return;
	
	cutilSafeCall(cudaMemcpy(CudaObject_DEVICEDATA(co), data, size*itemsize,
							 cudaMemcpyHostToDevice));
	CudaObject_STATUS(co) |= DATA_ON_DEVICE;
}

// Synchronously download size*itemsize bytes from co's device buffer
// into host buffer `data`.  Requires a valid device copy, skips the
// transfer when the host already holds the data, and sets the
// DATA_ON_HOST flag afterwards.
extern "C" 
void copy_fromdevice(char *data, CudaObject *co, int size, int itemsize)
{
	int device_valid = CudaObject_CHECKSTATUS(co, (DEVICE_ALLOC|DATA_ON_DEVICE));
	int host_valid   = CudaObject_CHECKSTATUS(co, (DATA_ON_HOST));
	if(!device_valid || host_valid)
		return;
	
	// Wait for any in-flight kernels still writing this buffer.
	cudaThreadSynchronize();
	
	cutilSafeCall(cudaMemcpy(data, CudaObject_DEVICEDATA(co), 
				  size*itemsize, cudaMemcpyDeviceToHost) );
	CudaObject_STATUS(co) |= DATA_ON_HOST;
}

// Tear down CUBLAS and release all CUDA resources held by this host
// thread; intended to be called once at program shutdown.
extern "C"
void shutdowncuda()
{	
	cublasStatus status = cublasShutdown();	
	if (CUBLAS_STATUS_SUCCESS != status) {
		checkcublasstatus(status);
        fprintf (stderr, "shutdownblas: Shutdown of CUBLAS failed.\n");
    }
    cudaThreadExit(); //Free all resources      
}

/*
 * Map a total thread count `ts` onto a 1-D launch configuration.
 *
 * *tx receives the threads-per-block (ts itself, capped at 256) and
 * *b the number of blocks (ceil(ts/256)).
 *
 * Fix: both outputs are now always written.  Previously *b was left
 * untouched when ts <= 256, silently relying on every caller
 * pre-initializing it to 1.  Block counts beyond the 65535 grid-x
 * limit are only reported on stderr, not corrected (as before).
 */
extern "C"
void calc_xy(int ts, int *tx, int *b){
	if(ts > 256){
		(*tx) = 256;
		(*b) = (ts + 255) / 256; // ceil(ts / 256)
	}else{
		(*tx) = ts;
		(*b) = 1;
	}
	if(*b > 65535)
		fprintf(stderr, "calc_xy: Too many blocks! %d\n", *b);
}


/* DOUBLE */
/**********/
/*
 * Double-precision FFT on device data, backed by CUFFT.
 *
 * op selects the transform: "C2C" (complex forward), "iC2C" (complex
 * inverse), "R2C" (real-to-complex forward), "C2R" (complex-to-real
 * inverse).  The checks use strstr, so op only needs to contain the
 * tag, and "iC2C" must be tested before "C2C" below (it is).
 * dims/nd give the transform shape; only 1-D and 2-D are supported,
 * anything else returns silently.
 */
extern "C"
void ufunc_fft_dbl(CudaObject *in, CudaObject *out, long *dims, int nd, char *op)
{	
	cufftType type;
	if(strstr(op, "C2C") != NULL){
		type = CUFFT_Z2Z;
	}else if(strstr(op, "R2C") != NULL){
		type = CUFFT_D2Z;
	}else if(strstr(op, "C2R") != NULL){
		type = CUFFT_Z2D;	
	}else return;
	
	// NOTE(review): plan-creation return codes are unchecked; a failed
	// plan makes the exec calls below fail silently — confirm intended.
	cufftHandle plan;
	if(nd == 1)
		cufftPlan1d(&plan, (int)dims[0], type, 1);
	else if(nd == 2)
		cufftPlan2d(&plan, (int)dims[0], (int)dims[1], type);
	else return;
		
	if(strstr(op, "iC2C") != NULL){
		cufftExecZ2Z(plan, (cufftDoubleComplex*)CudaObject_DEVICEDATA(in), 
				 (cufftDoubleComplex*)CudaObject_DEVICEDATA(out), 
				 CUFFT_INVERSE);
	}else if(strstr(op, "C2C") != NULL){
		cufftExecZ2Z(plan, (cufftDoubleComplex*)CudaObject_DEVICEDATA(in), 
				 (cufftDoubleComplex*)CudaObject_DEVICEDATA(out), 
				 CUFFT_FORWARD);
	}else if(strstr(op, "R2C") != NULL){//Forward		
		CUFFT_SAFE_CALL(cufftExecD2Z(plan, (cufftDoubleReal*)CudaObject_DEVICEDATA(in), 
				 (cufftDoubleComplex*)CudaObject_DEVICEDATA(out)));
		// CUFFT's D2Z only writes the non-redundant half of the
		// spectrum; this kernel presumably fills the remaining
		// dims[0]/2 entries of the full-size output — confirm against
		// _pad_after_fft in cudafunc_kernel.cu.
		if(nd == 1){
			int g =(int)floor((double)dims[0]/(double)2.0);
			int tx=g, ty=1, b=1;
			calc_xy(g, &tx, &b);
			dim3 threads(tx, ty), grid(b);
			
			cudaThreadSynchronize();
			_pad_after_fft<<< grid, threads >>>((cufftDoubleComplex*)CudaObject_DEVICEDATA(out), (int)dims[0], g);
			cutilCheckMsg("Kernel execution failed");
		}
	}else if(strstr(op, "C2R") != NULL)//Reverse
		CUFFT_SAFE_CALL(cufftExecZ2D(plan, (cufftDoubleComplex*)CudaObject_DEVICEDATA(in), 
				 (cufftDoubleReal*)CudaObject_DEVICEDATA(out)));
	cufftDestroy(plan);
}

//BLAS 3
/* see www.netlib.org/blas/dgemm.f
	C=mxn, A=mxk and B=kxn
	C = alpha * op(A) * op(B) + beta * C 

   Matrix product on row-major double matrices via cublasDgemm, which
   expects column-major storage: A and B are transposed into temporary
   buffers first and the result is transposed back into C.

   Fix: when a matrix has a dimension of 1 the transpose kernels were
   skipped entirely, so cublasDgemm read uninitialized tmp_a/tmp_b and
   (for degenerate C) the result never reached C.  For such shapes the
   row-major and column-major layouts coincide, so a plain
   device-to-device copy is now performed instead.
*/	
extern "C"
void ufunc_dot_dbl(CudaObject *A, CudaObject *B, CudaObject *C, int m, int n, int k)
{
	double alpha = 1.0, beta = 0.0;
	int lda = max(1, m); //Same as ldc
	int ldb = max(1, k);
	
	// Temporary column-major copies of A, B and the raw result.
	double *tmp_a, *tmp_b, *tmp_c;
	cutilSafeCall(cudaMalloc((void**) &(tmp_a), m*k*sizeof(double)));
	cutilSafeCall(cudaMalloc((void**) &(tmp_b), k*n*sizeof(double)));
	cutilSafeCall(cudaMalloc((void**) &(tmp_c), m*n*sizeof(double)));
	
	int tx=1, ty=1, b=1;
	calc_xy(m*k, &tx, &b);
	dim3 threads_a(tx, ty);
	dim3 grid_a(b);
	tx=1, ty=1, b=1;
	calc_xy(k*n, &tx, &b);
	dim3 threads_b(tx, ty);
	dim3 grid_b(b);
	tx=1, ty=1, b=1;
	calc_xy(m*n, &tx, &b);
	dim3 threads_c(tx, ty);
	dim3 grid_c(b);
	
	if(m > 1 && k > 1)
		_row_major_to_column_major_dbl<<<grid_a, threads_a>>>((double*)CudaObject_DEVICEDATA(A), tmp_a, m, k);
	else // degenerate shape: layouts agree, copy verbatim
		cutilSafeCall(cudaMemcpy(tmp_a, CudaObject_DEVICEDATA(A), m*k*sizeof(double), cudaMemcpyDeviceToDevice));
	if(k > 1 && n > 1)
		_row_major_to_column_major_dbl<<<grid_b, threads_b>>>((double*)CudaObject_DEVICEDATA(B), tmp_b, k, n);
	else
		cutilSafeCall(cudaMemcpy(tmp_b, CudaObject_DEVICEDATA(B), k*n*sizeof(double), cudaMemcpyDeviceToDevice));
	
	cublasInit();
	
	cublasDgemm('n','n', m, n, k, alpha, tmp_a, lda,
				tmp_b, ldb, beta, tmp_c, lda);
	
	cublasStatus status = cublasGetError();
	
	if(m > 1 && n > 1)
		_column_major_to_row_major_dbl<<<grid_c, threads_c>>>(tmp_c, (double*)CudaObject_DEVICEDATA(C), m, n);	
	else // degenerate result: copy straight into C
		cutilSafeCall(cudaMemcpy(CudaObject_DEVICEDATA(C), tmp_c, m*n*sizeof(double), cudaMemcpyDeviceToDevice));
	
	cutilSafeCall(cudaFree(tmp_a));
	cutilSafeCall(cudaFree(tmp_b));
	cutilSafeCall(cudaFree(tmp_c));
	
	if (status != CUBLAS_STATUS_SUCCESS) {
		checkcublasstatus(status);
		fprintf (stderr, "ufunc_dot_dbl: call to cublasDgemm failed\n");
	}
}

/*
 * Reduce nelements doubles from A (starting at element A_offset,
 * stepping A_stride elements between reads) with the numpy operator
 * named by op, writing the final scalar to host memory f_out.
 *
 * Strategy: each launch lets every thread fold up to RANGE inputs, so
 * the problem shrinks by a factor of RANGE per round; later rounds run
 * in-place on tmp until one value remains.  Round ordering is
 * guaranteed by launching all rounds on the same stream.
 * NOTE(review): for non-associative ops such as "subtract" a staged
 * reduction differs from a left-to-right fold — confirm callers only
 * reduce with associative operators here.
 */
extern "C"
void ufunc_reduce_dbl(CudaObject *A, int A_offset, int A_stride, 
				      char *op, void *f_out, int nelements, cudaStream_t stream=0)
{
	double *tmp;
	// Number of partial results the first round produces.
	int ts = (int)ceil((float)nelements / (float)RANGE);
	int tx=ts, ty=1, b=1;
	calc_xy(ts, &tx, &b);
	dim3 threads(tx, ty), grid(b);      
	
	cutilSafeCall(cudaMalloc((void**) &tmp, ts*sizeof(double)));	
	
	cuda_ops cop = maptocudaop(op);
	// First round: strided read from A, partials into tmp.
	_device_reduce_dbl<<< grid, threads, 0, stream >>>(
			&(((double*)CudaObject_DEVICEDATA(A))[A_offset]), tmp, RANGE, A_stride, nelements, cop);
	
	while(nelements>RANGE){		
		// Previous round left ceil(nelements/RANGE) partials in tmp.
		nelements = (int)ceil((float)nelements / (float)RANGE);
		ts = (int)ceil((float)nelements / (float)RANGE);
		tx=nelements, ty=1, b=1;
		calc_xy(ts, &tx, &b);
		dim3 threads(tx, ty); dim3 grid(b);

		//Doing it inplace		
		_device_reduce_dbl<<< grid, threads, 0, stream >>>(
					tmp, tmp, RANGE, 1, nelements, cop);
		cutilCheckMsg("Kernel execution failed");
	}	
	
	// Sync the stream so the copy below observes the final value.
	cutilSafeCall(cudaStreamSynchronize(stream));
	cutilSafeCall(cudaMemcpy(f_out, tmp, sizeof(double), cudaMemcpyDeviceToHost));
	cudaFree(tmp);
}

// Elementwise unary ufunc on doubles: res[i] = op(A[i]) for i in
// [0, size), starting at the given element offsets.  Launched on the
// default stream; marks res as holding valid device data.
extern "C"
void ufunc_m_dbl(CudaObject *res, CudaObject *A, int res_offset, 
			 int A_offset, char *op, int size){	
	int nthreads=size, nblocks=1;
	calc_xy(size, &nthreads, &nblocks);
	dim3 block(nthreads, 1);
	dim3 grid(nblocks);
	
	double *dst = &(((double*)CudaObject_DEVICEDATA(res))[res_offset]);
	double *src = &(((double*)CudaObject_DEVICEDATA(A))[A_offset]);
	
	_device_ufunc_m_dbl<<< grid, block >>>(dst, src, RANGE, size, maptocudaop(op));
	cutilCheckMsg("ufunc_m - Kernel execution failed");
	
	CudaObject_STATUS(res) |= DATA_ON_DEVICE;
}

// Mixed matrix/scalar ufunc on doubles.  With reverse == 0 computes
// res[i] = A[i] op b, otherwise res[i] = b op A[i], for i in [0, size).
// Runs on res' active stream; marks res as valid on the device.
extern "C"
void ufunc_ms_dbl(CudaObject *res, CudaObject *A, void *scalarb, 
			  int res_offset, int A_offset, char *op, int size, int reverse){	
	int nthreads=size, nblocks=1;
	calc_xy(size, &nthreads, &nblocks);
	dim3 block(nthreads, 1);
	dim3 grid(nblocks);
	
	double *dst = &(((double*)CudaObject_DEVICEDATA(res))[res_offset]);
	double *src = &(((double*)CudaObject_DEVICEDATA(A))[A_offset]);
	double scalar = *(double*)scalarb;
	cuda_ops cop = maptocudaop(op);
	
	if(reverse) // b op A
		_device_ufunc_sm_dbl<<< grid, block, 0, CudaObject_ACTIVESTREAM(res) >>>(
					dst, src, scalar, RANGE, size, cop);
	else // A op b
		_device_ufunc_ms_dbl<<< grid, block, 0, CudaObject_ACTIVESTREAM(res) >>>(
					dst, src, scalar, RANGE, size, cop);
	cutilCheckMsg("ufunc_ms - Kernel execution failed");
	
	CudaObject_STATUS(res) |= DATA_ON_DEVICE;
}

// Elementwise binary ufunc on two double arrays:
// res[i] = A[i] op B[i] for i in [0, size), at the given offsets.
// Runs on res' active stream; marks res as valid on the device.
extern "C"
void ufunc_mm_dbl(CudaObject *res, CudaObject *A, CudaObject *B, 
			  int res_offset, int A_offset, int B_offset, 
			  char *op, int size){				  
	int nthreads=size, nblocks=1;
	calc_xy(size, &nthreads, &nblocks);
	dim3 block(nthreads, 1);
	dim3 grid(nblocks);
	
	double *dst = &(((double*)CudaObject_DEVICEDATA(res))[res_offset]);
	double *lhs = &(((double*)CudaObject_DEVICEDATA(A))[A_offset]);
	double *rhs = &(((double*)CudaObject_DEVICEDATA(B))[B_offset]);
	
	_device_ufunc_mm_dbl<<< grid, block, 0, CudaObject_ACTIVESTREAM(res) >>>(
					dst, lhs, rhs, RANGE, size, maptocudaop(op));
	
	cutilCheckMsg("ufunc_mm_dbl - Kernel execution failed");
	CudaObject_STATUS(res) |= DATA_ON_DEVICE;
}


/* LONG */
/**********/
/*
 * Long-integer counterpart of ufunc_reduce_dbl: reduce nelements longs
 * from A (starting at element A_offset, stepping A_stride) with the
 * numpy operator named by op, writing the scalar result to host memory
 * f_out.  Rounds shrink the input by a factor of RANGE each, running
 * in-place on tmp; ordering between rounds comes from launching them
 * on the same stream.
 * NOTE(review): as with the double version, non-associative ops would
 * not match a left-to-right fold — confirm callers avoid them.
 */
extern "C"
void ufunc_reduce_lng(CudaObject *A, int A_offset, int A_stride, 
				      char *op, void *f_out, int nelements, cudaStream_t stream)
{
	long *tmp;

	// Number of partial results the first round produces.
	int ts = (int)ceil((float)nelements / (float)RANGE);
	
	int tx=ts, ty=1, b=1;
	calc_xy(ts, &tx, &b);
	dim3 threads(tx, ty);
    dim3 grid(b);
	
	cutilSafeCall(cudaMalloc((void**) &tmp, ts*sizeof(long)));
	
	cuda_ops cop = maptocudaop(op);
	// First round: strided read from A, partials into tmp.
	_device_reduce_lng<<< grid, threads, 0, stream >>>(
			&(((long*)CudaObject_DEVICEDATA(A))[A_offset]), tmp, RANGE, A_stride, nelements, cop);

	while(nelements>RANGE){ //Explicitely synchronize by using same stream
		nelements = (int)ceil((float)nelements / (float)RANGE);
		ts = (int)ceil((float)nelements / (float)RANGE);
		tx=nelements, ty=1, b=1;
		calc_xy(ts, &tx, &b);
		dim3 threads(tx, ty); dim3 grid(b);
		//Doing it inplace
		_device_reduce_lng<<< grid, threads, 0, stream >>>(
					tmp, tmp, RANGE, 1, nelements, cop);
		cutilCheckMsg("Kernel execution failed");
	}
	//I have to sync so I can copy back and free device mem
	cutilSafeCall(cudaStreamSynchronize(stream));	
	cutilSafeCall(cudaMemcpy(f_out, tmp, sizeof(long), cudaMemcpyDeviceToHost));	
	cudaFree(tmp);	
}

// Elementwise unary ufunc on longs: res[i] = op(A[i]) for i in
// [0, size), starting at the given element offsets.  Launched on the
// default stream; marks res as holding valid device data.
extern "C"
void ufunc_m_lng(CudaObject *res, CudaObject *A, int res_offset, 
			 int A_offset, char *op, int size){					 
	int nthreads=size, nblocks=1;
	calc_xy(size, &nthreads, &nblocks);
	dim3 block(nthreads, 1);
	dim3 grid(nblocks);
	
	long *dst = &(((long*)CudaObject_DEVICEDATA(res))[res_offset]);
	long *src = &(((long*)CudaObject_DEVICEDATA(A))[A_offset]);
	
	_device_ufunc_m_lng<<< grid, block >>>(dst, src, RANGE, size, maptocudaop(op));
	cutilCheckMsg("ufunc_m - Kernel execution failed");
	
	CudaObject_STATUS(res) |= DATA_ON_DEVICE;
}

// Mixed matrix/scalar ufunc on longs.  With reverse == 0 computes
// res[i] = A[i] op b, otherwise res[i] = b op A[i], for i in [0, size).
// Runs on res' active stream; marks res as valid on the device.
extern "C"
void ufunc_ms_lng(CudaObject *res, CudaObject *A, void *scalarb, 
			  int res_offset, int A_offset, char *op, int size, int reverse){
	int nthreads=size, nblocks=1;
	calc_xy(size, &nthreads, &nblocks);
	dim3 block(nthreads, 1);
	dim3 grid(nblocks);
	
	long *dst = &(((long*)CudaObject_DEVICEDATA(res))[res_offset]);
	long *src = &(((long*)CudaObject_DEVICEDATA(A))[A_offset]);
	long scalar = *(long*)scalarb;
	cuda_ops cop = maptocudaop(op);
	
	if(reverse) // b op A
		_device_ufunc_sm_lng<<< grid, block, 0, CudaObject_ACTIVESTREAM(res) >>>(
					dst, src, scalar, RANGE, size, cop);
	else // A op b
		_device_ufunc_ms_lng<<< grid, block, 0, CudaObject_ACTIVESTREAM(res) >>>(
					dst, src, scalar, RANGE, size, cop);
	
	cutilCheckMsg("ufunc_ms - Kernel execution failed");
	
	CudaObject_STATUS(res) |= DATA_ON_DEVICE;
}

// Elementwise binary ufunc on two long arrays:
// res[i] = A[i] op B[i] for i in [0, size), at the given offsets.
// Runs on res' active stream; marks res as valid on the device.
extern "C"
void ufunc_mm_lng(CudaObject *res, CudaObject *A, CudaObject *B, 
			  int res_offset, int A_offset, int B_offset, 
			  char *op, int size){				  
	int nthreads=size, nblocks=1;
	calc_xy(size, &nthreads, &nblocks);
	dim3 block(nthreads, 1);
	dim3 grid(nblocks);
	
	long *dst = &(((long*)CudaObject_DEVICEDATA(res))[res_offset]);
	long *lhs = &(((long*)CudaObject_DEVICEDATA(A))[A_offset]);
	long *rhs = &(((long*)CudaObject_DEVICEDATA(B))[B_offset]);
	
	_device_ufunc_mm_lng<<< grid, block, 0, CudaObject_ACTIVESTREAM(res) >>>(
					dst, lhs, rhs, RANGE, size, maptocudaop(op));
					
	cutilCheckMsg("ufunc_mm_lng - Kernel execution failed");
	CudaObject_STATUS(res) |= DATA_ON_DEVICE;
}

/* EQUALITY - DOUBLE */
/*********************/
/*
 * Mixed array/scalar comparison on doubles: with reverse == 0 computes
 * res[i] = A[i] op b, otherwise res[i] = b op A[i], storing results as
 * a char (boolean) array.  Runs on res' active stream and marks res as
 * holding valid device data.
 *
 * Consistency fix: device pointers are accessed through the
 * CudaObject_DEVICEDATA macro like everywhere else in this file,
 * instead of reaching into ->devicedata directly.
 */
extern "C"
void ufunc_equality_ms_dbl(CudaObject *res, CudaObject *A, void *scalarb, 
			  int res_offset, int A_offset, char *op, int size, int reverse){
				  
	int tx=size, ty=1, b=1;
	calc_xy(size, &tx, &b);
	dim3 threads(tx, ty);
	dim3 grid(b);
	
	if(reverse) // b op A
		_device_ufunc_equality_sm_dbl<<< grid, threads, 0, CudaObject_ACTIVESTREAM(res) >>>(
					&(((char*)CudaObject_DEVICEDATA(res))[res_offset]), 
					&(((double*)CudaObject_DEVICEDATA(A))[A_offset]), *(double*)scalarb,
					RANGE, size, maptocudaop(op));
	else // A op b
		_device_ufunc_equality_ms_dbl<<< grid, threads, 0, CudaObject_ACTIVESTREAM(res) >>>(
					&(((char*)CudaObject_DEVICEDATA(res))[res_offset]), 
					&(((double*)CudaObject_DEVICEDATA(A))[A_offset]), *(double*)scalarb,
					RANGE, size, maptocudaop(op));
					
	cutilCheckMsg("ufunc_ms - Kernel execution failed");
	
	CudaObject_STATUS(res) |= DATA_ON_DEVICE;
}

// Elementwise comparison of two double arrays: writes op(A[i], B[i])
// as a char (boolean) into res for i in [0, size).  Runs on res'
// active stream; marks res as valid on the device.
extern "C"
void ufunc_equality_mm_dbl(CudaObject *res, CudaObject *A, CudaObject *B, 
			  int res_offset, int A_offset, int B_offset, 
			  char *op, int size){
				  
	int nthreads=size, nblocks=1;
	calc_xy(size, &nthreads, &nblocks);
	dim3 block(nthreads, 1);
	dim3 grid(nblocks);
	
	char *dst = &(((char*)CudaObject_DEVICEDATA(res))[res_offset]);
	double *lhs = &(((double*)CudaObject_DEVICEDATA(A))[A_offset]);
	double *rhs = &(((double*)CudaObject_DEVICEDATA(B))[B_offset]);
	
	_device_ufunc_equality_mm_dbl<<< grid, block, 0, CudaObject_ACTIVESTREAM(res) >>>(
				dst, lhs, rhs, RANGE, size, maptocudaop(op));
	
	cutilCheckMsg("ufunc_mm - Kernel execution failed");
	CudaObject_STATUS(res) |= DATA_ON_DEVICE;
}


/* EQUALITY - LONG */
/*********************/
/*
 * Mixed array/scalar comparison on longs: with reverse == 0 computes
 * res[i] = A[i] op b, otherwise res[i] = b op A[i], storing results as
 * a char (boolean) array.  Runs on res' active stream and marks res as
 * holding valid device data.
 *
 * Consistency fix: device pointers are accessed through the
 * CudaObject_DEVICEDATA macro like everywhere else in this file,
 * instead of reaching into ->devicedata directly.
 */
extern "C"
void ufunc_equality_ms_lng(CudaObject *res, CudaObject *A, void *scalarb, 
			  int res_offset, int A_offset, char *op, int size, int reverse){
				  
	int tx=size, ty=1, b=1;
	calc_xy(size, &tx, &b);
	dim3 threads(tx, ty);
	dim3 grid(b);
	
	if(reverse) // b op A
		_device_ufunc_equality_sm_lng<<< grid, threads, 0, CudaObject_ACTIVESTREAM(res) >>>(
					&(((char*)CudaObject_DEVICEDATA(res))[res_offset]), 
					&(((long*)CudaObject_DEVICEDATA(A))[A_offset]), *(long*)scalarb,
					RANGE, size, maptocudaop(op));
	else // A op b
		_device_ufunc_equality_ms_lng<<< grid, threads, 0, CudaObject_ACTIVESTREAM(res) >>>(
					&(((char*)CudaObject_DEVICEDATA(res))[res_offset]), 
					&(((long*)CudaObject_DEVICEDATA(A))[A_offset]), *(long*)scalarb,
					RANGE, size, maptocudaop(op));
					
	cutilCheckMsg("ufunc_ms - Kernel execution failed");
	
	CudaObject_STATUS(res) |= DATA_ON_DEVICE;
}

// Elementwise comparison of two long arrays: writes op(A[i], B[i])
// as a char (boolean) into res for i in [0, size).  Runs on res'
// active stream; marks res as valid on the device.
extern "C"
void ufunc_equality_mm_lng(CudaObject *res, CudaObject *A, CudaObject *B, 
			  int res_offset, int A_offset, int B_offset, 
			  char *op, int size){
				  
	int nthreads=size, nblocks=1;
	calc_xy(size, &nthreads, &nblocks);
	dim3 block(nthreads, 1);
	dim3 grid(nblocks);
	
	char *dst = &(((char*)CudaObject_DEVICEDATA(res))[res_offset]);
	long *lhs = &(((long*)CudaObject_DEVICEDATA(A))[A_offset]);
	long *rhs = &(((long*)CudaObject_DEVICEDATA(B))[B_offset]);
	
	_device_ufunc_equality_mm_lng<<< grid, block, 0, CudaObject_ACTIVESTREAM(res) >>>(
				dst, lhs, rhs, RANGE, size, maptocudaop(op));
	
	cutilCheckMsg("ufunc_mm - Kernel execution failed");
	CudaObject_STATUS(res) |= DATA_ON_DEVICE;
}

/* FLOAT */
/*********/
/* DEPRECATED
extern "C"
void ufunc_reduce(CudaObject *A, int A_offset, int A_stride, 
				  char *op, float *f_out, int nelements)
{		
	float *tmp, *res;	

	int ts = nelements / RANGE;
	if(nelements % RANGE != 0)
		ts++;
	
	int tx=ts, ty=1, b=1;
	calc_xy(ts, &tx, &b);
	dim3 threads(tx, ty);
    dim3 grid(b);
	
	//printf("nelements=%d ts=%d tx=%d b=%d \n", nelements, ts, tx, b);
	
	cutilSafeCall(cudaMalloc((void**) &tmp, ts*sizeof(float)));	
	cutilSafeCall(cudaMalloc((void**) &res, sizeof(float)));
	
	cuda_ops cop = maptocudaop(op);
	_device_reduce<<< grid, threads >>>(&(((float*)A->devicedata)[A_offset]), tmp, RANGE, A_stride, nelements, cop);
	cudaThreadSynchronize();
	if(cop == cuda_sub) cop = cuda_add;//1-2-3-4-5-6 == (1-2-3)+(-4-5-6)
	_device_reduce<<< 1, 1 >>>(tmp, res, ts, 1, ts, cop);
	
	cutilCheckMsg("Kernel execution failed");
	
	cutilSafeCall(cudaMemcpy(f_out, res, sizeof(float),
					  cudaMemcpyDeviceToHost) );
	
	cudaFree(res);cudaFree(tmp);
}

extern "C"
void ufunc_m(CudaObject *res, CudaObject *A, int res_offset, 
			 int A_offset, char *op, int size){	
	//printf("ufunc_ms\n");
//	int RANGE = 10;	
	int tx=size, ty=1, b=1;
	if(size > 128){
		tx=128, ty=1;
		b = size / 128;
		if(size % 128 != 0)
			b++;			
	}	
	dim3 threads(tx, ty);
    dim3 grid(b);
        
	_device_ufunc_m<<< grid, threads >>>(
					&(((float*)res->devicedata)[res_offset]), 
					&(((float*)A->devicedata)[A_offset]),
					RANGE, size, maptocudaop(op));	
	//cutilCheckMsg( strcat("ufunc_m - Kernel execution failed, op=", op));
	cutilCheckMsg("ufunc_m - Kernel execution failed");
	
	CudaObject_STATUS(res) |= DATA_ON_DEVICE;
}

extern "C"
void ufunc_ms(CudaObject *res, CudaObject *A, float *scalarb, 
			  int res_offset, int A_offset, char *op, int size, int reverse){	
	//printf("ufunc_ms\n");
	//int range = 10;	
	int tx=size, ty=1, b=1;
	if(size > 128){
		tx=128, ty=1;
		b = size / 128;
		if(size % 128 != 0)
			b++;			
	}	
	dim3 threads(tx, ty);
    dim3 grid(b);
    
	if(reverse) // b op A
		_device_ufunc_sm<<< grid, threads >>>(
					&(((float*)res->devicedata)[res_offset]), 
					&(((float*)A->devicedata)[A_offset]), *scalarb,
					RANGE, size, maptocudaop(op));
	else // A op b
		_device_ufunc_ms<<< grid, threads >>>(
					&(((float*)res->devicedata)[res_offset]), 
					&(((float*)A->devicedata)[A_offset]), *scalarb,
					RANGE, size, maptocudaop(op));
	//cutilCheckMsg( strcat("ufunc_ms - Kernel execution failed, op=", op));
	cutilCheckMsg("ufunc_ms - Kernel execution failed");
	
	CudaObject_STATUS(res) |= DATA_ON_DEVICE;
}

extern "C"
void ufunc_mm(CudaObject *res, CudaObject *A, CudaObject *B, 
			  int res_offset, int A_offset, int B_offset, 
			  char *op, int size, int reverse){
	//printf("ufunc_mm\n");
	//int range = 10;	
	int tx=size, ty=1, b=1;
	if(size > 128){
		tx=128, ty=1;
		b = size / 128;
		if(size % 128 != 0)
			b++;			
	}	
	dim3 threads(tx, ty);
    dim3 grid(b);
    
	if(reverse) // b op A
		_device_ufunc_mm<<< grid, threads >>>(//(float*)res->devicedata, (float*)B->devicedata, (float*)A->devicedata, 
					&(((float*)res->devicedata)[res_offset]), 
					&(((float*)B->devicedata)[B_offset]), 
					&(((float*)A->devicedata)[A_offset]), 
					RANGE, size, maptocudaop(op));
	else // A op b
		_device_ufunc_mm<<< grid, threads >>>(
					&(((float*)res->devicedata)[res_offset]), 
					&(((float*)A->devicedata)[A_offset]), 
					&(((float*)B->devicedata)[B_offset]), 
					RANGE, size, maptocudaop(op));
	//cutilCheckMsg( strcat("ufunc_mm - Kernel execution failed, op=", op));
	cutilCheckMsg("ufunc_mm - Kernel execution failed");
	CudaObject_STATUS(res) |= DATA_ON_DEVICE;
}*/
