#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

#ifndef CBLAS_HEADER
#define CBLAS_HEADER "../blasdot/cblas.h"
#endif
#include CBLAS_HEADER

#ifdef DNPY_TIME
#include <sys/time.h>
#endif

//Pointer to the python module who has the ufunc operators.
static PyObject *ufunc_module;

static int co_uid_count = 1; //Unique identification counter

static int cudaobject_uids[NUMBER_OF_CUDAARRAYS];
static CudaObject cudaobjects[NUMBER_OF_CUDAARRAYS];

//Find stored CudaObject and return it
static 
CudaObject *cnumpy_get_cudaobject(int uid)
{
        
    int i;
    for(i=0; i < NUMBER_OF_CUDAARRAYS; i++)
        if(cudaobject_uids[i] == uid)
            return &cudaobjects[i];
	
	fprintf(stderr, "cnumpy_get_cudaobject: NUMBER_OF_CUDAARRAYS is exceeded uid=%d\n", uid);
    return NULL;
}

//Nice printing of PyArrayObject's
static 
void printArray(PyArrayObject *obj)
{
	int i;
	
	printf("flags %d = ", PyArray_FLAGS(obj));
	if(PyArray_CHKFLAGS(obj, NPY_C_CONTIGUOUS))
		printf("NPY_C_CONTIGUOUS ");
	if(PyArray_CHKFLAGS(obj, NPY_F_CONTIGUOUS))
		printf("NPY_F_CONTIGUOUS ");
	if(PyArray_CHKFLAGS(obj, NPY_OWNDATA))
		printf("NPY_OWNDATA ");
	if(PyArray_CHKFLAGS(obj, NPY_ALIGNED))
		printf("NPY_ALIGNED ");
	if(PyArray_CHKFLAGS(obj, NPY_WRITEABLE))
		printf("NPY_WRITEABLE ");
	if(PyArray_CHKFLAGS(obj, NPY_UPDATEIFCOPY))
		printf("NPY_UPDATEIFCOPY");
	
	printf("\n");
	
	printf("PyArray_IsZeroDim     =%s ", PyArray_IsZeroDim(obj)?"T":"F");
	printf("PyArray_CheckScalar=%s\n",PyArray_CheckScalar(obj)?"T":"F");
	printf("PyArray_IsPythonScalar=%s ", PyArray_IsPythonScalar(obj)?"T":"F");
	printf("PyArray_IsAnyScalar=%s\n",PyArray_IsAnyScalar(obj)?"T":"F");
	
	printf("PyArray_ISWRITEABLE   =%s ", PyArray_ISWRITEABLE(obj)?"T":"F");
	printf("PyArray_ISALIGNED  =%s ", PyArray_ISALIGNED(obj)?"T":"F");
	printf("PyArray_ISBEHAVED=%s\n", PyArray_ISBEHAVED(obj)?"T":"F");
	
	printf("PyArray_NDIM=%d PyArray_DIM=[", PyArray_NDIM(obj));
	if(PyArray_NDIM(obj)<NPY_MAXDIMS)
		for(i=0;i<PyArray_NDIM(obj);i++)
			printf("%d(%d),",PyArray_DIM(obj, i), PyArray_STRIDE(obj, i));
	printf("]\n");	
	
	printf("SIZE=%d ", PyArray_Size(obj));
	printf("ITEMSIZE=%d ", PyArray_ITEMSIZE(obj));	
	printf("TYPE=%d\n", PyArray_TYPE(obj));
	if(!PyArray_DATA(obj))
		printf("DATA == NULL ");
	if(!PyArray_BASE(obj))
		printf("BASE == NULL ");
	
	if(PyArray_CUDAUID(obj)>0){
		CudaObject *co = cnumpy_get_cudaobject(PyArray_CUDAUID(obj));
		if(!co->ufuncs) printf("CudaObject->ufuncs == NULL ");
		printf("CudaObject_STATUS=%d ", CudaObject_STATUS(co));
	}
	printf("cudauid = %d\n", PyArray_CUDAUID(obj));
	printf("-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
}

/*
 * PUBLIC FUNCTIONS
 */
static void
cnumpy_init(void){	       
    
    srand((unsigned)(time(NULL)));
    if(verify_device())
		return 1;
	
	PyErr_SetString(PyExc_RuntimeError,
						"No CUDA enabled devices available.\n");
	
	return -1;
}

static void
cnumpy_exit(void){
    
	int i, l=0;
    for(i=0; i < DNPY_MAX_NARRAYS; i++)
        if(cudaobject_uids[i] != 0){
            l++;
            printf("leak uid:%d\n", cudaobject_uids[i]);
		}
    if(l)
		printf("Number of leaks at shutdown are %d\n", l);
    
	shutdowncuda();
}

static void
cnumpy_destroy_cudaobject(int uid)
{
	int i;
    for(i=0; i < DNPY_MAX_NARRAYS; i++)
        if(cudaobject_uids[i] == uid){
			if(CudaObject_DECREF(&cudaobjects[i]) <= 0){
				freedevicedata(&cudaobjects[i]);
				if(cudaobjects[i].ufuncs != NULL)
					free(cudaobjects[i].ufuncs);
				cudaobject_uids[i] = 0;
				return;
			}
		}
}

static
int cnumpy_incref(uid){
	CudaObject *co = cnumpy_get_cudaobject(uid);
	
	return CudaObject_INCREF(co);
}

static
int cnumpy_create_cudaobject(int dtype){
	CudaObject *co = (CudaObject *)malloc(sizeof(CudaObject));
	CudaObject_STATUS(co) = DATA_ON_HOST;
	CudaObject_UID(co) = co_uid_count++;
	CudaObject_REFERENCECOUNT(co) = 1;	
	co->ufuncs = malloc(sizeof(DeviceUniFuncs));
	
	if(PyTypeNum_ISFLOAT(dtype)){
		CudaObject_UFUNCS(co)->ufunc_reduce_fptr = ufunc_reduce_dbl;
		CudaObject_UFUNCS(co)->ufunc_m_fptr  = ufunc_m_dbl;
		CudaObject_UFUNCS(co)->ufunc_ms_fptr = ufunc_ms_dbl;
		CudaObject_UFUNCS(co)->ufunc_mm_fptr = ufunc_mm_dbl;
		CudaObject_UFUNCS(co)->ufunc_dot_fptr = ufunc_dot_dbl;
	}
	else if(PyTypeNum_ISINTEGER(dtype)){
		CudaObject_UFUNCS(co)->ufunc_reduce_fptr = ufunc_reduce_lng;
		CudaObject_UFUNCS(co)->ufunc_m_fptr  = ufunc_m_lng;
		CudaObject_UFUNCS(co)->ufunc_ms_fptr = ufunc_ms_lng;
		CudaObject_UFUNCS(co)->ufunc_mm_fptr = ufunc_mm_lng;
	}
	
	int i;
    for(i=0; i < DNPY_MAX_NARRAYS; i++)
        if(cudaobject_uids[i] == 0)
        {
            memcpy(&cudaobjects[i], co, sizeof(CudaObject)); 
            cudaobject_uids[i] = CudaObject_UID(co);
            break;
        }
	free(co);
	return co_uid_count-1;//CudaObject_UID(co);
}

static void
cnumpy_cudaarray_getitem(PyArrayObject *obj)
{
    CudaObject *co = cnumpy_get_cudaobject(PyArray_CUDAUID(obj));
	copy_fromdevice(PyArray_DATA(obj), co, 
					PyArray_Size(obj), 
					PyArray_ITEMSIZE(obj));
} /* cnumpy_cudaarray_getitem */


/*NUMPY_API
 *===================================================================
 * Public
 * C = A * B.
 * All elements in C must be zero.
 */
static void
cnumpy_matrix_multiplication(PyArrayObject *A, PyArrayObject *B, PyArrayObject *C)
{		
	if(PyArray_CUDAUID(A) < 1)
		printf("A not cuda\n");
	if(PyArray_CUDAUID(B) < 1)
		printf("B not cuda\n");
	if(PyArray_CUDAUID(C) < 1)
		printf("C not cuda\n");  
	
	CudaObject *coA = cnumpy_get_cudaobject(PyArray_CUDAUID(A)),
			   *coB = cnumpy_get_cudaobject(PyArray_CUDAUID(B)),
			   *coC = cnumpy_get_cudaobject(PyArray_CUDAUID(C));
	
	int m,n,k; //C=m×n, A=m×k and B=k×n
	
	m = PyArray_DIM(A, 0);
	n = PyArray_DIM(B, 1);
	k = PyArray_DIM(A, 1);
	
	copy_todevice(PyArray_DATA(A), coA, PyArray_Size(A), PyArray_ITEMSIZE(A));
	copy_todevice(PyArray_DATA(B), coB, PyArray_Size(B), PyArray_ITEMSIZE(B));	
	alloc_ondevice(coC, PyArray_Size(C), PyArray_ITEMSIZE(C));
	
	ufunc_dot_dbl(coA, coB, coC, m, n, k);
	CudaObject_STATUS(coC) = (DEVICE_ALLOC|DATA_ON_DEVICE);
}

/*NUMPY_API
 * ===================================================================
 * Public
 * Registrate cblas. Used in _dotblas.c
 */
static void
cnumpy_reg_cblas(void *dgemm, void *sgemm, void *zgemm, void *cgemm)
{
    cblas_dgemm_func = dgemm;
    cblas_sgemm_func = sgemm;
    cblas_zgemm_func = zgemm;
    cblas_cgemm_func = cgemm;
} /* cnumpy_reg_cblas */

//Universal functions iterator
static        
int ufunc(PyArrayObject *A, PyArrayObject *B,  PyArrayObject *res,
			int *dima, int *dimb, int ndim, int currentdim, int A_offset, 
			int B_offset, int res_offset, char *op, CudaObject **colist){
	//Copying async to the device when reaching second to last dimension
	if(currentdim >= ndim-2){
		int size = dima[currentdim]*dima[currentdim+1];		
		copy_todevice_async(PyArray_DATA(A), colist[0], size, PyArray_ITEMSIZE(A), A_offset, CudaObject_ACTIVESTREAM(colist[2]));
		
		size = dimb[currentdim]*dimb[currentdim+1];
		copy_todevice_async(PyArray_DATA(B), colist[1], size, PyArray_ITEMSIZE(B), B_offset, CudaObject_ACTIVESTREAM(colist[2]));
	}
	
	int i, j, r, maxndim = max(dima[currentdim], dimb[currentdim]),
	    res_tmp_offset, A_tmp_offset, B_tmp_offset;
	float scalar;
	for(i=0 ; i < maxndim ; i++){
		if(currentdim >= ndim-2){ //Stopping at second to last dim
			res_tmp_offset = res_offset+i*PyArray_DIM(res, currentdim+1);			
			//Calculate
			if(dima[currentdim] == dimb[currentdim]){ //A and B are vectors
				if(dima[currentdim+1] == dimb[currentdim+1]){ //A and B are vectors in the last dim
					CudaObject_UFUNCS(colist[2])->ufunc_mm_fptr(colist[2], colist[0], 
							 colist[1], res_tmp_offset, 
							 A_offset, B_offset, 
							 op, dimb[currentdim]*dimb[currentdim+1]);
					break;  //Shortcut, we are done allready
				} else if(dima[currentdim+1] == 1){//A is a scalar in the last dim
					if(dima[currentdim] == 1)//TEST: a(3,1,1) b(3,1,3) a+b						
						A_tmp_offset = A_offset;
					else //TEST: a(3,3,1) b(3,3,3) a+b			
						A_tmp_offset = A_offset+i;
					
					CudaObject_UFUNCS(colist[2])->ufunc_ms_fptr(colist[2], colist[1], &(((double*)PyArray_DATA(A))[A_tmp_offset]),
							 res_tmp_offset, B_offset+i*dimb[currentdim+1], op, dimb[currentdim+1], 1);					
				} else if(dimb[currentdim+1] == 1){//B is a scalar in the last dim					
					if(dimb[currentdim] == 1) //TEST: a(3,1,3) b(3,1,1) a+b
						B_tmp_offset = B_offset;
					else //TEST: a(3,3,3) b(3,3,1) a+b					
						B_tmp_offset = B_offset+i;						
					CudaObject_UFUNCS(colist[2])->ufunc_ms_fptr(colist[2], colist[0], &(((double*)PyArray_DATA(B))[B_tmp_offset]),
							 res_tmp_offset, A_offset+i*dima[currentdim+1], op, dima[currentdim+1], 0);
				}
			}else if(dima[currentdim] == 1){ //A is scalar in currentdim
				if(//dima[currentdim+1] > 1 && dimb[currentdim+1] > 1 && 
				   dima[currentdim+1] == dimb[currentdim+1]){ //A and B are vectors in the last dim
				   //TEST: a(3,1,4) b(3,3,4) a+b
					CudaObject_UFUNCS(colist[2])->ufunc_mm_fptr(colist[2], colist[0], 
							 colist[1], res_tmp_offset, 
							 A_offset, B_offset+i*dimb[currentdim+1], 
							 op, dimb[currentdim+1]);
				} else if(dima[currentdim+1] == 1){//A is a scalar in the last dim
					//TEST: a(3,1,1) b(1,3,3) a+b					
					CudaObject_UFUNCS(colist[2])->ufunc_ms_fptr(colist[2], colist[1], &(((double*)PyArray_DATA(A))[A_offset]), 
							 res_tmp_offset, B_offset+i*dimb[currentdim+1], op, dimb[currentdim+1], 1);
				} else if(dimb[currentdim+1] == 1){//B is a scalar in the last dim
					//TEST: a(3,1,3) b(1,3,1) a+b
					CudaObject_UFUNCS(colist[2])->ufunc_ms_fptr(colist[2], colist[0], &(((double*)PyArray_DATA(B))[B_offset+i]), 
							 res_tmp_offset, A_offset, op, dima[currentdim+1], 0);					
				}
			}else if(dimb[currentdim] == 1){ //B is scalar in currentdim
				if(dima[currentdim+1] == dimb[currentdim+1]){ //A and B are vectors in the last dim
				   //TEST: a(3,3,4) b(3,1,4) a+b
					CudaObject_UFUNCS(colist[2])->ufunc_mm_fptr(colist[2], colist[0], 
							 colist[1], res_tmp_offset, 
							 A_offset+i*dima[currentdim+1], B_offset, 
							 op, dimb[currentdim+1]);
				} else if(dima[currentdim+1] == 1){ //A is a scalar in the last dim
					//TEST: a(3,3,1) b(1,1,3) a+b
					CudaObject_UFUNCS(colist[2])->ufunc_ms_fptr(colist[2], colist[1], &(((double*)PyArray_DATA(A))[A_offset+i]), 
							 res_tmp_offset, B_offset, op, dimb[currentdim+1], 1);
				} else if(dimb[currentdim+1] == 1){ //B is a scalar in the last dim
					//TEST: a(3,1,3) b(1,3,1) a+b
					CudaObject_UFUNCS(colist[2])->ufunc_ms_fptr(colist[2], colist[0], 
							 &(((long*)PyArray_DATA(B))[B_offset]), res_tmp_offset, A_offset+i*dima[currentdim+1], op, dima[currentdim+1], 0);					
				}
			}else{
				PyErr_SetString(PyExc_RuntimeError, "ufunc - shape mismatch\n");
				return -1;
			}			
		}else{
			//calculating offset for this dimension
			A_tmp_offset = 1, B_tmp_offset = 1, res_tmp_offset = 1;
			for(j=currentdim+1 ; j<ndim ; j++){
				A_tmp_offset *= dima[j];
				B_tmp_offset *= dimb[j];
				res_tmp_offset *= PyArray_DIM(res, j);
			}
			if(dima[currentdim] > 1) A_tmp_offset = A_tmp_offset*i + A_offset;
			else A_tmp_offset = A_offset;			
			if(dimb[currentdim] > 1) B_tmp_offset = B_tmp_offset*i + B_offset;
			else B_tmp_offset = B_offset;			
			if(PyArray_DIM(res, currentdim) > 1) res_tmp_offset = res_tmp_offset*i + res_offset;
			else res_tmp_offset = res_offset;
			//Recursive step
			r = ufunc(A, B, res, dima, dimb, ndim, currentdim+1, 
					  A_tmp_offset, B_tmp_offset, res_tmp_offset, op, colist);
			if(r == -1) return -1; //Error, bail out
		}
	}
	if(currentdim >= ndim-2)
		CudaObject_SI(colist[2])++;
		
	return 0;
}

/*NUMPY_API
 * ===================================================================
 * Public
 * Main method for universal functions
 */
static int
cnumpy_ufunc(PyArrayObject *arylist[NPY_MAXARGS], int narys,
             int nout_arys, char *op)
{
	if(narys==3) //Making sure operands have the same types
		if(!((PyTypeNum_ISINTEGER(PyArray_TYPE(arylist[0])) && 
		      PyTypeNum_ISINTEGER(PyArray_TYPE(arylist[1]))) ||
		     (PyTypeNum_ISFLOAT(PyArray_TYPE(arylist[0])) && 
		      PyTypeNum_ISFLOAT(PyArray_TYPE(arylist[1]))))){	
				printArray(arylist[0]);
				printArray(arylist[1]);
				printf("All operands needs to have the same type!\n"); 
				return -1;
			  }
	
	//Fetching the CudaObjects
	CudaObject *colist[narys];	
	int i, uid, s_uid=-1;
	for(i=0;i<narys;i++){
		uid = PyArray_CUDAUID(arylist[i]);
		if(uid < 1){//Special case for scalars   
			PyArray_CUDAUID(arylist[i]) = cnumpy_create_cudaobject(PyArray_TYPE(arylist[i]));
			uid = PyArray_CUDAUID(arylist[i]);
			s_uid = uid;
		}
		colist[i] = cnumpy_get_cudaobject(uid);		
	}
	
	if(narys==3)//Need to find the right function pointers incase of boolean functions
		if(PyArray_ISBOOL(arylist[2])){
			if(PyTypeNum_ISFLOAT(PyArray_TYPE(arylist[0])) &&
			   PyTypeNum_ISFLOAT(PyArray_TYPE(arylist[1]))){
				CudaObject_UFUNCS(colist[2])->ufunc_ms_fptr = ufunc_equality_ms_dbl;
				CudaObject_UFUNCS(colist[2])->ufunc_mm_fptr = ufunc_equality_mm_dbl;
			}else if(PyTypeNum_ISINTEGER(PyArray_TYPE(arylist[0])) &&
					 PyTypeNum_ISINTEGER(PyArray_TYPE(arylist[1]))){
				CudaObject_UFUNCS(colist[2])->ufunc_ms_fptr = ufunc_equality_ms_lng;
				CudaObject_UFUNCS(colist[2])->ufunc_mm_fptr = ufunc_equality_mm_lng;				   
			}else{			
				printf("Unsupported boolean comparison.\n"); 
				return -1;
			}
		}
	
	if(narys - nout_arys == 1){
		if(strcmp(op, "ufunc_random") == 0){//non cuda
			int i,j;
			npy_intp size = PyArray_Size(arylist[1]);
			if(PyTypeNum_ISFLOAT(PyArray_TYPE(arylist[1])))
				for(i=0; i < size; i++)
					((double *)PyArray_DATA(arylist[1]))[i] = (double)rand()/(double)RAND_MAX;			
			if(PyTypeNum_ISCOMPLEX(PyArray_TYPE(arylist[1])))
				for(i=0, j=0; i < size; i++){
					((double *)PyArray_DATA(arylist[1]))[j++] = (double)rand()/(double)RAND_MAX;
					((double *)PyArray_DATA(arylist[1]))[j++] = (double)rand()/(double)RAND_MAX;
				}
			CudaObject_STATUS(colist[1]) &= DEVICE_ALLOC; //implicitly removes DATA_ON_DEVICE
			CudaObject_STATUS(colist[1]) |= DATA_ON_HOST;
			return 0;
		}else{//Operation are square or similar  
			copy_todevice(PyArray_DATA(arylist[0]), colist[0], 
						  PyArray_Size(arylist[0]), PyArray_ITEMSIZE(arylist[0]));
			alloc_ondevice(colist[1], PyArray_Size(arylist[1]), PyArray_ITEMSIZE(arylist[1]));	
			
			CudaObject_UFUNCS(colist[0])->ufunc_m_fptr(colist[1], colist[0], 0,
									0, op, PyArray_Size(arylist[0]));			
			
			CudaObject_STATUS(colist[1]) = (DATA_ON_DEVICE|DEVICE_ALLOC);
			return 0; 
		}
	}
	
	for(i=0;i<narys;i++) //Making sure allocation are done
		alloc_ondevice(colist[i], PyArray_Size(arylist[i]), PyArray_ITEMSIZE(arylist[i]));
			
	int ndima = PyArray_NDIM(arylist[0]),
		ndimb = PyArray_NDIM(arylist[1]),			
		*dima, *dimb, nstreams = 1;
	int ia = ndima-1, ib = ndimb-1, maxndim = max(ndima, ndimb);
	if(maxndim == 1) maxndim++; //ufunc doesnt work with just vectors
			
	dima = (int*)malloc(sizeof(int)*maxndim);
	dimb = (int*)malloc(sizeof(int)*maxndim);
	//Prepend with ones according to the broadcast rules
	for(i=maxndim-1 ; 0<=i ; i--, ia--, ib--){
		if(ia<0) dima[i] = 1;
		else dima[i] = PyArray_DIM(arylist[0], ia);
		if(ib<0) dimb[i] = 1;
		else dimb[i] = PyArray_DIM(arylist[1], ib);
		
		if(i < (maxndim-2)) nstreams *= max(dima[i], dimb[i]);
	} 
	
	CudaObject_STREAMS(colist[2]) = malloc(sizeof(int)*nstreams);
	CudaObject_SI(colist[2]) = 0;
	for(i=0;i<nstreams;i++)
		create_stream(&(CudaObject_STREAMS(colist[2])[i]));
	
	int r = ufunc(arylist[0], arylist[1], arylist[2], dima, dimb, maxndim, 0, 0, 0, 0, op, colist);
	//Cleanup
	for(i=0;i<nstreams;i++)		
		destroy_stream(&(CudaObject_STREAMS(colist[2])[i]));
	free(CudaObject_STREAMS(colist[2]));
	CudaObject_SI(colist[2]) = -1;
	CudaObject_STATUS(colist[0]) |= DATA_ON_DEVICE;
	CudaObject_STATUS(colist[1]) |= DATA_ON_DEVICE;
	CudaObject_STATUS(colist[2])  = (DATA_ON_DEVICE|DEVICE_ALLOC);
	free(dima), free(dimb);
	
	if(s_uid > -1) //Dereference again
		cnumpy_destroy_cudaobject(s_uid);

	return r;
} /* cnumpy_ufunc */

/*NUMPY_API
 * ===================================================================
 * Public
 * Universal function reduce
 */
static int
cnumpy_ufunc_reduce(PyArrayObject *in_ary, PyArrayObject *out_ary,
                    int axis, char *op)
{    
	time_t  t0, t1;
	clock_t c0, c1;
	CudaObject *co = cnumpy_get_cudaobject(PyArray_CUDAUID(in_ary));
	
	alloc_ondevice(co, PyArray_Size(in_ary), PyArray_ITEMSIZE(in_ary));
	
	void *f = malloc(sizeof(PyArray_ITEMSIZE(in_ary)));
	int i, end = 1, stride = 1, offset=1;
	for(i=axis+1 ; i<PyArray_NDIM(in_ary) ; i++)
		stride *= PyArray_DIM(in_ary, i);
	
	for(i=0;i<PyArray_NDIM(in_ary);i++)
		if(i!=axis)
			end *= PyArray_DIM(in_ary, i);
	
	//Offset is the axis'ed dimension's size, except for axis==0
	if(axis>0)
		offset = PyArray_DIM(in_ary, axis);                
	if(stride > 1)
		copy_todevice(PyArray_DATA(in_ary), co, PyArray_Size(in_ary), PyArray_ITEMSIZE(in_ary));
		           
	int *streams = (int*)malloc(sizeof(int)*end);	
	for(i=0 ; i < end ; i++){
		create_stream(&(streams[i]));
		if(stride==1 && !CudaObject_CHECKSTATUS(co, DATA_ON_DEVICE)){
			copy_todevice_async(PyArray_DATA(in_ary), co, PyArray_DIM(in_ary, axis), PyArray_ITEMSIZE(in_ary), i*offset, streams[i]);
		}
		CudaObject_UFUNCS(co)->ufunc_reduce_fptr(co, i*offset, stride, op, f, PyArray_DIM(in_ary, axis), streams[i]);
		
		//Type doesn't matter because its only pointers, only size
		((double*)(PyArray_DATA(out_ary)))[i] = *((double*)f);
	}
	
	CudaObject_STATUS(co) |= DATA_ON_DEVICE;
	if(PyArray_CUDAUID(out_ary) > 0){
		co = cnumpy_get_cudaobject(PyArray_CUDAUID(out_ary));
		CudaObject_STATUS(co) = DATA_ON_HOST;
	}else
		PyArray_CUDAUID(out_ary) = cnumpy_create_cudaobject(PyArray_TYPE(out_ary));
	
	return 0;
} /* cnumpy_ufunc_reduce */

/*NUMPY_API
 * ===================================================================
 * Public
 * Dsicrete Fourier transformations
 */
static int
cnumpy_ufunc_fft(PyArrayObject *in_ary, PyArrayObject *out_ary, char *op)
{
	if(PyArray_NDIM(in_ary)>2 || PyArray_NDIM(out_ary)>2){
		printf("Only support up to 2-dimensional ndarray's.\n"); 
		return -1;
	}

	CudaObject *in_co  = cnumpy_get_cudaobject(PyArray_CUDAUID(in_ary));
	CudaObject *out_co = cnumpy_get_cudaobject(PyArray_CUDAUID(out_ary));

	
	copy_todevice(PyArray_DATA(in_ary), in_co, 
				  PyArray_Size(in_ary), PyArray_ITEMSIZE(in_ary));
				  
	alloc_ondevice(out_co, PyArray_Size(out_ary), PyArray_ITEMSIZE(out_ary));

	ufunc_fft_dbl(in_co, out_co, PyArray_DIMS(in_ary), PyArray_NDIM(in_ary), op);

	CudaObject_STATUS(out_co) = (DEVICE_ALLOC|DATA_ON_DEVICE);
	
	return 0;
}

/*NUMPY_API     
 * ===================================================================
 * Public
 * Register ufuncs
 */
static void
cnumpy_reg_ufunc_module(PyObject *module)
{
    ufunc_module = module;
} /* cnumpy_reg_ufunc_module */  

/*NUMPY_API
 * ===================================================================
 * Public   
 * Allocate host memory
 */
static void
cnumpy_malloc(void **ptr, int nbytes)
{
	malloc_host(ptr, nbytes);
}

/*NUMPY_API
 * ===================================================================
 * Public
 * Free host memory
 */
static void
cnumpy_free(void *ptr)
{
	free_host(ptr);
}
