/* -----------------------------------------------------------------------
   See COPYRIGHT.TXT and LICENSE.TXT for copyright and license information
   ----------------------------------------------------------------------- */
#include "fdk_cuda.h"
#include "drr_opts.h"
#include "plm_timer.h"
#include "drr_util.h"
#include "proj_image.h"
#include "proj_image_set.h"

#define BSPLINE_XFORM_INIT(pB) 	bspline_xform_initialize (pB,Vr->offset,Vr->pix_spacing,Vr->dim,roi_offset,Vr->dim,vox_per_rgn)
#define BSPLINE_XFORM_INIT_ROI(pB) 	bspline_xform_initialize (pB,Vr->offset,Vr->pix_spacing,Vr->dim,roi_offset,roi_dim,vox_per_rgn)

#define _CRT_SECURE_NO_DEPRECATE
#define READ_PFM
#define WRITE_BLOCK (1024*1024)
//#define Debug yes

/****************************************************\
* Uncomment the line below to enable verbose output. *
* Enabling this should not degrade performance.      *
\****************************************************/
//#define VERBOSE

/**********************************************************\
* The line below enables detailed performance reporting.   *
* This measurement alters the system, however,             *
* resulting in significantly slower kernel execution.      *
* Comment it out to disable the reporting.                 *
\**********************************************************/
#define TIME_KERNEL
#ifdef __DEVICE_EMULATION__
#define EMUSYNC __syncthreads()
#else
#define EMUSYNC
#endif

#define EPS 1.1921e-7

#define TVno
#define BKinterp
//#define PDebug
//#define GDebug


/*****************
*  C   #includes *
*****************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

/*****************
* CUDA #includes *
*****************/
#include <cuda.h>

/*****************
* FDK  #includes *
*****************/
#include "fdk.h"
//#include "fdk_cuda.h"
#include "volume.h"
#include "proj_image_set.h"
#include "CB_set_op.h"
//#include "readmha_ext.h"
#include "math_util.h"
//#include "fdk_utils_CG.h"
#include "fdk_opts.h"
#include "cuda_util.cu"
#include "drr_cuda.h"
#include "drr_cuda_p.h"
#include "drr_opts.h"
#include "fdk_cuda.h"
#include "fdk_cuda_p.h"
#include "plm_cuda_math.h"
//#include "MGHMtx_opts.h"
//#include "write_matrix.h"

//#include "bspline_opts.h"
//#include "bspline.h"
//#include "bspline_CG.h"

//#include "readmha_ext.h"

//#include "bspline_cuda.h"



#define PDebug
#define HalfCenter 64

/*********************
* High Res Win Timer *
*********************/
#include <time.h>
#if defined (_WIN32)
#include <windows.h>
#include <winbase.h>
#endif
//#ifndef MARGIN
//	static const unsigned int MARGIN = 5;
//#else
//	#error "MARGIN IS DEFINED"
//#endif

// P R O T O T Y P E S ////////////////////////////////////////////////////

//void CUDA_Pj(CB_Set * PSy, Volume *Vx);
//
//void CUDA_Bkpj(Volume * Vx, CB_Set * PSerr);
//void CUDA_check_error(const char *msg);

__global__
void kernel_drr_i_interp (  float * dev_img,float * dev_vol, int2 img_dim, float2 ic, float3 nrm, float sad, float scale, float3 vol_offset, int3 vol_dim, float3 vol_pix_spacing);

__global__
void kernel_drr_tr (float * dev_vol,  float * dev_img, int2 SBlock, int2 SBi, int2 img_dim, float2 ic, float3 nrm, float sad, float scale, float3 vol_offset, int3 vol_dim, float3 vol_pix_spacing);

///////////////////////////////////////////////////////////////////////////

// T E X T U R E S ////////////////////////////////////////////////////////
texture<float, 1, cudaReadModeElementType> tex_img;
texture<float, 1, cudaReadModeElementType> tex_matrix;
texture<float, 1, cudaReadModeElementType> tex_coef;
texture<float, 3, cudaReadModeElementType> tex_3Dvol;


//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
// K E R N E L S -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_( S T A R T )_
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
__global__
void kernel_drr_i_interp (  float * dev_img,float * dev_vol, int2 img_dim, float2 ic, float3 nrm, float sad, float scale, float3 vol_offset, int3 vol_dim, float3 vol_pix_spacing)
{
	// CUDA 2.0 does not allow for a 3D grid, which severely
	// limits the manipulation of large 3D arrays of data.  The
	// following code is a hack to bypass this implementation
	// limitation.
	extern __shared__ float sdata[];
	float3 ip;
	float3 vp;
	float3 dp;
	float cosa;
	float s;
	float fi,fj,fk;
	float ri,rj,rk;
	int i,j,k,
	int i1,j1,k1;
	

	long int vol_idx00, vol_idx01, vol_idx10, vol_idx11;

	unsigned int tid = threadIdx.x;

	ip.x = __int2float_rn(blockIdx.x)-ic.x;
	ip.y = __int2float_rn(blockIdx.y)-ic.y;

	if (abs(tex1Dfetch(tex_matrix, 1))>abs(tex1Dfetch(tex_matrix, 0))){

		vp.x=vol_offset.x+threadIdx.x*vol_pix_spacing.x;

		vp.y=(ip.x*tex1Dfetch(tex_matrix, 8)-tex1Dfetch(tex_matrix, 0))*vp.x+ip.x*tex1Dfetch(tex_matrix, 11);

		vp.y/=tex1Dfetch(tex_matrix, 1)-ip.y*tex1Dfetch(tex_matrix, 9);

		vp.z=ip.y*(tex1Dfetch(tex_matrix, 8)*vp.x+tex1Dfetch(tex_matrix, 9)*vp.y+tex1Dfetch(tex_matrix, 11));

		vp.z/=tex1Dfetch(tex_matrix, 6);

		i=  threadIdx.x;

		fj= (vp.y-vol_offset.y)/vol_pix_spacing.y;

		fk= (vp.z-vol_offset.z)/vol_pix_spacing.z;

		j= __float2int_rd(fj);
		k= __float2int_rd(fk);

		rj=fj-j;
		rk=fk-k;

		j1=j+1;
		k1=k+1;

		if (j1<0||j>=vol_dim.y||k1<0||k>=vol_dim.z){
			sdata[tid]=0.0;	
		}
		else{
			if (j<0) j=0;
			if (j1>=vol_dim.y) j1=vol_dim.y-1;
			if (k<0) k=0;
			if (k1>=vol_dim.z) k1=vol_dim.z-1;


			vol_idx00 = i + ( j*(vol_dim.x) ) + ( k*(vol_dim.x)*(vol_dim.y) );
			vol_idx01 = i + ( j*(vol_dim.x) ) + ( k1*(vol_dim.x)*(vol_dim.y) );
			vol_idx10 = i + ( j1*(vol_dim.x) ) + ( k*(vol_dim.x)*(vol_dim.y) );
			vol_idx11 = i + ( j1*(vol_dim.x) ) + ( k1*(vol_dim.x)*(vol_dim.y) );
			s = nrm.x*vp.x + nrm.y*vp.y + nrm.z*vp.z;
			s = sad - s;
			s = (sad * sad) / (s * s);
			//sdata[tid]=(1-rj)*(1-rk)*dev_vol[vol_idx00]+(1-rj)*rk*dev_vol[vol_idx01]+rj*(1-rk)*dev_vol[vol_idx10]+rj*rk*dev_vol[vol_idx11];//vol_dim.x;
			//sdata[tid]=sdata[tid]/s/scale;
			sdata[tid]=dev_vol[vol_idx00];
		}

	}
	else{

		vp.y=vol_offset.y+threadIdx.x*vol_pix_spacing.y;

		vp.x=(ip.x*tex1Dfetch(tex_matrix, 9)-tex1Dfetch(tex_matrix, 1))*vp.y+ip.x*tex1Dfetch(tex_matrix, 11);

		vp.x/=tex1Dfetch(tex_matrix, 0)-ip.y*tex1Dfetch(tex_matrix, 8);

		vp.z=ip.y*(tex1Dfetch(tex_matrix, 8)*vp.x+tex1Dfetch(tex_matrix, 9)*vp.y+tex1Dfetch(tex_matrix, 11));

		vp.z/=tex1Dfetch(tex_matrix, 6);

		fi= (vp.x-vol_offset.x)/vol_pix_spacing.x;

		j=  threadIdx.x;

		fk= (vp.z-vol_offset.z)/vol_pix_spacing.z;

		i= __float2int_rd(fi);
		k= __float2int_rd(fk);

		ri=fi-i;
		rk=fk-k;

		i1=i+1;
		k1=k+1;

		
		if (i1<0||i>=vol_dim.x||k1<0||k>=vol_dim.z){
			sdata[tid]=0.0;		
		}
		else{

			if (i<0) i=0;
			if (i1>=vol_dim.x) i1=vol_dim.x-1;
			if (k<0) k=0;
			if (k1>=vol_dim.z) k1=vol_dim.z-1;


			vol_idx00 = i + ( j*(vol_dim.x) ) + ( k*(vol_dim.x)*(vol_dim.y) );
			vol_idx01 = i + ( j*(vol_dim.x) ) + ( k1*(vol_dim.x)*(vol_dim.y) );
			vol_idx10 = i1 + ( j*(vol_dim.x) ) + ( k*(vol_dim.x)*(vol_dim.y) );
			vol_idx11 = i1 + ( j*(vol_dim.x) ) + ( k1*(vol_dim.x)*(vol_dim.y) );
			s = nrm.x*vp.x + nrm.y*vp.y + nrm.z*vp.z;
			s = sad - s;
			s = (sad * sad) / (s * s);
			//sdata[tid]=(1-ri)*(1-rk)*dev_vol[vol_idx00]+(1-ri)*rk*dev_vol[vol_idx01]+ri*(1-rk)*dev_vol[vol_idx10]+ri*rk*dev_vol[vol_idx11];///vol_dim.x;
			//sdata[tid]=sdata[tid]/s*scale;
			sdata[tid]=dev_vol[vol_idx00];
		}
		
	}

	//sdata[tid]=1.0f;

	__syncthreads();

    // do reduction in shared mem
    for(unsigned int s=blockDim.x/2; s>0; s>>=1) 
    {
        if (tid < s)
            sdata[tid] += sdata[tid + s];
        __syncthreads();
    }

//#ifndef __DEVICE_EMULATION__
//    if (tid < 32)
//#endif
//    {
//        sdata[tid] += sdata[tid + 32]; EMUSYNC;
//        sdata[tid] += sdata[tid + 16]; //EMUSYNC;
//        sdata[tid] += sdata[tid +  8]; //EMUSYNC;
//        sdata[tid] += sdata[tid +  4]; //EMUSYNC;
//        sdata[tid] += sdata[tid +  2]; //EMUSYNC;
//        sdata[tid] += sdata[tid +  1]; //EMUSYNC;
//    }

    // do reduction in shared mem





     //write result for this block to global mem
	if (tid == 0) {

		//if (abs(tex1Dfetch(tex_matrix, 5))>abs(tex1Dfetch(tex_matrix, 4))){
		//	dp.x=vol_pix_spacing.x;
		//	dp.y=(ip.y*tex1Dfetch(tex_matrix, 8)-tex1Dfetch(tex_matrix, 4))*dp.x;
		//	dp.y/=tex1Dfetch(tex_matrix, 5)-ip.y*tex1Dfetch(tex_matrix, 9);
		//	dp.z=ip.x*(tex1Dfetch(tex_matrix, 8)*dp.x+tex1Dfetch(tex_matrix, 9)*dp.y);
		//	dp.z/=tex1Dfetch(tex_matrix, 2);
		//	cosa=dp.x/sqrtf(dp.x*dp.x+dp.y*dp.y+dp.z*dp.z);
		//}
		//	else{
		//	dp.y=vol_pix_spacing.y;
		//	dp.x=(ip.y*tex1Dfetch(tex_matrix, 9)-tex1Dfetch(tex_matrix, 5))*dp.y;
		//	dp.x/=tex1Dfetch(tex_matrix, 4)-ip.y*tex1Dfetch(tex_matrix, 8);
		//	dp.z=ip.x*(tex1Dfetch(tex_matrix, 8)*dp.x+tex1Dfetch(tex_matrix, 9)*dp.y);
		//	dp.z/=tex1Dfetch(tex_matrix, 2);
		//	cosa=dp.y/sqrtf(dp.x*dp.x+dp.y*dp.y+dp.z*dp.z);
		//}

		//dev_img[blockIdx.x*img_dim[1] + blockIdx.y] = sdata[0];///cosa;
		//dev_img[blockIdx.y*img_dim[0] + blockIdx.x] = sdata[0];
		//dev_img[blockIdx.y*img_dim[0] + blockIdx.x] = 1;
		dev_img[blockIdx.y*img_dim.x + blockIdx.x] = sdata[tid];//img_dim[0];
		//		dev_img[0] = 1;

	}
	
}


//__global__
//void kernel_drr_tr (float * dev_vol,  float * dev_img, int2 SBlock, int2 SBi, int2 img_dim, float2 ic, float3 nrm, float sad, float scale, float3 vol_offset, int3 vol_dim, float3 vol_pix_spacing)
//{
//	// CUDA 2.0 does not allow for a 3D grid, which severely
//	// limits the manipulation of large 3D arrays of data.  The
//	// following code is a hack to bypass this implementation
//	// limitation.
//	float3 ip;
//	float3 vp;
//	float3 dp;
//	int2 img;
//	float cosa;
//	int i,j,k;
//	long int vol_idx;
//
//	unsigned int tid = threadIdx.x;
//
//	img.x=blockIdx.x*SBlock.x+SBi.x;
//	img.y=blockIdx.y*SBlock.y+SBi.y;
//
//	ip.x =__int2float_rn (img.x)-ic.x;
//	ip.y =__int2float_rn (img.y)-ic.y;
//
//	if (abs(tex1Dfetch(tex_matrix, 5))>abs(tex1Dfetch(tex_matrix, 4))){
//
//		vp.x=vol_offset.x+threadIdx.x*vol_pix_spacing.x;
//
//		vp.y=(ip.y*tex1Dfetch(tex_matrix, 8)-tex1Dfetch(tex_matrix, 4))*vp.x+ip.y*tex1Dfetch(tex_matrix, 11);
//
//		vp.y/=tex1Dfetch(tex_matrix, 5)-ip.y*tex1Dfetch(tex_matrix, 9);
//
//		vp.z=ip.x*(tex1Dfetch(tex_matrix, 8)*vp.x+tex1Dfetch(tex_matrix, 9)*vp.y+tex1Dfetch(tex_matrix, 11));
//
//		vp.z/=tex1Dfetch(tex_matrix, 2);
//
//		i=  threadIdx.x;
//
//		j=  __float2int_rd((vp.y-vol_offset.y)/vol_pix_spacing.y);
//
//		k=  __float2int_rd((vp.z-vol_offset.z)/vol_pix_spacing.z);
//
//
//		if (j<0||j>=vol_dim.y||k<0||k>=vol_dim.z){
//			//sdata[tid]=0.0;	
//		}
//		else{
//			vol_idx = i + ( j*(vol_dim.x) ) + ( k*(vol_dim.x)*(vol_dim.y) );
//			dev_vol[vol_idx]=dev_vol[vol_idx]+dev_img[img.x*img_dim.y + img.y];///vol_dim.x;
//		}
//
//	}
//	else{
//
//		vp.y=vol_offset.y+threadIdx.x*vol_pix_spacing.y;
//
//		vp.x=(ip.y*tex1Dfetch(tex_matrix, 9)-tex1Dfetch(tex_matrix, 5))*vp.y+ip.y*tex1Dfetch(tex_matrix, 11);
//
//		vp.x/=tex1Dfetch(tex_matrix, 4)-ip.y*tex1Dfetch(tex_matrix, 8);
//
//		vp.z=ip.x*(tex1Dfetch(tex_matrix, 8)*vp.x+tex1Dfetch(tex_matrix, 9)*vp.y+tex1Dfetch(tex_matrix, 11));
//
//		vp.z/=tex1Dfetch(tex_matrix, 2);
//
//		i=  __float2int_rd((vp.x-vol_offset.x)/vol_pix_spacing.x);
//
//		j=  threadIdx.x;
//
//		k=  __float2int_rd((vp.z-vol_offset.z)/vol_pix_spacing.z);
//
//		
//		if (i<0||i>=vol_dim.x||k<0||k>=vol_dim.z){
//			//sdata[tid]=0.0;		
//		}
//		else{
//			vol_idx = i + ( j*(vol_dim.x) ) + ( k*(vol_dim.x)*(vol_dim.y) );
//			dev_vol[vol_idx]=dev_vol[vol_idx]+dev_img[img.x*img_dim.y + img.y];///vol_dim.x;
//		}
//		
//	}
//
//
//}

//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
// K E R N E L S -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-( E N D )-_-_
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_


//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
// K E R N E L S -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_( S T A R T )_
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
//__global__
//void kernel_fdk_interp(float *dev_vol,int* img_dim, double * ic, double * nrm, double sad, double scale, float* vol_offset, int* vol_dim, float* vol_pix_spacing, unsigned int Blocks_Y, float invBlocks_Y)
//{
//	// CUDA 2.0 does not allow for a 3D grid, which severely
//	// limits the manipulation of large 3D arrays of data.  The
//	// following code is a hack to bypass this implementation
//	// limitation.
//	unsigned int blockIdx_z = __float2uint_rd(blockIdx.y * invBlocks_Y);
//	unsigned int blockIdx_y = blockIdx.y - __umul24(blockIdx_z, Blocks_Y);
//	unsigned int i = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
//	unsigned int j = __umul24(blockIdx_y, blockDim.y) + threadIdx.y;
//	unsigned int k = __umul24(blockIdx_z, blockDim.z) + threadIdx.z;
//
//	if( i >= vol_dim[0] || j >= vol_dim[1] || k >= vol_dim[2] )
//		return; 
//
//	// Index row major into the volume
//	long int vol_idx = i + ( j*(vol_dim[0]) ) + ( k*(vol_dim[0])*(vol_dim[1]) );
//
//	float vp[3];
//	float ip[3];
//	float  s;
//	float voxel_data;
//	int intip[3];
//	float frcip[3];
//	int img_idx;
//
//
//	// offset volume coords
//	vp[0] = vol_offset[0] + i * vol_pix_spacing[0];	// Compiler should combine into 1 FMAD.
//	vp[1] = vol_offset[1] + j * vol_pix_spacing[1];	// Compiler should combine into 1 FMAD.
//	vp[2] = vol_offset[2] + k * vol_pix_spacing[2];	// Compiler should combine into 1 FMAD.
//
//	// matrix multiply
//	ip[0] = tex1Dfetch(tex_matrix, 0)*vp[0] + tex1Dfetch(tex_matrix, 1)*vp[1] + tex1Dfetch(tex_matrix, 2)*vp[2] + tex1Dfetch(tex_matrix, 3);
//	ip[1] = tex1Dfetch(tex_matrix, 4)*vp[0] + tex1Dfetch(tex_matrix, 5)*vp[1] + tex1Dfetch(tex_matrix, 6)*vp[2] + tex1Dfetch(tex_matrix, 7);
//	ip[2] = tex1Dfetch(tex_matrix, 8)*vp[0] + tex1Dfetch(tex_matrix, 9)*vp[1] + tex1Dfetch(tex_matrix, 10)*vp[2] + tex1Dfetch(tex_matrix, 11);
//
//	// Change coordinate systems
//	ip[0] = ic[0] + ip[0] / ip[2];
//	ip[1] = ic[1] + ip[1] / ip[2];
//
//	// Get pixel from 2D image
//	intip[0] = __float2int_rd(ip[0]);
//	intip[1] = __float2int_rd(ip[1]);
//
//	frcip[0]=ip[0]-intip[0];
//	frcip[1]=ip[1]-intip[1];
//
//	if (intip[0]<0||intip[0]>=img_dim[0]-1||intip[1]<0||intip[1]>=img_dim[1]-1)
//		return;
//	
//	
//	voxel_data =(1-frcip[0])*(1-frcip[1])* tex1Dfetch(tex_img, intip[1]*img_dim[0] + intip[0]);
//	voxel_data +=(frcip[0])*(1-frcip[1])* tex1Dfetch(tex_img, (intip[1]+1)*img_dim[0] + intip[0]);
//	voxel_data +=(1-frcip[0])*(frcip[1])* tex1Dfetch(tex_img, intip[1]*img_dim[0] + intip[0]+1);
//	voxel_data +=(frcip[0])*(frcip[1])* tex1Dfetch(tex_img, (intip[1]+1)*img_dim[0] + intip[0]+1);
//
//	// Dot product
//	s = nrm[0]*vp[0] + nrm[1]*vp[1] + nrm[2]*vp[2];
//
//	// Conebeam weighting factor
//	s = sad - s;
//	s = (sad * sad) / (s * s);
//
//	// Place it into the volume
//	dev_vol[vol_idx] += scale*s*voxel_data;///vol_dim[0];
//	//dev_vol[vol_idx] += scale *s*voxel_data;
//}









//extern "C"
//void CUDA_Pj(CB_Set * PSy, Volume *Vx)
//{
//    // CUDA device pointers
//    float *dev_Py;	            // Holds voxels on device
//    float *dev_Vx;	            // Holds image pixels on device
//	float *dev_matrix;
//	Proj_matrix *dev_pmat;
//	int iproj,i;
//	Proj_image * Py=PSy->cbi[0];
//
//	// Thead Block Dimensions
//    int tBlock_x = Vx->dim[0];
//    int tBlock_y = 1;
//    int tBlock_z = 1;
//
//    // Each element in the volume (each voxel) gets 1 thread
//    int blocksInX = Py->dim[0];
//    int blocksInY = Py->dim[1];
//    dim3 dimGrid  = dim3(blocksInX, blocksInY);
//    dim3 dimBlock = dim3(tBlock_x, tBlock_y, tBlock_z);
//
//	cudaMalloc( (void**)&dev_Py, Py->dim[0]*Py->dim[1]*sizeof(float));
//	CUDA_check_error("Unable to allocate data volume");
//	cudaMalloc( (void**)&dev_Vx, Vx->npix*sizeof(float));
//	CUDA_check_error("Unable to allocate data volume");
//	cudaMalloc( (void**)&dev_matrix, 12*sizeof(float) );
//	CUDA_check_error("Unable to allocate data volume");
//	cudaMalloc( (void**)&dev_pmat, sizeof(Proj_matrix) );
//	CUDA_check_error("Unable to allocate data volume");
//
//	cudaMemcpy(dev_Vx,  Vx->img, Vx->npix * sizeof(float), cudaMemcpyHostToDevice );
//
//	
//
//	fflush(stdout);
//
//	for(iproj=0; iproj<PSy->nproj; iproj++){
//		//printf("\n iproj=%d", iproj);
//
//		Py=PSy->cbi[iproj];
//
//		//pmat->img_dim.x = Py->dim[0];
//		//pmat->img_dim.y = Py->dim[1];
//		//pmat->ic[0] = Py->pmat->ic[0];
//		//pmat->ic[1] = Py->pmat->ic[1];
//		//pmat->nrm[0] = Py->pmat->nrm[0];
//		//pmat->nrm[1] = Py->pmat->nrm[1];
//		//pmat->nrm[2] = Py->pmat->nrm[2];
//		//pmat->sad = Py->pmat->sad;
//		//pmat->sid = Py->pmat->sid;
//
//		//for(i=0; i<12; i++)
//		//	pmat->matrix[i] = (float)Py->pmat->matrix[i];	
//
//		cudaMemcpy( dev_matrix, Py->pmat->matrix, sizeof(Py->pmat->matrix), cudaMemcpyHostToDevice );
//		cudaBindTexture( 0, tex_matrix, dev_matrix, sizeof(Py->pmat->matrix)); 
//
//		// Invoke ze kernel  \(^_^)/
//		// Note: cbi->img AND cbi->matrix are passed via texture memory
//
//		int smemSize = Vx->dim[0]  * sizeof(float);
//
//		//-------------------------------------
//		kernel_drr_i_interp<<< dimGrid, dimBlock,  smemSize>>>(dev_Vx,dev_Py,
//			Py->dim,
//			Py->pmat->ic,
//			Py->pmat->nrm,
//			Py->pmat->sad,
//			1,
//			Vx->offset,
//			Vx->dim,
//			Vx->spacing);
//
//		CUDA_check_error("Kernel Panic!");
//
//
//		cudaThreadSynchronize();
//		cudaUnbindTexture( tex_matrix );
//
//		// Copy reconstructed volume from device to host
//		//cudaMemcpy( vol->img, dev_vol, vol->npix * vol->pix_size, cudaMemcpyDeviceToHost );
//		cudaMemcpy( Py->img, dev_Py, Py->dim[0]*Py->dim[1]*sizeof(float), cudaMemcpyDeviceToHost );
//
//		CUDA_check_error("Error: Unable to retrieve data volume.");
//
//	}
//
//	cudaFree( dev_Py);	
//	cudaFree( dev_Vx );
//	cudaFree( dev_pmat );
//	cudaFree( dev_matrix );
//
//	//cb_set_mul_add(PSy,0.00234,-80);
//
//    return;
//}



void CUDA_Pj(CB_Set * PSy, Volume *Vx, void * drr_dev_state, Drr_options * options )
{
	//Proj_image *proj;
    //Proj_matrix *pmat;
    //int a;
    Plm_timer timer;
    //void *dev_state = 0;

    /* tgt is isocenter */
    double tgt[3] = {
	options->isocenter[0],
	options->isocenter[1],
	options->isocenter[2] };

    plm_timer_start (&timer);


    /* Allocate memory on the gpu device */
   // dev_state = allocate_gpu_memory (PSy->cbi[0], Vx, options);

    /* If nrm was specified, only create a single image */
	for (int i=0; i<PSy->nproj; i++){
	double cam[3];
	double nrm[3] = {
	    PSy->cbi[i]->pmat->nrm[0],
	    PSy->cbi[i]->pmat->nrm[1],
	    PSy->cbi[i]->pmat->nrm[2], };

	/* Make sure nrm is normal */
	vec3_normalize1 (nrm);

	/* Place camera at distance "sad" from the volume isocenter */
	cam[0] = tgt[0] + options->sad * nrm[0];
	cam[1] = tgt[1] + options->sad * nrm[1];
	cam[2] = tgt[2] + options->sad * nrm[2];

	create_matrix_and_drr (Vx, PSy->cbi[i], cam, tgt, nrm, i, 
	    drr_dev_state, options);
    }



   // proj_image_destroy (proj);

    //free_gpu_memory (dev_state, options);

    printf ("Total time: %g secs\n", plm_timer_report (&timer));
}

//end


//extern "C"
void CUDA_Bkpj(Volume * Vx, CB_Set * PSerr, void *dev_state)
{
	for (int i=0; i<PSerr->nproj; i++){
		if (!delayload_cuda ()) { exit (0); }
		fdk_cuda_queue_image (
			dev_state, 
			PSerr->cbi[i]->dim, 
			PSerr->cbi[i]->pmat->ic, 
			PSerr->cbi[i]->pmat->nrm, 
			PSerr->cbi[i]->pmat->sad, 
			PSerr->cbi[i]->pmat->sid, 
			PSerr->cbi[i]->pmat->matrix,
			PSerr->cbi[i]->img
			);

		
		if (!delayload_cuda ()) { exit (0); }
		fdk_cuda_backproject (dev_state);
	}



	if (!delayload_cuda ()) { exit (0); }
	//printf("done done done done done done done done done done done done done done done \n\n\n");
	fdk_cuda_fetch_volume (dev_state, Vx->img, Vx->npix * Vx->pix_size);
}


void CUDA_Pj2(CB_Set * PSy, Volume * Vx, void *dev_state)
{
	
	float *dev_Py;	            // Holds voxels on device
    float *dev_Vx;	 
	float *dev_matrix;
	Drr_kernel_args * dev_kargs;
	
	int iproj,i;

	Proj_image * Py=PSy->cbi[0];

	Drr_cuda_state *state = (Drr_cuda_state*) dev_state;

	Drr_kernel_args *kargs=state->kargs;

	//Drr_kernel_args *dev_kargs=state->dev_kargs;



	// Thead Block Dimensions
    int tBlock_x = Vx->dim[0];
    int tBlock_y = 1;
    int tBlock_z = 1;

    // Each element in the volume (each voxel) gets 1 thread
    int blocksInX = Py->dim[0];
    int blocksInY = Py->dim[1];
    dim3 dimGrid  = dim3(blocksInX, blocksInY);
    dim3 dimBlock = dim3(tBlock_x, tBlock_y, tBlock_z);

	
	//cudaMalloc( (void**)&dev_Py, Py->dim[0]*Py->dim[1]*sizeof(float));
	// CUDA_check_error("Unable to allocate data volume");
	//cudaMalloc( (void**)&dev_Vx, Vx->npix*sizeof(float));
	// CUDA_check_error("Unable to allocate data volume");
	cudaMalloc( (void**)&dev_matrix, 12*sizeof(float) );
	 CUDA_check_error("Unable to allocate data volume");
	cudaMalloc( (void**)&dev_kargs, sizeof(Drr_kernel_args) );
	 CUDA_check_error("Unable to allocate data volume");


	//cudaMemcpy(dev_Vx,  Vx->img, Vx->npix * sizeof(float), cudaMemcpyHostToDevice );
	//CUDA_check_error("Kernel Panic!");
	fflush(stdout);



	for(iproj=0; iproj<PSy->nproj; iproj++){
		//printf("\n iproj=%d", iproj);

		Py=PSy->cbi[iproj];

		kargs->img_dim.x = Py->dim[0];
		kargs->img_dim.y = Py->dim[1];
		kargs->ic.x = Py->pmat->ic[0];
		kargs->ic.y = Py->pmat->ic[1];
		kargs->nrm.x = Py->pmat->nrm[0];
		kargs->nrm.y = Py->pmat->nrm[1];
		kargs->nrm.z = Py->pmat->nrm[2];
		kargs->sad = Py->pmat->sad;
		kargs->sid = Py->pmat->sid;
		kargs->vol_offset.x=Vx->offset[0];
		kargs->vol_offset.y=Vx->offset[1];
		kargs->vol_offset.z=Vx->offset[2];
		kargs->vol_dim.x=Vx->dim[0];
		kargs->vol_dim.y=Vx->dim[1];
		kargs->vol_dim.z=Vx->dim[2];
		kargs->vol_spacing.x=Vx->spacing[0];
		kargs->vol_spacing.y=Vx->spacing[1];
		kargs->vol_spacing.z=Vx->spacing[2];

		for(i=0; i<12; i++)
			kargs->matrix[i] = (float)Py->pmat->matrix[i];	

		//cudaMemcpy( state->dev_matrix, kargs->matrix, sizeof(kargs->matrix), cudaMemcpyHostToDevice );
		cudaMemcpy( dev_matrix, kargs->matrix, sizeof(kargs->matrix), cudaMemcpyHostToDevice );
		CUDA_check_error("Kernel Panic!");
		//cudaBindTexture( 0, tex_matrix, state->dev_matrix, sizeof(kargs->matrix)); 
		cudaBindTexture( 0, tex_matrix, dev_matrix, sizeof(kargs->matrix));
	
		CUDA_check_error("Kernel Panic!");

		int smemSize = Vx->dim[0]  * sizeof(float);
	


		//		//------------
		kernel_drr_i_interp<<< dimGrid, dimBlock,  smemSize>>>(state->dev_img,state->dev_vol,
			kargs->img_dim,
			kargs->ic,
			kargs->nrm,
			kargs->sad,
			1.0f,
			kargs->vol_offset,
			kargs->vol_dim,
			kargs->vol_spacing);

		/*void kernel_drr_i_interp (  float * dev_img,float * dev_vol, int* img_dim, double * ic, double * nrm, double sad, double scale, float * vol_offset, int * vol_dim, float * vol_pix_spacing)*/


		CUDA_check_error("Kernel Panic!");
		


		//cudaThreadSynchronize();
		//CUDA_check_error("Error");
		//cudaUnbindTexture( tex_matrix );
		//CUDA_check_error("Error");

 		cudaMemcpy( Py->img, state->dev_img, Py->dim[0]*Py->dim[1]*sizeof(float), cudaMemcpyDeviceToHost );

		CUDA_check_error("Error: Unable to retrieve data volume.");

		char * mat_fn=0;
		char fname[256];
		sprintf(fname,"C:\\AAAFiles\\CBCTData\\DRR2\\Proj_%d.raw",iproj);
		proj_image_save (PSy->cbi[iproj],fname , mat_fn);

		iproj=iproj;
		//printf("%d'th DRR\n",iproj);
		cudaUnbindTexture(tex_matrix);
		CUDA_check_error("CUDA Panic!");
		
		
	}
	cudaFree(dev_matrix);
	cudaFree(dev_kargs);
	CUDA_check_error("CUDA Panic!");
	printf("Done");


}




	
	

