#include "CudaSPH.h"
#include "CudaKernels.h"

#include <cuda.h>
#include <cuda_runtime.h>

#include <thrust/sort.h>

#include <cmath>
#include <cstdio>
#include <vector>

// XXX This is for testing

#define TESTING

// Constants
__constant__ float m_gridMin[3];
__constant__ int m_numCells[3];
__constant__ float m_cellWidth[3];
__constant__ unsigned int m_octree_num_levs;

/// physical parameters
__constant__ float m_ts;
__constant__ float m_k;
__constant__ float m_visc;
__constant__ float m_gamma;
__constant__ float m_supportRad;
__constant__ float m_restDens;


#define OTLEV_MULT -0.1f

// For the grid max boundary.
#define GRID_EPSILON 0.00001f

// Calls wrapped with DBG_CUDA_CALL are checked for errors only when DO_DEBUG is defined
#define DO_DEBUG

#ifdef DO_DEBUG

// Evaluates FUNC once and, on failure, prints STR plus the CUDA error string.
// Compare against cudaSuccess: FUNC returns a runtime-API cudaError_t, not the
// driver-API CUresult (CUDA_SUCCESS) the old code tested against.
// Kept as a bare { } block (not do{}while(0)) because several call sites in
// this file invoke the macro without a trailing semicolon.
#define DBG_CUDA_CALL(FUNC, STR) { \
	cudaError_t err = FUNC; \
	if (err != cudaSuccess){ fprintf(stderr, "%s -- API Error Desc: %s\n", STR, cudaGetErrorString(err)); } }

// Reports the most recent CUDA error (e.g. a bad kernel launch configuration).
#define DBG_CUDA_CHECK_ERRORS DBG_CUDA_CALL(cudaGetLastError(), "Checking last error")

#else
#define DBG_CUDA_CALL(FUNC, STR) { FUNC; } 
#define DBG_CUDA_CHECK_ERRORS {}
#endif

// Constructs the solver: zeroes every device pointer and the particle count,
// marks the first-run flag (the rest density is computed on the first
// Update()), then allocates the uniform grid and the octree on the device.
//
// gridMin   world-space lower corner of the simulation grid
// numCells  number of cells per axis
// cellWidth cell size per axis
CudaSPH::CudaSPH( 
	glm::vec3 & gridMin,
	glm::ivec3 numCells, 
	glm::vec3 cellWidth ) :
	mD_parts(NULL), mD_parts_gNdx(NULL), m_part_num(0), 
	mD_parts_forces(NULL), mD_parts_press(NULL), mD_parts_dens(NULL),
	mD_grid_ndx(NULL), mD_grid_end(NULL), m_firstRun(true)
	{
		AllocateGridDevice(gridMin, numCells, cellWidth);
	}

// Uploads the physical SPH parameters to device __constant__ memory.
//
// ts         base simulation timestep
// k          pressure stiffness constant
// visc       viscosity coefficient
// gamma      pressure exponent (Tait-style equation of state)
// supportRad smoothing-kernel support radius
//
// NOTE: m_restDens is not set here; it is computed from the initial particle
// configuration on the first Update().
void CudaSPH::SetSPHParams( 
	float ts, float k, float visc,
	float gamma, float supportRad) {
	
		// cudaMemcpyToSymbol must receive the symbol itself; the string-name
		// overload was removed in CUDA 5.0 and fails with an invalid-symbol
		// error at runtime.
	
		DBG_CUDA_CALL(
		cudaMemcpyToSymbol(m_ts, &ts, sizeof(float), 0, cudaMemcpyHostToDevice),
		"Error copying m_ts to constant memory");

		DBG_CUDA_CALL(
		cudaMemcpyToSymbol(m_k, &k, sizeof(float), 0, cudaMemcpyHostToDevice),
		"Error copying m_k to constant memory");

		DBG_CUDA_CALL(
		cudaMemcpyToSymbol(m_visc, &visc, sizeof(float), 0, cudaMemcpyHostToDevice),
		"Error copying m_visc to constant memory");

		DBG_CUDA_CALL(
		cudaMemcpyToSymbol(m_gamma, &gamma, sizeof(float), 0, cudaMemcpyHostToDevice),
		"Error copying m_gamma to constant memory");

		DBG_CUDA_CALL(
		cudaMemcpyToSymbol(m_supportRad, &supportRad, sizeof(float), 0, cudaMemcpyHostToDevice),
		"Error copying m_supportRad to constant memory");
}

// Sizes and allocates the linearized octree overlaying the uniform grid.
// The level count is derived from the grid resolution along x only (the grid
// is assumed cubic in cell count -- TODO confirm); level l holds 8^l cells,
// all levels stored contiguously.
void CudaSPH::AllocateOctree() {
	// Fetch the grid resolution back from constant memory.
	// Pass the symbol itself: string-name lookup was removed in CUDA 5.0.
	int gNumCells [3];
	DBG_CUDA_CALL(
		cudaMemcpyFromSymbol(&gNumCells[0], m_numCells, sizeof(int)*3),
		"Error copying from constant m_numCells to host memory" );

	// Levels needed so the leaf level covers gNumCells[0] cells per axis.
	unsigned int numLevs = (unsigned int)std::ceil(std::log((float)gNumCells[0]) / std::log(2.0f)) 
		+ 1;

	DBG_CUDA_CALL(
		cudaMemcpyToSymbol(m_octree_num_levs, &numLevs, sizeof(unsigned int)),
		"Error copying from host to constant m_octree_num_levs");

	// Total cell count over all levels: sum of 8^i for i in [0, numLevs).
	// 8^i == 1 << 3i -- exact integer arithmetic instead of double pow().
	int totMem = 0;
	for (unsigned int i = 0; i < numLevs; ++i){
		totMem += 1 << (3*i);
	}

	m_totOctreeCells = totMem;

	DBG_CUDA_CALL(
		cudaMalloc(&mD_octree_eend, sizeof(unsigned int) * totMem),
		"Error allocating octree_eend array\n");

	DBG_CUDA_CALL(
		cudaMalloc(&mD_octree_EoF, sizeof(unsigned int) * totMem),
		"Error allocating octree_EoF array\n");

	DBG_CUDA_CALL(
		cudaMalloc(&mD_octree_sndx, sizeof(unsigned int) * totMem),
		"Error allocating octree_sndx array\n");

}

// Uploads the grid description to constant memory, allocates and zeroes the
// per-cell interval arrays, then allocates the octree built on top of it.
void CudaSPH::AllocateGridDevice(
	glm::vec3 & gridMin, 
	glm::ivec3 & numCells,
	glm::vec3 & cellWidth)
{
	// Pass the symbols themselves to cudaMemcpyToSymbol; the string-name
	// overload was removed in CUDA 5.0.
	DBG_CUDA_CALL(
		cudaMemcpyToSymbol(m_gridMin, &gridMin[0], sizeof(float)*3, 0, cudaMemcpyHostToDevice),
		"Error copying m_gridMin to constant memory");

	DBG_CUDA_CALL(
		cudaMemcpyToSymbol(m_numCells, &numCells[0], sizeof(int)*3, 0, cudaMemcpyHostToDevice),
		"Error copying m_numCells to constant memory");

	DBG_CUDA_CALL(
		cudaMemcpyToSymbol(m_cellWidth, &cellWidth[0], sizeof(float)*3, 0, cudaMemcpyHostToDevice),
		"Error copying m_cellWidth to constant memory");

	// Per-cell [start, end) interval arrays, zero-initialized.
	unsigned int totCells = numCells.x*numCells.y*numCells.z;
	
	DBG_CUDA_CALL(
		cudaMalloc(&mD_grid_end, sizeof(int)*totCells),
		"Error allocating memory for mD_grid_end")

	DBG_CUDA_CALL(
		cudaMemset(mD_grid_end, 0, sizeof(int)*totCells),
		"Error initializing mD_grid_end")

	DBG_CUDA_CALL(
		cudaMalloc(&mD_grid_ndx, sizeof(int)*totCells),
		"Error allocating memory for mD_grid_ndx")

	DBG_CUDA_CALL(
		cudaMemset(mD_grid_ndx, 0, sizeof(int)*totCells),
		"Error initializing mD_grid_ndx")

	// Octree is sized from the grid resolution just uploaded.
	AllocateOctree();
}

// Frees every per-particle device array and nulls the pointers so a repeat
// call is harmless: cudaFree(NULL) is a no-op, whereas freeing a stale
// pointer a second time is a runtime error.
void CudaSPH::DeallocateParticles(){

	DBG_CUDA_CALL(cudaFree(mD_parts), "Error freeing memory for particle positions")
	mD_parts = NULL;
	DBG_CUDA_CALL(cudaFree(mD_parts_gNdx), "Error freeing memory for particle grid ndx")
	mD_parts_gNdx = NULL;
	DBG_CUDA_CALL(cudaFree(mD_parts_forces), "Error freeing memory for particle forces")
	mD_parts_forces = NULL;
	DBG_CUDA_CALL(cudaFree(mD_parts_press), "Error freeing memory for particle pressures")
	mD_parts_press = NULL;
	DBG_CUDA_CALL(cudaFree(mD_parts_dens), "Error freeing memory for particle densities")
	mD_parts_dens = NULL;
}

// (Re)allocates the per-particle device arrays for inParts.size() particles
// and uploads the particle data.  Any previous arrays are released first.
// NOTE(review): m_part_num is not updated here -- presumably the caller sets
// it; verify against the call sites.
void CudaSPH::AllocateParticlesDevice( const std::vector< CudaParticle > & inParts ){
	DeallocateParticles();
	
	unsigned int totParts = inParts.size();

	// Nothing to allocate or upload for an empty set; also avoids the
	// undefined &inParts[0] on an empty vector below.
	if (totParts == 0){
		return;
	}

	DBG_CUDA_CALL(
		cudaMalloc(&mD_parts, sizeof(CudaParticle)*totParts),
		"Error allocating memory for particles" )

	DBG_CUDA_CALL(
		cudaMalloc(&mD_parts_press, sizeof(float)*totParts),
		"Error allocating pressure memory for particles" )

	DBG_CUDA_CALL(
		cudaMalloc(&mD_parts_dens, sizeof(float)*totParts),
		"Error allocating density memory for particles" )

	DBG_CUDA_CALL(
		cudaMalloc(&mD_parts_gNdx, sizeof(int)*totParts),
		"Error allocating memory for particle grid ndxs" )

	// 3 floats (x, y, z) of force per particle.
	DBG_CUDA_CALL(
		cudaMalloc(&mD_parts_forces, sizeof(float)*totParts*3),
		"Error allocating memory for particle forces" );

	// Transfer data from host to device.
	DBG_CUDA_CALL(
		cudaMemcpy(mD_parts, &inParts[0], sizeof(CudaParticle)*totParts, cudaMemcpyHostToDevice),
		"Error transfering particles positions to device.")
}

// Copies the per-particle force array (3 floats per particle) to the host.
// outForce is resized to 3 * particle count; empty when there are no particles.
void CudaSPH::GetForces(std::vector<float> & outForce ) {
	outForce.resize(m_part_num*3);
	if (m_part_num <= 0){ return; }

	DBG_CUDA_CALL(
		cudaMemcpy(&outForce[0], mD_parts_forces, m_part_num*sizeof(float)*3, cudaMemcpyDeviceToHost),
		"Error transfering device particle forces to host." );
}

// Copies the per-particle pressure array back from the device.
// outPres is resized to the particle count; empty when there are no particles.
void CudaSPH::GetPress(std::vector<float> & outPres ) {
	outPres.resize(m_part_num);
	if (m_part_num <= 0){
		return;
	}

	const size_t numBytes = m_part_num * sizeof(float);
	DBG_CUDA_CALL(
		cudaMemcpy(&outPres[0], mD_parts_press, numBytes, cudaMemcpyDeviceToHost),
		"Error transfering device particle pressure to host." );
}

// Copies the per-particle density array back from the device.
// outDens is resized to the particle count; empty when there are no particles.
void CudaSPH::GetDens(std::vector<float> & outDens ) {
	outDens.resize(m_part_num);
	if (m_part_num <= 0){
		return;
	}

	const size_t numBytes = m_part_num * sizeof(float);
	DBG_CUDA_CALL(
		cudaMemcpy(&outDens[0], mD_parts_dens, numBytes, cudaMemcpyDeviceToHost),
		"Error transfering device particle density to host." );
}


// Copies the full particle structs back to the host.
// outParts is resized to the particle count; empty when there are no particles.
void CudaSPH::GetParticles(std::vector<CudaParticle> & outParts){
	outParts.resize(m_part_num);
	
	if (m_part_num <= 0){ return; }

	DBG_CUDA_CALL(
		cudaMemcpy(&outParts[0], mD_parts, m_part_num*sizeof(CudaParticle), cudaMemcpyDeviceToHost),
		"Error transfering device particles to host." )
}

// Copies the octree surface-tag array (oEoF) back to the host.
void CudaSPH::GetOctreeEoF(std::vector<unsigned int> & outOctreeEoF) {
	outOctreeEoF.resize(m_totOctreeCells);

	// Guard: &outOctreeEoF[0] on an empty vector is undefined behavior.
	if (m_totOctreeCells == 0){
		return;
	}

	DBG_CUDA_CALL(
		cudaMemcpy(&outOctreeEoF[0], mD_octree_EoF, sizeof(unsigned int)*this->m_totOctreeCells, cudaMemcpyDeviceToHost),
		"Error copying octreeEoF to host");
}

// Returns the octree level count stored in device constant memory.
unsigned int CudaSPH::GetOctreeNumLevs() {
	unsigned int num;

	// Pass the symbol itself: the string-name cudaMemcpyFromSymbol overload
	// was removed in CUDA 5.0.
	DBG_CUDA_CALL(
		cudaMemcpyFromSymbol(&num, m_octree_num_levs, sizeof(unsigned int)),
		"Error getting constant m_octree_num_levs" );

	return num;
}

// Copies the per-cell interval-end array to the host, sizing the output from
// the grid resolution stored in constant memory.
void CudaSPH::GetGridEnd( std::vector<int> & outGrid){
	int numCells[3] = {0,0,0};

	// Pass the symbol itself: string-name lookup was removed in CUDA 5.0.
	DBG_CUDA_CALL(cudaMemcpyFromSymbol(numCells, m_numCells, sizeof(int)*3, 0, cudaMemcpyDeviceToHost),
		"Error retrieving m_numCells from constant memory");

	int tot = numCells[0]*numCells[1]*numCells[2];
	outGrid.resize(tot);

	// Guard: &outGrid[0] on an empty vector is undefined behavior.
	if (tot <= 0){
		return;
	}

	DBG_CUDA_CALL(
		cudaMemcpy(&outGrid[0], mD_grid_end, sizeof(int)*tot, cudaMemcpyDeviceToHost),
		"Error transfering device grid end ndx to host." )
}


// Copies the per-cell interval-start array to the host, sizing the output
// from the grid resolution stored in constant memory.
void CudaSPH::GetGridNdx( std::vector<int> & outGrid){
	int numCells[3] = {0,0,0};

	// Pass the symbol itself: string-name lookup was removed in CUDA 5.0.
	DBG_CUDA_CALL(cudaMemcpyFromSymbol(numCells, m_numCells, sizeof(int)*3, 0, cudaMemcpyDeviceToHost),
		"Error retrieving m_numCells from constant memory");

	int tot = numCells[0]*numCells[1]*numCells[2];
	outGrid.resize(tot);

	// Guard: &outGrid[0] on an empty vector is undefined behavior.
	if (tot <= 0){
		return;
	}

	DBG_CUDA_CALL(
		cudaMemcpy(&outGrid[0], mD_grid_ndx, sizeof(int)*tot, cudaMemcpyDeviceToHost),
		"Error transfering device grid start ndx to host." )
}

// Spreads the low 10 bits of v so that bit i lands at bit 3i
// (standard Morton-code bit dilation; valid for v in [0, 1023]).
__device__ __host__ static inline int SpreadBits3( int v ){
	v = (v | (v << 16)) & 0x030000FF;
	v = (v | (v << 8))  & 0x0300F00F;
	v = (v | (v << 4))  & 0x030C30C3;
	v = (v | (v << 2))  & 0x09249249;
	return v;
}

// Interleaves the low 10 bits of a, b and c into a 30-bit Morton (Z-order)
// code: bit i of a ends up at bit 3i, of b at bit 3i+1, of c at bit 3i+2.
__device__ __host__ int Interleave3i ( int a, int b, int c ){
	return SpreadBits3(a) | (SpreadBits3(b) << 1) | (SpreadBits3(c) << 2);
}

// Maps a world-space position to its flat cell index in the uniform grid
// (x-fastest layout), or -1 when the position lies outside the grid.
__device__ int D_GetCartGridNdx(const  float * pos ){
	// Position relative to the grid origin.
	float relP[3] = {
		pos[0] - m_gridMin[0],
		pos[1] - m_gridMin[1],
		pos[2] - m_gridMin[2]
	};

	// Grid extent per axis.  relP is origin-relative, so it must be tested
	// against the extent alone -- the previous code compared it against
	// gridMin + extent, which is wrong whenever gridMin != 0.
	float ext[3] = {
		m_cellWidth[0]*m_numCells[0],
		m_cellWidth[1]*m_numCells[1],
		m_cellWidth[2]*m_numCells[2]};

	// GRID_EPSILON admits points sitting exactly on the max boundary.
	if (relP[0] < 0 || relP[0] >= ext[0] + GRID_EPSILON ||
		relP[1] < 0 || relP[1] >= ext[1] + GRID_EPSILON ||
		relP[2] < 0 || relP[2] >= ext[2] + GRID_EPSILON ){

		return -1;
	}

	// Clamp so a point inside the epsilon band at the max boundary maps to
	// the last cell instead of one past it.
	int cx = min((int)floor(relP[0]/m_cellWidth[0]), m_numCells[0]-1);
	int cy = min((int)floor(relP[1]/m_cellWidth[1]), m_numCells[1]-1);
	int cz = min((int)floor(relP[2]/m_cellWidth[2]), m_numCells[2]-1);

	return cx + cy*m_numCells[0] + cz*m_numCells[0]*m_numCells[1];
}

// One thread per particle: writes each particle's Z-order (Morton) grid key
// into inPGNdx, or -1 for particles outside the grid.
__global__ void K_AssignGNdx ( CudaParticle * inParts, 
	int * inPGNdx, 
	int numParts ){
	
	unsigned int ndx = blockIdx.x*blockDim.x + threadIdx.x;
	if (ndx >= numParts)
		return;

	CudaParticle * p = &inParts[ndx];
	// Position relative to the grid origin.
	float relP[3] = {
		p->pos[0] - m_gridMin[0], 
		p->pos[1] - m_gridMin[1], 
		p->pos[2] - m_gridMin[2]
	};

	// relP is origin-relative, so compare against the grid extent (plus the
	// max-boundary epsilon).  The old test added m_gridMin a second time,
	// which was wrong for grids not anchored at the origin.
	if (relP[0] < 0 || relP[0] >= m_cellWidth[0]*m_numCells[0] + GRID_EPSILON ||
		relP[1] < 0 || relP[1] >= m_cellWidth[1]*m_numCells[1] + GRID_EPSILON ||
		relP[2] < 0 || relP[2] >= m_cellWidth[2]*m_numCells[2] + GRID_EPSILON ){

		inPGNdx[ndx] = -1;
	}
	else {
		// Convert the integer cell coordinates to a Z-order (Morton) key.
		inPGNdx[ndx] = Interleave3i( 
			floor(relP[0]/m_cellWidth[0]), 
			floor(relP[1]/m_cellWidth[1]),
			floor(relP[2]/m_cellWidth[2])
		); 
	}

	return;
}

// One thread per particle: writes the [start, end) particle interval of each
// occupied grid cell into inGNdx / inGEnd.  inPGNdx must be the key array the
// particles were just sorted by, so equal keys are contiguous runs.
// Also debits the minimum sub-timestep from every particle's clock.
__global__ void K_FillGrid(int * inPGNdx, CudaParticle * inParts, int * inGNdx, int *inGEnd, unsigned int numParts )
{
	unsigned int ndx = blockIdx.x*blockDim.x + threadIdx.x;
	if (ndx >= numParts)
		return;

	// Smallest per-level timestep (deepest octree level, OTLEV_MULT < 0);
	// every particle's clock advances by this amount each frame.
	float minTs = m_ts + OTLEV_MULT*(m_ts*m_octree_num_levs);
	inParts[ndx].currTime -= minTs;

	int gNdx = inPGNdx[ndx];                                  // Z-order key
	int physGndx = D_GetCartGridNdx( &inParts[ndx].pos[0] );  // cartesian cell

	// XXX Should we map back to the original grid.
	// Points are sorted via z order and assigned their morton grid ndx.
	// But should we assign the particles to that same ndx in the physical
	// array or should we map back to our "cartesian" physical array?

	// Skip out-of-grid particles.  physGndx is checked as well: indexing the
	// grid arrays with -1 would write out of bounds.
	if (gNdx == -1 || physGndx < 0){
		return;
	}

	// Interval start: first particle of a new key run, or the array head.
	if (ndx != 0){
		if (inPGNdx[ndx-1] != gNdx) {
			inGNdx[physGndx] = ndx;
		}
	} else {
		inGNdx[physGndx] = 0;
	}
		
	// Interval end: one past the last particle of a key run, or the tail.
	if (ndx != numParts - 1){
		if (inPGNdx[ndx+1] != gNdx){
			inGEnd[physGndx] = ndx+1;
		}
	} else {
		inGEnd[physGndx] = numParts;
	}
}

// XXX Integrating over each grid cell is slower than launching 1 thread per particle, but 
// less complicated ...

// Fills neighbor cells.
// Selects the 2x2 block of grid cells (in the XY plane) nearest to pos: the
// particle's own cell plus its x, y and diagonal neighbors on whichever side
// of the cell the particle sits.  outN must hold 4 ints; outN[3] is gNdx.
// NOTE(review): indices are not clamped to the grid, so boundary cells can
// produce out-of-range or row-wrapped neighbors -- callers must bounds-check
// (see the XXX comments at the call sites).
__device__ void D_GetNeighbors2Rad4( float * pos, int gNdx, float * cellLowerC, int * outN){
	// Step toward whichever half of the cell the particle occupies.
	int stepX = ((pos[0]-cellLowerC[0])/m_cellWidth[0] < 0.5) ? -1 : 1;
	int stepY = ((pos[1]-cellLowerC[1])/m_cellWidth[1] < 0.5) ? -m_numCells[0] : m_numCells[0];

	outN[0] = gNdx + stepX;
	outN[1] = gNdx + stepY;
	outN[2] = gNdx + stepX + stepY;
	outN[3] = gNdx;
}

// Fills outN (9 ints) with the flat indices of the full 3x3 neighborhood of
// gNdx in the XY plane, row by row (y-1 row, then y, then y+1), matching the
// original explicit layout.  pos and cellLowerC are unused; they are kept for
// signature parity with D_GetNeighbors2Rad4.
// NOTE(review): indices are not clamped to the grid -- callers bounds-check.
__device__ void D_GetNeighbors2Rad9( float * pos, int gNdx, float * cellLowerC, int * outN){
	int k = 0;
	for (int dy = -1; dy <= 1; ++dy){
		for (int dx = -1; dx <= 1; ++dx){
			outN[k++] = gNdx + dy*m_numCells[0] + dx;
		}
	}
}

//XXX Can make sure each block has it's onwn memory share with others around it


// Each grid cell
__global__ void K_IntegrateGridPressure (CudaParticle * parts,
	int numParts,
	int * gridNdx, 
	int * gridEnd, 
	float mass, 
	float * outPress, 
	float * outDens) 
{
	unsigned int pndx = blockIdx.x*blockDim.x + threadIdx.x;
	if (pndx >= numParts){
		return;
	}


	//if (parts[pndx].cntr > 0){
	//	return;
	//}
	//
	int totCells = m_numCells[0]*m_numCells[1]*m_numCells[2];
	// Get grid cell ndx
	glm::vec3 & pos = parts[pndx].pos;

	int ndx = D_GetCartGridNdx(&pos[0]);

	int pgndx = gridNdx[ndx];
	int pgend = gridEnd[ndx];

	int nGndx[4];

	float lowerC[3] = 
	{
		(ndx % m_numCells[0])*m_cellWidth[0],
		(ndx / m_numCells[0])*m_cellWidth[1],
		0
	};

	// Iterate over each particle
	// Once for pressure
	// next for force computation

	/// Pressure
	float pres;
	float dens = 0;

	// Retrieves the neighbor grid cells.
	D_GetNeighbors2Rad4(&pos[0], ndx,lowerC, nGndx);
	

	// XXX This has a bug, but should be fine for now...it will get gridcells on the other side on bondaries.
	// check when implementing octree
	for (int j = 0; j < 4; ++j){
			
		if (nGndx[j] < 0 || nGndx[j] >= totCells){
			continue;
		}
		int nGNdx = nGndx[j];
		for (int p = gridNdx[nGNdx]; p < gridEnd[nGNdx]; ++p) { 
			float diff[3] = 
			{
				pos[0] - parts[p].pos[0],
				pos[1] - parts[p].pos[1],
				pos[2] - parts[p].pos[2]
			};

			float radsq = diff[0]*diff[0] + diff[1]*diff[1] + diff[2]*diff[2];
			float kern = KernelPoly6( radsq, m_supportRad );

			dens += mass * kern;
		}
	}

	outPress[pndx] = m_k * (pow(dens/m_restDens, m_gamma) - 1);
	outDens[pndx] = dens;


	__threadfence_system();
	/// Force computation
}

// One thread per particle: accumulates the pressure-gradient and viscosity
// forces from particles in the 2x2 neighborhood of grid cells into outF
// (3 floats per particle).  Requires the dPress/dDens arrays produced by
// K_IntegrateGridPressure.
__global__ void K_IntegrateGridForce(
	CudaParticle * parts,
	int numparts,
	int * gridNdx, 
	int * gridEnd, 
	float * dPress, 
	float * dDens,
	float mass,
	float * outF)
{
	
	unsigned int pndx = blockIdx.x*blockDim.x + threadIdx.x;

	if (pndx >= numparts){
		return;
	}

	int totCells = m_numCells[0]*m_numCells[1]*m_numCells[2];

	float * pos = &parts[pndx].pos[0];
	float * vel = &parts[pndx].vel[0];

	float * f = &outF[pndx*3];
	f[0] = f[1] = f[2] = 0.0f;

	int ndx = D_GetCartGridNdx(pos);

	// Outside the grid: leave the force at zero.  (Also keeps gridNdx from
	// being indexed with -1.)
	if (ndx < 0){
		return;
	}

	// World-space lower corner of the particle's cell (gridMin included and
	// the y row index wrapped -- the previous form was origin-anchored only).
	float lowerC[3] = 
	{
		m_gridMin[0] + (ndx % m_numCells[0])*m_cellWidth[0],
		m_gridMin[1] + ((ndx / m_numCells[0]) % m_numCells[1])*m_cellWidth[1],
		m_gridMin[2] + (ndx / (m_numCells[0]*m_numCells[1]))*m_cellWidth[2]
	};

	// Retrieve the neighboring grid cells (2x2 block in the XY plane).
	int nGndx[4];
	D_GetNeighbors2Rad4(pos, ndx, lowerC, nGndx);

	float pres = dPress[pndx];

	float diff[3];
	float diffv[3];
	float normdiffpos[3];

	// XXX Neighbor cells are not clamped at grid boundaries (can read cells
	// from the opposite side); revisit when the octree lands.
	for (int j = 0; j < 4; ++j){
		if (nGndx[j] < 0 || nGndx[j] >= totCells){
			continue;
		}

		for (int p = gridNdx[nGndx[j]]; p < gridEnd[nGndx[j]]; ++p) { 
			float sampleddens = dDens[p];
			float sampledpres = dPress[p];

			for (int c = 0; c < 3; ++c){
				diff[c] = pos[c] - parts[p].pos[c];    // r_i - r_j
				diffv[c] = parts[p].vel[c] - vel[c];   // v_j - v_i
			}

			float r = sqrtf(diff[0]*diff[0] + diff[1]*diff[1] + diff[2]*diff[2]);

			// Unit direction from neighbor to particle (zero if coincident).
			for (int c = 0; c < 3; ++c){
				normdiffpos[c] = (r != 0.0f) ? diff[c]/r : 0.0f;
			}

			// Guard the density division the same way the viscosity term
			// already did; dDens entries can be zero.
			float safedens = (sampleddens == 0.0f) ? 1.0f : sampleddens;

			// Pressure-gradient force (symmetrized pressure, spiky kernel).
			for (unsigned int c = 0; c < 3; ++c){
				f[c] += -1*
					(( mass * (sampledpres + pres) *
					(1.0f/(2.0f*safedens) ))* 
					normdiffpos[c] * 
					KernelSpikeyGrad(r, m_supportRad));
			}

			// Viscosity force (Laplacian of the viscosity kernel).
			for (unsigned int c = 0; c < 3; ++c){
				f[c] += m_visc *
					(mass * (diffv[c]/safedens)*
					KernelViscosityLaplac(r, m_supportRad));
			}
		}//endfor particles in neighbor cell
		
	} //endfor neighbor cells
}

// One thread per particle: semi-implicit Euler step.  Applies force and
// gravity to the velocity, advances the position, and clamps both so the
// particle stays inside the grid (the velocity component on a clamped axis is
// zeroed).  Particles only step once their sub-timestep clock (currTime) has
// elapsed; under TESTING the per-particle timestep shrinks with octree depth.
__global__ void K_TakeStep(
	CudaParticle * inParts,
	int numParts,
	float * inForces,
	float mass){

	unsigned int pndx = blockIdx.x*blockDim.x + threadIdx.x;
	
	if (pndx >= numParts){
		return;
	}

	// XXX otLev == 0 means only subdivided once (second level of tree).
	int otLev = inParts[pndx].otLev;

#ifdef TESTING
	// Deeper particles take smaller steps (OTLEV_MULT is negative).
	float ts = m_ts + OTLEV_MULT*m_ts*otLev;
#else
	float ts = m_ts;
#endif

	if (inParts[pndx].currTime <= 0) {
		float timeToTake = ts;

		// The particle changed octree level mid-interval: take only the
		// remaining time so it re-synchronizes with its new level.
		if (inParts[pndx].currTime < 0 &&
			inParts[pndx].lastLev != otLev){
			timeToTake = fabsf(inParts[pndx].currTime);
		}

		for (int c = 0; c < 3; ++c){
			inParts[pndx].vel[c] += (inForces[pndx*3 + c]/mass) * timeToTake;
			if (c==1){
				inParts[pndx].vel[c] += -9.81f*timeToTake; // gravity on y
			}

			inParts[pndx].pos[c] += inParts[pndx].vel[c]*timeToTake;
			
			//XXX Crude boundary handling: clamp to the grid and kill the
			// velocity component instead of reflecting.
			if (inParts[pndx].pos[c] < m_gridMin[c] ){
				inParts[pndx].pos[c] = m_gridMin[c];
				inParts[pndx].vel[c] = 0.0f;
			}
			if (
				inParts[pndx].pos[c] >= (m_gridMin[c] + m_cellWidth[c]*m_numCells[c]))
			{
				inParts[pndx].pos[c] = m_gridMin[c] + m_cellWidth[c]*m_numCells[c] - 0.0001f;
				inParts[pndx].vel[c] = 0.0f;
			}
		}
		inParts[pndx].currTime += timeToTake;
	}
	// No fence needed: global writes are visible after kernel completion.
}

// TODO: unimplemented stub -- no caller in this file yet.  Returns 0 so the
// function has a defined result; falling off the end of a non-void function
// is undefined behavior.
__device__ int D_GetNumFillNeighbors ( 
	int oNdx ) {
	return 0;
}

// One thread per particle: counts, from the leaf up to the root, how many
// octree levels have this particle's cell tagged in oEoF, and stores that
// count as the particle's octree level (otLev); the previous level is kept
// in lastLev for timestep resynchronization.
__global__ void K_MarkParts( 
	CudaParticle * inParts,
	int numParts,
	int * inPGNdx,
	unsigned int * oEoF,
	bool isFirst){


	unsigned int pndx = blockIdx.x*blockDim.x + threadIdx.x;
	
	if (pndx >= numParts){
		return;
	}

	// Tick down the per-particle update counter; only particles whose
	// counter reached zero (or below) are re-leveled this pass.
	// On first run, cntr is 0 anyways, so shouldn't be a problem.
	int lastC = inParts[pndx].cntr;
	inParts[pndx].cntr--;
	if (lastC > 0){
		return;
	}

	// Leaf cell of this particle in the linearized octree.  -1 marks an
	// out-of-grid particle; indexing oEoF with it would read out of bounds.
	int oNdx = inPGNdx[pndx];
	if (oNdx < 0){
		return;
	}

	// Count tagged levels, starting at the leaf.
	int numPartTag = 0;
	if (oEoF[oNdx] != 0){
		numPartTag++;
	}

	// Level l holds 8^l cells (8^l == 1 << 3l -- exact integer arithmetic
	// instead of float pow()).  scLev is the running offset of level cLev's
	// slab inside the linearized array; walk from the level above the leaves
	// up to the root.
	int scLev = 1 << (3*((int)m_octree_num_levs - 1));
	for (int cLev = (int)m_octree_num_levs - 2; cLev >= 0; --cLev ){

		oNdx = oNdx >> 3;

		if (oEoF[scLev + oNdx] != 0){
			numPartTag++;
		}

		scLev += 1 << (3*cLev);
	}

	inParts[pndx].lastLev = inParts[pndx].otLev;
	inParts[pndx].otLev = numPartTag;

	// XXX adaptive per-level counters are disabled for now: every particle
	// updates each pass (isFirst is unused while this holds).
	inParts[pndx].cntr = 0;
}

// One thread per particle: inspects the 3x3 neighborhood of the particle's
// grid cell and, when the neighborhood is mixed (some occupied/off-grid,
// some empty), tags the particle's leaf octree cell and every ancestor level
// in oEoF as surface.
__global__ void K_FillOctree( CudaParticle * inParts, int numParts,
	int * inPGNdx, int * gStart, int * gEnd,
	unsigned int * oSndx,
	unsigned int * oEndx, unsigned int * oEoF ){

	unsigned int pndx = blockIdx.x*blockDim.x + threadIdx.x;
	
	if (pndx >= numParts){
		return;
	}

	// Leaf (Z-order) index and cartesian grid index of this particle.
	// Out-of-grid particles (-1) are skipped: they have no cell to tag.
	int oNdx = inPGNdx[pndx];
	int physGndx = D_GetCartGridNdx( &inParts[pndx].pos[0] );
	if (oNdx < 0 || physGndx < 0){
		return;
	}

	// World-space lower corner of the particle's cell (gridMin included and
	// the y row index wrapped -- the previous form was origin-anchored only).
	float lowerC[3] = 
	{
		m_gridMin[0] + (physGndx % m_numCells[0])*m_cellWidth[0],
		m_gridMin[1] + ((physGndx / m_numCells[0]) % m_numCells[1])*m_cellWidth[1],
		m_gridMin[2] + (physGndx / (m_numCells[0]*m_numCells[1]))*m_cellWidth[2]
	};
	int nGndx[9];
	D_GetNeighbors2Rad9(&inParts[pndx].pos[0], physGndx, lowerC, nGndx);
	int totCells = m_numCells[0]*m_numCells[1]*m_numCells[2];
	int num = 0;
	
	for (int i = 0; i < 9; ++i){

		// Bounds-test FIRST: the old code indexed gStart with an unchecked
		// neighbor index, reading out of bounds at grid boundaries.
		// Counts neighbors that are off-grid or occupied (gStart != -1 means
		// the cell holds particles; the original comment said "empty" --
		// TODO confirm the intended polarity).
		if (nGndx[i] < 0 ||
			nGndx[i] >= totCells ||
			gStart[nGndx[i]] != -1){
			num++;
		}
	}

	// Mixed neighborhood => surface: tag the leaf and all ancestor levels.
	if (num < 9 && num > 0){
		atomicOr(&oEoF[oNdx], 0x01);

		// Level l holds 8^l cells (8^l == 1 << 3l -- integer arithmetic
		// instead of float pow()).  scLev tracks each level's slab offset.
		int scLev = 1 << (3*((int)m_octree_num_levs - 1));
		for (int cLev = (int)m_octree_num_levs - 2; cLev >= 0; --cLev ){

			oNdx = oNdx >> 3;

			atomicOr(&oEoF[scLev + oNdx], 0x01);

			scLev += 1 << (3*cLev);
		}
	}
}


// Returns the arithmetic mean of inVect, or 0 for an empty vector (the
// previous version divided by zero on empty input).
float CudaSPH::GetAverageSerial( const std::vector<float> & inVect ) {
	if (inVect.empty()){
		return 0.0f;
	}
	float sum = 0;
	for (size_t i = 0; i < inVect.size(); ++i){
		sum += inVect[i];
	}
	return sum / inVect.size();
}

// Zeroes all three linearized octree arrays on the device.
void CudaSPH::ClearOctree() {
	const size_t numBytes = sizeof(unsigned int) * m_totOctreeCells;

	DBG_CUDA_CALL(
		cudaMemset(mD_octree_EoF, 0, numBytes),
		"Error trying to set octree EoF to 0");

	DBG_CUDA_CALL(
		cudaMemset(mD_octree_sndx, 0, numBytes),
		"Error trying to set octree sndx to 0");

	DBG_CUDA_CALL(
		cudaMemset(mD_octree_eend, 0, numBytes),
		"Error trying to set octree eend to 0");
}

// Advances the simulation by one frame:
//   1. clear the octree and grid interval arrays,
//   2. assign Z-order keys and sort the particles by key,
//   3. rebuild the grid intervals and octree surface tags,
//   4. compute density/pressure, then forces, then integrate.
// On the very first call only the rest density is computed (average density
// of the initial configuration) and the force/integration steps are skipped.
void CudaSPH::Update( )
{
	bool wasFirst = m_firstRun;

	// XXX Expensive. See what we can do about mirroring constant device memory to host memory
	int numCells[3] = {0,0,0};
	// Pass the symbol itself: string-name lookup was removed in CUDA 5.0.
	DBG_CUDA_CALL(cudaMemcpyFromSymbol(numCells, m_numCells, sizeof(int)*3, 0, cudaMemcpyDeviceToHost),
		"Error retrieving m_numCells from constant memory");

	// Empty the octree.
	ClearOctree();

	// Integer cell count (previously a float, which made the memset byte
	// counts float expressions).
	int totGridCells = numCells[0]*numCells[1]*numCells[2];

	// -1 marks an empty cell (memset writes 0xFF into every byte).
	DBG_CUDA_CALL(cudaMemset(mD_grid_ndx, -1, sizeof(int)*totGridCells),
		"Error setting mD_grid_ndx to -1 before filling the grid");

	DBG_CUDA_CALL(cudaMemset(mD_grid_end, -1, sizeof(int)*totGridCells),
		"Error setting mD_grid_end to -1 before filling the grid");

	if (m_part_num == 0){
		return;
	}

	// Launch configuration: one thread per particle, ceil-divided block
	// count (the old "/ ntb + 1" always launched one extra block).
	int ntb = 512;
	int nb_p = (m_part_num + ntb - 1) / ntb;

	// Assign per-particle Z-order grid keys.
	K_AssignGNdx<<< nb_p, ntb >>>(mD_parts, mD_parts_gNdx, m_part_num);
	DBG_CUDA_CHECK_ERRORS

	// Sort the particles by key so each grid cell becomes a contiguous
	// interval of the particle array.
	thrust::device_ptr<int> dparts_gndx = thrust::device_pointer_cast<int>(mD_parts_gNdx);
	thrust::device_ptr<CudaParticle> dparts = thrust::device_pointer_cast<CudaParticle>(mD_parts);
	thrust::sort_by_key(dparts_gndx, dparts_gndx + m_part_num, dparts);

	//XXX Mirror constant variables

	K_FillGrid<<< nb_p, ntb >>> (
		mD_parts_gNdx, mD_parts, mD_grid_ndx, mD_grid_end, m_part_num
	);
	DBG_CUDA_CHECK_ERRORS

	K_FillOctree<<< nb_p, ntb >>> (
		mD_parts, m_part_num, 
		mD_parts_gNdx, mD_grid_ndx, mD_grid_end,
		mD_octree_sndx, mD_octree_eend, mD_octree_EoF 
	);
	DBG_CUDA_CHECK_ERRORS

	K_MarkParts<<< nb_p, ntb >>> (
		mD_parts, m_part_num, mD_parts_gNdx, mD_octree_EoF, wasFirst
	);
	DBG_CUDA_CHECK_ERRORS

	// On the first run m_restDens is still 0, so the pressure output of this
	// pass is unusable; only the densities are consumed below.
	K_IntegrateGridPressure<<< nb_p, ntb >>> (
		mD_parts,
		m_part_num,
		mD_grid_ndx, 
		mD_grid_end,
		m_mass, 
		mD_parts_press,
		mD_parts_dens);
	DBG_CUDA_CHECK_ERRORS

	// First run: derive the rest density from the average density of the
	// initial configuration, then stop (no forces / no integration).
	if (m_firstRun){
		std::vector<float> dens( m_part_num );

		DBG_CUDA_CALL( cudaMemcpy( &dens[0], mD_parts_dens, m_part_num*sizeof(float), cudaMemcpyDeviceToHost),
			"Error getting density during init to compute rest density\n");
		
		float restDens = GetAverageSerial( dens );

		DBG_CUDA_CALL( cudaMemcpyToSymbol(m_restDens, &restDens, sizeof(float) ),
			"Error copying rest density to constant variable." );

		m_firstRun = false;
	
		return;
	}

	K_IntegrateGridForce<<< nb_p, ntb >>> (
		mD_parts,
		m_part_num,
		mD_grid_ndx, 
		mD_grid_end,
		mD_parts_press,
		mD_parts_dens,
		m_mass, 
		mD_parts_forces);
	DBG_CUDA_CHECK_ERRORS

	// Take a timestep.
	K_TakeStep<<<nb_p, ntb>>>( 
		mD_parts,
		m_part_num,
		mD_parts_forces,
		m_mass);
	DBG_CUDA_CHECK_ERRORS
}


