
#include "common_header.h"
#include "solver_lcpcg_cuda.h"
#include "helpers/timer.h"

#include <float.h>
#include <stdio.h>

//////////////////////////////////////////////////////////////////////////
#include "fem/joints.h"
//////////////////////////////////////////////////////////////////////////

// Builds one 3-column slice of the sparse system matrix A = dt * M^-1 * J^T
// for joint row `i`, and folds the corresponding -J * Ftot contribution into
// the right-hand side.
//
//   j          - index (0..5) of the jointed-node pointer array to read the
//                node index from (token-pasted into ptr_m_JtdNodes_0##j)
//   t0, t1, t2 - two-digit suffixes of the three Jacobian / A-sparse column
//                arrays that form this triple (e.g. 00,01,02)
//
// Expansion-site requirements: locals `i` (joint row), `idx` (scratch node
// index) and `dt` must be in scope, along with all ptr_m_J_##, ptr_m_Asp_##,
// ptr_m_NodeInvMass_## and ptr_m_Ftot_* traversal pointers.
// NOTE(review): `val_arr` / `inc_arr` are helper macros defined elsewhere
// (presumably dereference / dereference-then-advance of the ptr_ aliases) —
// confirm against common_header.h.
#define FORM_A_SPARSE(j, t0, t1, t2)\
			idx = ptr_m_JtdNodes_0##j[i];\
			\
			val_arr(m_RHS) -= (	ptr_m_J_##t0[i] * ptr_m_Ftot_x[idx] + \
								ptr_m_J_##t1[i] * ptr_m_Ftot_y[idx] + \
								ptr_m_J_##t2[i] * ptr_m_Ftot_z[idx] );\
			\
			inc_arr( m_Asp_##t0 ) = dt * \
				( ptr_m_NodeInvMass_00[idx] * ptr_m_J_##t0[i] \
				+ ptr_m_NodeInvMass_01[idx] * ptr_m_J_##t1[i] \
				+ ptr_m_NodeInvMass_02[idx] * ptr_m_J_##t2[i] );\
			\
			inc_arr( m_Asp_##t1 ) = dt * \
				( ptr_m_NodeInvMass_10[idx] * ptr_m_J_##t0[i] \
				+ ptr_m_NodeInvMass_11[idx] * ptr_m_J_##t1[i] \
				+ ptr_m_NodeInvMass_12[idx] * ptr_m_J_##t2[i] );\
			\
			inc_arr( m_Asp_##t2 ) = dt * \
				( ptr_m_NodeInvMass_20[idx] * ptr_m_J_##t0[i] \
				+ ptr_m_NodeInvMass_21[idx] * ptr_m_J_##t1[i] \
				+ ptr_m_NodeInvMass_22[idx] * ptr_m_J_##t2[i] );

// Advances every per-joint traversal pointer (A-sparse columns and jointed
// node indices) by one element.  The number of pointers depends on how many
// node triples a joint row may reference (NUM_JOINT_TRIPLES), so the macro
// is defined per configuration:
//   - 4 triples: 12 Asp columns (3 per triple) + 4 node-index arrays
//   - 6 triples: 18 Asp columns (3 per triple) + 6 node-index arrays
// All ptr_m_Asp_## / ptr_m_JtdNodes_## locals must be in scope at the
// expansion site.
#if (NUM_JOINT_TRIPLES == 4)

#define ADVANCE_A_SPARSE_NODES_IDX()\
		++ptr_m_Asp_00;\
		++ptr_m_Asp_01;\
		++ptr_m_Asp_02;\
		++ptr_m_Asp_03;\
		++ptr_m_Asp_04;\
		++ptr_m_Asp_05;\
		++ptr_m_Asp_06;\
		++ptr_m_Asp_07;\
		++ptr_m_Asp_08;\
		++ptr_m_Asp_09;\
		++ptr_m_Asp_10;\
		++ptr_m_Asp_11;\
		++ptr_m_JtdNodes_00;\
		++ptr_m_JtdNodes_01;\
		++ptr_m_JtdNodes_02;\
		++ptr_m_JtdNodes_03;

#elif (NUM_JOINT_TRIPLES == 6)

#define ADVANCE_A_SPARSE_NODES_IDX()\
		++ptr_m_Asp_00;\
		++ptr_m_Asp_01;\
		++ptr_m_Asp_02;\
		++ptr_m_Asp_03;\
		++ptr_m_Asp_04;\
		++ptr_m_Asp_05;\
		++ptr_m_Asp_06;\
		++ptr_m_Asp_07;\
		++ptr_m_Asp_08;\
		++ptr_m_Asp_09;\
		++ptr_m_Asp_10;\
		++ptr_m_Asp_11;\
		++ptr_m_Asp_12;\
		++ptr_m_Asp_13;\
		++ptr_m_Asp_14;\
		++ptr_m_Asp_15;\
		++ptr_m_Asp_16;\
		++ptr_m_Asp_17;\
		++ptr_m_JtdNodes_00;\
		++ptr_m_JtdNodes_01;\
		++ptr_m_JtdNodes_02;\
		++ptr_m_JtdNodes_03;\
		++ptr_m_JtdNodes_04;\
		++ptr_m_JtdNodes_05;

// NOTE(review): no #else fallback — any other NUM_JOINT_TRIPLES value leaves
// ADVANCE_A_SPARSE_NODES_IDX undefined and fails only at its first use.
#endif


// Set to 1 to record named timing checkpoints into the local
// TimerEventNames / TimerEventTimings arrays declared in Solve().
#define LOG_TIMERS 0

#if (LOG_TIMERS == 1)

// Records the time elapsed since the previous checkpoint under the given
// label.  Requires the locals PerfTimer, TimerStepCount, TimerEventNames
// and TimerEventTimings to be in scope at the expansion site.
//
// Fixes over the original:
//  - the label is routed through a "%s" format so a '%' in an event name is
//    no longer interpreted as a conversion specifier (undefined behavior);
//  - snprintf bounds the write to the destination slot (char[32]) instead of
//    an unbounded sprintf;
//  - wrapped in do { } while (0) so the multi-statement body expands to a
//    single statement and is safe inside un-braced if/else.
#define TIMER_MEASURE(timer_event_name) \
	do { \
		snprintf(TimerEventNames[TimerStepCount], \
				 sizeof(TimerEventNames[TimerStepCount]), \
				 "%s", timer_event_name); \
		TimerEventTimings[TimerStepCount] = (float)PerfTimer.Time(); \
		/* convert cumulative timer reading into a per-step delta */ \
		for (int timer_i = 0; timer_i < TimerStepCount; ++timer_i) \
			TimerEventTimings[TimerStepCount] -= TimerEventTimings[timer_i]; \
		++TimerStepCount; \
	} while (0)

#else

// Timing disabled: expand to a no-op that is still a single valid statement
// (keeps un-braced if/else call sites well-formed).
#define TIMER_MEASURE(timer_event_name) do { } while (0)

#endif

void SolverLCPCG_CUDA::Solve(float dt)
{
	Timer PerfTimer;
	PerfTimer.Start();

	int TimerStepCount = 0;
	float	TimerEventTimings[64];
	char	TimerEventNames[64][32];

	//////////////////////////////////////////////////////////////////////////

	unsigned i, idx;

	if (dt < 0.00001f)
		return;

	int offset;

	Prestep(dt);

	TIMER_MEASURE("Prestep");

	float	ACT_SET_THRESHOLD = 0.001f,
			ZERO_THRESHOLD = 0.0f;

	float BetaNorm, Phi_Phi;

	// Additional variables for calculations
	float tmp_float, tmp_float2;

	// Alpha_fucked should be in (0.0f, 2 * ||A||^-1];
	float alpha_fucked = 0.02f, alpha_cg, beta;

	float GAMMA = 1.0f;
	float GAMMA_Sq = GAMMA * GAMMA;

	// PTRs for quicker array traverse
	float *ptr_m_a_x;
	float *ptr_m_a_y;
	float *ptr_m_a_z;

	float	*ptr_m_Asp_00, *ptr_m_Asp_01, *ptr_m_Asp_02, *ptr_m_Asp_03, *ptr_m_Asp_04, *ptr_m_Asp_05,
			*ptr_m_Asp_06, *ptr_m_Asp_07, *ptr_m_Asp_08, *ptr_m_Asp_09, *ptr_m_Asp_10, *ptr_m_Asp_11;
	
#if (NUM_JOINT_TRIPLES == 6)

	float	*ptr_m_Asp_12, *ptr_m_Asp_13, *ptr_m_Asp_14, *ptr_m_Asp_15, *ptr_m_Asp_16, *ptr_m_Asp_17;

#endif

	float	*ptr_m_J_00, *ptr_m_J_01, *ptr_m_J_02, *ptr_m_J_03, *ptr_m_J_04, *ptr_m_J_05,
			*ptr_m_J_06, *ptr_m_J_07, *ptr_m_J_08, *ptr_m_J_09, *ptr_m_J_10, *ptr_m_J_11;
	
#if (NUM_JOINT_TRIPLES == 6)

	float	*ptr_m_J_12, *ptr_m_J_13, *ptr_m_J_14, *ptr_m_J_15, *ptr_m_J_16, *ptr_m_J_17;

#endif

	float *ptr_m_lambda;
	float *ptr_m_g, *ptr_m_p, *ptr_m_d;

	unsigned int *ptr_m_JtdNodes_00, *ptr_m_JtdNodes_01, *ptr_m_JtdNodes_02, *ptr_m_JtdNodes_03;

#if (NUM_JOINT_TRIPLES == 6)

	unsigned int *ptr_m_JtdNodes_04, *ptr_m_JtdNodes_05;

#endif

	float *ptr_m_J_rhs;
	float *ptr_m_ad_vec, *ptr_m_ap_vec;
	float *ptr_m_CFM;
	float *ptr_m_RHS;
	float *ptr_m_Lo, *ptr_m_Hi;

	float idt = 1.0f / dt;

	INIT_J_SPARSE(0);

	ptr_m_d = &m_d[0];

	INIT_A_SPARSE(0);

	ptr_m_RHS = &m_RHS[0];
	ptr_m_J_rhs = &m_J_rhs[0];

	INIT_NODES_IDX(0);


	//////////////////////////////////////////////////////////////////////////
	// Calculate nodes-joints linkage
	//////////////////////////////////////////////////////////////////////////
	unsigned int	*ptr_m_NodesJoints_00 = &m_NodesJoints_00[0],
					*ptr_m_NodesJoints_01 = &m_NodesJoints_01[0],
					*ptr_m_NodesJoints_02 = &m_NodesJoints_02[0],
					*ptr_m_NodesJoints_03 = &m_NodesJoints_03[0],
					*ptr_m_NodesJoints_04 = &m_NodesJoints_04[0],
					*ptr_m_NodesJoints_05 = &m_NodesJoints_05[0];

	unsigned int	*ptr_m_NodesJointsNum_00 = &m_NodesJointsNum_00[0],
					*ptr_m_NodesJointsNum_01 = &m_NodesJointsNum_01[0],
					*ptr_m_NodesJointsNum_02 = &m_NodesJointsNum_02[0],
					*ptr_m_NodesJointsNum_03 = &m_NodesJointsNum_03[0],
					*ptr_m_NodesJointsNum_04 = &m_NodesJointsNum_04[0],
					*ptr_m_NodesJointsNum_05 = &m_NodesJointsNum_05[0];

	unsigned int	*ptr_m_NodesJointsOffset_00 = &m_NodesJointsOffset_00[0],
					*ptr_m_NodesJointsOffset_01 = &m_NodesJointsOffset_01[0],
					*ptr_m_NodesJointsOffset_02 = &m_NodesJointsOffset_02[0],
					*ptr_m_NodesJointsOffset_03 = &m_NodesJointsOffset_03[0],
					*ptr_m_NodesJointsOffset_04 = &m_NodesJointsOffset_04[0],
					*ptr_m_NodesJointsOffset_05 = &m_NodesJointsOffset_05[0];

	for (i = 0; i < m_NumNodes; ++i)
	{
		*ptr_m_NodesJointsNum_00++ = 0;
		*ptr_m_NodesJointsNum_01++ = 0;
		*ptr_m_NodesJointsNum_02++ = 0;
		*ptr_m_NodesJointsNum_03++ = 0;
		*ptr_m_NodesJointsNum_04++ = 0;
		*ptr_m_NodesJointsNum_05++ = 0;

		*ptr_m_NodesJointsOffset_00++ = 0;
		*ptr_m_NodesJointsOffset_01++ = 0;
		*ptr_m_NodesJointsOffset_02++ = 0;
		*ptr_m_NodesJointsOffset_03++ = 0;
		*ptr_m_NodesJointsOffset_04++ = 0;
		*ptr_m_NodesJointsOffset_05++ = 0;
	}

	ptr_m_NodesJointsNum_00 = &m_NodesJointsNum_00[0];
	ptr_m_NodesJointsNum_01 = &m_NodesJointsNum_01[0];
	ptr_m_NodesJointsNum_02 = &m_NodesJointsNum_02[0];
	ptr_m_NodesJointsNum_03 = &m_NodesJointsNum_03[0];
	ptr_m_NodesJointsNum_04 = &m_NodesJointsNum_04[0];
	ptr_m_NodesJointsNum_05 = &m_NodesJointsNum_05[0];

	ptr_m_NodesJointsOffset_00 = &m_NodesJointsOffset_00[0];
	ptr_m_NodesJointsOffset_01 = &m_NodesJointsOffset_01[0];
	ptr_m_NodesJointsOffset_02 = &m_NodesJointsOffset_02[0];
	ptr_m_NodesJointsOffset_03 = &m_NodesJointsOffset_03[0];
	ptr_m_NodesJointsOffset_04 = &m_NodesJointsOffset_04[0];
	ptr_m_NodesJointsOffset_05 = &m_NodesJointsOffset_05[0];

	for (i = 0; i < m_NumJoints; ++i)
	{
		if (*ptr_m_JtdNodes_00 > 0)
			++ptr_m_NodesJointsNum_00[*ptr_m_JtdNodes_00];

		if (*ptr_m_JtdNodes_01 > 0)
			++ptr_m_NodesJointsNum_01[*ptr_m_JtdNodes_01];

		if (*ptr_m_JtdNodes_02 > 0)
			++ptr_m_NodesJointsNum_02[*ptr_m_JtdNodes_02];

		if (*ptr_m_JtdNodes_03 > 0)
			++ptr_m_NodesJointsNum_03[*ptr_m_JtdNodes_03];

		if (*ptr_m_JtdNodes_04 > 0)
			++ptr_m_NodesJointsNum_04[*ptr_m_JtdNodes_04];

		if (*ptr_m_JtdNodes_05 > 0)
			++ptr_m_NodesJointsNum_05[*ptr_m_JtdNodes_05];

		++ptr_m_JtdNodes_00;
		++ptr_m_JtdNodes_01;
		++ptr_m_JtdNodes_02;
		++ptr_m_JtdNodes_03;
		++ptr_m_JtdNodes_04;
		++ptr_m_JtdNodes_05;
	}

	ptr_m_NodesJointsNum_00 = &m_NodesJointsNum_00[0];
	ptr_m_NodesJointsNum_01 = &m_NodesJointsNum_01[0];
	ptr_m_NodesJointsNum_02 = &m_NodesJointsNum_02[0];
	ptr_m_NodesJointsNum_03 = &m_NodesJointsNum_03[0];
	ptr_m_NodesJointsNum_04 = &m_NodesJointsNum_04[0];
	ptr_m_NodesJointsNum_05 = &m_NodesJointsNum_05[0];

	ptr_m_NodesJointsOffset_00 = &m_NodesJointsOffset_00[1];
	ptr_m_NodesJointsOffset_01 = &m_NodesJointsOffset_01[1];
	ptr_m_NodesJointsOffset_02 = &m_NodesJointsOffset_02[1];
	ptr_m_NodesJointsOffset_03 = &m_NodesJointsOffset_03[1];
	ptr_m_NodesJointsOffset_04 = &m_NodesJointsOffset_04[1];
	ptr_m_NodesJointsOffset_05 = &m_NodesJointsOffset_05[1];

	for (i = 1; i < m_NumNodes; ++i)
	{
		*ptr_m_NodesJointsOffset_00 = *(ptr_m_NodesJointsOffset_00 - 1) + *ptr_m_NodesJointsNum_00;
		*ptr_m_NodesJointsOffset_01 = *(ptr_m_NodesJointsOffset_01 - 1) + *ptr_m_NodesJointsNum_01;
		*ptr_m_NodesJointsOffset_02 = *(ptr_m_NodesJointsOffset_02 - 1) + *ptr_m_NodesJointsNum_02;
		*ptr_m_NodesJointsOffset_03 = *(ptr_m_NodesJointsOffset_03 - 1) + *ptr_m_NodesJointsNum_03;
		*ptr_m_NodesJointsOffset_04 = *(ptr_m_NodesJointsOffset_04 - 1) + *ptr_m_NodesJointsNum_04;
		*ptr_m_NodesJointsOffset_05 = *(ptr_m_NodesJointsOffset_05 - 1) + *ptr_m_NodesJointsNum_05;

		*ptr_m_NodesJointsNum_00++ = 0;
		*ptr_m_NodesJointsNum_01++ = 0;
		*ptr_m_NodesJointsNum_02++ = 0;
		*ptr_m_NodesJointsNum_03++ = 0;
		*ptr_m_NodesJointsNum_04++ = 0;
		*ptr_m_NodesJointsNum_05++ = 0;

		++ptr_m_NodesJointsOffset_00;
		++ptr_m_NodesJointsOffset_01;
		++ptr_m_NodesJointsOffset_02;
		++ptr_m_NodesJointsOffset_03;
		++ptr_m_NodesJointsOffset_04;
		++ptr_m_NodesJointsOffset_05;
	}

	// Last NodeNum set to zero also
	*ptr_m_NodesJointsNum_00 = 0;
	*ptr_m_NodesJointsNum_01 = 0;
	*ptr_m_NodesJointsNum_02 = 0;
	*ptr_m_NodesJointsNum_03 = 0;
	*ptr_m_NodesJointsNum_04 = 0;
	*ptr_m_NodesJointsNum_05 = 0;

	ptr_m_NodesJointsNum_00 = &m_NodesJointsNum_00[0];
	ptr_m_NodesJointsNum_01 = &m_NodesJointsNum_01[0];
	ptr_m_NodesJointsNum_02 = &m_NodesJointsNum_02[0];
	ptr_m_NodesJointsNum_03 = &m_NodesJointsNum_03[0];
	ptr_m_NodesJointsNum_04 = &m_NodesJointsNum_04[0];
	ptr_m_NodesJointsNum_05 = &m_NodesJointsNum_05[0];

	ptr_m_NodesJointsOffset_00 = &m_NodesJointsOffset_00[0];
	ptr_m_NodesJointsOffset_01 = &m_NodesJointsOffset_01[0];
	ptr_m_NodesJointsOffset_02 = &m_NodesJointsOffset_02[0];
	ptr_m_NodesJointsOffset_03 = &m_NodesJointsOffset_03[0];
	ptr_m_NodesJointsOffset_04 = &m_NodesJointsOffset_04[0];
	ptr_m_NodesJointsOffset_05 = &m_NodesJointsOffset_05[0];

	INIT_NODES_IDX(0);

	for (i = 0; i < m_NumJoints; ++i)
	{
		if (*ptr_m_JtdNodes_00 > 0)
		{
			ptr_m_NodesJoints_00[	ptr_m_NodesJointsOffset_00	[*ptr_m_JtdNodes_00] +
									ptr_m_NodesJointsNum_00		[*ptr_m_JtdNodes_00]++  ] = i;
		}

		if (*ptr_m_JtdNodes_01 > 0)
		{
			ptr_m_NodesJoints_01[	ptr_m_NodesJointsOffset_01	[*ptr_m_JtdNodes_01] +
									ptr_m_NodesJointsNum_01		[*ptr_m_JtdNodes_01]++  ] = i;
		}

		if (*ptr_m_JtdNodes_02 > 0)
		{
			ptr_m_NodesJoints_02[	ptr_m_NodesJointsOffset_02	[*ptr_m_JtdNodes_02] +
									ptr_m_NodesJointsNum_02		[*ptr_m_JtdNodes_02]++  ] = i;
		}

		if (*ptr_m_JtdNodes_03 > 0)
		{
			ptr_m_NodesJoints_03[	ptr_m_NodesJointsOffset_03	[*ptr_m_JtdNodes_03] +
									ptr_m_NodesJointsNum_03		[*ptr_m_JtdNodes_03]++  ] = i;
		}

		if (*ptr_m_JtdNodes_04 > 0)
		{
			ptr_m_NodesJoints_04[	ptr_m_NodesJointsOffset_04	[*ptr_m_JtdNodes_04] +
									ptr_m_NodesJointsNum_04		[*ptr_m_JtdNodes_04]++  ] = i;
		}

		if (*ptr_m_JtdNodes_05 > 0)
		{
			ptr_m_NodesJoints_05[	ptr_m_NodesJointsOffset_05	[*ptr_m_JtdNodes_05] +
									ptr_m_NodesJointsNum_05		[*ptr_m_JtdNodes_05]++  ] = i;
		}

		++ptr_m_JtdNodes_00;
		++ptr_m_JtdNodes_01;
		++ptr_m_JtdNodes_02;
		++ptr_m_JtdNodes_03;
		++ptr_m_JtdNodes_04;
		++ptr_m_JtdNodes_05;
	}

	ptr_m_NodesJointsNum_00 = &m_NodesJointsNum_00[0];
	ptr_m_NodesJointsNum_01 = &m_NodesJointsNum_01[0];
	ptr_m_NodesJointsNum_02 = &m_NodesJointsNum_02[0];
	ptr_m_NodesJointsNum_03 = &m_NodesJointsNum_03[0];
	ptr_m_NodesJointsNum_04 = &m_NodesJointsNum_04[0];
	ptr_m_NodesJointsNum_05 = &m_NodesJointsNum_05[0];

	ptr_m_NodesJointsOffset_00 = &m_NodesJointsOffset_00[0];
	ptr_m_NodesJointsOffset_01 = &m_NodesJointsOffset_01[0];
	ptr_m_NodesJointsOffset_02 = &m_NodesJointsOffset_02[0];
	ptr_m_NodesJointsOffset_03 = &m_NodesJointsOffset_03[0];
	ptr_m_NodesJointsOffset_04 = &m_NodesJointsOffset_04[0];
	ptr_m_NodesJointsOffset_05 = &m_NodesJointsOffset_05[0];

	unsigned int NodesJointsSize_00 =	ptr_m_NodesJointsOffset_00	[m_NumNodes - 1] +
										ptr_m_NodesJointsNum_00		[m_NumNodes - 1];
	unsigned int NodesJointsSize_01 =	ptr_m_NodesJointsOffset_01	[m_NumNodes - 1] +
										ptr_m_NodesJointsNum_01		[m_NumNodes - 1];
	unsigned int NodesJointsSize_02 =	ptr_m_NodesJointsOffset_02	[m_NumNodes - 1] +
										ptr_m_NodesJointsNum_02		[m_NumNodes - 1];
	unsigned int NodesJointsSize_03 =	ptr_m_NodesJointsOffset_03	[m_NumNodes - 1] +
										ptr_m_NodesJointsNum_03		[m_NumNodes - 1];
	unsigned int NodesJointsSize_04 =	ptr_m_NodesJointsOffset_04	[m_NumNodes - 1] +
										ptr_m_NodesJointsNum_04		[m_NumNodes - 1];
	unsigned int NodesJointsSize_05 =	ptr_m_NodesJointsOffset_05	[m_NumNodes - 1] +
										ptr_m_NodesJointsNum_05		[m_NumNodes - 1];

	INIT_NODES_IDX(0);


	TIMER_MEASURE("Init 3 additional arrays");


	float	*ptr_m_NodeInvMass_00 = &m_NodeInvMass_00[0],
			*ptr_m_NodeInvMass_01 = &m_NodeInvMass_01[0],
			*ptr_m_NodeInvMass_02 = &m_NodeInvMass_02[0],
			*ptr_m_NodeInvMass_10 = &m_NodeInvMass_10[0],
			*ptr_m_NodeInvMass_11 = &m_NodeInvMass_11[0],
			*ptr_m_NodeInvMass_12 = &m_NodeInvMass_12[0],
			*ptr_m_NodeInvMass_20 = &m_NodeInvMass_20[0],
			*ptr_m_NodeInvMass_21 = &m_NodeInvMass_21[0],
			*ptr_m_NodeInvMass_22 = &m_NodeInvMass_22[0];

	float	*ptr_m_Ftot_x = &m_Ftot_x[0],
			*ptr_m_Ftot_y = &m_Ftot_y[0],
			*ptr_m_Ftot_z = &m_Ftot_z[0];


/*
	// y = [1 1 1 1 <...> 1]^T -- for power iteration step

	for (i = 0; i < m_NumJoints; ++i)
	{
		ptr_m_d[i] = 1.0f;

		val_arr(m_RHS) = inc_arr(m_J_rhs);

		FORM_A_SPARSE(0, 00, 01, 02);
		FORM_A_SPARSE(1, 03, 04, 05);
		FORM_A_SPARSE(2, 06, 07, 08);
		FORM_A_SPARSE(3, 09, 10, 11);

#if (NUM_JOINT_TRIPLES == 6)

		FORM_A_SPARSE(4, 12, 13, 14);
		FORM_A_SPARSE(5, 15, 16, 17);

#endif

		++ptr_m_RHS;
	}

	TIMER_MEASURE("Form A sparse");
*/
	


	ptr_m_CFM = &m_CFM[0];
	ptr_m_RHS = &m_RHS[0];
	ptr_m_J_rhs = &m_J_rhs[0];
	ptr_m_Lo = &m_Lo[0];
	ptr_m_Hi = &m_Hi[0];

	INIT_J_SPARSE(0);
	INIT_A_SPARSE(0);
	INIT_NODES_IDX(0);

	ptr_m_NodesJointsNum_00 = &m_NodesJointsNum_00[0];
	ptr_m_NodesJointsNum_01 = &m_NodesJointsNum_01[0];
	ptr_m_NodesJointsNum_02 = &m_NodesJointsNum_02[0];
	ptr_m_NodesJointsNum_03 = &m_NodesJointsNum_03[0];
	ptr_m_NodesJointsNum_04 = &m_NodesJointsNum_04[0];
	ptr_m_NodesJointsNum_05 = &m_NodesJointsNum_05[0];

	ptr_m_NodesJointsOffset_00 = &m_NodesJointsOffset_00[0];
	ptr_m_NodesJointsOffset_01 = &m_NodesJointsOffset_01[0];
	ptr_m_NodesJointsOffset_02 = &m_NodesJointsOffset_02[0];
	ptr_m_NodesJointsOffset_03 = &m_NodesJointsOffset_03[0];
	ptr_m_NodesJointsOffset_04 = &m_NodesJointsOffset_04[0];
	ptr_m_NodesJointsOffset_05 = &m_NodesJointsOffset_05[0];

	ptr_m_NodesJoints_00 = &m_NodesJoints_00[0];
	ptr_m_NodesJoints_01 = &m_NodesJoints_01[0];
	ptr_m_NodesJoints_02 = &m_NodesJoints_02[0];
	ptr_m_NodesJoints_03 = &m_NodesJoints_03[0];
	ptr_m_NodesJoints_04 = &m_NodesJoints_04[0];
	ptr_m_NodesJoints_05 = &m_NodesJoints_05[0];


	//////////////////////////////////////////////////////////////////////////
	// CUDA Event Synchronization (async launch of jacobi calculation kernel)
	//////////////////////////////////////////////////////////////////////////
	CUDA_SAFE_CALL( cuEventSynchronize(m_EventSync_Jacobi) );


	unsigned int tmpMemSize;

	//////////////////////////////////////////////////////////////////////////
	// CUDA Init and Params send
	//////////////////////////////////////////////////////////////////////////
	size_t byte_size_NumJoints_f = m_NumJoints * sizeof(float);
	size_t byte_size_NumNodes_ui = m_NumNodes * sizeof(unsigned int);


/*
	COPY_MEM_H_TO_D(Asp_00, byte_size_NumJoints_f, 0, 0);
	COPY_MEM_H_TO_D(Asp_01, byte_size_NumJoints_f, 0, 0);
	COPY_MEM_H_TO_D(Asp_02, byte_size_NumJoints_f, 0, 0);
	COPY_MEM_H_TO_D(Asp_03, byte_size_NumJoints_f, 0, 0);
	COPY_MEM_H_TO_D(Asp_04, byte_size_NumJoints_f, 0, 0);
	COPY_MEM_H_TO_D(Asp_05, byte_size_NumJoints_f, 0, 0);
	COPY_MEM_H_TO_D(Asp_06, byte_size_NumJoints_f, 0, 0);
	COPY_MEM_H_TO_D(Asp_07, byte_size_NumJoints_f, 0, 0);
	COPY_MEM_H_TO_D(Asp_08, byte_size_NumJoints_f, 0, 0);
	COPY_MEM_H_TO_D(Asp_09, byte_size_NumJoints_f, 0, 0);
	COPY_MEM_H_TO_D(Asp_10, byte_size_NumJoints_f, 0, 0);
	COPY_MEM_H_TO_D(Asp_11, byte_size_NumJoints_f, 0, 0);
	COPY_MEM_H_TO_D(Asp_12, byte_size_NumJoints_f, 0, 0);
	COPY_MEM_H_TO_D(Asp_13, byte_size_NumJoints_f, 0, 0);
	COPY_MEM_H_TO_D(Asp_14, byte_size_NumJoints_f, 0, 0);
	COPY_MEM_H_TO_D(Asp_15, byte_size_NumJoints_f, 0, 0);
	COPY_MEM_H_TO_D(Asp_16, byte_size_NumJoints_f, 0, 0);
	COPY_MEM_H_TO_D(Asp_17, byte_size_NumJoints_f, 0, 0);
*/

	unsigned mem_offset_FEM_6f	= 6 * m_Num_FEM_Joints;// * sizeof(float);

	COPY_MEM_H_TO_D(NodesJoints_00, NodesJointsSize_00 * sizeof(unsigned int), 0, 0);
	COPY_MEM_H_TO_D(NodesJoints_01, NodesJointsSize_01 * sizeof(unsigned int), 0, 0);
	COPY_MEM_H_TO_D(NodesJoints_02, NodesJointsSize_02 * sizeof(unsigned int), 0, 0);
	COPY_MEM_H_TO_D(NodesJoints_03, NodesJointsSize_03 * sizeof(unsigned int), 0, 0);
	COPY_MEM_H_TO_D(NodesJoints_04, NodesJointsSize_04 * sizeof(unsigned int), 0, 0);
	COPY_MEM_H_TO_D(NodesJoints_05, NodesJointsSize_05 * sizeof(unsigned int), 0, 0);

	COPY_MEM_H_TO_D(NodesJointsNum_00, byte_size_NumNodes_ui, 0, 0);
	COPY_MEM_H_TO_D(NodesJointsNum_01, byte_size_NumNodes_ui, 0, 0);
	COPY_MEM_H_TO_D(NodesJointsNum_02, byte_size_NumNodes_ui, 0, 0);
	COPY_MEM_H_TO_D(NodesJointsNum_03, byte_size_NumNodes_ui, 0, 0);
	COPY_MEM_H_TO_D(NodesJointsNum_04, byte_size_NumNodes_ui, 0, 0);
	COPY_MEM_H_TO_D(NodesJointsNum_05, byte_size_NumNodes_ui, 0, 0);

	COPY_MEM_H_TO_D(NodesJointsOffset_00, byte_size_NumNodes_ui, 0, 0);
	COPY_MEM_H_TO_D(NodesJointsOffset_01, byte_size_NumNodes_ui, 0, 0);
	COPY_MEM_H_TO_D(NodesJointsOffset_02, byte_size_NumNodes_ui, 0, 0);
	COPY_MEM_H_TO_D(NodesJointsOffset_03, byte_size_NumNodes_ui, 0, 0);
	COPY_MEM_H_TO_D(NodesJointsOffset_04, byte_size_NumNodes_ui, 0, 0);
	COPY_MEM_H_TO_D(NodesJointsOffset_05, byte_size_NumNodes_ui, 0, 0);

	float m_Num_FEM_Rows = m_Num_FEM_Joints * 6;

	COPY_MEM_H_TO_D(J_00, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);
	COPY_MEM_H_TO_D(J_01, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);
	COPY_MEM_H_TO_D(J_02, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);
	COPY_MEM_H_TO_D(J_03, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);
	COPY_MEM_H_TO_D(J_04, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);
	COPY_MEM_H_TO_D(J_05, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);
	COPY_MEM_H_TO_D(J_06, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);
	COPY_MEM_H_TO_D(J_07, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);
	COPY_MEM_H_TO_D(J_08, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);
	COPY_MEM_H_TO_D(J_09, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);
	COPY_MEM_H_TO_D(J_10, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);
	COPY_MEM_H_TO_D(J_11, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);
	COPY_MEM_H_TO_D(J_12, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);
	COPY_MEM_H_TO_D(J_13, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);
	COPY_MEM_H_TO_D(J_14, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);
	COPY_MEM_H_TO_D(J_15, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);
	COPY_MEM_H_TO_D(J_16, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);
	COPY_MEM_H_TO_D(J_17, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);

	COPY_MEM_H_TO_D(CFM, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);
	COPY_MEM_H_TO_D(RHS, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);

	COPY_MEM_H_TO_D(Lo, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);
	COPY_MEM_H_TO_D(Hi, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);

	ptr_m_lambda = &m_lambda[0];
	COPY_MEM_H_TO_D(lambda, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float), mem_offset_FEM_6f * sizeof(float), mem_offset_FEM_6f);


//	COPY_MEM_H_TO_D(RHS, (m_NumJoints - m_Num_FEM_Joints) * sizeof(float), mem_offset_FEM_6f);
	CUDA_SAFE_CALL( cuMemcpyHtoD(m_gpu_RHS + mem_offset_FEM_6f * sizeof(float), ptr_m_J_rhs + mem_offset_FEM_6f, (m_NumJoints - m_Num_FEM_Rows) * sizeof(float)) );

	COPY_MEM_H_TO_D(JtdNodes_00, m_NumJoints * sizeof(unsigned int), 0, 0);
	COPY_MEM_H_TO_D(JtdNodes_01, m_NumJoints * sizeof(unsigned int), 0, 0);
	COPY_MEM_H_TO_D(JtdNodes_02, m_NumJoints * sizeof(unsigned int), 0, 0);
	COPY_MEM_H_TO_D(JtdNodes_03, m_NumJoints * sizeof(unsigned int), 0, 0);
	COPY_MEM_H_TO_D(JtdNodes_04, m_NumJoints * sizeof(unsigned int), 0, 0);
	COPY_MEM_H_TO_D(JtdNodes_05, m_NumJoints * sizeof(unsigned int), 0, 0);



	ptr_m_Ftot_x = &m_Ftot_x[0];
	ptr_m_Ftot_y = &m_Ftot_y[0];
	ptr_m_Ftot_z = &m_Ftot_z[0];

	ptr_m_NodeInvMass_00 = &m_NodeInvMass_00[0];
	ptr_m_NodeInvMass_01 = &m_NodeInvMass_01[0];
	ptr_m_NodeInvMass_02 = &m_NodeInvMass_02[0];
	ptr_m_NodeInvMass_10 = &m_NodeInvMass_10[0];
	ptr_m_NodeInvMass_11 = &m_NodeInvMass_11[0];
	ptr_m_NodeInvMass_12 = &m_NodeInvMass_12[0];
	ptr_m_NodeInvMass_20 = &m_NodeInvMass_20[0];
	ptr_m_NodeInvMass_21 = &m_NodeInvMass_21[0];
	ptr_m_NodeInvMass_22 = &m_NodeInvMass_22[0];

	float *ptr_m_NodeF_x = &m_NodeF_x[0];
	float *ptr_m_NodeF_y = &m_NodeF_y[0];
	float *ptr_m_NodeF_z = &m_NodeF_z[0];

	float *ptr_m_NodeVel_x = &m_NodeVel_x[0];
	float *ptr_m_NodeVel_y = &m_NodeVel_y[0];
	float *ptr_m_NodeVel_z = &m_NodeVel_z[0];

	float	*ptr_m_NodePosRot_w = &m_NodePosRot_w[0],
			*ptr_m_NodePosRot_x = &m_NodePosRot_x[0],
			*ptr_m_NodePosRot_y = &m_NodePosRot_y[0],
			*ptr_m_NodePosRot_z = &m_NodePosRot_z[0];

	float *ptr_m_NodeInvMass0_00 = &m_NodeInvMass0_00[0];
	float *ptr_m_NodeInvMass0_01 = &m_NodeInvMass0_01[0];
	float *ptr_m_NodeInvMass0_02 = &m_NodeInvMass0_02[0];
	float *ptr_m_NodeInvMass0_10 = &m_NodeInvMass0_10[0];
	float *ptr_m_NodeInvMass0_11 = &m_NodeInvMass0_11[0];
	float *ptr_m_NodeInvMass0_12 = &m_NodeInvMass0_12[0];
	float *ptr_m_NodeInvMass0_20 = &m_NodeInvMass0_20[0];
	float *ptr_m_NodeInvMass0_21 = &m_NodeInvMass0_21[0];
	float *ptr_m_NodeInvMass0_22 = &m_NodeInvMass0_22[0];

	bool *ptr_m_IsRotational = &m_IsRotational[0];

	unsigned int IntegrateNumNodes = m_NumNodes;


	COPY_MEM_H_TO_D(Ftot_x, IntegrateNumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(Ftot_y, IntegrateNumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(Ftot_z, IntegrateNumNodes * sizeof(float), 0, 0);

/*
	COPY_MEM_H_TO_D(NodeVel_x, IntegrateNumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodeVel_y, IntegrateNumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodeVel_z, IntegrateNumNodes * sizeof(float), 0, 0);


	COPY_MEM_H_TO_D(NodePosRot_x, IntegrateNumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodePosRot_y, IntegrateNumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodePosRot_z, IntegrateNumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodePosRot_w, IntegrateNumNodes * sizeof(float), 0, 0);

	COPY_MEM_H_TO_D(IsRotational, IntegrateNumNodes * sizeof(bool), 0, 0);
*/




	ptr_m_a_x = &m_a_x[0];
	ptr_m_a_y = &m_a_y[0];
	ptr_m_a_z = &m_a_z[0];

	ptr_m_a_x = &m_a_x[0];
	ptr_m_a_y = &m_a_y[0];
	ptr_m_a_z = &m_a_z[0];

	INIT_J_SPARSE(0);
	INIT_A_SPARSE(0);
	INIT_NODES_IDX(0);

	ptr_m_d = &m_d[0];



	TIMER_MEASURE("Memory transfer");

	ASSIGN_TEXTURE_REF_KERNEL(NodeInvMass_00, m_ASparseCalcFunction);
	ASSIGN_TEXTURE_REF_KERNEL(NodeInvMass_01, m_ASparseCalcFunction);
	ASSIGN_TEXTURE_REF_KERNEL(NodeInvMass_02, m_ASparseCalcFunction);
	ASSIGN_TEXTURE_REF_KERNEL(NodeInvMass_10, m_ASparseCalcFunction);
	ASSIGN_TEXTURE_REF_KERNEL(NodeInvMass_11, m_ASparseCalcFunction);
	ASSIGN_TEXTURE_REF_KERNEL(NodeInvMass_12, m_ASparseCalcFunction);
	ASSIGN_TEXTURE_REF_KERNEL(NodeInvMass_20, m_ASparseCalcFunction);
	ASSIGN_TEXTURE_REF_KERNEL(NodeInvMass_21, m_ASparseCalcFunction);
	ASSIGN_TEXTURE_REF_KERNEL(NodeInvMass_22, m_ASparseCalcFunction);

	ASSIGN_TEXTURE_REF_KERNEL(Ftot_x, m_ASparseCalcFunction);
	ASSIGN_TEXTURE_REF_KERNEL(Ftot_y, m_ASparseCalcFunction);
	ASSIGN_TEXTURE_REF_KERNEL(Ftot_z, m_ASparseCalcFunction);


	offset = 0;

	// m_NumNodes
	offset = GET_UPPER_ALIGNMENT_BOUND(offset, m_NumJoints);
	CUDA_SAFE_CALL( cuParamSeti(m_ASparseCalcFunction, offset, m_NumJoints) );
	offset += sizeof(m_NumJoints);

	// dt
	offset = GET_UPPER_ALIGNMENT_BOUND(offset, dt);
	CUDA_SAFE_CALL( cuParamSetf(m_ASparseCalcFunction, offset, dt) );
	offset += sizeof(dt);

	// Set argument list size
	CUDA_SAFE_CALL( cuParamSetSize(m_ASparseCalcFunction, offset) );

	// Set number of threads in block (as grid XYZ)
	// Here threads per block is two times larger than in other kernels
	// Since the upper bound is twice as smaller
	CUDA_SAFE_CALL( cuFuncSetBlockShape(m_ASparseCalcFunction, m_CUDADeviceMultiplier * 2 * BLOCK_SIZE, 1, 1) );

	//m_SharedMemorySizes[KernelTypes::ASPARSE];
	CUDA_SAFE_CALL( cuFuncSetSharedSize(m_ASparseCalcFunction, m_SharedMemorySizes[KernelTypes::ASPARSE]) );

	// Number of blocks is equal to number of multiprocessors
	{
		CUresult res = cuLaunchGrid(m_ASparseCalcFunction, m_MultiProcessorsNum, 1);
		CUDA_ERROR_ASSERT(res);
	}	


	TIMER_MEASURE("Form A sparse");	
	
	


	//////////////////////////////////////////////////////////////////////////
	// LCPCG itself
	//////////////////////////////////////////////////////////////////////////

	// grad_norm = ||beta + phi||
	float grad_norm = 0.0f;

	ptr_m_p = &m_p[0];
	ptr_m_g = &m_g[0];

	ptr_m_a_x = &m_a_x[0];
	ptr_m_a_y = &m_a_y[0];
	ptr_m_a_z = &m_a_z[0];

	ptr_m_Lo = &m_Lo[0];
	ptr_m_Hi = &m_Hi[0];
	ptr_m_RHS = &m_RHS[0];
	ptr_m_lambda = &m_lambda[0];

	INIT_J_SPARSE(0);
	INIT_NODES_IDX(0);

	// Assign texture references to kernel
	ASSIGN_TEXTURE_REF_KERNEL(Asp_00, m_KernelFunction);
	ASSIGN_TEXTURE_REF_KERNEL(Asp_01, m_KernelFunction);
	ASSIGN_TEXTURE_REF_KERNEL(Asp_02, m_KernelFunction);
	ASSIGN_TEXTURE_REF_KERNEL(Asp_03, m_KernelFunction);
	ASSIGN_TEXTURE_REF_KERNEL(Asp_04, m_KernelFunction);
	ASSIGN_TEXTURE_REF_KERNEL(Asp_05, m_KernelFunction);
	ASSIGN_TEXTURE_REF_KERNEL(Asp_06, m_KernelFunction);
	ASSIGN_TEXTURE_REF_KERNEL(Asp_07, m_KernelFunction);
	ASSIGN_TEXTURE_REF_KERNEL(Asp_08, m_KernelFunction);
	ASSIGN_TEXTURE_REF_KERNEL(Asp_09, m_KernelFunction);
	ASSIGN_TEXTURE_REF_KERNEL(Asp_10, m_KernelFunction);
	ASSIGN_TEXTURE_REF_KERNEL(Asp_11, m_KernelFunction);
	ASSIGN_TEXTURE_REF_KERNEL(Asp_12, m_KernelFunction);
	ASSIGN_TEXTURE_REF_KERNEL(Asp_13, m_KernelFunction);
	ASSIGN_TEXTURE_REF_KERNEL(Asp_14, m_KernelFunction);
	ASSIGN_TEXTURE_REF_KERNEL(Asp_15, m_KernelFunction);
	ASSIGN_TEXTURE_REF_KERNEL(Asp_16, m_KernelFunction);
	ASSIGN_TEXTURE_REF_KERNEL(Asp_17, m_KernelFunction);

	ASSIGN_TEXTURE_REF_KERNEL(NodesJoints_00, m_KernelFunction);
	ASSIGN_TEXTURE_REF_KERNEL(NodesJoints_01, m_KernelFunction);
	ASSIGN_TEXTURE_REF_KERNEL(NodesJoints_02, m_KernelFunction);
	ASSIGN_TEXTURE_REF_KERNEL(NodesJoints_03, m_KernelFunction);
	ASSIGN_TEXTURE_REF_KERNEL(NodesJoints_04, m_KernelFunction);
	ASSIGN_TEXTURE_REF_KERNEL(NodesJoints_05, m_KernelFunction);

	ASSIGN_TEXTURE_REF_KERNEL(invDiag, m_KernelFunction);

	ptr_m_p = &m_p[0];
	ptr_m_g = &m_g[0];
	ptr_m_ap_vec = &m_ap_vec[0];
	ptr_m_lambda = &m_lambda[0];

	float tmp_val = 0.0f;
	CUDA_SAFE_CALL( cuMemcpyHtoD(m_gpu_a_x, (void *)&tmp_val, sizeof(float)) );
	CUDA_SAFE_CALL( cuMemcpyHtoD(m_gpu_a_y, (void *)&tmp_val, sizeof(float)) );
	CUDA_SAFE_CALL( cuMemcpyHtoD(m_gpu_a_z, (void *)&tmp_val, sizeof(float)) );

	offset = 0;

	// m_NumNodes
	offset = GET_UPPER_ALIGNMENT_BOUND(offset, m_NumNodes);
	CUDA_SAFE_CALL( cuParamSeti(m_KernelFunction, offset, m_NumNodes) );
	offset += sizeof(m_NumNodes);

	// m_NumJoints
	offset = GET_UPPER_ALIGNMENT_BOUND(offset, m_NumJoints);
	CUDA_SAFE_CALL( cuParamSeti(m_KernelFunction, offset, m_NumJoints) );
	offset += sizeof(m_NumJoints);

	// m_Iterations
	offset = GET_UPPER_ALIGNMENT_BOUND(offset, m_Iterations);
	CUDA_SAFE_CALL( cuParamSeti(m_KernelFunction, offset, m_Iterations) );
	offset += sizeof(m_Iterations);

	// m_Precision
	offset = GET_UPPER_ALIGNMENT_BOUND(offset, m_Precision);
	CUDA_SAFE_CALL( cuParamSetf(m_KernelFunction, offset, m_Precision) );
	offset += sizeof(m_Precision);

	// Initialize block sync flags
	unsigned int BSFMemSize;
	CUdeviceptr SyncAllFlags;
	cuModuleGetGlobal(&SyncAllFlags, &BSFMemSize, m_cuModule, "syncAllFlags");
	cuMemsetD32(SyncAllFlags, 0, BSFMemSize >> 2);

	// Initialize y_norm
	unsigned int YNMemSize;
	CUdeviceptr gpu_y_norm;
	cuModuleGetGlobal(&gpu_y_norm, &YNMemSize, m_cuModule, "y_norm_inv");

	float y_norm_init = 1.0f;
	cuMemcpyHtoD(gpu_y_norm, (void *)&y_norm_init, sizeof(float));

	// Set argument list size
	CUDA_SAFE_CALL( cuParamSetSize(m_KernelFunction, offset) );

	// Set number of threads in block (as grid XYZ)
	CUDA_SAFE_CALL( cuFuncSetBlockShape(m_KernelFunction, m_CUDADeviceMultiplier * BLOCK_SIZE, 1, 1) );

	//////////////////////////////////////////////////////////////////////////
	// CUDA Events
	//////////////////////////////////////////////////////////////////////////
	CUevent EventStart, EventStop;
	CUDA_SAFE_CALL( cuEventCreate(&EventStart, CU_EVENT_BLOCKING_SYNC) );
	CUDA_SAFE_CALL( cuEventCreate(&EventStop, CU_EVENT_BLOCKING_SYNC) );

	CUDA_SAFE_CALL( cuEventRecord(EventStart, 0) );
	//////////////////////////////////////////////////////////////////////////

	//m_SharedMemorySizes[KernelTypes::SOLVER];
	CUDA_SAFE_CALL( cuFuncSetSharedSize(m_KernelFunction, m_SharedMemorySizes[KernelTypes::SOLVER]) );

	// Number of blocks is equal to number of multiprocessors
	{
		CUresult res = cuLaunchGrid(m_KernelFunction, m_MultiProcessorsNum, 1);
		CUDA_ERROR_ASSERT(res);
	}

	//////////////////////////////////////////////////////////////////////////
	// CUDA Events
	//////////////////////////////////////////////////////////////////////////
	CUDA_SAFE_CALL( cuEventRecord(EventStop, 0) );
	CUDA_SAFE_CALL( cuEventSynchronize(EventStop) );

	float EventElapsedTime;
	CUDA_SAFE_CALL( cuEventElapsedTime(&EventElapsedTime, EventStart, EventStop) );

	CUDA_SAFE_CALL( cuEventDestroy(EventStart) );
	CUDA_SAFE_CALL( cuEventDestroy(EventStop) );
	//////////////////////////////////////////////////////////////////////////


	TIMER_MEASURE("Kernel launch routine");

	ptr_m_d = &m_d[0];
	ptr_m_p = &m_p[0];
	ptr_m_g = &m_g[0];
	ptr_m_lambda = &m_lambda[0];
	ptr_m_ap_vec = &m_ap_vec[0];

	ptr_m_a_x = &m_a_x[0];
	ptr_m_a_y = &m_a_y[0];
	ptr_m_a_z = &m_a_z[0];

	CUDA_SAFE_CALL( cuMemcpyDtoH((void *)ptr_m_lambda, m_gpu_lambda, m_NumJoints * sizeof(float)) );

	CUdeviceptr gpu_temp_ptr;

	cuModuleGetGlobal(&gpu_temp_ptr, &tmpMemSize, m_cuModule, "GradNorm");
	cuMemcpyDtoH((void *)&grad_norm, gpu_temp_ptr, sizeof(float));

	cuModuleGetGlobal(&gpu_temp_ptr, &tmpMemSize, m_cuModule, "m_EffectiveIterations");
	cuMemcpyDtoH((void *)&m_EffectiveIterations, gpu_temp_ptr, sizeof(unsigned int));

	cuModuleGetGlobal(&gpu_temp_ptr, &tmpMemSize, m_cuModule, "m_GradNormSq");
	cuMemcpyDtoH((void *)&m_GradNormSq, gpu_temp_ptr, sizeof(unsigned int));

	cuModuleGetGlobal(&gpu_temp_ptr, &tmpMemSize, m_cuModule, "m_LambdaNormSq");
	cuMemcpyDtoH((void *)&m_LambdaNormSq, gpu_temp_ptr, sizeof(unsigned int));

	cuModuleGetGlobal(&gpu_temp_ptr, &tmpMemSize, m_cuModule, "m_DotLambdaGrad");
	cuMemcpyDtoH((void *)&m_DotLambdaGrad, gpu_temp_ptr, sizeof(unsigned int));


	TIMER_MEASURE("Fetching results");

	//////////////////////////////////////////////////////////////////////////


#if (HARDWARE_INTEGRATION == 0)

	CUDA_SAFE_CALL( cuMemcpyDtoH((void *)ptr_m_a_x, m_gpu_a_x, m_NumNodes * sizeof(float)) );
	CUDA_SAFE_CALL( cuMemcpyDtoH((void *)ptr_m_a_y, m_gpu_a_y, m_NumNodes * sizeof(float)) );
	CUDA_SAFE_CALL( cuMemcpyDtoH((void *)ptr_m_a_z, m_gpu_a_z, m_NumNodes * sizeof(float)) );

	Integrate(dt);

#elif (HARDWARE_INTEGRATION == 1)

	offset = 0;

	// m_NumNodes
	offset = GET_UPPER_ALIGNMENT_BOUND(offset, IntegrateNumNodes);
	CUDA_SAFE_CALL( cuParamSeti(m_IntegrateFunction, offset, IntegrateNumNodes) );
	offset += sizeof(IntegrateNumNodes);

	// dt
	offset = GET_UPPER_ALIGNMENT_BOUND(offset, dt);
	CUDA_SAFE_CALL( cuParamSetf(m_IntegrateFunction, offset, dt) );
	offset += sizeof(dt);

	// Initialize block sync flags
	cuModuleGetGlobal(&SyncAllFlags, &BSFMemSize, m_cuModule, "syncAllFlags");
	cuMemsetD32(SyncAllFlags, 0, BSFMemSize >> 2);

	// Set argument list size
	CUDA_SAFE_CALL( cuParamSetSize(m_IntegrateFunction, offset) );

	// Set number of threads in block (as grid XYZ)
	CUDA_SAFE_CALL( cuFuncSetBlockShape(m_IntegrateFunction, m_CUDADeviceMultiplier * BLOCK_SIZE, 1, 1) );

	//m_SharedMemorySizes[KernelTypes::INTEGRATE];
	CUDA_SAFE_CALL( cuFuncSetSharedSize(m_IntegrateFunction, m_SharedMemorySizes[KernelTypes::INTEGRATE]) );

	// Number of blocks is equal to number of multiprocessors
	{
		CUresult res = cuLaunchGrid(m_IntegrateFunction, m_MultiProcessorsNum, 1);
		CUDA_ERROR_ASSERT(res);
	}

	// Read back
	CUDA_SAFE_CALL( cuMemcpyDtoH((void *)ptr_m_NodeVel_x, m_gpu_NodeVel_x, IntegrateNumNodes * sizeof(float)) );
	CUDA_SAFE_CALL( cuMemcpyDtoH((void *)ptr_m_NodeVel_y, m_gpu_NodeVel_y, IntegrateNumNodes * sizeof(float)) );
	CUDA_SAFE_CALL( cuMemcpyDtoH((void *)ptr_m_NodeVel_z, m_gpu_NodeVel_z, IntegrateNumNodes * sizeof(float)) );

	CUDA_SAFE_CALL( cuMemcpyDtoH((void *)ptr_m_NodePosRot_w, m_gpu_NodePosRot_w, IntegrateNumNodes * sizeof(float)) );
	CUDA_SAFE_CALL( cuMemcpyDtoH((void *)ptr_m_NodePosRot_x, m_gpu_NodePosRot_x, IntegrateNumNodes * sizeof(float)) );
	CUDA_SAFE_CALL( cuMemcpyDtoH((void *)ptr_m_NodePosRot_y, m_gpu_NodePosRot_y, IntegrateNumNodes * sizeof(float)) );
	CUDA_SAFE_CALL( cuMemcpyDtoH((void *)ptr_m_NodePosRot_z, m_gpu_NodePosRot_z, IntegrateNumNodes * sizeof(float)) );

	CUDA_SAFE_CALL( cuMemcpyDtoH((void *)ptr_m_NodeInvMass_00, m_gpu_NodeInvMass_00, IntegrateNumNodes * sizeof(float)) );
	CUDA_SAFE_CALL( cuMemcpyDtoH((void *)ptr_m_NodeInvMass_01, m_gpu_NodeInvMass_01, IntegrateNumNodes * sizeof(float)) );
	CUDA_SAFE_CALL( cuMemcpyDtoH((void *)ptr_m_NodeInvMass_02, m_gpu_NodeInvMass_02, IntegrateNumNodes * sizeof(float)) );
	CUDA_SAFE_CALL( cuMemcpyDtoH((void *)ptr_m_NodeInvMass_10, m_gpu_NodeInvMass_10, IntegrateNumNodes * sizeof(float)) );
	CUDA_SAFE_CALL( cuMemcpyDtoH((void *)ptr_m_NodeInvMass_11, m_gpu_NodeInvMass_11, IntegrateNumNodes * sizeof(float)) );
	CUDA_SAFE_CALL( cuMemcpyDtoH((void *)ptr_m_NodeInvMass_12, m_gpu_NodeInvMass_12, IntegrateNumNodes * sizeof(float)) );
	CUDA_SAFE_CALL( cuMemcpyDtoH((void *)ptr_m_NodeInvMass_20, m_gpu_NodeInvMass_20, IntegrateNumNodes * sizeof(float)) );
	CUDA_SAFE_CALL( cuMemcpyDtoH((void *)ptr_m_NodeInvMass_21, m_gpu_NodeInvMass_21, IntegrateNumNodes * sizeof(float)) );
	CUDA_SAFE_CALL( cuMemcpyDtoH((void *)ptr_m_NodeInvMass_22, m_gpu_NodeInvMass_22, IntegrateNumNodes * sizeof(float)) );

	for (i = 0; i < m_NumNodes; ++i)
	{
		inc_arr(m_NodeF_x) = 0.0f;
		inc_arr(m_NodeF_y) = 0.0f;
		inc_arr(m_NodeF_z) = 0.0f;
	}

#endif

	TIMER_MEASURE("Integrate");


#if (LOG_TIMERS == 1)

	float SolveTime = PerfTimer.Time();

	gLog.Print("Event Elapsed Time: %f of %f;", EventElapsedTime, SolveTime);

	float TotalTime = 0.0f;
	for (i = 0; i < TimerStepCount; ++i)
	{
		gLog.Print("- %s: %f ms;", TimerEventNames[i], TimerEventTimings[i]);
		TotalTime += TimerEventTimings[i];
	}
	gLog.Print("TOTAL (SUM): %f ms;\n", TotalTime);

#endif
}

// Initializes all CUDA driver-API state for the solver: selects the requested
// device, creates a context, JIT-compiles the embedded PTX image, resolves the
// kernel entry points (picking 4-warp or 8-warp variants by compute
// capability), computes the per-kernel dynamic shared-memory budgets, and
// allocates every persistent device buffer / texture binding.
//
// NOTE(review): this file uses the legacy (pre-CUDA 4.0) driver API
// (cuParamSet*/cuLaunchGrid/cuFuncSetBlockShape elsewhere, cuDeviceGetProperties
// here), under which CUdeviceptr is a 32-bit handle. Several tricks below
// (cuMemsetD32 pointer publishing) depend on that — confirm before porting to
// a 64-bit toolkit.
//
// Parameters:
//   device - ordinal of the CUDA device to use.
// Returns CUDA_SUCCESS on success, or the first failing CUresult (after
// detaching the context). Exits the process if no CUDA device is present.
CUresult SolverLCPCG_CUDA::InitCUDA(unsigned int device)
{
	m_cuDevice = 0;

	int deviceCount = 0;
	CUDA_SAFE_CALL(cuInit(0));
	CUDA_SAFE_CALL(cuDeviceGetCount(&deviceCount));

	// No usable CUDA device: nothing to fall back to, abort the process.
	if (deviceCount == 0)
	{
		exit(-1);
	}

	int dev = device;

	CUDA_SAFE_CALL(cuDeviceGet(&m_cuDevice, dev));
	char name[100];
	cuDeviceGetName(name, 100, m_cuDevice);

	// Local error handler: on any failing driver call, assert, detach the
	// context and propagate the status to the caller. (#undef'd below once
	// all fallible initialization is done.)
#define ErrorExit() \
		if (CUDA_SUCCESS != status)\
		{\
			CUDA_ERROR_ASSERT(status);\
			cuCtxDetach(m_cuContext);\
			return status;\
		}

	CUresult status = cuCtxCreate(&m_cuContext, 0, m_cuDevice);
	ErrorExit();

	// Compute capability drives the kernel-variant selection and the
	// shared-memory allocation granularity below.
	int majCC, minCC;
	cuDeviceComputeCapability(&majCC, &minCC, m_cuDevice);

	char tmpText[256];
	sprintf(tmpText, "Device[%s]: compute capability: %d.%d;", name, majCC, minCC);
	MessageBox(0, tmpText, "infobox", 0);

	//InitShader(&RBlur, IDR_RBLUR_FS, IDR_RBLUR_VS)

/*
	HRSRC hRSrc_PTX = FindResource(NULL, MAKEINTRESOURCE(IDR_CUDA_PTX1), "CUDA_PTX");

	int RSrc_PTX_Size = SizeofResource(NULL, hRSrc_PTX);
	char *ptr_ptx = new char[RSrc_PTX_Size + 1];//(char *)LockResource(LoadResource(NULL, hRSrc_PTX));

	memcpy( ptr_ptx, LockResource(LoadResource(NULL, hRSrc_PTX)), RSrc_PTX_Size * sizeof(char) );
	ptr_ptx[RSrc_PTX_Size] = '\0';
*/

//	status = cuModuleLoad(&m_cuModule, "resources\\solver_kernel.ptx");
//	status = cuModuleLoadData(&m_cuModule, m_MainPTX_Image);

/*
	//////////////////////////////////////////////////////////////////////////
	// EXCERPT from NVIDIA_FermiCompatibilityGuide.pdf	
	//////////////////////////////////////////////////////////////////////////

	// We specify PTXJIT compilation with parameters
	const unsigned int jitNumOptions = 3;
	CUjit_option *jitOptions = new CUjit_option[jitNumOptions];
	void **jitOptVals = new void*[jitNumOptions];

	// set up size of compilation log buffer
	jitOptions[0] = CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES;
	int jitLogBufferSize = 1024;
	jitOptVals[0] = (void *)jitLogBufferSize;
	
	// set up pointer to the compilation log buffer
	jitOptions[1] = CU_JIT_INFO_LOG_BUFFER;
	char *jitLogBuffer = new char[jitLogBufferSize];
	jitOptVals[1] = jitLogBuffer;
	
	// set up pointer for Maximum # of registers
	jitOptions[2] = CU_JIT_MAX_REGISTERS;
	int jitRegCount = 32;
	jitOptVals[2] = (void *)jitRegCount;
*/

	// JIT-compile the embedded PTX image with an info-log buffer and a
	// per-thread register cap (see Fermi compatibility excerpt above).
	unsigned int jitNumOptions = 3;
	CUjit_option *jitOptions = new CUjit_option [jitNumOptions];
	void **optionValues = new void *[jitNumOptions];

// 	jitOptions[0] = CU_JIT_MAX_REGISTERS;
// 	optionValues[0] = (void *)64;

	jitOptions[0] = CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES;
	int jitLogBufferSize = 1024;
	optionValues[0] = (void *)jitLogBufferSize;
	
	// set up pointer to the compilation log buffer
	jitOptions[1] = CU_JIT_INFO_LOG_BUFFER;
	char *jitLogBuffer = new char[jitLogBufferSize];
	optionValues[1] = jitLogBuffer;
	
	// set up pointer for Maximum # of registers
	jitOptions[2] = CU_JIT_MAX_REGISTERS;
	optionValues[2] = (void *)64;

	// NOTE(review): if this load fails, ErrorExit() returns immediately and
	// leaks jitOptions / optionValues / jitLogBuffer — consider freeing them
	// before the early return.
	status = cuModuleLoadDataEx(&m_cuModule, m_MainPTX_Image, jitNumOptions, jitOptions, optionValues);
	ErrorExit();

	gLog.Print("JIT Log:\n%s\n", jitLogBuffer);

	delete [] jitLogBuffer;
	delete [] optionValues;
	delete [] jitOptions;

//	status = cuModuleLoad(&m_cuAdditionalModule, "resources\\jacobians_kernel.ptx");
// 	ErrorExit();

	// NOTE(review): this condition matches compute capability >= 1.2 (it is
	// true for 1.2/1.3 as well as 2.x+). If the "_8Warps" kernels were meant
	// for Fermi (CC >= 2.0) only, the test should be (majCC >= 2) — confirm.
	if (majCC > 1 || minCC >= 2)
	{
		// Double all thread numbers for this type of devices
		m_CUDADeviceMultiplier = 2;

		status = cuModuleGetFunction(&m_KernelFunction, m_cuModule, "SolverKernel_8Warps");
		ErrorExit();
	}
	else
	{
		// Thread numbers stays same
		m_CUDADeviceMultiplier = 1;

		status = cuModuleGetFunction(&m_KernelFunction, m_cuModule, "SolverKernel_4Warps");
		ErrorExit();
	}

	status = cuModuleGetFunction(&m_ASparseCalcFunction, m_cuModule, "ComputeASparse");
	ErrorExit();

	status = cuModuleGetFunction(&m_IntegrateFunction, m_cuModule, "Integrate");
	ErrorExit();

// 	status = cuModuleGetFunction(&m_JacobiCalcFunction, m_cuModule, "ComputeFEMJacobians");
// 	ErrorExit();

	// Same CC gate as above — see the NOTE(review) there.
	if (majCC > 1 || minCC >= 2)
	{
		status = cuModuleGetFunction(&m_JacobiCalcFunction, m_cuModule, "ComputeFEMJacobians_8Warps");
		ErrorExit();
	}
	else
	{
		status = cuModuleGetFunction(&m_JacobiCalcFunction, m_cuModule, "ComputeFEMJacobians_4Warps");
		ErrorExit();
	}

#undef ErrorExit

	// The kernels assume the device warp width matches the compile-time
	// WARP_SIZE constant.
	cuDeviceGetProperties(&m_GPU_prop, m_cuDevice);
	assert(m_GPU_prop.SIMDWidth == WARP_SIZE);

	m_SharedMemPerBlock = m_GPU_prop.sharedMemPerBlock;


	cuDeviceGetAttribute(&m_MultiProcessorsNum, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, m_cuDevice);

/*
		SOLVER = 0,
		ASPARSE,
		INTEGRATE,
		JACOBI
*/

	// For each kernel: query its static shared-memory usage, round it up to
	// the allocation granularity (128 B on CC >= 2.x, 512 B on CC 1.x), and
	// record the leftover per-block shared memory as that kernel's dynamic
	// shared-memory budget.
	{
		int reg_num, local_size, shared_size, const_size;
		cuFuncGetAttribute(&reg_num, CU_FUNC_ATTRIBUTE_NUM_REGS, m_KernelFunction);
		cuFuncGetAttribute(&local_size, CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES, m_KernelFunction);
		cuFuncGetAttribute(&shared_size, CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, m_KernelFunction);
		cuFuncGetAttribute(&const_size, CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES, m_KernelFunction);

		// Granularity
		if (majCC > 1)
		{
			m_SharedMemorySizes[KernelTypes::SOLVER] = m_GPU_prop.sharedMemPerBlock - ((shared_size + 127) & (~127));
		} else
		{
			m_SharedMemorySizes[KernelTypes::SOLVER] =  m_GPU_prop.sharedMemPerBlock - ((shared_size + 511) & (~511));
		}

		char tmp_msg[1024];
		sprintf(tmp_msg, "regNum: %d; localSize: %d; sharedSize: %d; constSize: %d;", reg_num, local_size, shared_size, const_size);

		MessageBox(0, tmp_msg, "CUDA stats :: Solver Kernel", 0);
	}
	//////////////////////////////////////////////////////////////////////////
	{
		int reg_num, local_size, shared_size, const_size;
		cuFuncGetAttribute(&reg_num, CU_FUNC_ATTRIBUTE_NUM_REGS, m_ASparseCalcFunction);
		cuFuncGetAttribute(&local_size, CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES, m_ASparseCalcFunction);
		cuFuncGetAttribute(&shared_size, CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, m_ASparseCalcFunction);
		cuFuncGetAttribute(&const_size, CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES, m_ASparseCalcFunction);

	
		// Granularity (same scheme as the solver kernel above).
		if (majCC > 1)
		{
			m_SharedMemorySizes[KernelTypes::ASPARSE] =  m_GPU_prop.sharedMemPerBlock - ((shared_size + 127) & (~127));
		} else
		{
			m_SharedMemorySizes[KernelTypes::ASPARSE] =  m_GPU_prop.sharedMemPerBlock - ((shared_size + 511) & (~511));
		}
//		m_SharedMemorySizes[KernelTypes::ASPARSE] = shared_size;

		char tmp_msg[1024];
		sprintf(tmp_msg, "regNum: %d; localSize: %d; sharedSize: %d; constSize: %d;", reg_num, local_size, shared_size, const_size);

		MessageBox(0, tmp_msg, "CUDA stats :: A Sparse Calculation Kernel", 0);
	}
	//////////////////////////////////////////////////////////////////////////
	{
		int reg_num, local_size, shared_size, const_size;
		cuFuncGetAttribute(&reg_num, CU_FUNC_ATTRIBUTE_NUM_REGS, m_IntegrateFunction);
		cuFuncGetAttribute(&local_size, CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES, m_IntegrateFunction);
		cuFuncGetAttribute(&shared_size, CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, m_IntegrateFunction);
		cuFuncGetAttribute(&const_size, CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES, m_IntegrateFunction);

		// Granularity
		if (majCC > 1)
		{
			m_SharedMemorySizes[KernelTypes::INTEGRATE] =  m_GPU_prop.sharedMemPerBlock - ((shared_size + 127) & (~127));
		} else
		{
			m_SharedMemorySizes[KernelTypes::INTEGRATE] =  m_GPU_prop.sharedMemPerBlock - ((shared_size + 511) & (~511));
		}
//		m_SharedMemorySizes[KernelTypes::INTEGRATE] = shared_size;

		char tmp_msg[1024];
		sprintf(tmp_msg, "regNum: %d; localSize: %d; sharedSize: %d; constSize: %d;", reg_num, local_size, shared_size, const_size);

		MessageBox(0, tmp_msg, "CUDA stats :: Integrate Kernel", 0);
	}
	//////////////////////////////////////////////////////////////////////////
	{
		int reg_num, local_size, shared_size, const_size;
		cuFuncGetAttribute(&reg_num, CU_FUNC_ATTRIBUTE_NUM_REGS, m_JacobiCalcFunction);
		cuFuncGetAttribute(&local_size, CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES, m_JacobiCalcFunction);
		cuFuncGetAttribute(&shared_size, CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, m_JacobiCalcFunction);
		cuFuncGetAttribute(&const_size, CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES, m_JacobiCalcFunction);

		// Granularity
		if (majCC > 1)
		{
			m_SharedMemorySizes[KernelTypes::JACOBI] =  m_GPU_prop.sharedMemPerBlock - ((shared_size + 127) & (~127));
		} else
		{
			m_SharedMemorySizes[KernelTypes::JACOBI] =  m_GPU_prop.sharedMemPerBlock - ((shared_size + 511) & (~511));
		}
//		m_SharedMemorySizes[KernelTypes::JACOBI] = shared_size;

		char tmp_msg[1024];
		sprintf(tmp_msg, "regNum: %d; localSize: %d; sharedSize: %d; constSize: %d;", reg_num, local_size, shared_size, const_size);

		MessageBox(0, tmp_msg, "CUDA stats :: FEM-Joint Calculation Kernel", 0);
	}


	//////////////////////////////////////////////////////////////////////////
	// Allocate __device__ memory
	//////////////////////////////////////////////////////////////////////////
	ASSIGN_MEM_CONSTANT_PTR(a_x, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(a_y, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(a_z, MAX_NUM_NODES * sizeof(float));

	// Per-multiprocessor scratch buffers for block-level reductions. The
	// buffer's device address is published to a module-level pointer symbol
	// by writing a single 32-bit word with cuMemsetD32.
	// NOTE(review): this only works while CUdeviceptr is 32 bits (legacy
	// driver API); on a 64-bit toolkit it would truncate the pointer — a
	// cuMemcpyHtoD(Symbol, &ptr, sizeof(ptr)) would be required instead.
//	ASSIGN_MEM_CONSTANT_PTR(internal_results, m_MultiProcessorsNum * sizeof(float));
    CUDA_SAFE_CALL( cuMemAlloc(&m_gpu_internal_results, m_MultiProcessorsNum * sizeof(float)) ); 
	{
		unsigned int SymbolMemSize;
		CUdeviceptr Symbol;
		cuModuleGetGlobal(&Symbol, &SymbolMemSize, m_cuModule, "ptr_internal_results");
		cuMemsetD32(Symbol, m_gpu_internal_results, 1);
	}

	// Same 32-bit pointer-publish trick as above (see NOTE(review)).
    CUDA_SAFE_CALL( cuMemAlloc(&m_gpu_internal_results_1, m_MultiProcessorsNum * sizeof(float)) ); 
	{
		unsigned int SymbolMemSize;
		CUdeviceptr Symbol;
		cuModuleGetGlobal(&Symbol, &SymbolMemSize, m_cuModule, "ptr_internal_results_1");
		cuMemsetD32(Symbol, m_gpu_internal_results_1, 1);
	}

	// Same 32-bit pointer-publish trick as above (see NOTE(review)).
    CUDA_SAFE_CALL( cuMemAlloc(&m_gpu_internal_results_2, m_MultiProcessorsNum * sizeof(float)) ); 
	{
		unsigned int SymbolMemSize;
		CUdeviceptr Symbol;
		cuModuleGetGlobal(&Symbol, &SymbolMemSize, m_cuModule, "ptr_internal_results_2");
		cuMemsetD32(Symbol, m_gpu_internal_results_2, 1);
	}

	// Same 32-bit pointer-publish trick as above (see NOTE(review)).
    CUDA_SAFE_CALL( cuMemAlloc(&m_gpu_internal_results_3, m_MultiProcessorsNum * sizeof(float)) ); 
	{
		unsigned int SymbolMemSize;
		CUdeviceptr Symbol;
		cuModuleGetGlobal(&Symbol, &SymbolMemSize, m_cuModule, "ptr_internal_results_3");
		cuMemsetD32(Symbol, m_gpu_internal_results_3, 1);
	}

	// Jacobian rows: 6 triples of (x, y, z) columns, stored as separate
	// arrays (structure-of-arrays layout).
	ASSIGN_MEM_CONSTANT_PTR(J_00, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(J_01, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(J_02, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(J_03, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(J_04, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(J_05, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(J_06, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(J_07, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(J_08, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(J_09, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(J_10, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(J_11, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(J_12, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(J_13, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(J_14, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(J_15, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(J_16, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(J_17, MAX_NUM_JACOBI * sizeof(float));

	ASSIGN_MEM_CONSTANT_PTR(CFM, MAX_NUM_JACOBI * sizeof(float));

	ASSIGN_ALIGNED_MEM_TEXTURE_REF(invDiag,  MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_MEM_CONSTANT_PTR_NOALLOC(invDiag);

	// LCP state: multipliers, their low/high bounds, and the right-hand side.
	ASSIGN_MEM_CONSTANT_PTR(lambda, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(Lo, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(Hi, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(RHS, MAX_NUM_JACOBI * sizeof(float));

	ASSIGN_MEM_CONSTANT_PTR(Joint_MemDmp_00, 6 * MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(Joint_MemDmp_01, 6 * MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(Joint_MemDmp_02, 6 * MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(Joint_MemDmp_03, 6 * MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(Joint_MemDmp_04, 6 * MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(Joint_MemDmp_05, 6 * MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(Joint_MemDmp_06, 6 * MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(Joint_MemDmp_07, 6 * MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(Joint_MemDmp_08, 6 * MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(Joint_MemDmp_09, 6 * MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(Joint_MemDmp_10, 6 * MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(Joint_MemDmp_11, 6 * MAX_NUM_JACOBI * sizeof(float));

	// Conjugate-gradient work vectors.
	ASSIGN_MEM_CONSTANT_PTR(d, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(p, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(g, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(ap_vec, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(ad_vec, MAX_NUM_JACOBI * sizeof(float));

	// Per-row node indices (one array per joint triple).
	ASSIGN_MEM_CONSTANT_PTR(JtdNodes_00, MAX_NUM_JACOBI * sizeof(unsigned int));
	ASSIGN_MEM_CONSTANT_PTR(JtdNodes_01, MAX_NUM_JACOBI * sizeof(unsigned int));
	ASSIGN_MEM_CONSTANT_PTR(JtdNodes_02, MAX_NUM_JACOBI * sizeof(unsigned int));
	ASSIGN_MEM_CONSTANT_PTR(JtdNodes_03, MAX_NUM_JACOBI * sizeof(unsigned int));
	ASSIGN_MEM_CONSTANT_PTR(JtdNodes_04, MAX_NUM_JACOBI * sizeof(unsigned int));
	ASSIGN_MEM_CONSTANT_PTR(JtdNodes_05, MAX_NUM_JACOBI * sizeof(unsigned int));

	// Sparse A = dt * J * M^-1 (see FORM_A_SPARSE macro): one column array
	// per Jacobian component, each bound to a texture reference.
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(Asp_00,  MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(Asp_01,  MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(Asp_02,  MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(Asp_03,  MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(Asp_04,  MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(Asp_05,  MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(Asp_06,  MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(Asp_07,  MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(Asp_08,  MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(Asp_09,  MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(Asp_10,  MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(Asp_11,  MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(Asp_12,  MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(Asp_13,  MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(Asp_14,  MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(Asp_15,  MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(Asp_16,  MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(Asp_17,  MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);

	ASSIGN_MEM_CONSTANT_PTR_NOALLOC(Asp_00);
	ASSIGN_MEM_CONSTANT_PTR_NOALLOC(Asp_01);
	ASSIGN_MEM_CONSTANT_PTR_NOALLOC(Asp_02);
	ASSIGN_MEM_CONSTANT_PTR_NOALLOC(Asp_03);
	ASSIGN_MEM_CONSTANT_PTR_NOALLOC(Asp_04);
	ASSIGN_MEM_CONSTANT_PTR_NOALLOC(Asp_05);
	ASSIGN_MEM_CONSTANT_PTR_NOALLOC(Asp_06);
	ASSIGN_MEM_CONSTANT_PTR_NOALLOC(Asp_07);
	ASSIGN_MEM_CONSTANT_PTR_NOALLOC(Asp_08);
	ASSIGN_MEM_CONSTANT_PTR_NOALLOC(Asp_09);
	ASSIGN_MEM_CONSTANT_PTR_NOALLOC(Asp_10);
	ASSIGN_MEM_CONSTANT_PTR_NOALLOC(Asp_11);
	ASSIGN_MEM_CONSTANT_PTR_NOALLOC(Asp_12);
	ASSIGN_MEM_CONSTANT_PTR_NOALLOC(Asp_13);
	ASSIGN_MEM_CONSTANT_PTR_NOALLOC(Asp_14);
	ASSIGN_MEM_CONSTANT_PTR_NOALLOC(Asp_15);
	ASSIGN_MEM_CONSTANT_PTR_NOALLOC(Asp_16);
	ASSIGN_MEM_CONSTANT_PTR_NOALLOC(Asp_17);

	// 1st triple
	ASSIGN_MEM_CONSTANT_PTR(NodesJointsNum_00,		MAX_NUM_NODES * sizeof(unsigned int));
	ASSIGN_MEM_CONSTANT_PTR(NodesJointsOffset_00,	MAX_NUM_NODES * sizeof(unsigned int));
	// 2nd triple
	ASSIGN_MEM_CONSTANT_PTR(NodesJointsNum_01,		MAX_NUM_NODES * sizeof(unsigned int));
	ASSIGN_MEM_CONSTANT_PTR(NodesJointsOffset_01,	MAX_NUM_NODES * sizeof(unsigned int));
	// 3rd triple
	ASSIGN_MEM_CONSTANT_PTR(NodesJointsNum_02,		MAX_NUM_NODES * sizeof(unsigned int));
	ASSIGN_MEM_CONSTANT_PTR(NodesJointsOffset_02,	MAX_NUM_NODES * sizeof(unsigned int));
	// 4th triple
	ASSIGN_MEM_CONSTANT_PTR(NodesJointsNum_03,		MAX_NUM_NODES * sizeof(unsigned int));
	ASSIGN_MEM_CONSTANT_PTR(NodesJointsOffset_03,	MAX_NUM_NODES * sizeof(unsigned int));
	// 5th triple
	ASSIGN_MEM_CONSTANT_PTR(NodesJointsNum_04,		MAX_NUM_NODES * sizeof(unsigned int));
	ASSIGN_MEM_CONSTANT_PTR(NodesJointsOffset_04,	MAX_NUM_NODES * sizeof(unsigned int));
	// 6th triple
	ASSIGN_MEM_CONSTANT_PTR(NodesJointsNum_05,		MAX_NUM_NODES * sizeof(unsigned int));
	ASSIGN_MEM_CONSTANT_PTR(NodesJointsOffset_05,	MAX_NUM_NODES * sizeof(unsigned int));

	ASSIGN_ALIGNED_MEM_TEXTURE_REF(NodesJoints_00, MAX_NUM_JACOBI * NUM_JOINT_TRIPLES * sizeof(unsigned int), CU_AD_FORMAT_UNSIGNED_INT32);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(NodesJoints_01, MAX_NUM_JACOBI * NUM_JOINT_TRIPLES * sizeof(unsigned int), CU_AD_FORMAT_UNSIGNED_INT32);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(NodesJoints_02, MAX_NUM_JACOBI * NUM_JOINT_TRIPLES * sizeof(unsigned int), CU_AD_FORMAT_UNSIGNED_INT32);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(NodesJoints_03, MAX_NUM_JACOBI * NUM_JOINT_TRIPLES * sizeof(unsigned int), CU_AD_FORMAT_UNSIGNED_INT32);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(NodesJoints_04, MAX_NUM_JACOBI * NUM_JOINT_TRIPLES * sizeof(unsigned int), CU_AD_FORMAT_UNSIGNED_INT32);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(NodesJoints_05, MAX_NUM_JACOBI * NUM_JOINT_TRIPLES * sizeof(unsigned int), CU_AD_FORMAT_UNSIGNED_INT32);


	//////////////////////////////////////////////////////////////////////////
	// Integrate Ptrs
	//////////////////////////////////////////////////////////////////////////

	// Per-node 3x3 inverse-mass matrices: initial (NodeInvMass0_*) and
	// current (NodeInvMass_*) components stored as separate arrays.
	ASSIGN_MEM_CONSTANT_PTR(NodeInvMass0_00, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodeInvMass0_01, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodeInvMass0_02, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodeInvMass0_10, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodeInvMass0_11, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodeInvMass0_12, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodeInvMass0_20, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodeInvMass0_21, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodeInvMass0_22, MAX_NUM_NODES * sizeof(float));

	ASSIGN_MEM_CONSTANT_PTR(NodeInvMass_00, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodeInvMass_01, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodeInvMass_02, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodeInvMass_10, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodeInvMass_11, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodeInvMass_12, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodeInvMass_20, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodeInvMass_21, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodeInvMass_22, MAX_NUM_NODES * sizeof(float));

	// Bind the (already allocated) current inverse-mass arrays to texture
	// references as well (NOALLOC variants do not allocate).
	ASSIGN_ALIGNED_MEM_TEXTURE_REF_NOALLOC(NodeInvMass_00, MAX_NUM_NODES * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF_NOALLOC(NodeInvMass_01, MAX_NUM_NODES * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF_NOALLOC(NodeInvMass_02, MAX_NUM_NODES * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF_NOALLOC(NodeInvMass_10, MAX_NUM_NODES * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF_NOALLOC(NodeInvMass_11, MAX_NUM_NODES * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF_NOALLOC(NodeInvMass_12, MAX_NUM_NODES * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF_NOALLOC(NodeInvMass_20, MAX_NUM_NODES * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF_NOALLOC(NodeInvMass_21, MAX_NUM_NODES * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF_NOALLOC(NodeInvMass_22, MAX_NUM_NODES * sizeof(float), CU_AD_FORMAT_FLOAT);

	// Per-node state: external forces, total forces, velocities and
	// position/rotation (SoA layout, one array per component).
	ASSIGN_MEM_CONSTANT_PTR(NodeF_x, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodeF_y, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodeF_z, MAX_NUM_NODES * sizeof(float));

	ASSIGN_MEM_CONSTANT_PTR(Ftot_x, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(Ftot_y, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(Ftot_z, MAX_NUM_NODES * sizeof(float));

	ASSIGN_MEM_CONSTANT_PTR(NodeVel_x, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodeVel_y, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodeVel_z, MAX_NUM_NODES * sizeof(float));

	ASSIGN_MEM_CONSTANT_PTR(NodePosRot_x, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodePosRot_y, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodePosRot_z, MAX_NUM_NODES * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(NodePosRot_w, MAX_NUM_NODES * sizeof(float));

	ASSIGN_MEM_CONSTANT_PTR(IsRotational, MAX_NUM_NODES * sizeof(bool));


/*
__constant__ float	*ptr_m_FEM_N_00, *ptr_m_FEM_N_01, *ptr_m_FEM_N_02,
					*ptr_m_FEM_N_10, *ptr_m_FEM_N_11, *ptr_m_FEM_N_12,
					*ptr_m_FEM_N_20, *ptr_m_FEM_N_21, *ptr_m_FEM_N_22;

__constant__ float *ptr_m_FEM_E_plastic, *ptr_m_FEM_MaxPlasticStrain, *ptr_m_FEM_Yield, *ptr_m_FEM_Creep;
__constant__ float *ptr_m_FEM_Damping;
*/
	//////////////////////////////////////////////////////////////////////////

	// FEM per-element data: 3x3 N matrices, plasticity parameters, damping,
	// local B matrices, regularization and tetra node indices.
	ASSIGN_MEM_CONSTANT_PTR(FEM_N_00, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(FEM_N_01, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(FEM_N_02, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(FEM_N_10, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(FEM_N_11, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(FEM_N_12, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(FEM_N_20, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(FEM_N_21, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(FEM_N_22, MAX_NUM_JACOBI * sizeof(float));

	ASSIGN_MEM_CONSTANT_PTR(FEM_E_plastic,	MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(FEM_CFM,		MAX_NUM_JACOBI * sizeof(float));
	
	ASSIGN_MEM_CONSTANT_PTR(FEM_MaxPlasticStrain, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(FEM_Yield, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(FEM_Creep, MAX_NUM_JACOBI * sizeof(float));
	ASSIGN_MEM_CONSTANT_PTR(FEM_Damping, MAX_NUM_JACOBI * sizeof(float));

	ASSIGN_ALIGNED_MEM_TEXTURE_REF(FEM_B_loc, 12 * MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);
	
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(FEM_Regularization,	MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);
 	ASSIGN_ALIGNED_MEM_TEXTURE_REF(FEM_Jp0,				MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);

	ASSIGN_ALIGNED_MEM_TEXTURE_REF(FEM_N1_Idx, MAX_NUM_JACOBI * sizeof(unsigned int), CU_AD_FORMAT_UNSIGNED_INT32);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(FEM_N2_Idx, MAX_NUM_JACOBI * sizeof(unsigned int), CU_AD_FORMAT_UNSIGNED_INT32);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(FEM_N3_Idx, MAX_NUM_JACOBI * sizeof(unsigned int), CU_AD_FORMAT_UNSIGNED_INT32);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF(FEM_N4_Idx, MAX_NUM_JACOBI * sizeof(unsigned int), CU_AD_FORMAT_UNSIGNED_INT32);

	// Additional texture bindings over buffers allocated above.
	ASSIGN_ALIGNED_MEM_TEXTURE_REF_NOALLOC(NodePosRot_x, MAX_NUM_NODES * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF_NOALLOC(NodePosRot_y, MAX_NUM_NODES * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF_NOALLOC(NodePosRot_z, MAX_NUM_NODES * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF_NOALLOC(NodePosRot_w, MAX_NUM_NODES * sizeof(float), CU_AD_FORMAT_FLOAT);

	ASSIGN_ALIGNED_MEM_TEXTURE_REF_NOALLOC(RHS, MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);

// 	ASSIGN_ALIGNED_MEM_TEXTURE_REF_NOALLOC(CFM, MAX_NUM_JACOBI * sizeof(float), CU_AD_FORMAT_FLOAT);

	ASSIGN_ALIGNED_MEM_TEXTURE_REF_NOALLOC(Ftot_x, MAX_NUM_NODES * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF_NOALLOC(Ftot_y, MAX_NUM_NODES * sizeof(float), CU_AD_FORMAT_FLOAT);
	ASSIGN_ALIGNED_MEM_TEXTURE_REF_NOALLOC(Ftot_z, MAX_NUM_NODES * sizeof(float), CU_AD_FORMAT_FLOAT);

	//////////////////////////////////////////////////////////////////////////

	// Blocking-sync event used to wait for the Jacobian kernel without
	// busy-spinning the host CPU.
	CUDA_SAFE_CALL( cuEventCreate(&m_EventSync_Jacobi, CU_EVENT_BLOCKING_SYNC) );

    return CUDA_SUCCESS;
}

//////////////////////////////////////////////////////////////////////////
// Releases every GPU-side resource owned by the solver: joint node-index
// tables, per-joint damping scratch, the sparse A blocks and Jacobian rows,
// per-node joint adjacency tables, the CG work vectors, the FEM element
// data, the Jacobi sync event, and finally the CUDA context itself.
// Mirrors (in the same grouping) the allocations done in the init path.
// NOTE(review): CUDA_SAFE_CALL presumably reports/aborts on error -- if it
// aborts, a single failed free skips all remaining releases; confirm the
// macro's failure semantics.
//////////////////////////////////////////////////////////////////////////
void SolverLCPCG_CUDA::DeinitCUDA()
{
	// Node-index triples referenced by each Jacobian row (6 tables).
	CUDA_SAFE_CALL( cuMemFree(m_gpu_JtdNodes_00) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_JtdNodes_01) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_JtdNodes_02) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_JtdNodes_03) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_JtdNodes_04) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_JtdNodes_05) );

	//////////////////////////////////////////////////////////////////////////

	// Per-joint damping scratch buffers (12 components).
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Joint_MemDmp_00) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Joint_MemDmp_01) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Joint_MemDmp_02) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Joint_MemDmp_03) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Joint_MemDmp_04) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Joint_MemDmp_05) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Joint_MemDmp_06) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Joint_MemDmp_07) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Joint_MemDmp_08) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Joint_MemDmp_09) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Joint_MemDmp_10) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Joint_MemDmp_11) );

	//////////////////////////////////////////////////////////////////////////


	// Sparse "A = dt * M^-1 * J^T" column blocks, 18 components per joint
	// (matches the NUM_JOINT_TRIPLES == 6 layout; see FORM_A_SPARSE macro).
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Asp_00) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Asp_01) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Asp_02) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Asp_03) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Asp_04) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Asp_05) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Asp_06) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Asp_07) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Asp_08) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Asp_09) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Asp_10) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Asp_11) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Asp_12) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Asp_13) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Asp_14) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Asp_15) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Asp_16) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Asp_17) );

	// Jacobian row components (18, mirroring the Asp layout).
	CUDA_SAFE_CALL( cuMemFree(m_gpu_J_00) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_J_01) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_J_02) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_J_03) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_J_04) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_J_05) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_J_06) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_J_07) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_J_08) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_J_09) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_J_10) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_J_11) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_J_12) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_J_13) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_J_14) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_J_15) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_J_16) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_J_17) );

	// Per-row constraint-force-mixing values.
	CUDA_SAFE_CALL( cuMemFree(m_gpu_CFM) );

	// Node -> joint adjacency: joint lists, per-node counts, and offsets.
	CUDA_SAFE_CALL( cuMemFree(m_gpu_NodesJoints_00) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_NodesJoints_01) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_NodesJoints_02) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_NodesJoints_03) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_NodesJoints_04) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_NodesJoints_05) );

	CUDA_SAFE_CALL( cuMemFree(m_gpu_NodesJointsNum_00) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_NodesJointsNum_01) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_NodesJointsNum_02) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_NodesJointsNum_03) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_NodesJointsNum_04) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_NodesJointsNum_05) );

	CUDA_SAFE_CALL( cuMemFree(m_gpu_NodesJointsOffset_00) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_NodesJointsOffset_01) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_NodesJointsOffset_02) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_NodesJointsOffset_03) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_NodesJointsOffset_04) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_NodesJointsOffset_05) );

	
	// LCP state vectors: multipliers, preconditioner diagonal, bounds, RHS.
	CUDA_SAFE_CALL( cuMemFree(m_gpu_lambda) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_invDiag) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Lo) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_Hi) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_RHS) );


	// Conjugate-gradient work vectors.
	CUDA_SAFE_CALL( cuMemFree(m_gpu_d) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_p) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_g) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_ap_vec) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_ad_vec) );

	// Per-node accumulation vectors (x/y/z components).
	CUDA_SAFE_CALL( cuMemFree(m_gpu_a_x) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_a_y) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_a_z) );

	// Reduction scratch buffers (e.g. dot products / residual norms).
	CUDA_SAFE_CALL( cuMemFree(m_gpu_internal_results) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_internal_results_1) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_internal_results_2) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_internal_results_3) );



/*
	CUdeviceptr m_gpu_FEM_Damping, m_gpu_FEM_Regularization;
	CUdeviceptr	m_gpu_FEM_CFM, m_gpu_FEM_E_plastic;
	CUdeviceptr	m_gpu_FEM_B_loc, m_gpu_FEM_N;
	CUdeviceptr	m_gpu_FEM_N1_Idx, m_gpu_FEM_N2_Idx, m_gpu_FEM_N3_Idx, m_gpu_FEM_N4_Idx;
*/
	//////////////////////////////////////////////////////////////////////////

	// FEM element data: 3x3 N matrices (row-major component arrays).
	CUDA_SAFE_CALL( cuMemFree(m_gpu_FEM_N_00) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_FEM_N_01) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_FEM_N_02) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_FEM_N_10) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_FEM_N_11) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_FEM_N_12) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_FEM_N_20) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_FEM_N_21) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_FEM_N_22) );

	// Local strain-displacement matrices (6x12 per joint).
	CUDA_SAFE_CALL( cuMemFree(m_gpu_FEM_B_loc) );

	// Per-joint material parameters and plasticity state.
	CUDA_SAFE_CALL( cuMemFree(m_gpu_FEM_Damping) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_FEM_Regularization) );
 	CUDA_SAFE_CALL( cuMemFree(m_gpu_FEM_CFM) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_FEM_E_plastic) );

	CUDA_SAFE_CALL( cuMemFree(m_gpu_FEM_Jp0) );

	// Tetrahedron node indices (4 nodes per FEM joint).
	CUDA_SAFE_CALL( cuMemFree(m_gpu_FEM_N1_Idx) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_FEM_N2_Idx) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_FEM_N3_Idx) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_FEM_N4_Idx) );

	CUDA_SAFE_CALL( cuMemFree(m_gpu_FEM_MaxPlasticStrain) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_FEM_Yield) );
	CUDA_SAFE_CALL( cuMemFree(m_gpu_FEM_Creep) );

	//////////////////////////////////////////////////////////////////////////

	// Created in the init path with CU_EVENT_BLOCKING_SYNC.
	CUDA_SAFE_CALL( cuEventDestroy(m_EventSync_Jacobi) );

	// NOTE(review): cuCtxDetach is deprecated in newer driver API releases
	// in favor of cuCtxDestroy -- confirm the targeted CUDA version.
    CUDA_SAFE_CALL( cuCtxDetach(m_cuContext) );
}


//////////////////////////////////////////////////////////////////////////
// Uploads the per-node initialization data (inverse-mass 3x3 blocks,
// initial inverse-mass blocks, velocities, positions/rotations, and the
// rotational-node flags) from the host mirrors to the GPU.
//
// The ptr_m_* locals below look unused, but COPY_MEM_H_TO_D(NAME, ...)
// presumably expands to code referencing ptr_m_##NAME via token pasting
// (same pattern as the FORM_A_SPARSE macro above) -- do not remove or
// rename them without checking the macro definition.
//////////////////////////////////////////////////////////////////////////
void SolverLCPCG_CUDA::SendInitParams_GPU()
{
	// Host-side source pointers for the 3x3 inverse-mass blocks.
	float	*ptr_m_NodeInvMass_00 = &m_NodeInvMass_00[0],
			*ptr_m_NodeInvMass_01 = &m_NodeInvMass_01[0],
			*ptr_m_NodeInvMass_02 = &m_NodeInvMass_02[0],
			*ptr_m_NodeInvMass_10 = &m_NodeInvMass_10[0],
			*ptr_m_NodeInvMass_11 = &m_NodeInvMass_11[0],
			*ptr_m_NodeInvMass_12 = &m_NodeInvMass_12[0],
			*ptr_m_NodeInvMass_20 = &m_NodeInvMass_20[0],
			*ptr_m_NodeInvMass_21 = &m_NodeInvMass_21[0],
			*ptr_m_NodeInvMass_22 = &m_NodeInvMass_22[0];

	// Initial (undamaged/reference) inverse-mass blocks.
	float *ptr_m_NodeInvMass0_00 = &m_NodeInvMass0_00[0];
	float *ptr_m_NodeInvMass0_01 = &m_NodeInvMass0_01[0];
	float *ptr_m_NodeInvMass0_02 = &m_NodeInvMass0_02[0];
	float *ptr_m_NodeInvMass0_10 = &m_NodeInvMass0_10[0];
	float *ptr_m_NodeInvMass0_11 = &m_NodeInvMass0_11[0];
	float *ptr_m_NodeInvMass0_12 = &m_NodeInvMass0_12[0];
	float *ptr_m_NodeInvMass0_20 = &m_NodeInvMass0_20[0];
	float *ptr_m_NodeInvMass0_21 = &m_NodeInvMass0_21[0];
	float *ptr_m_NodeInvMass0_22 = &m_NodeInvMass0_22[0];

	// Node position (x,y,z) + rotation component (w) -- structure-of-arrays.
	float	*ptr_m_NodePosRot_w = &m_NodePosRot_w[0],
			*ptr_m_NodePosRot_x = &m_NodePosRot_x[0],
			*ptr_m_NodePosRot_y = &m_NodePosRot_y[0],
			*ptr_m_NodePosRot_z = &m_NodePosRot_z[0];

	float *ptr_m_NodeVel_x = &m_NodeVel_x[0];
	float *ptr_m_NodeVel_y = &m_NodeVel_y[0];
	float *ptr_m_NodeVel_z = &m_NodeVel_z[0];

	// Marks nodes carrying rotational DOFs.
	bool *ptr_m_IsRotational = &m_IsRotational[0];

	//////////////////////////////////////////////////////////////////////////
	// Inverse-mass blocks: host -> device, m_NumNodes floats each.
	COPY_MEM_H_TO_D(NodeInvMass_00, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodeInvMass_01, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodeInvMass_02, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodeInvMass_10, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodeInvMass_11, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodeInvMass_12, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodeInvMass_20, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodeInvMass_21, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodeInvMass_22, m_NumNodes * sizeof(float), 0, 0);
	//////////////////////////////////////////////////////////////////////////

	COPY_MEM_H_TO_D(NodeInvMass0_00, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodeInvMass0_01, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodeInvMass0_02, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodeInvMass0_10, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodeInvMass0_11, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodeInvMass0_12, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodeInvMass0_20, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodeInvMass0_21, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodeInvMass0_22, m_NumNodes * sizeof(float), 0, 0);

	//////////////////////////////////////////////////////////////////////////

	// Kinematic state.
	COPY_MEM_H_TO_D(NodeVel_x, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodeVel_y, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodeVel_z, m_NumNodes * sizeof(float), 0, 0);


	COPY_MEM_H_TO_D(NodePosRot_x, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodePosRot_y, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodePosRot_z, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodePosRot_w, m_NumNodes * sizeof(float), 0, 0);

	// Note: element size is sizeof(bool) here, not sizeof(float).
	COPY_MEM_H_TO_D(IsRotational, m_NumNodes * sizeof(bool), 0, 0);
}








//////////////////////////////////////////////////////////////////////////
// GPU Jacobi calculation
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
// Packs the per-FEM-joint constant data into flat host arrays and uploads
// them to the GPU. Per-joint scalars (damping, regularization, yield,
// creep, max plastic strain), the 3x3 N matrix, and the 4 tetrahedron node
// indices are stored one-entry-per-joint. The 6 constraint rows per joint
// (CFM, E_plastic) are stored warp-interleaved: joints are grouped 32 per
// warp; within a group, row r of joint j lives at
//     warpBase*6 + laneInWarp + r*rowShift
// where rowShift is the number of joints in the group (<= 32), so that
// consecutive GPU threads touch consecutive elements of each row band.
// Side effect: sets m_Num_FEM_Joints to FEM_Joints.size().
//
// The ptr_m_* locals are presumably referenced by COPY_MEM_H_TO_D via
// token pasting -- do not rename/remove them without checking the macro.
//////////////////////////////////////////////////////////////////////////
void SolverLCPCG_CUDA::SendFEMInitParams_GPU(
			std::vector<FEMJoint, GlobalAllocator<FEMJoint>> &FEM_Joints
			)
{
	float *ptr_m_FEM_Damping = &m_FEM_Damping[0];
	float *ptr_m_FEM_Regularization = &m_FEM_Regularization[0];

	float *ptr_m_FEM_Yield = &m_FEM_Yield[0];
	float *ptr_m_FEM_Creep = &m_FEM_Creep[0];
	float *ptr_m_FEM_MaxPlasticStrain = &m_FEM_MaxPlasticStrain[0];

	float *ptr_m_FEM_CFM = &m_FEM_CFM[0];
	float *ptr_m_FEM_E_plastic = &m_FEM_E_plastic[0];
	float *ptr_m_FEM_B_loc = &m_FEM_B_loc[0];
	float *ptr_m_FEM_Jp0 = &m_FEM_Jp0[0];

	float *ptr_m_FEM_N_00 = &m_FEM_N_00[0];
	float *ptr_m_FEM_N_01 = &m_FEM_N_01[0];
	float *ptr_m_FEM_N_02 = &m_FEM_N_02[0];
	float *ptr_m_FEM_N_10 = &m_FEM_N_10[0];
	float *ptr_m_FEM_N_11 = &m_FEM_N_11[0];
	float *ptr_m_FEM_N_12 = &m_FEM_N_12[0];
	float *ptr_m_FEM_N_20 = &m_FEM_N_20[0];
	float *ptr_m_FEM_N_21 = &m_FEM_N_21[0];
	float *ptr_m_FEM_N_22 = &m_FEM_N_22[0];

	unsigned int *ptr_m_FEM_N1_Idx = &m_FEM_N1_Idx[0];
	unsigned int *ptr_m_FEM_N2_Idx = &m_FEM_N2_Idx[0];
	unsigned int *ptr_m_FEM_N3_Idx = &m_FEM_N3_Idx[0];
	unsigned int *ptr_m_FEM_N4_Idx = &m_FEM_N4_Idx[0];

	unsigned int FEMJoints_Total = FEM_Joints.size();

	// m_Num_FEM_Joints doubles as the running joint index inside the loop.
	m_Num_FEM_Joints = 0;

	// Form host arrays
	std::vector<FEMJoint, GlobalAllocator<FEMJoint>>::iterator itJointEnd = FEM_Joints.end(), itJoint;
	for (itJoint = FEM_Joints.begin(); itJoint != itJointEnd; ++itJoint)
	{
		// One-entry-per-joint scalars.
		ptr_m_FEM_Damping[m_Num_FEM_Joints] = itJoint->m_Damping;
		ptr_m_FEM_Regularization[m_Num_FEM_Joints] = itJoint->m_Regularization;
		ptr_m_FEM_Yield[m_Num_FEM_Joints] = itJoint->m_Yield;
		ptr_m_FEM_Creep[m_Num_FEM_Joints] = itJoint->m_Creep;
		ptr_m_FEM_MaxPlasticStrain[m_Num_FEM_Joints] = itJoint->m_MaxPlasticStrain;

		// N matrix
		ptr_m_FEM_N_00[m_Num_FEM_Joints] = itJoint->m_N.mMatrix[0][0];
		ptr_m_FEM_N_01[m_Num_FEM_Joints] = itJoint->m_N.mMatrix[0][1];
		ptr_m_FEM_N_02[m_Num_FEM_Joints] = itJoint->m_N.mMatrix[0][2];

		ptr_m_FEM_N_10[m_Num_FEM_Joints] = itJoint->m_N.mMatrix[1][0];
		ptr_m_FEM_N_11[m_Num_FEM_Joints] = itJoint->m_N.mMatrix[1][1];
		ptr_m_FEM_N_12[m_Num_FEM_Joints] = itJoint->m_N.mMatrix[1][2];

		ptr_m_FEM_N_20[m_Num_FEM_Joints] = itJoint->m_N.mMatrix[2][0];
		ptr_m_FEM_N_21[m_Num_FEM_Joints] = itJoint->m_N.mMatrix[2][1];
		ptr_m_FEM_N_22[m_Num_FEM_Joints] = itJoint->m_N.mMatrix[2][2];

		// Tetrahedron node indices.
		ptr_m_FEM_N1_Idx[m_Num_FEM_Joints] = itJoint->m_Node1Idx;
		ptr_m_FEM_N2_Idx[m_Num_FEM_Joints] = itJoint->m_Node2Idx;
		ptr_m_FEM_N3_Idx[m_Num_FEM_Joints] = itJoint->m_Node3Idx;
		ptr_m_FEM_N4_Idx[m_Num_FEM_Joints] = itJoint->m_Node4Idx;

		// Warp-interleaved layout for the 6 rows of this joint (see header).
		// rowShift handles the final partial warp (fewer than 32 joints).
		// NOTE(review): the literal 32 here should presumably be WARP_SIZE,
		// as used for threadInWarp_SW above -- confirm they always match.
		unsigned int threadInWarp_SW = m_Num_FEM_Joints & (WARP_SIZE - 1);
		unsigned int warpJointIdx_SW = (m_Num_FEM_Joints - threadInWarp_SW);// & (~31)) / 32;
		unsigned int rowShift = min(FEMJoints_Total - warpJointIdx_SW, 32);

		unsigned int	Jrow0, Jrow1, Jrow2,
						Jrow3, Jrow4, Jrow5;

		Jrow0 = warpJointIdx_SW * 6 + threadInWarp_SW;
		Jrow1 = Jrow0 + rowShift;
		Jrow2 = Jrow1 + rowShift;
		Jrow3 = Jrow2 + rowShift;
		Jrow4 = Jrow3 + rowShift;
		Jrow5 = Jrow4 + rowShift;

		ptr_m_FEM_CFM[Jrow0] = itJoint->m_CFM[0];
		ptr_m_FEM_CFM[Jrow1] = itJoint->m_CFM[1];
		ptr_m_FEM_CFM[Jrow2] = itJoint->m_CFM[2];
		ptr_m_FEM_CFM[Jrow3] = itJoint->m_CFM[3];
		ptr_m_FEM_CFM[Jrow4] = itJoint->m_CFM[4];
		ptr_m_FEM_CFM[Jrow5] = itJoint->m_CFM[5];

		ptr_m_FEM_E_plastic[Jrow0] = itJoint->m_E_plastic[0];
		ptr_m_FEM_E_plastic[Jrow1] = itJoint->m_E_plastic[1];
		ptr_m_FEM_E_plastic[Jrow2] = itJoint->m_E_plastic[2];
		ptr_m_FEM_E_plastic[Jrow3] = itJoint->m_E_plastic[3];
		ptr_m_FEM_E_plastic[Jrow4] = itJoint->m_E_plastic[4];
		ptr_m_FEM_E_plastic[Jrow5] = itJoint->m_E_plastic[5];

		float *ptr_temp;

		// B_loc is a dense 6x12 block per joint, Jp0 a 6-vector per joint;
		// both stored contiguously joint-by-joint (not warp-interleaved).
		ptr_temp = &ptr_m_FEM_B_loc[6 * 12 * m_Num_FEM_Joints];
		memcpy(ptr_temp, itJoint->m_B_loc, 6 * 12 * sizeof(float));

		ptr_temp = &ptr_m_FEM_Jp0[6 * m_Num_FEM_Joints];
		memcpy(ptr_temp, itJoint->m_Jp0, 6 * sizeof(float));

		++m_Num_FEM_Joints;
	}

	// Transfer to GPU device
	COPY_MEM_H_TO_D(FEM_Damping,			m_Num_FEM_Joints * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(FEM_Regularization,		m_Num_FEM_Joints * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(FEM_Yield,				m_Num_FEM_Joints * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(FEM_Creep,				m_Num_FEM_Joints * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(FEM_MaxPlasticStrain,	m_Num_FEM_Joints * sizeof(float), 0, 0);

	// Row-wise arrays: 6 entries per joint.
	COPY_MEM_H_TO_D(FEM_CFM,			6 * m_Num_FEM_Joints * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(FEM_E_plastic,		6 * m_Num_FEM_Joints * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(FEM_Jp0,			6 * m_Num_FEM_Joints * sizeof(float), 0, 0);

	COPY_MEM_H_TO_D(FEM_N_00,			m_Num_FEM_Joints * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(FEM_N_01,			m_Num_FEM_Joints * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(FEM_N_02,			m_Num_FEM_Joints * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(FEM_N_10,			m_Num_FEM_Joints * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(FEM_N_11,			m_Num_FEM_Joints * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(FEM_N_12,			m_Num_FEM_Joints * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(FEM_N_20,			m_Num_FEM_Joints * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(FEM_N_21,			m_Num_FEM_Joints * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(FEM_N_22,			m_Num_FEM_Joints * sizeof(float), 0, 0);

	COPY_MEM_H_TO_D(FEM_B_loc,			6 * 12 * m_Num_FEM_Joints * sizeof(float), 0, 0);

	COPY_MEM_H_TO_D(FEM_N1_Idx,			m_Num_FEM_Joints * sizeof(unsigned int), 0, 0);
	COPY_MEM_H_TO_D(FEM_N2_Idx,			m_Num_FEM_Joints * sizeof(unsigned int), 0, 0);
	COPY_MEM_H_TO_D(FEM_N3_Idx,			m_Num_FEM_Joints * sizeof(unsigned int), 0, 0);
	COPY_MEM_H_TO_D(FEM_N4_Idx,			m_Num_FEM_Joints * sizeof(unsigned int), 0, 0);

	// Node positions/rotations are re-uploaded too (presumably consumed by
	// COPY_MEM_H_TO_D through these token-pasted locals).
	float	*ptr_m_NodePosRot_w = &m_NodePosRot_w[0],
			*ptr_m_NodePosRot_x = &m_NodePosRot_x[0],
			*ptr_m_NodePosRot_y = &m_NodePosRot_y[0],
			*ptr_m_NodePosRot_z = &m_NodePosRot_z[0];

	COPY_MEM_H_TO_D(NodePosRot_x, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodePosRot_y, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodePosRot_z, m_NumNodes * sizeof(float), 0, 0);
	COPY_MEM_H_TO_D(NodePosRot_w, m_NumNodes * sizeof(float), 0, 0);
}

//////////////////////////////////////////////////////////////////////////
// Uploads per-FEM-joint warm-start multipliers (lambda0) to the GPU and
// fills the host-side node-index tables for each of the 6 constraint rows
// per joint. Rows use the same warp-interleaved layout as elsewhere in
// this file: joints grouped 32 per warp, row r of joint j stored at
// warpBase*6 + laneInWarp + r*rowShift (rowShift = joints in group, <= 32).
//
// Side effects:
//  - m_Num_FEM_Joints is reset and recounted to FEM_Joints.size();
//  - each joint's m_fetchIndices[0..5] is written back with its row
//    indices, so results can be fetched from the GPU layout later;
//  - JtdNodes tables 04/05 are zeroed (FEM joints touch only 4 nodes).
//////////////////////////////////////////////////////////////////////////
void SolverLCPCG_CUDA::SendFEMParams_GPU(
			std::vector<FEMJoint, GlobalAllocator<FEMJoint>> &FEM_Joints
			)
{
	// Host mirror of the multipliers; consumed by COPY_MEM_H_TO_D(lambda,...)
	// presumably via ptr_m_##name token pasting -- do not rename.
	float *ptr_m_lambda = &m_lambda[0];
	unsigned int	*ptr_m_JtdNodes_00, *ptr_m_JtdNodes_01, *ptr_m_JtdNodes_02,
					*ptr_m_JtdNodes_03, *ptr_m_JtdNodes_04, *ptr_m_JtdNodes_05;
	// (Removed an unused "ptr_m_CFM = &m_CFM[0]" local: it was never
	// referenced in this function, and &m_CFM[0] is undefined behavior if
	// the container is empty.)

	// Presumably initializes the ptr_m_JtdNodes_* pointers above -- confirm
	// the INIT_NODES_IDX macro definition.
	INIT_NODES_IDX(0);

	unsigned int FEMJoints_Total = FEM_Joints.size();

	// m_Num_FEM_Joints doubles as the running joint index inside the loop.
	m_Num_FEM_Joints = 0;

	// Form host arrays
	std::vector<FEMJoint, GlobalAllocator<FEMJoint>>::iterator itJointEnd = FEM_Joints.end(), itJoint;
	for (itJoint = FEM_Joints.begin(); itJoint != itJointEnd; ++itJoint)
	{
		// Warp-interleaved row indices; rowShift covers the final partial
		// warp (fewer than 32 joints remaining).
		// NOTE(review): the literal 32 should presumably be WARP_SIZE, as
		// used for threadInWarp_SW -- confirm they always match.
		unsigned int threadInWarp_SW = m_Num_FEM_Joints & (WARP_SIZE - 1);
		unsigned int warpJointIdx_SW = (m_Num_FEM_Joints - threadInWarp_SW);// & (~31)) / 32;
		unsigned int rowShift = min(FEMJoints_Total - warpJointIdx_SW, 32);

		unsigned int	Jrow0, Jrow1, Jrow2,
						Jrow3, Jrow4, Jrow5;

		Jrow0 = warpJointIdx_SW * 6 + threadInWarp_SW;
		Jrow1 = Jrow0 + rowShift;
		Jrow2 = Jrow1 + rowShift;
		Jrow3 = Jrow2 + rowShift;
		Jrow4 = Jrow3 + rowShift;
		Jrow5 = Jrow4 + rowShift;

		// Warm-start multipliers, one per constraint row.
		ptr_m_lambda[Jrow0] = itJoint->m_lambda0[0];
		ptr_m_lambda[Jrow1] = itJoint->m_lambda0[1];
		ptr_m_lambda[Jrow2] = itJoint->m_lambda0[2];
		ptr_m_lambda[Jrow3] = itJoint->m_lambda0[3];
		ptr_m_lambda[Jrow4] = itJoint->m_lambda0[4];
		ptr_m_lambda[Jrow5] = itJoint->m_lambda0[5];

		// All 6 rows of a joint reference the same 4 tetrahedron nodes;
		// the unused 5th/6th node slots are zeroed.
#define SET_NODES_INDICES(row) \
		ptr_m_JtdNodes_00[Jrow##row] = itJoint->m_Node1Idx; \
		ptr_m_JtdNodes_01[Jrow##row] = itJoint->m_Node2Idx; \
		ptr_m_JtdNodes_02[Jrow##row] = itJoint->m_Node3Idx; \
		ptr_m_JtdNodes_03[Jrow##row] = itJoint->m_Node4Idx; \
		ptr_m_JtdNodes_04[Jrow##row] = 0; \
		ptr_m_JtdNodes_05[Jrow##row] = 0;

		SET_NODES_INDICES(0);
		SET_NODES_INDICES(1);
		SET_NODES_INDICES(2);
		SET_NODES_INDICES(3);
		SET_NODES_INDICES(4);
		SET_NODES_INDICES(5);

#undef SET_NODES_INDICES

		// Remember where this joint's rows live so results can be read back
		// from the interleaved GPU arrays.
		itJoint->m_fetchIndices[0] = Jrow0;
		itJoint->m_fetchIndices[1] = Jrow1;
		itJoint->m_fetchIndices[2] = Jrow2;
		itJoint->m_fetchIndices[3] = Jrow3;
		itJoint->m_fetchIndices[4] = Jrow4;
		itJoint->m_fetchIndices[5] = Jrow5;

		++m_Num_FEM_Joints;
	}

	// Transfer to GPU device (6 rows per joint).
	COPY_MEM_H_TO_D(lambda,		6 * m_Num_FEM_Joints * sizeof(float), 0, 0);
}

//////////////////////////////////////////////////////////////////////////
// Launches the FEM Jacobi-calculation kernel via the legacy driver-API
// execution-control path: binds the input texture references to the
// kernel, marshals the two scalar arguments (m_Num_FEM_Joints, dt) into
// the parameter buffer with manual alignment, configures block shape and
// shared memory, launches one block per multiprocessor asynchronously on
// the default stream, and records m_EventSync_Jacobi so callers can wait
// for completion.
//
// dt: simulation time step, forwarded to the kernel.
//////////////////////////////////////////////////////////////////////////
void SolverLCPCG_CUDA::CalcFEMParams_GPU(
			float dt
			)
{
	// Running byte offset into the kernel's parameter buffer.
	int offset = 0;

	ASSIGN_TEXTURE_REF_KERNEL(RHS, m_JacobiCalcFunction);

	ASSIGN_TEXTURE_REF_KERNEL(FEM_Regularization, m_JacobiCalcFunction);
	ASSIGN_TEXTURE_REF_KERNEL(FEM_Jp0, m_JacobiCalcFunction);
	ASSIGN_TEXTURE_REF_KERNEL(FEM_B_loc, m_JacobiCalcFunction);

	ASSIGN_TEXTURE_REF_KERNEL(NodePosRot_x, m_JacobiCalcFunction);
	ASSIGN_TEXTURE_REF_KERNEL(NodePosRot_y, m_JacobiCalcFunction);
	ASSIGN_TEXTURE_REF_KERNEL(NodePosRot_z, m_JacobiCalcFunction);
	ASSIGN_TEXTURE_REF_KERNEL(NodePosRot_w, m_JacobiCalcFunction);

	ASSIGN_TEXTURE_REF_KERNEL(FEM_N1_Idx, m_JacobiCalcFunction);
	ASSIGN_TEXTURE_REF_KERNEL(FEM_N2_Idx, m_JacobiCalcFunction);
	ASSIGN_TEXTURE_REF_KERNEL(FEM_N3_Idx, m_JacobiCalcFunction);
	ASSIGN_TEXTURE_REF_KERNEL(FEM_N4_Idx, m_JacobiCalcFunction);

	// m_Num_FEM_Joints
	// Manual round-up of offset to a 4-byte boundary; the commented-out
	// macro call below was deliberately replaced -- NOTE(review): confirm
	// why GET_UPPER_ALIGNMENT_BOUND was not used here (it IS used for dt).
//	offset = GET_UPPER_ALIGNMENT_BOUND(offset, m_Num_FEM_Joints);
 	offset = ((offset) + 4 - 1) & ~(4 - 1);
	CUDA_SAFE_CALL( cuParamSeti(m_JacobiCalcFunction, offset, m_Num_FEM_Joints) );
	offset += sizeof(m_Num_FEM_Joints);

	// dt
	offset = GET_UPPER_ALIGNMENT_BOUND(offset, dt);
	CUDA_SAFE_CALL( cuParamSetf(m_JacobiCalcFunction, offset, dt) );
	offset += sizeof(dt);

	// Set argument list size
	CUDA_SAFE_CALL( cuParamSetSize(m_JacobiCalcFunction, offset) );

	// Set number of threads in block (as grid XYZ)
	CUDA_SAFE_CALL( cuFuncSetBlockShape(m_JacobiCalcFunction, m_CUDADeviceMultiplier * BLOCK_SIZE, 1, 1) );

	//m_SharedMemorySizes[KernelTypes::JACOBI];
	CUDA_SAFE_CALL( cuFuncSetSharedSize(m_JacobiCalcFunction, m_SharedMemorySizes[KernelTypes::JACOBI]) );

	// Number of blocks is equal to number of multiprocessors
	{
		// Async launch on the default stream (0); errors are asserted, not
		// returned to the caller.
		CUresult res = cuLaunchGridAsync(m_JacobiCalcFunction, m_MultiProcessorsNum, 1, 0);
		CUDA_ERROR_ASSERT(res);
	}

	// Recorded on the default stream; wait on this event before reading the
	// kernel's outputs.
	CUDA_SAFE_CALL( cuEventRecord(m_EventSync_Jacobi, 0) );
}



#undef FORM_A_SPARSE