#ifndef _SOLVER_KERNEL_H_
#define _SOLVER_KERNEL_H_

// #include <stdio.h>
#include "common_header.h"

#define ACT_SET_THRESHOLD 0.0001f
#define PROPORTIONING_DOUBLE_STEP 1 

#define LCPCG_PRECONDITIONER_LEFTJACOBI		0
#define LCPCG_PRECONDITIONER_FACEJACOBI		0
#define LCPCG_PRECONDITIONER_DOUBLEJACOBI	1

__device__ volatile unsigned int syncAllFlags[WARP_SIZE];

// Block synchronization routine
// syncAllFlags must be initialized to zero
// in first "control" block, loops until block-flags become 1
// in other blocks when reaching sync point sets according block-flag to 1 and then idle loop
// until "control" block sets flags to zero (after some sync-operations)

// Opens a software grid-wide barrier (must be paired with SYNCALL_END, which
// closes the braces this macro leaves open).
//
// Protocol: block 0 is the "control" block. Its first warp polls the global
// syncAllFlags[] array, summing all WARP_SIZE slots via a shared-memory tree
// reduction, and spins until (gridDim.x - 1) non-control blocks have raised
// their flag. Code placed between SYNCALL_BEGIN() and SYNCALL_END() therefore
// executes only in block 0, after all other blocks have arrived.
//
// Requirements (implied by the code, confirm against launch config):
//  - a __shared__ volatile unsigned int sh_syncAllFlags[WARP_SIZE] in scope;
//  - gridDim.x <= WARP_SIZE (one flag slot per block);
//  - blockDim.x > gridDim.x (SYNCALL_END uses thread blockIdx.x as signaller).
//
// NOTE(review): the intra-warp reduction on sh_syncAllFlags relies on implicit
// warp-synchronous execution (no __syncwarp) — pre-Volta assumption; verify on
// newer architectures.
#define SYNCALL_BEGIN() \
	__threadfence(); \
	__syncthreads(); \
    if (blockIdx.x == 0) \
    { \
        if (threadIdx.x < WARP_SIZE) \
		{ \
            do \
			{ \
                sh_syncAllFlags[threadIdx.x] = syncAllFlags[threadIdx.x]; \
                if (threadIdx.x < 16) \
				{ \
                    sh_syncAllFlags[threadIdx.x] += sh_syncAllFlags[threadIdx.x + 16]; \
                    sh_syncAllFlags[threadIdx.x] += sh_syncAllFlags[threadIdx.x +  8]; \
                    sh_syncAllFlags[threadIdx.x] += sh_syncAllFlags[threadIdx.x +  4]; \
                    sh_syncAllFlags[threadIdx.x] += sh_syncAllFlags[threadIdx.x +  2]; \
                    sh_syncAllFlags[threadIdx.x] += sh_syncAllFlags[threadIdx.x +  1]; \
                } \
            } \
            while (sh_syncAllFlags[0] + 1 < gridDim.x); \
        } \
        __syncthreads();

// Closes the grid-wide barrier opened by SYNCALL_BEGIN.
// Control block (0): clears all flag slots and fences so waiting blocks see it.
// Other blocks: the single thread whose index equals blockIdx.x raises the
// block's flag, fences, then spins until the control block resets it to zero.
// NOTE(review): the spin-wait on a __device__ volatile flag with no backoff
// assumes all blocks are co-resident on the GPU (no preemption-free oversubscription);
// this is the classic lock-up hazard of software grid barriers — confirm the
// launch never exceeds resident-block capacity.
#define SYNCALL_END() \
        if (threadIdx.x < WARP_SIZE) \
		{ \
            syncAllFlags[threadIdx.x] = 0; \
            __threadfence(); \
        } \
        __syncthreads(); \
    } \
    else \
    { \
        if (threadIdx.x == blockIdx.x) \
		{ \
            syncAllFlags[threadIdx.x] = 1; \
            __threadfence(); \
            while (syncAllFlags[threadIdx.x] != 0); \
        } \
        __syncthreads(); \
    }

// ---------------------------------------------------------------------------
// Device-side solver state, published from the host via cudaMemcpyToSymbol.
// Pointers live in __constant__ memory; the buffers they point to are global.
// ---------------------------------------------------------------------------

// Jacobian J stored column-split: 18 arrays = 6 node slots x 3 components per row.
__constant__ float	*ptr_m_J_00, *ptr_m_J_01, *ptr_m_J_02, *ptr_m_J_03, *ptr_m_J_04, *ptr_m_J_05,
					*ptr_m_J_06, *ptr_m_J_07, *ptr_m_J_08, *ptr_m_J_09, *ptr_m_J_10, *ptr_m_J_11,
					*ptr_m_J_12, *ptr_m_J_13, *ptr_m_J_14, *ptr_m_J_15, *ptr_m_J_16, *ptr_m_J_17;

// Constraint-force-mixing (regularization) term per constraint row.
__constant__ float	*ptr_m_CFM;

// Node indices touched by each joint, one array per node slot (0..5).
__constant__ unsigned int	*ptr_m_JtdNodes_00, *ptr_m_JtdNodes_01, *ptr_m_JtdNodes_02,
							*ptr_m_JtdNodes_03, *ptr_m_JtdNodes_04, *ptr_m_JtdNodes_05;

// Per-row solver vectors: multipliers, (inverse) diagonal preconditioner, residual.
__constant__ float	*ptr_m_lambda, *ptr_m_invDiag, *ptr_m_resid;
__constant__ float	*ptr_m_RHS;

// Per-row lower/upper bounds of the LCP.
__constant__ float	*ptr_m_Lo, *ptr_m_Hi;

// Node positions (x,y,z) and rotation component w.
__constant__ float	*ptr_m_NodePosRot_x, *ptr_m_NodePosRot_y, *ptr_m_NodePosRot_z, *ptr_m_NodePosRot_w;

// 3x3 inverse mass blocks per node — initial ("0") and current variants.
__constant__ float	*ptr_m_NodeInvMass0_00, *ptr_m_NodeInvMass0_01, *ptr_m_NodeInvMass0_02,
					*ptr_m_NodeInvMass0_10, *ptr_m_NodeInvMass0_11, *ptr_m_NodeInvMass0_12,
					*ptr_m_NodeInvMass0_20, *ptr_m_NodeInvMass0_21, *ptr_m_NodeInvMass0_22;

__constant__ float	*ptr_m_NodeInvMass_00, *ptr_m_NodeInvMass_01, *ptr_m_NodeInvMass_02,
					*ptr_m_NodeInvMass_10, *ptr_m_NodeInvMass_11, *ptr_m_NodeInvMass_12,
					*ptr_m_NodeInvMass_20, *ptr_m_NodeInvMass_21, *ptr_m_NodeInvMass_22;

// Per-node external force and accumulated totals / velocities.
__constant__ float	*ptr_m_NodeF_x, *ptr_m_NodeF_y, *ptr_m_NodeF_z;

__constant__ float	*ptr_m_Ftot_x, *ptr_m_Ftot_y, *ptr_m_Ftot_z;
__constant__ float	*ptr_m_NodeVel_x, *ptr_m_NodeVel_y, *ptr_m_NodeVel_z;

__constant__ bool	*ptr_m_IsRotational;

// Scratch accumulators written by one pass and read by another; volatile
// because they are communicated between blocks without further fencing.
__constant__ volatile float	*ptr_m_a_x, *ptr_m_a_y, *ptr_m_a_z;

__constant__ float	*ptr_m_ad_vec;
__constant__ float	*ptr_m_ap_vec;

// CG work vectors: search direction p, temporary d, gradient g.
__constant__ float	*ptr_m_p;
__constant__ volatile float	*ptr_m_d;
__constant__ float	*ptr_m_g;

// Joint lists per node slot, bound as 1D textures (legacy texture-reference API).
texture<unsigned int, 1, cudaReadModeElementType>	texref_m_NodesJoints_00,
													texref_m_NodesJoints_01,
													texref_m_NodesJoints_02,
													texref_m_NodesJoints_03,
													texref_m_NodesJoints_04,
													texref_m_NodesJoints_05;

// For each node: number of joints in the slot list and offset into it.
__constant__ unsigned int	*ptr_m_NodesJointsNum_00, *ptr_m_NodesJointsOffset_00;
__constant__ unsigned int	*ptr_m_NodesJointsNum_01, *ptr_m_NodesJointsOffset_01;
__constant__ unsigned int	*ptr_m_NodesJointsNum_02, *ptr_m_NodesJointsOffset_02;
__constant__ unsigned int	*ptr_m_NodesJointsNum_03, *ptr_m_NodesJointsOffset_03;
__constant__ unsigned int	*ptr_m_NodesJointsNum_04, *ptr_m_NodesJointsOffset_04;
__constant__ unsigned int	*ptr_m_NodesJointsNum_05, *ptr_m_NodesJointsOffset_05;


// Joint scratch/damping memory (purpose of individual arrays not visible here).
__constant__ float	*ptr_m_Joint_MemDmp_00, *ptr_m_Joint_MemDmp_01, *ptr_m_Joint_MemDmp_02, *ptr_m_Joint_MemDmp_03,
					*ptr_m_Joint_MemDmp_04, *ptr_m_Joint_MemDmp_05, *ptr_m_Joint_MemDmp_06, *ptr_m_Joint_MemDmp_07,
					*ptr_m_Joint_MemDmp_08, *ptr_m_Joint_MemDmp_09, *ptr_m_Joint_MemDmp_10, *ptr_m_Joint_MemDmp_11;

// Preconditioner diagonal, read through the texture cache in the solver loop.
// FIX: the original declaration ended with ',' instead of ';', which made the
// following 'texture' declaration below a syntax error.
texture<float, 1, cudaReadModeElementType>	texref_m_invDiag;

// Sparse system matrix Asp = dt * Minv * J^T, column-split like ptr_m_J_* (18 arrays).
texture<float, 1, cudaReadModeElementType>	texref_m_Asp_00, texref_m_Asp_01, texref_m_Asp_02,
											texref_m_Asp_03, texref_m_Asp_04, texref_m_Asp_05,
											texref_m_Asp_06, texref_m_Asp_07, texref_m_Asp_08,
											texref_m_Asp_09, texref_m_Asp_10, texref_m_Asp_11,
											texref_m_Asp_12, texref_m_Asp_13, texref_m_Asp_14,
											texref_m_Asp_15, texref_m_Asp_16, texref_m_Asp_17;

// Raw-pointer aliases of the Asp arrays (written by ComputeASparse; the
// texture references above are the read path).
__constant__ float	*ptr_m_Asp_00, *ptr_m_Asp_01, *ptr_m_Asp_02, *ptr_m_Asp_03, *ptr_m_Asp_04, *ptr_m_Asp_05,
					*ptr_m_Asp_06, *ptr_m_Asp_07, *ptr_m_Asp_08, *ptr_m_Asp_09, *ptr_m_Asp_10, *ptr_m_Asp_11,
					*ptr_m_Asp_12, *ptr_m_Asp_13, *ptr_m_Asp_14, *ptr_m_Asp_15, *ptr_m_Asp_16, *ptr_m_Asp_17;


// Texture read paths for total force and current inverse-mass blocks.
texture<float, 1, cudaReadModeElementType>	texref_m_Ftot_x, texref_m_Ftot_y, texref_m_Ftot_z;
texture<float, 1, cudaReadModeElementType>	texref_m_NodeInvMass_00, texref_m_NodeInvMass_01, texref_m_NodeInvMass_02,
											texref_m_NodeInvMass_10, texref_m_NodeInvMass_11, texref_m_NodeInvMass_12,
											texref_m_NodeInvMass_20, texref_m_NodeInvMass_21, texref_m_NodeInvMass_22;

// Per-block partial reduction outputs (one slot per block), combined by block 0.
__constant__ float	*ptr_internal_results;
__constant__ float	*ptr_internal_results_1;
__constant__ float	*ptr_internal_results_2;
__constant__ float	*ptr_internal_results_3;

// Grid-wide scalars communicated between blocks across software barriers;
// volatile so spinning readers observe writes from other blocks.
__device__ volatile float y_norm_inv;

__device__ volatile float BetaNorm, PhiPhi, GradNorm;

__device__ volatile float interm_dot, interm_dot_1;

__device__ volatile float beta;

// CG step lengths. NOTE(review): consider renaming 'alpha_fucked' to a
// professional identifier (rename requires updating all uses file-wide).
__device__ volatile float alpha_fucked, alpha_cg, alpha_f;

__device__ volatile unsigned int m_EffectiveIterations;
__device__ volatile float tmp_output_f;

__device__ volatile float m_GradNormSq;
__device__ volatile float m_LambdaNormSq;
__device__ volatile float m_DotLambdaGrad;



#if (PROPORTIONING_DOUBLE_STEP == 1)

// Toggles the half-step variant of the proportioning step (see define at top).
__device__ volatile bool HalfStep;

#endif


/////////////////////////////////////////////////////////////////////////////////////////////////////////
// Data for Jacobian calculation
/////////////////////////////////////////////////////////////////////////////////////////////////////////


// Per-joint FEM inputs, read-only in the Jacobian kernel, bound as textures.
texture<float, 1, cudaReadModeElementType>	texref_m_FEM_Regularization;
// Jp0: reference strain (6 entries per joint); RHS: constraint right-hand side.
texture<float, 1, cudaReadModeElementType>	texref_m_FEM_Jp0, texref_m_RHS;
// B_loc: local strain-displacement matrix, 12 floats per row (see TRANSFER_B_SUBMATRIX).
texture<float, 1, cudaReadModeElementType>	texref_m_FEM_B_loc;

texture<float, 1, cudaReadModeElementType>	texref_m_NodePosRot_x, texref_m_NodePosRot_y,
											texref_m_NodePosRot_z, texref_m_NodePosRot_w;

// Tetrahedron corner node indices, one texture per corner.
texture<unsigned int, 1, cudaReadModeElementType>	texref_m_FEM_N1_Idx, texref_m_FEM_N2_Idx,
													texref_m_FEM_N3_Idx, texref_m_FEM_N4_Idx;

// Reference orthonormal frame m_N per element, stored column-split.
__constant__ float	*ptr_m_FEM_N_00, *ptr_m_FEM_N_01, *ptr_m_FEM_N_02,
					*ptr_m_FEM_N_10, *ptr_m_FEM_N_11, *ptr_m_FEM_N_12,
					*ptr_m_FEM_N_20, *ptr_m_FEM_N_21, *ptr_m_FEM_N_22;

// Plasticity state and material parameters per element.
__constant__ float *ptr_m_FEM_E_plastic, *ptr_m_FEM_CFM;
__constant__ float *ptr_m_FEM_MaxPlasticStrain, *ptr_m_FEM_Yield, *ptr_m_FEM_Creep;
__constant__ float *ptr_m_FEM_Damping;

// Normalizes the 3-component vector held in vec_x / vec_y / vec_z in place.
// Requires a local float named `tmp_float` in the calling scope.
// FIX: use sqrtf instead of sqrt — the original promoted the all-float
// expression to double on the device for no benefit.
// NOTE: no guard against zero-length input (division by zero -> inf/nan).
#define NORMALIZE_VEC(vec) \
			tmp_float = 1.0f / sqrtf(vec##_x*vec##_x + vec##_y*vec##_y + vec##_z*vec##_z); \
			vec##_x *= tmp_float; \
			vec##_y *= tmp_float; \
			vec##_z *= tmp_float;

// Write joints sequentially:
// j0row0, j1row0, j2row0, ..., j0row1, j1row1, j2row1, ..., j0rowN, ..., jMrowN
// Write joints sequentially:
// j0row0, j1row0, j2row0, ..., j0row1, j1row1, j2row1, ..., j0rowN, ..., jMrowN
//
// Emits one 3x3 block of J = u * B_loc for constraint rows (row*3+0..2) and
// node-column `col` of joint J_Num:
//  - Jrow0..2: destination indices in the interleaved per-warp J layout
//    (warpJointIdx, threadInWarp, rowShift must be in scope);
//  - Brow0..2: source rows in texref_m_FEM_B_loc (12 floats per row);
//  - t0/t1/t2: two-digit J column suffixes pasted via ## (e.g. 00,01,02);
//  - u_00..u_22: the per-thread rotation in shared memory; every access is
//    u_xy[threadIdx.x], i.e. each thread only reads its own slot, so no
//    __syncthreads is needed around this macro.
// b_prefetch_0..2 are caller-declared floats reused as fetch registers.
#define TRANSFER_B_SUBMATRIX(row, col, J_Num, t0, t1, t2) \
			Jrow0 = warpJointIdx * 6 + threadInWarp + (row * 3 + 0) * rowShift; \
			Jrow1 = Jrow0 + rowShift; \
			Jrow2 = Jrow1 + rowShift; \
			\
			Brow0 = (J_Num * 6 + row * 3 + 0) * 12 + col * 3; \
			Brow1 = (J_Num * 6 + row * 3 + 1) * 12 + col * 3; \
			Brow2 = (J_Num * 6 + row * 3 + 2) * 12 + col * 3; \
			\
			b_prefetch_0 = tex1Dfetch(texref_m_FEM_B_loc, Brow0 + 0); \
			b_prefetch_1 = tex1Dfetch(texref_m_FEM_B_loc, Brow0 + 1); \
			b_prefetch_2 = tex1Dfetch(texref_m_FEM_B_loc, Brow0 + 2); \
			\
			ptr_m_J_##t0[Jrow0] =	b_prefetch_0 * u_00[threadIdx.x] + \
									b_prefetch_1 * u_01[threadIdx.x] + \
									b_prefetch_2 * u_02[threadIdx.x]; \
			\
			ptr_m_J_##t1[Jrow0] =	b_prefetch_0 * u_10[threadIdx.x] + \
									b_prefetch_1 * u_11[threadIdx.x] + \
									b_prefetch_2 * u_12[threadIdx.x]; \
			\
			ptr_m_J_##t2[Jrow0] =	b_prefetch_0 * u_20[threadIdx.x] + \
									b_prefetch_1 * u_21[threadIdx.x] + \
									b_prefetch_2 * u_22[threadIdx.x]; \
			\
			b_prefetch_0 = tex1Dfetch(texref_m_FEM_B_loc, Brow1 + 0); \
			b_prefetch_1 = tex1Dfetch(texref_m_FEM_B_loc, Brow1 + 1); \
			b_prefetch_2 = tex1Dfetch(texref_m_FEM_B_loc, Brow1 + 2); \
			\
			ptr_m_J_##t0[Jrow1] =	b_prefetch_0 * u_00[threadIdx.x] + \
									b_prefetch_1 * u_01[threadIdx.x] + \
									b_prefetch_2 * u_02[threadIdx.x]; \
			\
			ptr_m_J_##t1[Jrow1] =	b_prefetch_0 * u_10[threadIdx.x] + \
									b_prefetch_1 * u_11[threadIdx.x] + \
									b_prefetch_2 * u_12[threadIdx.x]; \
			\
			ptr_m_J_##t2[Jrow1] =	b_prefetch_0 * u_20[threadIdx.x] + \
									b_prefetch_1 * u_21[threadIdx.x] + \
									b_prefetch_2 * u_22[threadIdx.x]; \
			\
			b_prefetch_0 = tex1Dfetch(texref_m_FEM_B_loc, Brow2 + 0); \
			b_prefetch_1 = tex1Dfetch(texref_m_FEM_B_loc, Brow2 + 1); \
			b_prefetch_2 = tex1Dfetch(texref_m_FEM_B_loc, Brow2 + 2); \
			\
			ptr_m_J_##t0[Jrow2] =	b_prefetch_0 * u_00[threadIdx.x] + \
									b_prefetch_1 * u_01[threadIdx.x] + \
									b_prefetch_2 * u_02[threadIdx.x]; \
			\
			ptr_m_J_##t1[Jrow2] =	b_prefetch_0 * u_10[threadIdx.x] + \
									b_prefetch_1 * u_11[threadIdx.x] + \
									b_prefetch_2 * u_12[threadIdx.x]; \
			\
			ptr_m_J_##t2[Jrow2] =	b_prefetch_0 * u_20[threadIdx.x] + \
									b_prefetch_1 * u_21[threadIdx.x] + \
									b_prefetch_2 * u_22[threadIdx.x];

// Zeroes Jacobian columns 12..17 (node slots 4 and 5) of constraint row
// Jrow<row> — FEM tetrahedra only touch the first four node slots.
#define CLEAR_JACOBI_ROW_12(row) \
		ptr_m_J_12[Jrow##row] = 0.0f; \
		ptr_m_J_13[Jrow##row] = 0.0f; \
		ptr_m_J_14[Jrow##row] = 0.0f; \
		ptr_m_J_15[Jrow##row] = 0.0f; \
		ptr_m_J_16[Jrow##row] = 0.0f; \
		ptr_m_J_17[Jrow##row] = 0.0f;


// Elastic strain component for constraint row Jrow<row>:
//   e = J * x  -  Jp0  -  E_plastic
// where x is assembled from the positions of the joint's four nodes
// (m_Node1Idx..m_Node4Idx in scope) against J columns 0..11, Jp0 is the
// reference strain for joint J_Num, and E_plastic the accumulated plastic
// strain. Result lands in the caller-declared float e_elastic_<row>.
#define ELASTIC_TENSOR(row, J_Num) \
		e_elastic_##row = \
					tex1Dfetch(texref_m_NodePosRot_x, m_Node1Idx) * ptr_m_J_00[Jrow##row] + \
					tex1Dfetch(texref_m_NodePosRot_y, m_Node1Idx) * ptr_m_J_01[Jrow##row] + \
					tex1Dfetch(texref_m_NodePosRot_z, m_Node1Idx) * ptr_m_J_02[Jrow##row] + \
					\
					tex1Dfetch(texref_m_NodePosRot_x, m_Node2Idx) * ptr_m_J_03[Jrow##row] + \
					tex1Dfetch(texref_m_NodePosRot_y, m_Node2Idx) * ptr_m_J_04[Jrow##row] + \
					tex1Dfetch(texref_m_NodePosRot_z, m_Node2Idx) * ptr_m_J_05[Jrow##row] + \
					\
					tex1Dfetch(texref_m_NodePosRot_x, m_Node3Idx) * ptr_m_J_06[Jrow##row] + \
					tex1Dfetch(texref_m_NodePosRot_y, m_Node3Idx) * ptr_m_J_07[Jrow##row] + \
					tex1Dfetch(texref_m_NodePosRot_z, m_Node3Idx) * ptr_m_J_08[Jrow##row] + \
					\
					tex1Dfetch(texref_m_NodePosRot_x, m_Node4Idx) * ptr_m_J_09[Jrow##row] + \
					tex1Dfetch(texref_m_NodePosRot_y, m_Node4Idx) * ptr_m_J_10[Jrow##row] + \
					tex1Dfetch(texref_m_NodePosRot_z, m_Node4Idx) * ptr_m_J_11[Jrow##row] \
			- tex1Dfetch(texref_m_FEM_Jp0, J_Num * 6 + row) - ptr_m_FEM_E_plastic[Jrow##row];


// Finishes constraint row Jrow<row>: RHS = -(gamma/dt) * elastic strain
// (Baumgarte-style restoring term; idt and gamma in scope), and unbounded
// Lo/Hi limits — FEM rows are bilateral (equality) constraints.
#define ADDITIONAL_PARAMS(row) \
		ptr_m_RHS[Jrow##row] = -idt * gamma * e_elastic_##row; \
		\
		ptr_m_Lo[Jrow##row]  = -CUDA_FLT_MAX; \
		ptr_m_Hi[Jrow##row]  =  CUDA_FLT_MAX;

// Builds per-joint FEM constraint data for one timestep: corotational frame,
// Jacobian J = u * B_loc, CFM, plastic-strain update, RHS and bounds.
//
// warpsPerBlock: number of warps per launched block; sizes the shared u_*
// arrays (one 3x3 rotation slot per thread). Each thread processes whole
// joints in a warp-cooperative grid-stride loop, so rows of a warp's 32
// joints are written interleaved (stride rowShift) for coalescing.
//
// NOTE: the shared u_* arrays are used purely as per-thread register spill —
// every access indexes [threadIdx.x] — so no __syncthreads is needed.
template <unsigned int warpsPerBlock>
__device__ void ComputeFEMJacobiansT( unsigned int m_Num_FEM_Joints, float dt )
{
	float idt = 1.0f / dt;	// inverse timestep

	int threadInWarp = threadIdx.x & (WARP_SIZE - 1);
	int warpNum = threadIdx.x / WARP_SIZE;

	// Per-thread 3x3 rotation u, kept in shared memory (one slot per thread).
	__shared__ volatile float u_00[warpsPerBlock * WARP_SIZE];
	__shared__ volatile float u_01[warpsPerBlock * WARP_SIZE];
	__shared__ volatile float u_02[warpsPerBlock * WARP_SIZE];

	__shared__ volatile float u_10[warpsPerBlock * WARP_SIZE];
	__shared__ volatile float u_11[warpsPerBlock * WARP_SIZE];
	__shared__ volatile float u_12[warpsPerBlock * WARP_SIZE];

	__shared__ volatile float u_20[warpsPerBlock * WARP_SIZE];
	__shared__ volatile float u_21[warpsPerBlock * WARP_SIZE];
	__shared__ volatile float u_22[warpsPerBlock * WARP_SIZE];

	// Warp-cooperative grid-stride loop over FEM joints.
	for (unsigned int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_Num_FEM_Joints; i += blockDim.x * gridDim.x)
	{
		// NOTE(review): shadows the outer int threadInWarp with the same value.
		unsigned int threadInWarp = threadIdx.x & (WARP_SIZE - 1);
		unsigned int warpJointIdx = i - threadInWarp;	// first joint of this warp's batch
		// Rows of a batch interleave with this stride; < 32 only at the tail.
		unsigned int rowShift = min( 32, m_Num_FEM_Joints - warpJointIdx );

		float tmp_float;	// scratch required by NORMALIZE_VEC

		// Damping factor gamma in (0,1].
		float gamma = 1.0f / (1.0f + idt * ptr_m_FEM_Damping[i]);

		// Tetrahedron corner nodes.
		unsigned int m_Node1Idx = tex1Dfetch(texref_m_FEM_N1_Idx, i);
		unsigned int m_Node2Idx = tex1Dfetch(texref_m_FEM_N2_Idx, i);
		unsigned int m_Node3Idx = tex1Dfetch(texref_m_FEM_N3_Idx, i);
		unsigned int m_Node4Idx = tex1Dfetch(texref_m_FEM_N4_Idx, i);

		float NPR1_x, NPR1_y, NPR1_z;

		NPR1_x = tex1Dfetch(texref_m_NodePosRot_x, m_Node1Idx);
		NPR1_y = tex1Dfetch(texref_m_NodePosRot_y, m_Node1Idx);
		NPR1_z = tex1Dfetch(texref_m_NodePosRot_z, m_Node1Idx);

		float edge0_x, edge0_y, edge0_z;

		// Edge from node 1 to node 2.
		edge0_x = tex1Dfetch(texref_m_NodePosRot_x, m_Node2Idx) - NPR1_x;
		edge0_y = tex1Dfetch(texref_m_NodePosRot_y, m_Node2Idx) - NPR1_y;
		edge0_z = tex1Dfetch(texref_m_NodePosRot_z, m_Node2Idx) - NPR1_z;

		float N1_x, N1_y, N1_z;

		// N1: sum of the three edges from node 1, then normalized.
		N1_x = edge0_x	+ ( tex1Dfetch(texref_m_NodePosRot_x, m_Node3Idx) - NPR1_x )
						+ ( tex1Dfetch(texref_m_NodePosRot_x, m_Node4Idx) - NPR1_x );
		N1_y = edge0_y	+ ( tex1Dfetch(texref_m_NodePosRot_y, m_Node3Idx) - NPR1_y )
						+ ( tex1Dfetch(texref_m_NodePosRot_y, m_Node4Idx) - NPR1_y );
		N1_z = edge0_z	+ ( tex1Dfetch(texref_m_NodePosRot_z, m_Node3Idx) - NPR1_z )
						+ ( tex1Dfetch(texref_m_NodePosRot_z, m_Node4Idx) - NPR1_z );

		NORMALIZE_VEC(N1);

		float N2_x, N2_y, N2_z;

		// 	N2 = N1.Cross(edge0);
		N2_x = N1_y * edge0_z - N1_z * edge0_y;
		N2_y = N1_z * edge0_x - N1_x * edge0_z;
		N2_z = N1_x * edge0_y - N1_y * edge0_x;

		NORMALIZE_VEC(N2);

		float N3_x, N3_y, N3_z;

		// 	N3 = N2.Cross(N1);
		N3_x = N2_y * N1_z - N2_z * N1_y;
		N3_y = N2_z * N1_x - N2_x * N1_z;
		N3_z = N2_x * N1_y - N2_y * N1_x;

		NORMALIZE_VEC(N3);

		// N1/N2/N3 now form an orthonormal frame for the deformed element.

/*
		float	N_new_00, N_new_01, N_new_02,
				N_new_10, N_new_11, N_new_12,
				N_new_20, N_new_21, N_new_22;

		N_new_00 = N1_x;
		N_new_10 = N1_y;
		N_new_20 = N1_z;

		N_new_01 = N2_x;
		N_new_11 = N2_y;
		N_new_21 = N2_z;

		N_new_02 = N3_x;
		N_new_12 = N3_y;
		N_new_22 = N3_z;
*/

		// 	CMatrix3 u = N_new * m_N.GetTransposed();
// 		float	u_00, u_01, u_02,
// 				u_10, u_11, u_12,
// 				u_20, u_21, u_22;
// 
// 		u_00 = N1_x * ptr_m_FEM_N_00[i] + N2_x * ptr_m_FEM_N_01[i] + N3_x * ptr_m_FEM_N_02[i];
// 		u_01 = N1_x * ptr_m_FEM_N_10[i] + N2_x * ptr_m_FEM_N_11[i] + N3_x * ptr_m_FEM_N_12[i];
// 		u_02 = N1_x * ptr_m_FEM_N_20[i] + N2_x * ptr_m_FEM_N_21[i] + N3_x * ptr_m_FEM_N_22[i];
// 
// 		u_10 = N1_y * ptr_m_FEM_N_00[i] + N2_y * ptr_m_FEM_N_01[i] + N3_y * ptr_m_FEM_N_02[i];
// 		u_11 = N1_y * ptr_m_FEM_N_10[i] + N2_y * ptr_m_FEM_N_11[i] + N3_y * ptr_m_FEM_N_12[i];
// 		u_12 = N1_y * ptr_m_FEM_N_20[i] + N2_y * ptr_m_FEM_N_21[i] + N3_y * ptr_m_FEM_N_22[i];
// 
// 		u_20 = N1_z * ptr_m_FEM_N_00[i] + N2_z * ptr_m_FEM_N_01[i] + N3_z * ptr_m_FEM_N_02[i];
// 		u_21 = N1_z * ptr_m_FEM_N_10[i] + N2_z * ptr_m_FEM_N_11[i] + N3_z * ptr_m_FEM_N_12[i];
// 		u_22 = N1_z * ptr_m_FEM_N_20[i] + N2_z * ptr_m_FEM_N_21[i] + N3_z * ptr_m_FEM_N_22[i];

		// u = N_new * m_N^T : rotation from the reference frame to the
		// deformed frame (shared-memory version of the commented code above).
		u_00[threadIdx.x] = N1_x * ptr_m_FEM_N_00[i] + N2_x * ptr_m_FEM_N_01[i] + N3_x * ptr_m_FEM_N_02[i];
		u_01[threadIdx.x] = N1_x * ptr_m_FEM_N_10[i] + N2_x * ptr_m_FEM_N_11[i] + N3_x * ptr_m_FEM_N_12[i];
		u_02[threadIdx.x] = N1_x * ptr_m_FEM_N_20[i] + N2_x * ptr_m_FEM_N_21[i] + N3_x * ptr_m_FEM_N_22[i];

		u_10[threadIdx.x] = N1_y * ptr_m_FEM_N_00[i] + N2_y * ptr_m_FEM_N_01[i] + N3_y * ptr_m_FEM_N_02[i];
		u_11[threadIdx.x] = N1_y * ptr_m_FEM_N_10[i] + N2_y * ptr_m_FEM_N_11[i] + N3_y * ptr_m_FEM_N_12[i];
		u_12[threadIdx.x] = N1_y * ptr_m_FEM_N_20[i] + N2_y * ptr_m_FEM_N_21[i] + N3_y * ptr_m_FEM_N_22[i];

		u_20[threadIdx.x] = N1_z * ptr_m_FEM_N_00[i] + N2_z * ptr_m_FEM_N_01[i] + N3_z * ptr_m_FEM_N_02[i];
		u_21[threadIdx.x] = N1_z * ptr_m_FEM_N_10[i] + N2_z * ptr_m_FEM_N_11[i] + N3_z * ptr_m_FEM_N_12[i];
		u_22[threadIdx.x] = N1_z * ptr_m_FEM_N_20[i] + N2_z * ptr_m_FEM_N_21[i] + N3_z * ptr_m_FEM_N_22[i];

		// det(u) by Sarrus' rule.
		float u_Det =	u_00[threadIdx.x] * u_11[threadIdx.x] * u_22[threadIdx.x] +
						u_20[threadIdx.x] * u_01[threadIdx.x] * u_12[threadIdx.x] +
						u_02[threadIdx.x] * u_10[threadIdx.x] * u_21[threadIdx.x] -
						u_02[threadIdx.x] * u_11[threadIdx.x] * u_20[threadIdx.x] -
						u_22[threadIdx.x] * u_10[threadIdx.x] * u_01[threadIdx.x] -
						u_00[threadIdx.x] * u_12[threadIdx.x] * u_21[threadIdx.x];

		// Flip sign on reflection so u stays a proper rotation (det > 0),
		// e.g. for inverted elements.
		if (u_Det < 0.0f)
		{
			u_00[threadIdx.x] *= -1.0f;
			u_01[threadIdx.x] *= -1.0f;
			u_02[threadIdx.x] *= -1.0f;

			u_10[threadIdx.x] *= -1.0f;
			u_11[threadIdx.x] *= -1.0f;
			u_12[threadIdx.x] *= -1.0f;

			u_20[threadIdx.x] *= -1.0f;
			u_21[threadIdx.x] *= -1.0f;
			u_22[threadIdx.x] *= -1.0f;
		}

		unsigned int Jrow0, Jrow1, Jrow2,
					 Jrow3, Jrow4, Jrow5;
		unsigned int Brow0, Brow1, Brow2;

		float b_prefetch_0, b_prefetch_1, b_prefetch_2;

		// J = u * B_loc, one 3x3 block per (row-triple, node-column).
		TRANSFER_B_SUBMATRIX(0, 0, i, 00, 01, 02);
		TRANSFER_B_SUBMATRIX(0, 1, i, 03, 04, 05);
		TRANSFER_B_SUBMATRIX(0, 2, i, 06, 07, 08);
		TRANSFER_B_SUBMATRIX(0, 3, i, 09, 10, 11);
		TRANSFER_B_SUBMATRIX(1, 0, i, 00, 01, 02);
		TRANSFER_B_SUBMATRIX(1, 1, i, 03, 04, 05);
		TRANSFER_B_SUBMATRIX(1, 2, i, 06, 07, 08);
		TRANSFER_B_SUBMATRIX(1, 3, i, 09, 10, 11);

		// Recompute all six interleaved row indices of this joint.
		Jrow0 = warpJointIdx * 6 + threadInWarp;
		Jrow1 = Jrow0 + rowShift;
		Jrow2 = Jrow1 + rowShift;
		Jrow3 = Jrow2 + rowShift;
		Jrow4 = Jrow3 + rowShift;
		Jrow5 = Jrow4 + rowShift;

		float regularization = tex1Dfetch(texref_m_FEM_Regularization, i);

		// Effective CFM per row: damped material CFM plus regularization.
		ptr_m_CFM[Jrow0] = (idt * gamma * ptr_m_FEM_CFM[Jrow0] + regularization);
		ptr_m_CFM[Jrow1] = (idt * gamma * ptr_m_FEM_CFM[Jrow1] + regularization);
		ptr_m_CFM[Jrow2] = (idt * gamma * ptr_m_FEM_CFM[Jrow2] + regularization);
		ptr_m_CFM[Jrow3] = (idt * gamma * ptr_m_FEM_CFM[Jrow3] + regularization);
		ptr_m_CFM[Jrow4] = (idt * gamma * ptr_m_FEM_CFM[Jrow4] + regularization);
		ptr_m_CFM[Jrow5] = (idt * gamma * ptr_m_FEM_CFM[Jrow5] + regularization);

		// FEM joints use only node slots 0..3; zero columns 12..17.
		CLEAR_JACOBI_ROW_12(0);
		CLEAR_JACOBI_ROW_12(1);
		CLEAR_JACOBI_ROW_12(2);
		CLEAR_JACOBI_ROW_12(3);
		CLEAR_JACOBI_ROW_12(4);
		CLEAR_JACOBI_ROW_12(5);

		float e_elastic_0, e_elastic_1, e_elastic_2,
			  e_elastic_3, e_elastic_4, e_elastic_5;

		// Tensors: elastic strain e = J*x - Jp0 - E_plastic per row.
		ELASTIC_TENSOR(0, i);
		ELASTIC_TENSOR(1, i);
		ELASTIC_TENSOR(2, i);
		ELASTIC_TENSOR(3, i);
		ELASTIC_TENSOR(4, i);
		ELASTIC_TENSOR(5, i);

		float el_norm = 
			sqrtf(	e_elastic_0 * e_elastic_0 + e_elastic_1 * e_elastic_1 + e_elastic_2 * e_elastic_2 +
					e_elastic_3 * e_elastic_3 + e_elastic_4 * e_elastic_4 + e_elastic_5 * e_elastic_5	);

		// Creep is in [0; 1/dt]
		float Creep = ptr_m_FEM_Creep[i] * idt;

		float e_plastic_0, e_plastic_1, e_plastic_2,
			  e_plastic_3, e_plastic_4, e_plastic_5;

		e_plastic_0 = ptr_m_FEM_E_plastic[Jrow0];
		e_plastic_1 = ptr_m_FEM_E_plastic[Jrow1];
		e_plastic_2 = ptr_m_FEM_E_plastic[Jrow2];
		e_plastic_3 = ptr_m_FEM_E_plastic[Jrow3];
		e_plastic_4 = ptr_m_FEM_E_plastic[Jrow4];
		e_plastic_5 = ptr_m_FEM_E_plastic[Jrow5];

		// Past the yield threshold, a Creep-fraction of the elastic strain
		// becomes permanent (plastic) strain.
		if (el_norm > ptr_m_FEM_Yield[i])
		{
			e_plastic_0 += dt * Creep * e_elastic_0;
			e_plastic_1 += dt * Creep * e_elastic_1;
			e_plastic_2 += dt * Creep * e_elastic_2;
			e_plastic_3 += dt * Creep * e_elastic_3;
			e_plastic_4 += dt * Creep * e_elastic_4;
			e_plastic_5 += dt * Creep * e_elastic_5;
		}

		float pl_norm = 
			sqrtf(	e_plastic_0 * e_plastic_0 + e_plastic_1 * e_plastic_1 + e_plastic_2 * e_plastic_2 +
					e_plastic_3 * e_plastic_3 + e_plastic_4 * e_plastic_4 + e_plastic_5 * e_plastic_5	);

		// Rescale so the plastic-strain magnitude never exceeds the material cap.
		if (pl_norm > ptr_m_FEM_MaxPlasticStrain[i])
		{
			tmp_float = ptr_m_FEM_MaxPlasticStrain[i] / pl_norm;

			e_plastic_0 *= tmp_float;
			e_plastic_1 *= tmp_float;
			e_plastic_2 *= tmp_float;
			e_plastic_3 *= tmp_float;
			e_plastic_4 *= tmp_float;
			e_plastic_5 *= tmp_float;
		}

		// Persist updated plastic strain.
		ptr_m_FEM_E_plastic[Jrow0] = e_plastic_0;
		ptr_m_FEM_E_plastic[Jrow1] = e_plastic_1;
		ptr_m_FEM_E_plastic[Jrow2] = e_plastic_2;
		ptr_m_FEM_E_plastic[Jrow3] = e_plastic_3;
		ptr_m_FEM_E_plastic[Jrow4] = e_plastic_4;
		ptr_m_FEM_E_plastic[Jrow5] = e_plastic_5;

		// RHS and (infinite) bounds per row.
		ADDITIONAL_PARAMS(0);
		ADDITIONAL_PARAMS(1);
		ADDITIONAL_PARAMS(2);
		ADDITIONAL_PARAMS(3);
		ADDITIONAL_PARAMS(4);
		ADDITIONAL_PARAMS(5);
	}

}


/////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////
// One node-slot contribution of row i to the sparse system:
//  - j: node slot (0..5), selects ptr_m_JtdNodes_0<j>;
//  - t0/t1/t2: two-digit x/y/z column suffixes of J and Asp, pasted via ##;
//  - RHS[i] -= J_row . Ftot(node)  (external-force contribution);
//  - Asp_t* [i] = dt * (NodeInvMass * J_row)  — one 3-vector per slot;
//  - reg_invDiag += J_row . Asp_row, accumulating this row's diagonal of
//    dt * J * Minv * J^T (caller-declared float).
#define FORM_A_SPARSE(j, t0, t1, t2)\
			idx = ptr_m_JtdNodes_0##j[i];\
			\
			ptr_m_RHS[i] -= (	ptr_m_J_##t0[i] * tex1Dfetch(texref_m_Ftot_x, idx) + \
								ptr_m_J_##t1[i] * tex1Dfetch(texref_m_Ftot_y, idx) + \
								ptr_m_J_##t2[i] * tex1Dfetch(texref_m_Ftot_z, idx) );\
			\
			ptr_m_Asp_##t0 [i] = dt * \
				( tex1Dfetch(texref_m_NodeInvMass_00, idx) * ptr_m_J_##t0[i] \
				+ tex1Dfetch(texref_m_NodeInvMass_01, idx) * ptr_m_J_##t1[i] \
				+ tex1Dfetch(texref_m_NodeInvMass_02, idx) * ptr_m_J_##t2[i] );\
			reg_invDiag += ptr_m_J_##t0[i] * ptr_m_Asp_##t0 [i];\
			\
			ptr_m_Asp_##t1 [i] = dt * \
				( tex1Dfetch(texref_m_NodeInvMass_10, idx) * ptr_m_J_##t0[i] \
				+ tex1Dfetch(texref_m_NodeInvMass_11, idx) * ptr_m_J_##t1[i] \
				+ tex1Dfetch(texref_m_NodeInvMass_12, idx) * ptr_m_J_##t2[i] );\
			reg_invDiag += ptr_m_J_##t1[i] * ptr_m_Asp_##t1 [i];\
			\
			ptr_m_Asp_##t2 [i] = dt * \
				( tex1Dfetch(texref_m_NodeInvMass_20, idx) * ptr_m_J_##t0[i] \
				+ tex1Dfetch(texref_m_NodeInvMass_21, idx) * ptr_m_J_##t1[i] \
				+ tex1Dfetch(texref_m_NodeInvMass_22, idx) * ptr_m_J_##t2[i] ); \
			reg_invDiag += ptr_m_J_##t2[i] * ptr_m_Asp_##t2 [i];

extern "C"
// Assembles the sparse system matrix Asp = dt * Minv * J^T row by row, folds
// external forces into RHS, and computes the diagonal preconditioner:
//  - DOUBLEJACOBI: invDiag[i] = 1/sqrt(diag_i), and CFM is pre-scaled by it
//    (symmetric two-sided scaling);
//  - otherwise:    invDiag[i] = 1/diag_i (plain Jacobi).
// Launch: warp-cooperative grid-stride over constraint rows; any 1D grid works.
// FIX: use sqrtf instead of sqrt — the argument is float; the original
// promoted to double precision on the device needlessly.
__global__ void ComputeASparse( unsigned int m_NumJoints, float dt )
{
	int idx;

	int threadInWarp = threadIdx.x & (WARP_SIZE - 1);
	int warpNum = threadIdx.x / WARP_SIZE;

	for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumJoints; i += blockDim.x * gridDim.x)
	{
		// Diagonal entry of dt * J * Minv * J^T for row i, accumulated by the macro.
		float reg_invDiag = 0.0f;

		// One contribution per node slot (6 slots x 3 Jacobian columns).
		FORM_A_SPARSE(0, 00, 01, 02);
		FORM_A_SPARSE(1, 03, 04, 05);
		FORM_A_SPARSE(2, 06, 07, 08);
		FORM_A_SPARSE(3, 09, 10, 11);
		FORM_A_SPARSE(4, 12, 13, 14);
		FORM_A_SPARSE(5, 15, 16, 17);

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
		ptr_m_invDiag[i] = 1.0f / sqrtf(reg_invDiag + ptr_m_CFM[i]);
		ptr_m_CFM[i] *= ptr_m_invDiag[i];
#else
		ptr_m_invDiag[i] = 1.0f / (reg_invDiag + ptr_m_CFM[i]);
#endif
	}
}

// Reduce 4 elements inside whole block (that's why threadIdx.x instead of threadInWarp)
// Reduce 4 elements inside whole block (that's why threadIdx.x instead of threadInWarp)
// Caller guards with `if (threadIdx.x < 4)`; array must have >= 8 valid slots
// and be declared volatile (no sync between steps — relies on the guarded
// threads being in one warp).
// NOTE(review): the last line ends with '\', so the blank line after it is
// part of the macro — harmless but fragile if code is inserted there.
#define REDUCE_4_BLOCK(arrayName) \
			arrayName[threadIdx.x] += arrayName[threadIdx.x + 4]; \
			arrayName[threadIdx.x] += arrayName[threadIdx.x + 2]; \
			arrayName[threadIdx.x] += arrayName[threadIdx.x + 1]; \

// Min-reduction counterpart of REDUCE_4_BLOCK; same guarding requirements.
#define REDUCE_MIN_4_BLOCK(arrayName) \
			if (arrayName[threadIdx.x + 4] < arrayName[threadIdx.x]) \
				arrayName[threadIdx.x] = arrayName[threadIdx.x + 4]; \
			if (arrayName[threadIdx.x + 2] < arrayName[threadIdx.x]) \
				arrayName[threadIdx.x] = arrayName[threadIdx.x + 2]; \
			if (arrayName[threadIdx.x + 1] < arrayName[threadIdx.x]) \
				arrayName[threadIdx.x] = arrayName[threadIdx.x + 1];


// Reduce 2 elements inside whole block (that's why threadIdx.x instead of threadInWarp)
// Caller guards with `if (threadIdx.x < 2)`; array must have >= 4 valid slots.
// NOTE(review): trailing '\' swallows the following blank line, as above.
#define REDUCE_2_BLOCK(arrayName) \
			arrayName[threadIdx.x] += arrayName[threadIdx.x + 2]; \
			arrayName[threadIdx.x] += arrayName[threadIdx.x + 1]; \

// Min-reduction counterpart of REDUCE_2_BLOCK; same guarding requirements.
#define REDUCE_MIN_2_BLOCK(arrayName) \
			if (arrayName[threadIdx.x + 2] < arrayName[threadIdx.x]) \
				arrayName[threadIdx.x] = arrayName[threadIdx.x + 2]; \
			if (arrayName[threadIdx.x + 1] < arrayName[threadIdx.x]) \
				arrayName[threadIdx.x] = arrayName[threadIdx.x + 1];

template <unsigned int warpsPerBlock>
__device__ void SolverKernelT( unsigned int m_NumNodes, unsigned int m_NumJoints, unsigned int m_Iterations, float m_Precision )
{
	float GAMMA_Sq = 1.0f;

	__shared__ volatile unsigned int sh_syncAllFlags[WARP_SIZE];

	__shared__ volatile float cutArray[warpsPerBlock * WARP_SIZE];
	__shared__ volatile float warpReduceRes[warpsPerBlock];

	__shared__ volatile float cutArray_BetaNorm[warpsPerBlock * WARP_SIZE];
	__shared__ volatile float warpReduceRes_BetaNorm[warpsPerBlock];

	__shared__ volatile float cutArray_PhiPhi[warpsPerBlock * WARP_SIZE];
	__shared__ volatile float warpReduceRes_PhiPhi[warpsPerBlock];

	__shared__ volatile float cutArray_GradNorm[warpsPerBlock * WARP_SIZE];
	__shared__ volatile float warpReduceRes_GradNorm[warpsPerBlock];

	__shared__ volatile float sh_alpha_fucked, sh_alpha_cg, sh_alpha_f;
	__shared__ volatile float sh_beta;

	__shared__ volatile float sh_BetaNorm;
	__shared__ volatile float sh_PhiPhi;
	__shared__ volatile float sh_GradNorm;

	__shared__ volatile unsigned int sh_Iterations;


	int threadInWarp = threadIdx.x & (WARP_SIZE - 1);
	int warpNum = threadIdx.x / WARP_SIZE;

	// Initial guess for PI
	for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumJoints; i += blockDim.x * gridDim.x)
	{
		ptr_m_d[i] = 1.0f;
	}

	SYNCALL_BEGIN();
	SYNCALL_END();

	__shared__ volatile float y_norm_inv_tmp;
	
	if (threadIdx.x == 0)
	{
		y_norm_inv_tmp = 1.0f;
	}
	__syncthreads();


	for (int PI_count = 0; PI_count < 15; ++PI_count)
	{
		float vec_component = 1.0f;

//		for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < m_NumNodes; i += blockDim.x * gridDim.x)
		for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumNodes; i += blockDim.x * gridDim.x)
		{
 			float reg_m_g = 0.0f;

			float reg_m_a_x = 0.0f;
			float reg_m_a_y = 0.0f;
			float reg_m_a_z = 0.0f;

			unsigned int JointTripleNum, JointNodeOffset;
			unsigned int JointIndex;

			JointTripleNum = ptr_m_NodesJointsNum_00[i];
			JointNodeOffset = ptr_m_NodesJointsOffset_00[i];
			for (int j = 0; j < JointTripleNum; ++j)
			{
				JointIndex = tex1Dfetch( texref_m_NodesJoints_00, JointNodeOffset + j );
				vec_component = ptr_m_d[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
				vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );//ptr_m_invDiag[JointIndex];
#endif

				reg_m_a_x += tex1Dfetch( texref_m_Asp_00, JointIndex ) * vec_component;
				reg_m_a_y += tex1Dfetch( texref_m_Asp_01, JointIndex ) * vec_component; 
				reg_m_a_z += tex1Dfetch( texref_m_Asp_02, JointIndex ) * vec_component;
			}

			JointTripleNum = ptr_m_NodesJointsNum_01[i];
			JointNodeOffset = ptr_m_NodesJointsOffset_01[i];
			for (int j = 0; j < JointTripleNum; ++j)
			{
				JointIndex = tex1Dfetch(texref_m_NodesJoints_01, JointNodeOffset + j);
				vec_component = ptr_m_d[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
				vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

				reg_m_a_x += tex1Dfetch( texref_m_Asp_03, JointIndex ) * vec_component;
				reg_m_a_y += tex1Dfetch( texref_m_Asp_04, JointIndex ) * vec_component;
				reg_m_a_z += tex1Dfetch( texref_m_Asp_05, JointIndex ) * vec_component;
			}

			JointTripleNum = ptr_m_NodesJointsNum_02[i];
			JointNodeOffset = ptr_m_NodesJointsOffset_02[i];
			for (int j = 0; j < JointTripleNum; ++j)
			{
				JointIndex = tex1Dfetch(texref_m_NodesJoints_02, JointNodeOffset + j);
 				vec_component = ptr_m_d[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
				vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

				reg_m_a_x += tex1Dfetch( texref_m_Asp_06, JointIndex ) * vec_component;
				reg_m_a_y += tex1Dfetch( texref_m_Asp_07, JointIndex ) * vec_component; 
				reg_m_a_z += tex1Dfetch( texref_m_Asp_08, JointIndex ) * vec_component;
			}

			JointTripleNum = ptr_m_NodesJointsNum_03[i];
			JointNodeOffset = ptr_m_NodesJointsOffset_03[i];
			for (int j = 0; j < JointTripleNum; ++j)
			{
				JointIndex = tex1Dfetch(texref_m_NodesJoints_03, JointNodeOffset + j);
 				vec_component = ptr_m_d[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
				vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

				reg_m_a_x += tex1Dfetch( texref_m_Asp_09, JointIndex ) * vec_component;
				reg_m_a_y += tex1Dfetch( texref_m_Asp_10, JointIndex ) * vec_component; 
				reg_m_a_z += tex1Dfetch( texref_m_Asp_11, JointIndex ) * vec_component;
			}

			JointTripleNum = ptr_m_NodesJointsNum_04[i];
			JointNodeOffset = ptr_m_NodesJointsOffset_04[i];
			for (int j = 0; j < JointTripleNum; ++j)
			{
				JointIndex = tex1Dfetch(texref_m_NodesJoints_04, JointNodeOffset + j);
				vec_component = ptr_m_d[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
				vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

				reg_m_a_x += tex1Dfetch( texref_m_Asp_12, JointIndex ) * vec_component;
				reg_m_a_y += tex1Dfetch( texref_m_Asp_13, JointIndex ) * vec_component; 
				reg_m_a_z += tex1Dfetch( texref_m_Asp_14, JointIndex ) * vec_component;
			}

			JointTripleNum = ptr_m_NodesJointsNum_05[i];
			JointNodeOffset = ptr_m_NodesJointsOffset_05[i];
			for (int j = 0; j < JointTripleNum; ++j)
			{
				JointIndex = tex1Dfetch(texref_m_NodesJoints_05, JointNodeOffset + j);
				vec_component = ptr_m_d[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
				vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

				reg_m_a_x += tex1Dfetch( texref_m_Asp_15, JointIndex ) * vec_component;
				reg_m_a_y += tex1Dfetch( texref_m_Asp_16, JointIndex ) * vec_component; 
				reg_m_a_z += tex1Dfetch( texref_m_Asp_17, JointIndex ) * vec_component;
			}

			ptr_m_a_x[i] = y_norm_inv_tmp * reg_m_a_x;
			ptr_m_a_y[i] = y_norm_inv_tmp * reg_m_a_y;
			ptr_m_a_z[i] = y_norm_inv_tmp * reg_m_a_z;

			ptr_m_g[i] = reg_m_g;
		}


		// Zero this thread's partial-sum slot for the dot(d_new, d_new) reduction.
		// NOTE(review): 0.0 is a double literal; the rest of the file uses 0.0f —
		// harmless after implicit conversion, but inconsistent.
		cutArray[threadIdx.x] = 0.0;

		// Grid-wide barrier (see SYNCALL macros): all blocks must finish writing
		// ptr_m_a_* above before any block gathers from them below.
		SYNCALL_BEGIN();
		SYNCALL_END();

		// Reduce huge array into several (known number) fixed-size subarrays
		for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumJoints; i += blockDim.x * gridDim.x)
		{

			// Normalized d entry, used for the CFM (regularization) term below.
			vec_component = y_norm_inv_tmp * ptr_m_d[i];

			// The six node indices referenced by joint row i.
			unsigned int t0Idx = ptr_m_JtdNodes_00[i];
			unsigned int t1Idx = ptr_m_JtdNodes_01[i];
			unsigned int t2Idx = ptr_m_JtdNodes_02[i];
			unsigned int t3Idx = ptr_m_JtdNodes_03[i];
			unsigned int t4Idx = ptr_m_JtdNodes_04[i];
			unsigned int t5Idx = ptr_m_JtdNodes_05[i];



			// d_new[i] = (J * a)[i]: 18-wide J row dotted with the per-node a vectors.
			float reg_m_d = ptr_m_J_00[i] * ptr_m_a_x[t0Idx] + ptr_m_J_01[i] * ptr_m_a_y[t0Idx] + ptr_m_J_02[i] * ptr_m_a_z[t0Idx] +
							ptr_m_J_03[i] * ptr_m_a_x[t1Idx] + ptr_m_J_04[i] * ptr_m_a_y[t1Idx] + ptr_m_J_05[i] * ptr_m_a_z[t1Idx] +
							ptr_m_J_06[i] * ptr_m_a_x[t2Idx] + ptr_m_J_07[i] * ptr_m_a_y[t2Idx] + ptr_m_J_08[i] * ptr_m_a_z[t2Idx] +
							ptr_m_J_09[i] * ptr_m_a_x[t3Idx] + ptr_m_J_10[i] * ptr_m_a_y[t3Idx] + ptr_m_J_11[i] * ptr_m_a_z[t3Idx] +
							ptr_m_J_12[i] * ptr_m_a_x[t4Idx] + ptr_m_J_13[i] * ptr_m_a_y[t4Idx] + ptr_m_J_14[i] * ptr_m_a_z[t4Idx] +
							ptr_m_J_15[i] * ptr_m_a_x[t5Idx] + ptr_m_J_16[i] * ptr_m_a_y[t5Idx] + ptr_m_J_17[i] * ptr_m_a_z[t5Idx];


			// Constraint-force-mixing (diagonal regularization) term.
			reg_m_d += ptr_m_CFM[i] * vec_component;

#if (LCPCG_PRECONDITIONER_LEFTJACOBI == 1)
			// Preconditioning
			reg_m_d *= ptr_m_invDiag[i];
#endif

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
			reg_m_d *= ptr_m_invDiag[i];
#endif


			// Place dot into reduced array: accumulate ||d_new||^2 partials.
			cutArray[threadIdx.x] += reg_m_d * reg_m_d;

			ptr_m_d[i] = reg_m_d;
		}


		__threadfence();
		__syncthreads();


		// Reduce 32 elements inside one warp.
		// NOTE(review): this is a warp-synchronous shared-memory reduction with
		// no volatile qualifier / __syncwarp() between steps — it relies on
		// pre-Volta implicit warp lockstep and is unsafe on SM70+ with
		// independent thread scheduling. Confirm the target architecture.
		if (threadInWarp < 16)
		{
			cutArray[threadIdx.x] += cutArray[threadIdx.x + 16];
			cutArray[threadIdx.x] += cutArray[threadIdx.x +  8];
			cutArray[threadIdx.x] += cutArray[threadIdx.x +  4];
			cutArray[threadIdx.x] += cutArray[threadIdx.x +  2];
			cutArray[threadIdx.x] += cutArray[threadIdx.x +  1];
		}
		__syncthreads();

		// Reduce elements inside block (warps per block: 4):
		// gather each warp's partial (slot warp*WARP_SIZE) into warpReduceRes.
		if (threadIdx.x < warpsPerBlock)
		{
			warpReduceRes[threadIdx.x] = cutArray[threadIdx.x * WARP_SIZE];
		}

		////////////////////////////////////////////////////////////////////////////
		// Reduce 4 elements
		// (WARNING! garbage in warpReduceRes can ruin your life
		// check twice number of warps per block
		////////////////////////////////////////////////////////////////////////////
		// Compiler should resolve this compile-time
		if (warpsPerBlock == 8)
		{
			if (threadIdx.x < 4)
			{
				REDUCE_4_BLOCK(warpReduceRes);
			}
		}
		else
		{
			if (threadIdx.x < 2)
			{
				REDUCE_2_BLOCK(warpReduceRes);
			}
		}

		// Write result of in-block reduce into global memory
		if (threadIdx.x == 0)
		{
			ptr_internal_results[blockIdx.x] = warpReduceRes[0];
			__threadfence();
		}

		SYNCALL_BEGIN()

			// ACTIONS INSIDE BLOCK 0/"CONTROL" HERE
			// (SYNCALL_BEGIN opens an `if (blockIdx.x == 0)` scope — only the
			// control block executes this final cross-block reduction.)

			// Initialize temporary
			if (threadIdx.x < WARP_SIZE)
			{
				cutArray[threadIdx.x] = 0.0;
			}
			
			// Fetch from global to shared memory (speed boost)
			if (threadIdx.x < gridDim.x)
			{
				cutArray[threadIdx.x] = ptr_internal_results[threadIdx.x];
			}

			// Reduce (number of multiprocessor is less than 32)
			// and empty cells in shared tmp are initialized to zero
			// so full-reduce can be done
			if (threadIdx.x < 16)
			{
				cutArray[threadIdx.x] += cutArray[threadIdx.x + 16];
				cutArray[threadIdx.x] += cutArray[threadIdx.x +  8];
				cutArray[threadIdx.x] += cutArray[threadIdx.x +  4];
				cutArray[threadIdx.x] += cutArray[threadIdx.x +  2];
				cutArray[threadIdx.x] += cutArray[threadIdx.x +  1];
				
				// write result in thread 0, block 0:
				// y_norm_inv = 1/||d_new|| — inverse norm used to renormalize the
				// next power-iteration step (spectral-norm estimate of A, presumably).
				if (threadIdx.x == 0)
				{
					y_norm_inv = 1.0f / sqrtf(cutArray[0]);
					__threadfence();
				}
			}
			
		SYNCALL_END();

		// Broadcast the global y_norm_inv into per-block shared storage.
		if (threadIdx.x == 0)
		{
			y_norm_inv_tmp = y_norm_inv;
		}
		__syncthreads();
	}

	SYNCALL_BEGIN()
	// Control block publishes the fixed step length. 1.6f * y_norm_inv looks
	// like alpha ~ 1.6/||A|| from the power-iteration estimate above (a value
	// < 2/||A|| keeps projected-gradient steps convergent) — TODO confirm.
	if (threadIdx.x == 0)
	{
		alpha_fucked = 1.6f * y_norm_inv;
		__threadfence();
	}
	SYNCALL_END();


	// Cache the step length and iteration count in shared memory per block.
	if (threadIdx.x == 0)
	{
		sh_alpha_fucked = alpha_fucked;
		sh_Iterations = m_Iterations;
	}
	__syncthreads();



	// Project the initial lambda onto the feasible box [Lo, Hi].
	// TODO: make ptr_Lo a texture reference
	// so it can be mapped from the next cycle (is it worth doing?)
	for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumJoints; i += blockDim.x * gridDim.x)
	{
		float reg_lambda = ptr_m_lambda[i];
		float reg_Lo = ptr_m_Lo[i];
		float reg_Hi = ptr_m_Hi[i];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
		// Double Jacobi:
		// multiply IG and LoHi by D^1/2 to scale to the preconditioned unknown
		// (division by invDiag == multiplication by D^1/2, assuming invDiag
		// holds D^-1/2 — TODO confirm).

		reg_Lo /= ptr_m_invDiag[i];
		reg_Hi /= ptr_m_invDiag[i];
		reg_lambda /= ptr_m_invDiag[i];
#endif

		// Clamp into [Lo, Hi].
		if (reg_lambda < reg_Lo)
		{
			reg_lambda = reg_Lo;
		}
		else if (reg_lambda > reg_Hi)
		{
			reg_lambda = reg_Hi;
		}

		ptr_m_lambda[i] = reg_lambda;
	}

	// Grid barrier: all lambdas projected before the matvec below reads them.
	SYNCALL_BEGIN();
	SYNCALL_END();

	float vec_component;

	// Calculate a = J * lambda
	// (gather form: for each node i, sum Asp entries of all joints attached to
	// it, over the 6 joint slots; texture fetches exploit the read-only cache).
//	for (int i = threadIdx.x + blockIdx.x * blockDim.x + 0; i < m_NumNodes; i += blockDim.x * gridDim.x)
	for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumNodes; i += blockDim.x * gridDim.x)
	{
		float reg_m_a_x = 0.0f;
		float reg_m_a_y = 0.0f;
		float reg_m_a_z = 0.0f;

		unsigned int JointTripleNum, JointNodeOffset;
		unsigned int JointIndex;

		// Slot 0. NOTE(review): `int j < JointTripleNum` is a signed/unsigned
		// comparison (JointTripleNum is unsigned) — benign for sane counts but
		// worth normalizing.
		JointTripleNum = ptr_m_NodesJointsNum_00[i];
		JointNodeOffset = ptr_m_NodesJointsOffset_00[i];
		for (int j = 0; j < JointTripleNum; ++j)
		{
			JointIndex = tex1Dfetch( texref_m_NodesJoints_00, JointNodeOffset + j );
			vec_component = ptr_m_lambda[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
			vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );//ptr_m_invDiag[JointIndex];
#endif

			reg_m_a_x += tex1Dfetch( texref_m_Asp_00, JointIndex ) * vec_component;
			reg_m_a_y += tex1Dfetch( texref_m_Asp_01, JointIndex ) * vec_component; 
			reg_m_a_z += tex1Dfetch( texref_m_Asp_02, JointIndex ) * vec_component;
		}

		// Slot 1.
		JointTripleNum = ptr_m_NodesJointsNum_01[i];
		JointNodeOffset = ptr_m_NodesJointsOffset_01[i];
		for (int j = 0; j < JointTripleNum; ++j)
		{
			JointIndex = tex1Dfetch(texref_m_NodesJoints_01, JointNodeOffset + j);
			vec_component = ptr_m_lambda[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
			vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

			reg_m_a_x += tex1Dfetch( texref_m_Asp_03, JointIndex ) * vec_component;
			reg_m_a_y += tex1Dfetch( texref_m_Asp_04, JointIndex ) * vec_component;
			reg_m_a_z += tex1Dfetch( texref_m_Asp_05, JointIndex ) * vec_component;
		}

		// Slot 2.
		JointTripleNum = ptr_m_NodesJointsNum_02[i];
		JointNodeOffset = ptr_m_NodesJointsOffset_02[i];
		for (int j = 0; j < JointTripleNum; ++j)
		{
			JointIndex = tex1Dfetch(texref_m_NodesJoints_02, JointNodeOffset + j);
			vec_component = ptr_m_lambda[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
			vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

			reg_m_a_x += tex1Dfetch( texref_m_Asp_06, JointIndex ) * vec_component;
			reg_m_a_y += tex1Dfetch( texref_m_Asp_07, JointIndex ) * vec_component; 
			reg_m_a_z += tex1Dfetch( texref_m_Asp_08, JointIndex ) * vec_component;
		}

		// Slot 3.
		JointTripleNum = ptr_m_NodesJointsNum_03[i];
		JointNodeOffset = ptr_m_NodesJointsOffset_03[i];
		for (int j = 0; j < JointTripleNum; ++j)
		{
			JointIndex = tex1Dfetch(texref_m_NodesJoints_03, JointNodeOffset + j);
			vec_component = ptr_m_lambda[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
			vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

			reg_m_a_x += tex1Dfetch( texref_m_Asp_09, JointIndex ) * vec_component;
			reg_m_a_y += tex1Dfetch( texref_m_Asp_10, JointIndex ) * vec_component; 
			reg_m_a_z += tex1Dfetch( texref_m_Asp_11, JointIndex ) * vec_component;
		}

		// Slot 4.
		JointTripleNum = ptr_m_NodesJointsNum_04[i];
		JointNodeOffset = ptr_m_NodesJointsOffset_04[i];
		for (int j = 0; j < JointTripleNum; ++j)
		{
			JointIndex = tex1Dfetch(texref_m_NodesJoints_04, JointNodeOffset + j);
			vec_component = ptr_m_lambda[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
			vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

			reg_m_a_x += tex1Dfetch( texref_m_Asp_12, JointIndex ) * vec_component;
			reg_m_a_y += tex1Dfetch( texref_m_Asp_13, JointIndex ) * vec_component; 
			reg_m_a_z += tex1Dfetch( texref_m_Asp_14, JointIndex ) * vec_component;
		}

		// Slot 5.
		JointTripleNum = ptr_m_NodesJointsNum_05[i];
		JointNodeOffset = ptr_m_NodesJointsOffset_05[i];
		for (int j = 0; j < JointTripleNum; ++j)
		{
			JointIndex = tex1Dfetch(texref_m_NodesJoints_05, JointNodeOffset + j);
			vec_component = ptr_m_lambda[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
			vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

			reg_m_a_x += tex1Dfetch( texref_m_Asp_15, JointIndex ) * vec_component;
			reg_m_a_y += tex1Dfetch( texref_m_Asp_16, JointIndex ) * vec_component; 
			reg_m_a_z += tex1Dfetch( texref_m_Asp_17, JointIndex ) * vec_component;
		}

		ptr_m_a_x[i] = reg_m_a_x;
		ptr_m_a_y[i] = reg_m_a_y;
		ptr_m_a_z[i] = reg_m_a_z;
	}

	// Grid barrier: a = Asp*lambda fully written before any block reads it.
	SYNCALL_BEGIN();
	SYNCALL_END();

	
	// Partial sums: PhiPhi accumulates the free/reduced-free gradient norm,
	// BetaNorm the chopped-gradient norm (proportioning criterion terms).
	cutArray_PhiPhi[threadIdx.x] = 0.0f;
	cutArray_BetaNorm[threadIdx.x] = 0.0f;

	// Reduce huge array into several (known number) fixed-size subarrays
	for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumJoints; i += blockDim.x * gridDim.x)
	{
		float interm_lambda = ptr_m_lambda[i];
		vec_component = interm_lambda;

		unsigned int t0Idx = ptr_m_JtdNodes_00[i];
		unsigned int t1Idx = ptr_m_JtdNodes_01[i];
		unsigned int t2Idx = ptr_m_JtdNodes_02[i];
		unsigned int t3Idx = ptr_m_JtdNodes_03[i];
		unsigned int t4Idx = ptr_m_JtdNodes_04[i];
		unsigned int t5Idx = ptr_m_JtdNodes_05[i];

		// Gradient g = A*lambda - b: J row i dotted with a, plus CFM term, minus RHS.
		float reg_m_g = ptr_m_J_00[i] * ptr_m_a_x[t0Idx] + ptr_m_J_01[i] * ptr_m_a_y[t0Idx] + ptr_m_J_02[i] * ptr_m_a_z[t0Idx] +
						ptr_m_J_03[i] * ptr_m_a_x[t1Idx] + ptr_m_J_04[i] * ptr_m_a_y[t1Idx] + ptr_m_J_05[i] * ptr_m_a_z[t1Idx] +
						ptr_m_J_06[i] * ptr_m_a_x[t2Idx] + ptr_m_J_07[i] * ptr_m_a_y[t2Idx] + ptr_m_J_08[i] * ptr_m_a_z[t2Idx] +
						ptr_m_J_09[i] * ptr_m_a_x[t3Idx] + ptr_m_J_10[i] * ptr_m_a_y[t3Idx] + ptr_m_J_11[i] * ptr_m_a_z[t3Idx] +
						ptr_m_J_12[i] * ptr_m_a_x[t4Idx] + ptr_m_J_13[i] * ptr_m_a_y[t4Idx] + ptr_m_J_14[i] * ptr_m_a_z[t4Idx] +
						ptr_m_J_15[i] * ptr_m_a_x[t5Idx] + ptr_m_J_16[i] * ptr_m_a_y[t5Idx] + ptr_m_J_17[i] * ptr_m_a_z[t5Idx];

		reg_m_g += ptr_m_CFM[i] * vec_component;
		reg_m_g -= ptr_m_RHS[i];

#if (LCPCG_PRECONDITIONER_LEFTJACOBI == 1)
		// Preconditioning
		reg_m_g *= ptr_m_invDiag[i];
#endif

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
		reg_m_g *= ptr_m_invDiag[i];
#endif

		ptr_m_g[i] = reg_m_g;


#if (LCPCG_PRECONDITIONER_FACEJACOBI == 1)
		// Preconditioning in face:
		// p = z
		float reg_m_p = ptr_m_invDiag[i] * reg_m_g;
#else
		// Initial search direction p = g (restricted to the free set below).
		float reg_m_p = reg_m_g;
#endif


		float interm_Lo = ptr_m_Lo[i];
		float interm_Hi = ptr_m_Hi[i];

		// Distances to the box bounds; |.| < ACT_SET_THRESHOLD marks an active bound.
		float tmp_float2 = interm_lambda - interm_Lo;
		float tmp_float3 = interm_lambda - interm_Hi;
		
		// NOTE(review): abs() on a float relies on the CUDA C++ overload; prefer
		// fabsf for clarity (L1528 below uses fabs — inconsistent).
		if (abs(tmp_float2) < ACT_SET_THRESHOLD)
		{
			// Active at lower bound: p is zeroed; negative gradient contributes
			// to the chopped-gradient (beta) norm.
			reg_m_p = 0.0f;

			if (reg_m_g < 0.0f)
			{
#if (PROPORTIONING_DOUBLE_STEP == 1)

				// NOTE(review): these reads use the *global* alpha_fucked while
				// the free-set branch below uses the cached sh_alpha_fucked —
				// same value, but the global read is slower; likely unintended.
				if (tmp_float3 / alpha_fucked > reg_m_g)
				{
					cutArray_BetaNorm[threadIdx.x] += reg_m_g * tmp_float3 / alpha_fucked;
				}
				else
				{
					cutArray_BetaNorm[threadIdx.x] += reg_m_g * reg_m_g;
				}

#else

				cutArray_BetaNorm[threadIdx.x] += reg_m_g * reg_m_g;

#endif
			}
		}
		else if (abs(tmp_float3) < ACT_SET_THRESHOLD)
		{
			// Active at upper bound: mirror case of the branch above.
			reg_m_p = 0.0f;

			if (reg_m_g > 0.0f)
			{
#if (PROPORTIONING_DOUBLE_STEP == 1)

				if (tmp_float2 / alpha_fucked < reg_m_g)
				{
					cutArray_BetaNorm[threadIdx.x] += reg_m_g * tmp_float2 / alpha_fucked;
				}
				else
				{
					cutArray_BetaNorm[threadIdx.x] += reg_m_g * reg_m_g;
				}

#else

				cutArray_BetaNorm[threadIdx.x] += reg_m_g * reg_m_g;

#endif
			}
		}
		else
		{
			// Free set: accumulate the reduced free gradient norm (phi),
			// capped by the distance-to-bound over the fixed step length.
			if (reg_m_g > 0.0f)
			{
				if ((tmp_float2 / sh_alpha_fucked) < reg_m_g)
				{
					cutArray_PhiPhi[threadIdx.x] += reg_m_g * tmp_float2 / sh_alpha_fucked;
				}
				else
				{
					cutArray_PhiPhi[threadIdx.x] += reg_m_g * reg_m_g;
				}
			}
			else
			{
				if ((tmp_float3 / sh_alpha_fucked) > reg_m_g)
				{
					cutArray_PhiPhi[threadIdx.x] += reg_m_g * tmp_float3 / sh_alpha_fucked;
				}
				else
				{
					cutArray_PhiPhi[threadIdx.x] += reg_m_g * reg_m_g;
				}
			}
		}

		ptr_m_p[i] = reg_m_p;
	}


	// Reduce 32 elements inside one warp
	// (warp-synchronous; see the Volta-safety note on the first reduction above).
	if (threadInWarp < 16)
	{
		cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x + 16];
		cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  8];
		cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  4];
		cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  2];
		cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  1];

		cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x + 16];
		cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  8];
		cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  4];
		cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  2];
		cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  1];
	}
	__syncthreads();

	// Reduce elements inside block (warps per block: 4)
	if (threadIdx.x < warpsPerBlock)
	{
		warpReduceRes_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x * WARP_SIZE];
		warpReduceRes_BetaNorm[threadIdx.x] = cutArray_BetaNorm[threadIdx.x * WARP_SIZE];
	}

	////////////////////////////////////////////////////////////////////////////
	// Reduce 4 elements
	// (WARNING! garbage in warpReduceRes can ruin your life
	// check twice number of warps per block
	////////////////////////////////////////////////////////////////////////////
	// Compiler should resolve this if compile-time
	if (warpsPerBlock == 8)
	{
		if (threadIdx.x < 4)
		{
			REDUCE_4_BLOCK(warpReduceRes_PhiPhi);
			REDUCE_4_BLOCK(warpReduceRes_BetaNorm);
		}
	}
	else
	{
		if (threadIdx.x < 2)
		{
			REDUCE_2_BLOCK(warpReduceRes_PhiPhi);
			REDUCE_2_BLOCK(warpReduceRes_BetaNorm);
		}
	}

	// Write result of in-block reduce into global memory
	if (threadIdx.x == 0)
	{
		ptr_internal_results[blockIdx.x] = warpReduceRes_PhiPhi[0];
		ptr_internal_results_1[blockIdx.x] = warpReduceRes_BetaNorm[0];

		__threadfence();
	}

	SYNCALL_BEGIN()

		// ACTIONS INSIDE BLOCK 0/"CONTROL" HERE
		// (control block folds the per-block partials into PhiPhi / BetaNorm)

		if (threadIdx.x < WARP_SIZE)
		{
			cutArray_PhiPhi[threadIdx.x] = 0.0f;
			cutArray_BetaNorm[threadIdx.x] = 0.0f;
		}
		
		// Fetch from global to shared memory (speed boost)
		if (threadIdx.x < gridDim.x)
		{
			cutArray_PhiPhi[threadIdx.x] = ptr_internal_results[threadIdx.x];
			cutArray_BetaNorm[threadIdx.x] = ptr_internal_results_1[threadIdx.x];
		}

		// Reduce (number of multiprocessor is less than 32)
		// and empty cells in shared tmp are initialized to zero
		// so full-reduce can be done
		if (threadIdx.x < 16)
		{
			cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x + 16];
			cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  8];
			cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  4];
			cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  2];
			cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  1];

			cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x + 16];
			cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  8];
			cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  4];
			cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  2];
			cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  1];

			// write result in thread 0, block 0
			if (threadIdx.x == 0)
			{
				BetaNorm = cutArray_BetaNorm[0];
				PhiPhi = cutArray_PhiPhi[0];

				__threadfence();
			}
		}
		
	SYNCALL_END();

	// Broadcast the reduced norms into per-block shared copies.
	if (threadIdx.x == 0)
	{
		sh_BetaNorm = BetaNorm;
		sh_PhiPhi = PhiPhi;
	}
	__syncthreads();



	unsigned int k;

	// Main LCPCG iteration loop.
	for (k = 0; k < sh_Iterations; ++k)
	{
		// Proportioning test: if the chopped-gradient norm is dominated
		// (||beta||^2 <= GAMMA^2 * ||phi||^2), take a CG / expansion step;
		// the else-branch (proportioning step, presumably) lies outside the
		// visible region.
		if (sh_BetaNorm <= GAMMA_Sq * sh_PhiPhi)
		{
			//////////////////////////////////////////////////////////////////////////
			// LCPCG: Prepare to CG or Expansion Steps
			//////////////////////////////////////////////////////////////////////////

			// NOTE(review): shadows the vec_component declared earlier in the
			// kernel — harmless but confusing.
			float vec_component;

			// Calculate a = J * p
			// (same 6-slot gather as the a = J * lambda loop above, with the
			// search direction p as the source vector).
			for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumNodes; i += blockDim.x * gridDim.x)
			{
				float reg_m_a_x = 0.0f;
				float reg_m_a_y = 0.0f;
				float reg_m_a_z = 0.0f;

				unsigned int JointTripleNum, JointNodeOffset;
				unsigned int JointIndex;

				// Slot 0.
				JointTripleNum = ptr_m_NodesJointsNum_00[i];
				JointNodeOffset = ptr_m_NodesJointsOffset_00[i];
				for (int j = 0; j < JointTripleNum; ++j)
				{
					JointIndex = tex1Dfetch( texref_m_NodesJoints_00, JointNodeOffset + j );
					vec_component = ptr_m_p[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
					vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

					reg_m_a_x += tex1Dfetch( texref_m_Asp_00, JointIndex ) * vec_component;
					reg_m_a_y += tex1Dfetch( texref_m_Asp_01, JointIndex ) * vec_component; 
					reg_m_a_z += tex1Dfetch( texref_m_Asp_02, JointIndex ) * vec_component;
				}

				// Slot 1.
				JointTripleNum = ptr_m_NodesJointsNum_01[i];
				JointNodeOffset = ptr_m_NodesJointsOffset_01[i];
				for (int j = 0; j < JointTripleNum; ++j)
				{
					JointIndex = tex1Dfetch(texref_m_NodesJoints_01, JointNodeOffset + j);
					vec_component = ptr_m_p[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
					vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

					reg_m_a_x += tex1Dfetch( texref_m_Asp_03, JointIndex ) * vec_component;
					reg_m_a_y += tex1Dfetch( texref_m_Asp_04, JointIndex ) * vec_component;
					reg_m_a_z += tex1Dfetch( texref_m_Asp_05, JointIndex ) * vec_component;
				}

				// Slot 2.
				JointTripleNum = ptr_m_NodesJointsNum_02[i];
				JointNodeOffset = ptr_m_NodesJointsOffset_02[i];
				for (int j = 0; j < JointTripleNum; ++j)
				{
					JointIndex = tex1Dfetch(texref_m_NodesJoints_02, JointNodeOffset + j);
					vec_component = ptr_m_p[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
					vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

					reg_m_a_x += tex1Dfetch( texref_m_Asp_06, JointIndex ) * vec_component;
					reg_m_a_y += tex1Dfetch( texref_m_Asp_07, JointIndex ) * vec_component; 
					reg_m_a_z += tex1Dfetch( texref_m_Asp_08, JointIndex ) * vec_component;
				}

				// Slot 3.
				JointTripleNum = ptr_m_NodesJointsNum_03[i];
				JointNodeOffset = ptr_m_NodesJointsOffset_03[i];
				for (int j = 0; j < JointTripleNum; ++j)
				{
					JointIndex = tex1Dfetch(texref_m_NodesJoints_03, JointNodeOffset + j);
					vec_component = ptr_m_p[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
					vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

					reg_m_a_x += tex1Dfetch( texref_m_Asp_09, JointIndex ) * vec_component;
					reg_m_a_y += tex1Dfetch( texref_m_Asp_10, JointIndex ) * vec_component; 
					reg_m_a_z += tex1Dfetch( texref_m_Asp_11, JointIndex ) * vec_component;
				}

				// Slot 4.
				JointTripleNum = ptr_m_NodesJointsNum_04[i];
				JointNodeOffset = ptr_m_NodesJointsOffset_04[i];
				for (int j = 0; j < JointTripleNum; ++j)
				{
					JointIndex = tex1Dfetch(texref_m_NodesJoints_04, JointNodeOffset + j);
					vec_component = ptr_m_p[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
					vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

					reg_m_a_x += tex1Dfetch( texref_m_Asp_12, JointIndex ) * vec_component;
					reg_m_a_y += tex1Dfetch( texref_m_Asp_13, JointIndex ) * vec_component; 
					reg_m_a_z += tex1Dfetch( texref_m_Asp_14, JointIndex ) * vec_component;
				}

				// Slot 5.
				JointTripleNum = ptr_m_NodesJointsNum_05[i];
				JointNodeOffset = ptr_m_NodesJointsOffset_05[i];
				for (int j = 0; j < JointTripleNum; ++j)
				{
					JointIndex = tex1Dfetch(texref_m_NodesJoints_05, JointNodeOffset + j);
					vec_component = ptr_m_p[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
					vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

					reg_m_a_x += tex1Dfetch( texref_m_Asp_15, JointIndex ) * vec_component;
					reg_m_a_y += tex1Dfetch( texref_m_Asp_16, JointIndex ) * vec_component; 
					reg_m_a_z += tex1Dfetch( texref_m_Asp_17, JointIndex ) * vec_component;
				}

				ptr_m_a_x[i] = reg_m_a_x;
				ptr_m_a_y[i] = reg_m_a_y;
				ptr_m_a_z[i] = reg_m_a_z;
			}

			// Grid barrier: a = Asp*p fully written before the gather below.
			SYNCALL_BEGIN();
			SYNCALL_END();

			// NOTE(review): the cutArray_* names are reused with different
			// meanings here — BetaNorm holds dot(g, p), GradNorm holds
			// dot(p, A*p), PhiPhi holds a running MINIMUM (feasible step),
			// hence the CUDA_FLT_MAX init. Confusing but intentional reuse.
			cutArray_BetaNorm[threadIdx.x] = 0.0f;
			cutArray_GradNorm[threadIdx.x] = 0.0f;
			cutArray_PhiPhi[threadIdx.x] = CUDA_FLT_MAX;

			// Calculate ap_vec = A * p = J * a
			// Calculate "cut arrays" for dot_pAp, dot_gp
			// Reduce huge array into several (known number) fixed-size subarrays
			for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumJoints; i += blockDim.x * gridDim.x)
			{
				float reg_m_p = ptr_m_p[i];

				unsigned int t0Idx = ptr_m_JtdNodes_00[i];
				unsigned int t1Idx = ptr_m_JtdNodes_01[i];
				unsigned int t2Idx = ptr_m_JtdNodes_02[i];
				unsigned int t3Idx = ptr_m_JtdNodes_03[i];
				unsigned int t4Idx = ptr_m_JtdNodes_04[i];
				unsigned int t5Idx = ptr_m_JtdNodes_05[i];

				float reg_m_ap_vec =
					ptr_m_J_00[i] * ptr_m_a_x[t0Idx] + ptr_m_J_01[i] * ptr_m_a_y[t0Idx] + ptr_m_J_02[i] * ptr_m_a_z[t0Idx] +
					ptr_m_J_03[i] * ptr_m_a_x[t1Idx] + ptr_m_J_04[i] * ptr_m_a_y[t1Idx] + ptr_m_J_05[i] * ptr_m_a_z[t1Idx] +
					ptr_m_J_06[i] * ptr_m_a_x[t2Idx] + ptr_m_J_07[i] * ptr_m_a_y[t2Idx] + ptr_m_J_08[i] * ptr_m_a_z[t2Idx] +
					ptr_m_J_09[i] * ptr_m_a_x[t3Idx] + ptr_m_J_10[i] * ptr_m_a_y[t3Idx] + ptr_m_J_11[i] * ptr_m_a_z[t3Idx] +
					ptr_m_J_12[i] * ptr_m_a_x[t4Idx] + ptr_m_J_13[i] * ptr_m_a_y[t4Idx] + ptr_m_J_14[i] * ptr_m_a_z[t4Idx] +
					ptr_m_J_15[i] * ptr_m_a_x[t5Idx] + ptr_m_J_16[i] * ptr_m_a_y[t5Idx] + ptr_m_J_17[i] * ptr_m_a_z[t5Idx];

				reg_m_ap_vec += ptr_m_CFM[i] * reg_m_p;

#if (LCPCG_PRECONDITIONER_LEFTJACOBI == 1)
				// Preconditioning
				reg_m_ap_vec *= ptr_m_invDiag[i];
#endif

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
				reg_m_ap_vec *= ptr_m_invDiag[i];
#endif

				ptr_m_ap_vec[i] = reg_m_ap_vec;

				// dot_pAp
				cutArray_GradNorm[threadIdx.x] += reg_m_p * reg_m_ap_vec;

				float reg_m_lambda = ptr_m_lambda[i]; 
				float reg_m_Lo = ptr_m_Lo[i], reg_m_Hi = ptr_m_Hi[i];

#if (LCPCG_PRECONDITIONER_FACEJACOBI == 1)
				// Preconditioner in face:
				// z dot g = D^-1 * phi dot g

				// dot_gp
				if ( (fabs(reg_m_lambda - reg_m_Lo) < ACT_SET_THRESHOLD) ||
					 (fabs(reg_m_lambda - reg_m_Hi) < ACT_SET_THRESHOLD) )
				{
					 // Active set
				}
				else
				{
					// Free set
					float reg_m_g = ptr_m_g[i];
					cutArray_BetaNorm[threadIdx.x] += reg_m_g * (ptr_m_invDiag[i] * reg_m_g);
				}
#else
				// dot_gp
				cutArray_BetaNorm[threadIdx.x] += ptr_m_g[i] * reg_m_p;
#endif

				// MINIMUM: largest feasible step along -p before lambda[i]
				// leaves [Lo, Hi] (alpha_f candidate).
				if (reg_m_p > 0.0f)
				{
					float tmp_float = (reg_m_lambda - reg_m_Lo) / reg_m_p;

					if (tmp_float < cutArray_PhiPhi[threadIdx.x])
					{
						cutArray_PhiPhi[threadIdx.x] = tmp_float;
					}
				}
				else if (reg_m_p < 0.0f)
				{
					float tmp_float = (reg_m_lambda - reg_m_Hi) / reg_m_p;

					if (tmp_float < cutArray_PhiPhi[threadIdx.x])
					{
						cutArray_PhiPhi[threadIdx.x] = tmp_float;
					}
				}
			}


			// Reduce 32 elements inside one warp
			// (sum for dot_gp / dot_pAp, min for the feasible step;
			// warp-synchronous — see the Volta-safety note above).
			if (threadInWarp < 16)
			{
				cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x + 16];
				cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  8];
				cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  4];
				cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  2];
				cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  1];

				cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x + 16];
				cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  8];
				cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  4];
				cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  2];
				cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  1];

				// MINIMUM
				if (cutArray_PhiPhi[threadIdx.x + 16] < cutArray_PhiPhi[threadIdx.x])
				{
					cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x + 16];
				}
				if (cutArray_PhiPhi[threadIdx.x +  8] < cutArray_PhiPhi[threadIdx.x])
				{
					cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  8];
				}
				if (cutArray_PhiPhi[threadIdx.x +  4] < cutArray_PhiPhi[threadIdx.x])
				{
					cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  4];
				}
				if (cutArray_PhiPhi[threadIdx.x +  2] < cutArray_PhiPhi[threadIdx.x])
				{
					cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  2];
				}
				if (cutArray_PhiPhi[threadIdx.x +  1] < cutArray_PhiPhi[threadIdx.x])
				{
					cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  1];
				}
			}
			__syncthreads();

			// Reduce elements inside block (warps per block: 4)
			if (threadIdx.x < warpsPerBlock)
			{
				warpReduceRes_BetaNorm[threadIdx.x] = cutArray_BetaNorm[threadIdx.x * WARP_SIZE];
				warpReduceRes_GradNorm[threadIdx.x] = cutArray_GradNorm[threadIdx.x * WARP_SIZE];
				warpReduceRes_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x * WARP_SIZE];
			}

			////////////////////////////////////////////////////////////////////////////
			// Reduce 4 elements
			// (WARNING! garbage in warpReduceRes can ruin your life
			// check twice number of warps per block
			////////////////////////////////////////////////////////////////////////////
			// Compiler should resolve this if compile-time
			if (warpsPerBlock == 8)
			{
				if (threadIdx.x < 4)
				{
					REDUCE_4_BLOCK(warpReduceRes_BetaNorm);
					REDUCE_4_BLOCK(warpReduceRes_GradNorm);

					REDUCE_MIN_4_BLOCK(warpReduceRes_PhiPhi);
				}
			}
			else
			{
				if (threadIdx.x < 2)
				{
					REDUCE_2_BLOCK(warpReduceRes_BetaNorm);
					REDUCE_2_BLOCK(warpReduceRes_GradNorm);

					REDUCE_MIN_2_BLOCK(warpReduceRes_PhiPhi);
				}
			}

			// Write result of in-block reduce into global memory
			if (threadIdx.x == 0)
			{
				ptr_internal_results[blockIdx.x] = warpReduceRes_BetaNorm[0];
				ptr_internal_results_1[blockIdx.x] = warpReduceRes_GradNorm[0];
				ptr_internal_results_2[blockIdx.x] = warpReduceRes_PhiPhi[0];

				__threadfence();
			}

			SYNCALL_BEGIN()

				// ACTIONS INSIDE BLOCK 0/"CONTROL" HERE
				// (fold per-block partials: sum dot_gp and dot_pAp, min the
				// feasible step; then publish alpha_cg and alpha_f)

				if (threadIdx.x < WARP_SIZE)
				{
					cutArray_BetaNorm[threadIdx.x] = 0.0f;
					cutArray_GradNorm[threadIdx.x] = 0.0f;
					
					cutArray_PhiPhi[threadIdx.x] = CUDA_FLT_MAX;
				}
				
				// Fetch from global to shared memory (speed boost)
				if (threadIdx.x < gridDim.x)
				{
					cutArray_BetaNorm[threadIdx.x] = ptr_internal_results[threadIdx.x];
					cutArray_GradNorm[threadIdx.x] = ptr_internal_results_1[threadIdx.x];

					cutArray_PhiPhi[threadIdx.x] = ptr_internal_results_2[threadIdx.x];
				}

				// Reduce (number of multiprocessor is less than 32)
				// and empty cells in shared tmp are initialized to zero
				// so full-reduce can be done
				if (threadIdx.x < 16)
				{
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x + 16];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  8];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  4];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  2];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  1];

					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x + 16];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  8];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  4];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  2];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  1];

					// MINIMUM
					if (cutArray_PhiPhi[threadIdx.x + 16] < cutArray_PhiPhi[threadIdx.x])
					{
						cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x + 16];
					}
					if (cutArray_PhiPhi[threadIdx.x +  8] < cutArray_PhiPhi[threadIdx.x])
					{
						cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  8];
					}
					if (cutArray_PhiPhi[threadIdx.x +  4] < cutArray_PhiPhi[threadIdx.x])
					{
						cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  4];
					}
					if (cutArray_PhiPhi[threadIdx.x +  2] < cutArray_PhiPhi[threadIdx.x])
					{
						cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  2];
					}
					if (cutArray_PhiPhi[threadIdx.x +  1] < cutArray_PhiPhi[threadIdx.x])
					{
						cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  1];
					}

					// write result in thread 0, block 0
					if (threadIdx.x == 0)
					{
		//				alpha_cg = dot_gp / dot_pAp;
						interm_dot_1 = cutArray_GradNorm[0];

						// NOTE(review): no guard against dot_pAp == 0 (p == 0);
						// confirm upstream logic guarantees a nonzero direction here.
						alpha_cg = cutArray_BetaNorm[0] / cutArray_GradNorm[0];
						alpha_f = cutArray_PhiPhi[0];

						__threadfence();
					}
				}
			SYNCALL_END();


			// Cache CG step length and max feasible step per block.
			if (threadIdx.x == 0)
			{
				sh_alpha_cg = alpha_cg;
				sh_alpha_f = alpha_f;
			}
			__syncthreads();


			// Full CG step stays feasible only if alpha_cg < alpha_f; otherwise
			// the expansion branch (outside the visible region, presumably) runs.
			if (sh_alpha_cg < sh_alpha_f)
			{
				//////////////////////////////////////////////////////////////////////////
				// LCPCG: Conjugate Gradient Step
				//////////////////////////////////////////////////////////////////////////

				// Partials: cutArray = dot(g_new, A*p) (for the CG beta),
				// PhiPhi = free-gradient norm, BetaNorm = chopped-gradient norm,
				// GradNorm = full gradient norm.
				cutArray[threadIdx.x] = 0.0f;

				cutArray_PhiPhi[threadIdx.x] = 0.0f;
				cutArray_BetaNorm[threadIdx.x] = 0.0f;
				cutArray_GradNorm[threadIdx.x] = 0.0f;

				for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumJoints; i += blockDim.x * gridDim.x)
				{
					// lambda <- lambda - alpha_cg * p
					float interm_lambda;
					interm_lambda = ptr_m_lambda[i] - sh_alpha_cg * ptr_m_p[i];
					ptr_m_lambda[i] = interm_lambda;
					
					// g <- g - alpha_cg * A*p
					float tmp_float;
					tmp_float = ptr_m_g[i] - sh_alpha_cg * ptr_m_ap_vec[i];
					ptr_m_g[i] = tmp_float;

					float interm_Lo = ptr_m_Lo[i];
					float interm_Hi = ptr_m_Hi[i];
					float tmp_float2 = interm_lambda - interm_Lo;
					float tmp_float3 = interm_lambda - interm_Hi;
					

					// Re-classify against the active set (same scheme as the
					// pre-loop classification above).
					if (abs(tmp_float2) < ACT_SET_THRESHOLD)
					{
						if (tmp_float < 0.0f)
						{
#if (PROPORTIONING_DOUBLE_STEP == 1)

							if (tmp_float3 / sh_alpha_fucked > tmp_float)
							{
								cutArray_BetaNorm[threadIdx.x] += tmp_float * tmp_float3 / sh_alpha_fucked;
							}
							else
							{
								cutArray_BetaNorm[threadIdx.x] += tmp_float * tmp_float;
							}

#else

							cutArray_BetaNorm[threadIdx.x] += tmp_float * tmp_float;

#endif

							cutArray_GradNorm[threadIdx.x] += tmp_float * tmp_float;
						}
					}
					else if (abs(tmp_float3) < ACT_SET_THRESHOLD)
					{
						if (tmp_float > 0.0f)
						{
#if (PROPORTIONING_DOUBLE_STEP == 1)

							if (tmp_float2 / sh_alpha_fucked < tmp_float)
							{
								cutArray_BetaNorm[threadIdx.x] += tmp_float * tmp_float2 / sh_alpha_fucked;
							}
							else
							{
								cutArray_BetaNorm[threadIdx.x] += tmp_float * tmp_float;
							}

#else

							cutArray_BetaNorm[threadIdx.x] += tmp_float * tmp_float;

#endif

							cutArray_GradNorm[threadIdx.x] += tmp_float * tmp_float;
						}
					}
					else
					{
#if (LCPCG_PRECONDITIONER_FACEJACOBI == 1)
						// Preconditioner in face:
						// (phi dot A * p) replaced with (z dot A * p)
						cutArray[threadIdx.x] += (ptr_m_invDiag[i] * tmp_float) * ptr_m_ap_vec[i];
#else
						cutArray[threadIdx.x] += tmp_float * ptr_m_ap_vec[i];
#endif
						
						cutArray_GradNorm[threadIdx.x] += tmp_float * tmp_float;

						if (tmp_float > 0.0f)
						{
							if ((tmp_float2 / sh_alpha_fucked) < tmp_float)
							{
								cutArray_PhiPhi[threadIdx.x] += tmp_float * tmp_float2 / sh_alpha_fucked;
							}
							else
							{
								cutArray_PhiPhi[threadIdx.x] += tmp_float * tmp_float;
							}
						}
						else
						{
							if ((tmp_float3 / sh_alpha_fucked) > tmp_float)
							{
								cutArray_PhiPhi[threadIdx.x] += tmp_float * tmp_float3 / sh_alpha_fucked;
							}
							else
							{
								cutArray_PhiPhi[threadIdx.x] += tmp_float * tmp_float;
							}
						}
					}
				}

				// Reduce 32 elements inside one warp
				// NOTE(review): warp-synchronous reduction — shared slots are read and
				// written without volatile or __syncwarp() between steps. This relies
				// on pre-Volta implicit warp lockstep; confirm the target compute
				// capability, or add __syncwarp()/volatile for Volta+ correctness.
				if (threadInWarp < 16)
				{
					cutArray[threadIdx.x] += cutArray[threadIdx.x + 16];
					cutArray[threadIdx.x] += cutArray[threadIdx.x +  8];
					cutArray[threadIdx.x] += cutArray[threadIdx.x +  4];
					cutArray[threadIdx.x] += cutArray[threadIdx.x +  2];
					cutArray[threadIdx.x] += cutArray[threadIdx.x +  1];

					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x + 16];
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  8];
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  4];
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  2];
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  1];

					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x + 16];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  8];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  4];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  2];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  1];

					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x + 16];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  8];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  4];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  2];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  1];
				}
				__syncthreads();

				// Reduce elements inside block: gather each warp's partial result
				// (slot 0 of its 32-wide segment) into a compact per-warp array.
				if (threadIdx.x < warpsPerBlock)
				{
					warpReduceRes[threadIdx.x] = cutArray[threadIdx.x * WARP_SIZE];
					warpReduceRes_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x * WARP_SIZE];
					warpReduceRes_BetaNorm[threadIdx.x] = cutArray_BetaNorm[threadIdx.x * WARP_SIZE];
					warpReduceRes_GradNorm[threadIdx.x] = cutArray_GradNorm[threadIdx.x * WARP_SIZE];
				}

				////////////////////////////////////////////////////////////////////////////
				// Reduce the per-warp partials (4 or 8 entries, chosen below)
				// (WARNING! garbage in warpReduceRes can ruin your life
				// check twice number of warps per block
				////////////////////////////////////////////////////////////////////////////
				// Compiler should resolve this if compile-time
				// NOTE(review): no __syncthreads() between the writes above and these
				// reads — safe only while writers and readers are in the same warp.
				if (warpsPerBlock == 8)
				{
					if (threadIdx.x < 4)
					{
						REDUCE_4_BLOCK(warpReduceRes);
						REDUCE_4_BLOCK(warpReduceRes_PhiPhi);
						REDUCE_4_BLOCK(warpReduceRes_BetaNorm);
						REDUCE_4_BLOCK(warpReduceRes_GradNorm);
					}
				}
				else
				{
					if (threadIdx.x < 2)
					{
						REDUCE_2_BLOCK(warpReduceRes);
						REDUCE_2_BLOCK(warpReduceRes_PhiPhi);
						REDUCE_2_BLOCK(warpReduceRes_BetaNorm);
						REDUCE_2_BLOCK(warpReduceRes_GradNorm);
					}
				}

				// Write result of in-block reduce into global memory, one slot per
				// block; the fence publishes it before the grid-wide sync below.
				if (threadIdx.x == 0)
				{
					ptr_internal_results  [blockIdx.x] = warpReduceRes[0];
					ptr_internal_results_1[blockIdx.x] = warpReduceRes_PhiPhi[0];
					ptr_internal_results_2[blockIdx.x] = warpReduceRes_BetaNorm[0];
					ptr_internal_results_3[blockIdx.x] = warpReduceRes_GradNorm[0];

					__threadfence();
				}

				// Grid-wide barrier; the body between BEGIN/END runs in block 0 only
				// and performs the final cross-block reduction of the four partials.
				SYNCALL_BEGIN()

					// ACTIONS INSIDE BLOCK 0/"CONTROL" HERE

					if (threadIdx.x < WARP_SIZE)
					{
						// NOTE(review): 0.0 is a double literal (siblings use 0.0f) —
						// harmless for a zero store, but inconsistent.
						cutArray[threadIdx.x] = 0.0;

						cutArray_PhiPhi[threadIdx.x] = 0.0f;
						cutArray_BetaNorm[threadIdx.x] = 0.0f;
						cutArray_GradNorm[threadIdx.x] = 0.0f;
					}
					
					// Fetch from global to shared memory (speed boost)
					// NOTE(review): assumes gridDim.x <= WARP_SIZE (32); larger grids
					// would leave per-block partials unread.
					if (threadIdx.x < gridDim.x)
					{
						cutArray[threadIdx.x] = ptr_internal_results[threadIdx.x];
						cutArray_PhiPhi[threadIdx.x] = ptr_internal_results_1[threadIdx.x];
						cutArray_BetaNorm[threadIdx.x] = ptr_internal_results_2[threadIdx.x];
						cutArray_GradNorm[threadIdx.x] = ptr_internal_results_3[threadIdx.x];
					}

					// Reduce (number of multiprocessor is less than 32)
					// and empty cells in shared tmp are initialized to zero
					// so full-reduce can be done
					if (threadIdx.x < 16)
					{
						cutArray[threadIdx.x] += cutArray[threadIdx.x + 16];
						cutArray[threadIdx.x] += cutArray[threadIdx.x +  8];
						cutArray[threadIdx.x] += cutArray[threadIdx.x +  4];
						cutArray[threadIdx.x] += cutArray[threadIdx.x +  2];
						cutArray[threadIdx.x] += cutArray[threadIdx.x +  1];

						cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x + 16];
						cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  8];
						cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  4];
						cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  2];
						cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  1];

						cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x + 16];
						cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  8];
						cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  4];
						cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  2];
						cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  1];

						cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x + 16];
						cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  8];
						cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  4];
						cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  2];
						cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  1];

						// write result in thread 0, block 0
						if (threadIdx.x == 0)
						{
							BetaNorm = cutArray_BetaNorm[0];
							PhiPhi = cutArray_PhiPhi[0];
				
							GradNorm = cutArray_GradNorm[0];

							// dot_phiAp
							interm_dot = cutArray[0];

							// CG coefficient: beta = (phi dot A*p) / (p dot A*p)
							// interm_dot_1 should be dot_pAp
							beta = cutArray[0] / interm_dot_1;

							__threadfence();
						}
					}
					
				SYNCALL_END();


				// Broadcast the globally reduced beta to all threads of this block.
				if (threadIdx.x == 0)
				{
					sh_beta = beta;
				}
				__syncthreads();


				// Calculate p = phi(x_k+1) - beta * p;
				// phi is the free gradient: zero on active components, so an active
				// component's new direction is simply -beta * p.
				for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumJoints; i += blockDim.x * gridDim.x)
				{
					float tmp_float = ptr_m_g[i];

					// Zero the gradient on components at (near) either bound.
					// Note: fabs here vs abs elsewhere in this kernel — see review
					// notes at the classification loop above.
					if ( (fabs(ptr_m_lambda[i] - ptr_m_Lo[i]) < ACT_SET_THRESHOLD) ||
						 (fabs(ptr_m_lambda[i] - ptr_m_Hi[i]) < ACT_SET_THRESHOLD) )
					{
						tmp_float = 0.0f;
					}

#if (LCPCG_PRECONDITIONER_FACEJACOBI == 1)
					// Preconditioner in face:
					// p = z - beta * p;
					ptr_m_p[i] = (ptr_m_invDiag[i] * tmp_float) - sh_beta * ptr_m_p[i];
#else
					ptr_m_p[i] = tmp_float - sh_beta * ptr_m_p[i];
#endif
				}

			}
			else
			{
				//////////////////////////////////////////////////////////////////////////
				// LCPCG: Expansion Step
				// Take the feasible fraction alpha_f of the CG step, then push free
				// components a further fixed step along -g, and project onto [Lo, Hi].
				//////////////////////////////////////////////////////////////////////////

				for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumJoints; i += blockDim.x * gridDim.x)
				{
					// g = g - alpha_f * (A*p)
					float interm_g;
					interm_g = ptr_m_g[i] - sh_alpha_f * ptr_m_ap_vec[i];
					ptr_m_g[i] = interm_g;

					// Trial step: lambda = lambda - alpha_f * p
					float interm_lambda;
					interm_lambda = ptr_m_lambda[i] - sh_alpha_f * ptr_m_p[i];
					
					float interm_Lo = ptr_m_Lo[i];
					float interm_Hi = ptr_m_Hi[i];

					// Free components take an extra fixed-length gradient step.
					if ( (abs(interm_lambda - interm_Lo) > ACT_SET_THRESHOLD) &&
						 (abs(interm_lambda - interm_Hi) > ACT_SET_THRESHOLD) )
					{
						interm_lambda -= sh_alpha_fucked * interm_g;
					}

					// Project back onto the box.
					if (interm_lambda < interm_Lo)
					{
						interm_lambda = interm_Lo;
					}
					else if (interm_lambda > interm_Hi)
					{
						interm_lambda = interm_Hi;
					}

					ptr_m_lambda[i] = interm_lambda;
				}

				// Grid-wide barrier: all lambda writes must land before the
				// matrix-vector product below reads them.
				SYNCALL_BEGIN();
				SYNCALL_END();

				float vec_component;

				// Sparse product a = Asp * lambda, gathered per node: each node sums
				// contributions from up to six joint-triple lists (texture-fetched
				// index + 3x3 coefficient columns per list).
				for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumNodes; i += blockDim.x * gridDim.x)
				{
					float reg_m_a_x = 0.0f;
					float reg_m_a_y = 0.0f;
					float reg_m_a_z = 0.0f;

					unsigned int JointTripleNum, JointNodeOffset;
					unsigned int JointIndex;

					JointTripleNum = ptr_m_NodesJointsNum_00[i];
					JointNodeOffset = ptr_m_NodesJointsOffset_00[i];
					// NOTE(review): signed loop counter compared against unsigned
					// JointTripleNum (fine for small counts, flagged for awareness).
					for (int j = 0; j < JointTripleNum; ++j)
					{
						JointIndex = tex1Dfetch( texref_m_NodesJoints_00, JointNodeOffset + j );
						vec_component = ptr_m_lambda[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
						// Symmetric Jacobi scaling of the gathered component.
						vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

						reg_m_a_x += tex1Dfetch( texref_m_Asp_00, JointIndex ) * vec_component;
						reg_m_a_y += tex1Dfetch( texref_m_Asp_01, JointIndex ) * vec_component; 
						reg_m_a_z += tex1Dfetch( texref_m_Asp_02, JointIndex ) * vec_component;
					}

					JointTripleNum = ptr_m_NodesJointsNum_01[i];
					JointNodeOffset = ptr_m_NodesJointsOffset_01[i];
					for (int j = 0; j < JointTripleNum; ++j)
					{
						JointIndex = tex1Dfetch(texref_m_NodesJoints_01, JointNodeOffset + j);
						vec_component = ptr_m_lambda[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
						vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

						reg_m_a_x += tex1Dfetch( texref_m_Asp_03, JointIndex ) * vec_component;
						reg_m_a_y += tex1Dfetch( texref_m_Asp_04, JointIndex ) * vec_component;
						reg_m_a_z += tex1Dfetch( texref_m_Asp_05, JointIndex ) * vec_component;
					}

					JointTripleNum = ptr_m_NodesJointsNum_02[i];
					JointNodeOffset = ptr_m_NodesJointsOffset_02[i];
					for (int j = 0; j < JointTripleNum; ++j)
					{
						JointIndex = tex1Dfetch(texref_m_NodesJoints_02, JointNodeOffset + j);
						vec_component = ptr_m_lambda[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
						vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

						reg_m_a_x += tex1Dfetch( texref_m_Asp_06, JointIndex ) * vec_component;
						reg_m_a_y += tex1Dfetch( texref_m_Asp_07, JointIndex ) * vec_component; 
						reg_m_a_z += tex1Dfetch( texref_m_Asp_08, JointIndex ) * vec_component;
					}

					JointTripleNum = ptr_m_NodesJointsNum_03[i];
					JointNodeOffset = ptr_m_NodesJointsOffset_03[i];
					for (int j = 0; j < JointTripleNum; ++j)
					{
						JointIndex = tex1Dfetch(texref_m_NodesJoints_03, JointNodeOffset + j);
						vec_component = ptr_m_lambda[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
						vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

						reg_m_a_x += tex1Dfetch( texref_m_Asp_09, JointIndex ) * vec_component;
						reg_m_a_y += tex1Dfetch( texref_m_Asp_10, JointIndex ) * vec_component; 
						reg_m_a_z += tex1Dfetch( texref_m_Asp_11, JointIndex ) * vec_component;
					}

					JointTripleNum = ptr_m_NodesJointsNum_04[i];
					JointNodeOffset = ptr_m_NodesJointsOffset_04[i];
					for (int j = 0; j < JointTripleNum; ++j)
					{
						JointIndex = tex1Dfetch(texref_m_NodesJoints_04, JointNodeOffset + j);
						vec_component = ptr_m_lambda[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
						vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

						reg_m_a_x += tex1Dfetch( texref_m_Asp_12, JointIndex ) * vec_component;
						reg_m_a_y += tex1Dfetch( texref_m_Asp_13, JointIndex ) * vec_component; 
						reg_m_a_z += tex1Dfetch( texref_m_Asp_14, JointIndex ) * vec_component;
					}

					JointTripleNum = ptr_m_NodesJointsNum_05[i];
					JointNodeOffset = ptr_m_NodesJointsOffset_05[i];
					for (int j = 0; j < JointTripleNum; ++j)
					{
						JointIndex = tex1Dfetch(texref_m_NodesJoints_05, JointNodeOffset + j);
						vec_component = ptr_m_lambda[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
						vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

						reg_m_a_x += tex1Dfetch( texref_m_Asp_15, JointIndex ) * vec_component;
						reg_m_a_y += tex1Dfetch( texref_m_Asp_16, JointIndex ) * vec_component; 
						reg_m_a_z += tex1Dfetch( texref_m_Asp_17, JointIndex ) * vec_component;
					}

					ptr_m_a_x[i] = reg_m_a_x;
					ptr_m_a_y[i] = reg_m_a_y;
					ptr_m_a_z[i] = reg_m_a_z;
				}

				// Grid-wide barrier: a_x/y/z must be complete before J^T * a below.
				SYNCALL_BEGIN();
				SYNCALL_END();

				// Reset per-thread partials for the post-expansion norm computation.
				cutArray_PhiPhi[threadIdx.x] = 0.0f;
				cutArray_BetaNorm[threadIdx.x] = 0.0f;
				cutArray_GradNorm[threadIdx.x] = 0.0f;

				// Recompute the gradient g = J^T * a + CFM .* lambda - RHS per joint,
				// classify against the bounds (same criteria as the CG branch), set
				// the new direction p = phi(g), and accumulate the norm partials.
				// Reduce huge array into several (known number) fixed-size subarrays
				for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumJoints; i += blockDim.x * gridDim.x)
				{
					float interm_lambda = ptr_m_lambda[i];
					vec_component = interm_lambda;

					// Indices of the six nodes this joint touches.
					unsigned int t0Idx = ptr_m_JtdNodes_00[i];
					unsigned int t1Idx = ptr_m_JtdNodes_01[i];
					unsigned int t2Idx = ptr_m_JtdNodes_02[i];
					unsigned int t3Idx = ptr_m_JtdNodes_03[i];
					unsigned int t4Idx = ptr_m_JtdNodes_04[i];
					unsigned int t5Idx = ptr_m_JtdNodes_05[i];

					// Row of J dotted with the gathered node accelerations.
					float reg_m_g = ptr_m_J_00[i] * ptr_m_a_x[t0Idx] + ptr_m_J_01[i] * ptr_m_a_y[t0Idx] + ptr_m_J_02[i] * ptr_m_a_z[t0Idx] +
									ptr_m_J_03[i] * ptr_m_a_x[t1Idx] + ptr_m_J_04[i] * ptr_m_a_y[t1Idx] + ptr_m_J_05[i] * ptr_m_a_z[t1Idx] +
									ptr_m_J_06[i] * ptr_m_a_x[t2Idx] + ptr_m_J_07[i] * ptr_m_a_y[t2Idx] + ptr_m_J_08[i] * ptr_m_a_z[t2Idx] +
									ptr_m_J_09[i] * ptr_m_a_x[t3Idx] + ptr_m_J_10[i] * ptr_m_a_y[t3Idx] + ptr_m_J_11[i] * ptr_m_a_z[t3Idx] +
									ptr_m_J_12[i] * ptr_m_a_x[t4Idx] + ptr_m_J_13[i] * ptr_m_a_y[t4Idx] + ptr_m_J_14[i] * ptr_m_a_z[t4Idx] +
									ptr_m_J_15[i] * ptr_m_a_x[t5Idx] + ptr_m_J_16[i] * ptr_m_a_y[t5Idx] + ptr_m_J_17[i] * ptr_m_a_z[t5Idx];

					reg_m_g += ptr_m_CFM[i] * vec_component;
					reg_m_g -= ptr_m_RHS[i];

#if (LCPCG_PRECONDITIONER_LEFTJACOBI == 1)
					// Preconditioning
					reg_m_g *= ptr_m_invDiag[i];
#endif

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
					reg_m_g *= ptr_m_invDiag[i];
#endif

					ptr_m_g[i] = reg_m_g;

#if (LCPCG_PRECONDITIONER_FACEJACOBI == 1)
					// Preconditioner in face:
					// p = z
					float reg_m_p = (ptr_m_invDiag[i] * reg_m_g);
#else
					float reg_m_p = reg_m_g;
#endif


					float interm_Lo = ptr_m_Lo[i];
					float interm_Hi = ptr_m_Hi[i];
					float tmp_float2 = interm_lambda - interm_Lo;
					float tmp_float3 = interm_lambda - interm_Hi;
					
					// Active at lower bound: direction is zeroed; chopped-gradient
					// contribution only for a negative gradient component.
					if (abs(tmp_float2) < ACT_SET_THRESHOLD)
					{
						reg_m_p = 0.0f;

						if (reg_m_g < 0.0f)
						{
#if (PROPORTIONING_DOUBLE_STEP == 1)

							if (tmp_float3 / sh_alpha_fucked > reg_m_g)
							{
								cutArray_BetaNorm[threadIdx.x] += reg_m_g * tmp_float3 / sh_alpha_fucked;
							}
							else
							{
								cutArray_BetaNorm[threadIdx.x] += reg_m_g * reg_m_g;
							}

#else

							cutArray_BetaNorm[threadIdx.x] += reg_m_g * reg_m_g;

#endif

							cutArray_GradNorm[threadIdx.x] += reg_m_g * reg_m_g;
						}
					}
					// Active at upper bound: mirror case.
					else if (abs(tmp_float3) < ACT_SET_THRESHOLD)
					{
						reg_m_p = 0.0f;

						if (reg_m_g > 0.0f)
						{
#if (PROPORTIONING_DOUBLE_STEP == 1)

							if (tmp_float2 / sh_alpha_fucked < reg_m_g)
							{
								cutArray_BetaNorm[threadIdx.x] += reg_m_g * tmp_float2 / sh_alpha_fucked;
							}
							else
							{
								cutArray_BetaNorm[threadIdx.x] += reg_m_g * reg_m_g;
							}

#else

							cutArray_BetaNorm[threadIdx.x] += reg_m_g * reg_m_g;

#endif

							cutArray_GradNorm[threadIdx.x] += reg_m_g * reg_m_g;
						}
					}
					// Free component: reduced free-gradient contribution, capped by
					// the feasible step towards the bound the gradient points at.
					else
					{
						cutArray_GradNorm[threadIdx.x] += reg_m_g * reg_m_g;

						if (reg_m_g > 0.0f)
						{
							if ((tmp_float2 / sh_alpha_fucked) < reg_m_g)
							{
								cutArray_PhiPhi[threadIdx.x] += reg_m_g * tmp_float2 / sh_alpha_fucked;
							}
							else
							{
								cutArray_PhiPhi[threadIdx.x] += reg_m_g * reg_m_g;
							}
						}
						else if (reg_m_g < 0.0f)
						{
							if ((tmp_float3 / sh_alpha_fucked) > reg_m_g)
							{
								cutArray_PhiPhi[threadIdx.x] += reg_m_g * tmp_float3 / sh_alpha_fucked;
							}
							else
							{
								cutArray_PhiPhi[threadIdx.x] += reg_m_g * reg_m_g;
							}
						}
					}

					// Restart direction: p = phi(x) (zero on active components).
					ptr_m_p[i] = reg_m_p;
				}


				// Reduce 32 elements inside one warp
				// NOTE(review): same warp-synchronous assumption as the CG-branch
				// reduction (no volatile / __syncwarp) — pre-Volta only.
				if (threadInWarp < 16)
				{
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x + 16];
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  8];
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  4];
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  2];
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  1];

					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x + 16];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  8];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  4];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  2];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  1];

					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x + 16];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  8];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  4];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  2];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  1];
				}
				__syncthreads();

				// Reduce elements inside block: one partial per warp (slot 0 of each
				// 32-wide segment).
				if (threadIdx.x < warpsPerBlock)
				{
					warpReduceRes_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x * WARP_SIZE];
					warpReduceRes_BetaNorm[threadIdx.x] = cutArray_BetaNorm[threadIdx.x * WARP_SIZE];
					warpReduceRes_GradNorm[threadIdx.x] = cutArray_GradNorm[threadIdx.x * WARP_SIZE];
				}

				////////////////////////////////////////////////////////////////////////////
				// Reduce the per-warp partials (4 or 8 entries, chosen below)
				// (WARNING! garbage in warpReduceRes can ruin your life
				// check twice number of warps per block
				////////////////////////////////////////////////////////////////////////////
				// Compiler should resolve this if compile-time
				if (warpsPerBlock == 8)
				{
					if (threadIdx.x < 4)
					{
						REDUCE_4_BLOCK(warpReduceRes_PhiPhi);
						REDUCE_4_BLOCK(warpReduceRes_BetaNorm);
						REDUCE_4_BLOCK(warpReduceRes_GradNorm);
					}
				}
				else
				{
					if (threadIdx.x < 2)
					{
						REDUCE_2_BLOCK(warpReduceRes_PhiPhi);
						REDUCE_2_BLOCK(warpReduceRes_BetaNorm);
						REDUCE_2_BLOCK(warpReduceRes_GradNorm);
					}
				}

				// Write result of in-block reduce into global memory
				// (note: slot meanings differ from the CG branch — here results/1/2
				// carry PhiPhi/BetaNorm/GradNorm respectively).
				if (threadIdx.x == 0)
				{
					ptr_internal_results[blockIdx.x] = warpReduceRes_PhiPhi[0];
					ptr_internal_results_1[blockIdx.x] = warpReduceRes_BetaNorm[0];
					ptr_internal_results_2[blockIdx.x] = warpReduceRes_GradNorm[0];

					__threadfence();
				}

				// Grid-wide barrier; block 0 performs the final cross-block sum of
				// the three norm partials and publishes them to globals.
				SYNCALL_BEGIN()

					// ACTIONS INSIDE BLOCK 0/"CONTROL" HERE

					if (threadIdx.x < WARP_SIZE)
					{
						cutArray_PhiPhi[threadIdx.x] = 0.0f;
						cutArray_BetaNorm[threadIdx.x] = 0.0f;
						cutArray_GradNorm[threadIdx.x] = 0.0f;
					}
					
					// Fetch from global to shared memory (speed boost)
					// NOTE(review): assumes gridDim.x <= WARP_SIZE (32).
					if (threadIdx.x < gridDim.x)
					{
						cutArray_PhiPhi[threadIdx.x] = ptr_internal_results[threadIdx.x];
						cutArray_BetaNorm[threadIdx.x] = ptr_internal_results_1[threadIdx.x];
						cutArray_GradNorm[threadIdx.x] = ptr_internal_results_2[threadIdx.x];
					}

					// Reduce (number of multiprocessor is less than 32)
					// and empty cells in shared tmp are initialized to zero
					// so full-reduce can be done
					if (threadIdx.x < 16)
					{
						cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x + 16];
						cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  8];
						cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  4];
						cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  2];
						cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  1];

						cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x + 16];
						cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  8];
						cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  4];
						cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  2];
						cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  1];

						cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x + 16];
						cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  8];
						cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  4];
						cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  2];
						cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  1];

						// write result in thread 0, block 0
						if (threadIdx.x == 0)
						{
							BetaNorm = cutArray_BetaNorm[0];
							PhiPhi = cutArray_PhiPhi[0];

							GradNorm = cutArray_GradNorm[0];

							__threadfence();
						}
					}
					
				SYNCALL_END();
			}

			// Broadcast the globally reduced norms to this block's shared copies so
			// every thread can read them after the barrier.
			if (threadIdx.x == 0)
			{
				sh_BetaNorm = BetaNorm;
				sh_PhiPhi = PhiPhi;

				sh_GradNorm = GradNorm;
			}
			__syncthreads();

		}
		else
		{
			//////////////////////////////////////////////////////////////////////////
			// LCPCG: Proportioning Step
			// Build d = Beta(x^k), the chopped gradient restricted to components
			// sitting on a bound whose gradient points out of the feasible box.
			//////////////////////////////////////////////////////////////////////////

#if (PROPORTIONING_DOUBLE_STEP == 1)

			// For alpha_feasible: cutArray_PhiPhi is reused here as a per-thread
			// running MINIMUM of feasible step ratios (hence the FLT_MAX init).
			cutArray_PhiPhi[threadIdx.x] = CUDA_FLT_MAX;

			// Find vector d = Beta(x^k);
			for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumJoints; i += blockDim.x * gridDim.x)
			{
				ptr_m_d[i] = 0.0f;

				float tmp_float2 = ptr_m_lambda[i] - ptr_m_Lo[i];
				float tmp_float3 = ptr_m_lambda[i] - ptr_m_Hi[i];

				if (abs(tmp_float2) < ACT_SET_THRESHOLD)
				{
					// At lower bound: only a negative gradient is "chopped".
					float reg_m_g = ptr_m_g[i];
					if (reg_m_g > 0.0f)
					{
						reg_m_g = 0.0f;
					}
					else
					{
						ptr_m_d[i] = reg_m_g;

						// Feasible step to the opposite (Hi) bound along -d.
						// NOTE(review): reg_m_g can be exactly 0.0f in this branch
						// (only > 0 was excluded) — division by zero would inject
						// inf/NaN into the minimum; confirm upstream guarantees g != 0.
						if (tmp_float3 / reg_m_g < cutArray_PhiPhi[threadIdx.x])
						{
							cutArray_PhiPhi[threadIdx.x] = tmp_float3 / reg_m_g;
						}
					}
				}
				else if (abs(tmp_float3) < ACT_SET_THRESHOLD)
				{
					// At upper bound: mirror case (only a positive gradient counts).
					float reg_m_g = ptr_m_g[i];
					if (reg_m_g < 0.0f)
					{
						reg_m_g = 0.0f;
					}
					else
					{
						ptr_m_d[i] = reg_m_g;

						// NOTE(review): same potential division by zero as above.
						if (tmp_float2 / reg_m_g < cutArray_PhiPhi[threadIdx.x])
						{
							cutArray_PhiPhi[threadIdx.x] = tmp_float2 / reg_m_g;
						}
					}
				}
			}

			// Grid-wide barrier: d must be complete before a = A * d below.
			SYNCALL_BEGIN();
			SYNCALL_END();

			float vec_component;

			// Calculate a = A * d
			// Same per-node sparse gather as the expansion branch, reading d
			// instead of lambda (six texture-indexed joint-triple lists per node).
			for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumNodes; i += blockDim.x * gridDim.x)
			{
				float reg_m_a_x = 0.0f;
				float reg_m_a_y = 0.0f;
				float reg_m_a_z = 0.0f;

				unsigned int JointTripleNum, JointNodeOffset;
				unsigned int JointIndex;

				JointTripleNum = ptr_m_NodesJointsNum_00[i];
				JointNodeOffset = ptr_m_NodesJointsOffset_00[i];
				for (int j = 0; j < JointTripleNum; ++j)
				{
					JointIndex = tex1Dfetch( texref_m_NodesJoints_00, JointNodeOffset + j );
					vec_component = ptr_m_d[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
					// Symmetric Jacobi scaling of the gathered component.
					vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

					reg_m_a_x += tex1Dfetch( texref_m_Asp_00, JointIndex ) * vec_component;
					reg_m_a_y += tex1Dfetch( texref_m_Asp_01, JointIndex ) * vec_component; 
					reg_m_a_z += tex1Dfetch( texref_m_Asp_02, JointIndex ) * vec_component;
				}

				JointTripleNum = ptr_m_NodesJointsNum_01[i];
				JointNodeOffset = ptr_m_NodesJointsOffset_01[i];
				for (int j = 0; j < JointTripleNum; ++j)
				{
					JointIndex = tex1Dfetch(texref_m_NodesJoints_01, JointNodeOffset + j);
					vec_component = ptr_m_d[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
					vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

					reg_m_a_x += tex1Dfetch( texref_m_Asp_03, JointIndex ) * vec_component;
					reg_m_a_y += tex1Dfetch( texref_m_Asp_04, JointIndex ) * vec_component;
					reg_m_a_z += tex1Dfetch( texref_m_Asp_05, JointIndex ) * vec_component;
				}

				JointTripleNum = ptr_m_NodesJointsNum_02[i];
				JointNodeOffset = ptr_m_NodesJointsOffset_02[i];
				for (int j = 0; j < JointTripleNum; ++j)
				{
					JointIndex = tex1Dfetch(texref_m_NodesJoints_02, JointNodeOffset + j);
					vec_component = ptr_m_d[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
					vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

					reg_m_a_x += tex1Dfetch( texref_m_Asp_06, JointIndex ) * vec_component;
					reg_m_a_y += tex1Dfetch( texref_m_Asp_07, JointIndex ) * vec_component; 
					reg_m_a_z += tex1Dfetch( texref_m_Asp_08, JointIndex ) * vec_component;
				}

				JointTripleNum = ptr_m_NodesJointsNum_03[i];
				JointNodeOffset = ptr_m_NodesJointsOffset_03[i];
				for (int j = 0; j < JointTripleNum; ++j)
				{
					JointIndex = tex1Dfetch(texref_m_NodesJoints_03, JointNodeOffset + j);
					vec_component = ptr_m_d[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
					vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

					reg_m_a_x += tex1Dfetch( texref_m_Asp_09, JointIndex ) * vec_component;
					reg_m_a_y += tex1Dfetch( texref_m_Asp_10, JointIndex ) * vec_component; 
					reg_m_a_z += tex1Dfetch( texref_m_Asp_11, JointIndex ) * vec_component;
				}

				JointTripleNum = ptr_m_NodesJointsNum_04[i];
				JointNodeOffset = ptr_m_NodesJointsOffset_04[i];
				for (int j = 0; j < JointTripleNum; ++j)
				{
					JointIndex = tex1Dfetch(texref_m_NodesJoints_04, JointNodeOffset + j);
					vec_component = ptr_m_d[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
					vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

					reg_m_a_x += tex1Dfetch( texref_m_Asp_12, JointIndex ) * vec_component;
					reg_m_a_y += tex1Dfetch( texref_m_Asp_13, JointIndex ) * vec_component; 
					reg_m_a_z += tex1Dfetch( texref_m_Asp_14, JointIndex ) * vec_component;
				}

				JointTripleNum = ptr_m_NodesJointsNum_05[i];
				JointNodeOffset = ptr_m_NodesJointsOffset_05[i];
				for (int j = 0; j < JointTripleNum; ++j)
				{
					JointIndex = tex1Dfetch(texref_m_NodesJoints_05, JointNodeOffset + j);
					vec_component = ptr_m_d[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
					vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

					reg_m_a_x += tex1Dfetch( texref_m_Asp_15, JointIndex ) * vec_component;
					reg_m_a_y += tex1Dfetch( texref_m_Asp_16, JointIndex ) * vec_component; 
					reg_m_a_z += tex1Dfetch( texref_m_Asp_17, JointIndex ) * vec_component;
				}

				ptr_m_a_x[i] = reg_m_a_x;
				ptr_m_a_y[i] = reg_m_a_y;
				ptr_m_a_z[i] = reg_m_a_z;
			}

			// Grid-wide barrier: a_x/y/z must be complete before J * a below.
			SYNCALL_BEGIN();
			SYNCALL_END();

			// Reset per-thread partials (cutArray_PhiPhi keeps its running minimum
			// of feasible step ratios from the d-construction loop above).
			cutArray_BetaNorm[threadIdx.x] = 0.0f;
			cutArray_GradNorm[threadIdx.x] = 0.0f;
			
			// Calculate ad_vec = A * d = J * a
			// Calculate "cut arrays" for dot_dAd, dot_gd
			// Reduce huge array into several (known number) fixed-size subarrays
			for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumJoints; i += blockDim.x * gridDim.x)
			{
				float reg_m_d = ptr_m_d[i];

				// Indices of the six nodes this joint touches.
				unsigned int t0Idx = ptr_m_JtdNodes_00[i];
				unsigned int t1Idx = ptr_m_JtdNodes_01[i];
				unsigned int t2Idx = ptr_m_JtdNodes_02[i];
				unsigned int t3Idx = ptr_m_JtdNodes_03[i];
				unsigned int t4Idx = ptr_m_JtdNodes_04[i];
				unsigned int t5Idx = ptr_m_JtdNodes_05[i];

				float reg_m_ad_vec =
					ptr_m_J_00[i] * ptr_m_a_x[t0Idx] + ptr_m_J_01[i] * ptr_m_a_y[t0Idx] + ptr_m_J_02[i] * ptr_m_a_z[t0Idx] +
					ptr_m_J_03[i] * ptr_m_a_x[t1Idx] + ptr_m_J_04[i] * ptr_m_a_y[t1Idx] + ptr_m_J_05[i] * ptr_m_a_z[t1Idx] +
					ptr_m_J_06[i] * ptr_m_a_x[t2Idx] + ptr_m_J_07[i] * ptr_m_a_y[t2Idx] + ptr_m_J_08[i] * ptr_m_a_z[t2Idx] +
					ptr_m_J_09[i] * ptr_m_a_x[t3Idx] + ptr_m_J_10[i] * ptr_m_a_y[t3Idx] + ptr_m_J_11[i] * ptr_m_a_z[t3Idx] +
					ptr_m_J_12[i] * ptr_m_a_x[t4Idx] + ptr_m_J_13[i] * ptr_m_a_y[t4Idx] + ptr_m_J_14[i] * ptr_m_a_z[t4Idx] +
					ptr_m_J_15[i] * ptr_m_a_x[t5Idx] + ptr_m_J_16[i] * ptr_m_a_y[t5Idx] + ptr_m_J_17[i] * ptr_m_a_z[t5Idx];

				reg_m_ad_vec += ptr_m_CFM[i] * reg_m_d;

#if (LCPCG_PRECONDITIONER_LEFTJACOBI == 1)
				// Preconditioning
				reg_m_ad_vec *= ptr_m_invDiag[i];
#endif

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
				reg_m_ad_vec *= ptr_m_invDiag[i];
#endif

				ptr_m_ad_vec[i] = reg_m_ad_vec;

				// dot_dAd (accumulated in the GradNorm slot for this branch)
				cutArray_GradNorm[threadIdx.x] += reg_m_d * reg_m_ad_vec;
				// dot_gd (accumulated in the BetaNorm slot for this branch)
				cutArray_BetaNorm[threadIdx.x] += ptr_m_g[i] * reg_m_d;
			}


			// Reduce 32 elements inside one warp
			// NOTE(review): this tree reduction has no __syncwarp() between steps —
			// a pre-Volta warp-synchronous idiom; confirm the cutArray_* buffers are
			// declared volatile (or add __syncwarp) for independent-thread-scheduling
			// architectures.
			if (threadInWarp < 16)
			{
				// Sum-reduce dot_gd partials (stored in the BetaNorm scratch slots).
				cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x + 16];
				cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  8];
				cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  4];
				cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  2];
				cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  1];

				// Sum-reduce dot_dAd partials (stored in the GradNorm scratch slots).
				cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x + 16];
				cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  8];
				cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  4];
				cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  2];
				cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  1];

				// MINIMUM
				// Min-reduce the feasible-step-length candidates held in PhiPhi
				// slots (used later to clamp alpha_cg to alpha_feasible).
				if (cutArray_PhiPhi[threadIdx.x + 16] < cutArray_PhiPhi[threadIdx.x])
				{
					cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x + 16];
				}
				if (cutArray_PhiPhi[threadIdx.x +  8] < cutArray_PhiPhi[threadIdx.x])
				{
					cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  8];
				}
				if (cutArray_PhiPhi[threadIdx.x +  4] < cutArray_PhiPhi[threadIdx.x])
				{
					cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  4];
				}
				if (cutArray_PhiPhi[threadIdx.x +  2] < cutArray_PhiPhi[threadIdx.x])
				{
					cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  2];
				}
				if (cutArray_PhiPhi[threadIdx.x +  1] < cutArray_PhiPhi[threadIdx.x])
				{
					cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  1];
				}
			}
			__syncthreads();

			// Reduce elements inside block (warps per block: 4)
			// Each warp's result sits at slot (warpId * WARP_SIZE); gather those
			// into the small warpReduceRes_* arrays.
			if (threadIdx.x < warpsPerBlock)
			{
				warpReduceRes_BetaNorm[threadIdx.x] = cutArray_BetaNorm[threadIdx.x * WARP_SIZE];
				warpReduceRes_GradNorm[threadIdx.x] = cutArray_GradNorm[threadIdx.x * WARP_SIZE];
				warpReduceRes_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x * WARP_SIZE];
			}

			////////////////////////////////////////////////////////////////////////////
			// Reduce 4 elements
			// (WARNING! garbage in warpReduceRes can ruin your life
			// check twice number of warps per block
			////////////////////////////////////////////////////////////////////////////
			// Compiler should resolve this if compile-time
			if (warpsPerBlock == 8)
			{
				if (threadIdx.x < 4)
				{
					REDUCE_4_BLOCK(warpReduceRes_BetaNorm);
					REDUCE_4_BLOCK(warpReduceRes_GradNorm);

					REDUCE_MIN_4_BLOCK(warpReduceRes_PhiPhi);
				}
			}
			else
			{
				if (threadIdx.x < 2)
				{
					REDUCE_2_BLOCK(warpReduceRes_BetaNorm);
					REDUCE_2_BLOCK(warpReduceRes_GradNorm);

					REDUCE_MIN_2_BLOCK(warpReduceRes_PhiPhi);
				}
			}

			// Write result of in-block reduce into global memory
			// One slot per block; __threadfence makes the values visible to the
			// control block (block 0) that performs the final reduce after SYNCALL.
			if (threadIdx.x == 0)
			{
				ptr_internal_results[blockIdx.x] = warpReduceRes_BetaNorm[0];
				ptr_internal_results_1[blockIdx.x] = warpReduceRes_GradNorm[0];
				ptr_internal_results_2[blockIdx.x] = warpReduceRes_PhiPhi[0];

				__threadfence();
			}

			// Block-local copy of the HalfStep decision, broadcast below so every
			// thread can branch on it after the grid-wide sync.
			__shared__ volatile bool sh_HalfStep;

			// Region between SYNCALL_BEGIN and SYNCALL_END is executed by the
			// "control" block (blockIdx.x == 0) only; all other blocks idle in the
			// macro until the control block releases them.
			SYNCALL_BEGIN()

				// ACTIONS INSIDE BLOCK 0/"CONTROL" HERE

				// Neutral elements so the full 32-slot reduce below is valid even
				// when gridDim.x < 32: 0 for sums, +FLT_MAX for the min.
				if (threadIdx.x < WARP_SIZE)
				{
					cutArray_BetaNorm[threadIdx.x] = 0.0f;
					cutArray_GradNorm[threadIdx.x] = 0.0f;
					
					cutArray_PhiPhi[threadIdx.x] = CUDA_FLT_MAX;
				}
				
				// Fetch from global to shared memory (speed boost)
				// One per-block partial result per lane; assumes gridDim.x <= 32.
				if (threadIdx.x < gridDim.x)
				{
					cutArray_BetaNorm[threadIdx.x] = ptr_internal_results[threadIdx.x];
					cutArray_GradNorm[threadIdx.x] = ptr_internal_results_1[threadIdx.x];

					cutArray_PhiPhi[threadIdx.x] = ptr_internal_results_2[threadIdx.x];
				}

				// Reduce (number of multiprocessor is less than 32)
				// and empty cells in shared tmp are initialized to zero
				// so full-reduce can be done
				if (threadIdx.x < 16)
				{
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x + 16];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  8];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  4];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  2];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  1];

					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x + 16];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  8];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  4];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  2];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  1];

					// MINIMUM
					// Smallest feasible step length over all blocks.
					if (cutArray_PhiPhi[threadIdx.x + 16] < cutArray_PhiPhi[threadIdx.x])
					{
						cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x + 16];
					}
					if (cutArray_PhiPhi[threadIdx.x +  8] < cutArray_PhiPhi[threadIdx.x])
					{
						cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  8];
					}
					if (cutArray_PhiPhi[threadIdx.x +  4] < cutArray_PhiPhi[threadIdx.x])
					{
						cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  4];
					}
					if (cutArray_PhiPhi[threadIdx.x +  2] < cutArray_PhiPhi[threadIdx.x])
					{
						cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  2];
					}
					if (cutArray_PhiPhi[threadIdx.x +  1] < cutArray_PhiPhi[threadIdx.x])
					{
						cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  1];
					}

					// write result in thread 0, block 0
					if (threadIdx.x == 0)
					{
//						alpha_cg = dot_gd / dot_dAd;
						alpha_cg = cutArray_BetaNorm[0] / cutArray_GradNorm[0];

						HalfStep = false;

						// Set alpha to alpha_feasible if needed
						// If the unconstrained CG step would leave the feasible box,
						// clamp it and flag that a proportioning half-step follows.
						if (cutArray_PhiPhi[0] < alpha_cg)
						{
							alpha_cg = cutArray_PhiPhi[0];
							HalfStep = true;
						}

						// Publish the __device__ results before other blocks resume.
						__threadfence();
					}
				}
				
			SYNCALL_END();

			// Half-step branch: alpha_cg was clamped to alpha_feasible, so take the
			// clamped step, nudge components stuck on a bound (proportioning), and
			// project the result back onto [Lo, Hi].
			if (sh_HalfStep == true)
			{
				cutArray_PhiPhi[threadIdx.x] = 0.0f;
				cutArray_BetaNorm[threadIdx.x] = 0.0f;
				cutArray_GradNorm[threadIdx.x] = 0.0f;

				// Calculate:
				//		x_k+1/2 = x_k - alpha_cg * d;
				//		g = g - alpha_cg * A * d;
				//		x_k+1 = Proj(x_k+1/2 - alpha * Beta(x_k+1/2);
				// Calculate:
				// lambda -= alpha_cg * d;
				// g -= alpha_cg * (A * d);
				// p = phi(x^k);
				// "cut arrays" for ||Beta||, Phi dot Phi~, ||gradient||
				for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumJoints; i += blockDim.x * gridDim.x)
				{
					// Trial point x_{k+1/2}; kept in a register until projected below.
					float interm_lambda;
					interm_lambda = ptr_m_lambda[i] - sh_alpha_cg * ptr_m_d[i];

					// Gradient update reuses ad_vec = A*d computed earlier this pass.
					float reg_m_g;
					reg_m_g = ptr_m_g[i] - sh_alpha_cg * ptr_m_ad_vec[i];
					ptr_m_g[i] = reg_m_g;
					

					float interm_Lo = ptr_m_Lo[i];
					float interm_Hi = ptr_m_Hi[i];
					// Signed distances to the lower / upper bound.
					float tmp_float2 = interm_lambda - interm_Lo;
					float tmp_float3 = interm_lambda - interm_Hi;
					
					// NOTE(review): abs() on float operands — in CUDA C++ this picks
					// the float overload, but fabsf would be unambiguous; confirm.
					if (abs(tmp_float2) < ACT_SET_THRESHOLD)
					{
						// On the lower bound: step along -g only if g pushes the
						// component further below the bound (g < 0).
						if (reg_m_g < 0.0f)
						{
							interm_lambda -= sh_alpha_fucked * reg_m_g;
						}
					}
					else if (abs(tmp_float3) < ACT_SET_THRESHOLD)
					{
						// On the upper bound: symmetric case (g > 0).
						if (reg_m_g > 0.0f)
						{
							interm_lambda -= sh_alpha_fucked * reg_m_g;
						}
					}

					// Projection onto the box [Lo, Hi].
					if (interm_lambda < interm_Lo)
					{
						interm_lambda = interm_Lo;
					}
					else if (interm_lambda > interm_Hi)
					{
						interm_lambda = interm_Hi;
					}

					ptr_m_lambda[i] = interm_lambda;
				}


				// Calculate a = J * lambda
				// for gradient calculation
				for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumNodes; i += blockDim.x * gridDim.x)
				{
					float reg_m_a_x = 0.0f;
					float reg_m_a_y = 0.0f;
					float reg_m_a_z = 0.0f;

					unsigned int JointTripleNum, JointNodeOffset;
					unsigned int JointIndex;

					JointTripleNum = ptr_m_NodesJointsNum_00[i];
					JointNodeOffset = ptr_m_NodesJointsOffset_00[i];
					for (int j = 0; j < JointTripleNum; ++j)
					{
						JointIndex = tex1Dfetch( texref_m_NodesJoints_00, JointNodeOffset + j );
						vec_component = ptr_m_lambda[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
						vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

						reg_m_a_x += tex1Dfetch( texref_m_Asp_00, JointIndex ) * vec_component;
						reg_m_a_y += tex1Dfetch( texref_m_Asp_01, JointIndex ) * vec_component; 
						reg_m_a_z += tex1Dfetch( texref_m_Asp_02, JointIndex ) * vec_component;
					}

					JointTripleNum = ptr_m_NodesJointsNum_01[i];
					JointNodeOffset = ptr_m_NodesJointsOffset_01[i];
					for (int j = 0; j < JointTripleNum; ++j)
					{
						JointIndex = tex1Dfetch(texref_m_NodesJoints_01, JointNodeOffset + j);
						vec_component = ptr_m_lambda[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
						vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

						reg_m_a_x += tex1Dfetch( texref_m_Asp_03, JointIndex ) * vec_component;
						reg_m_a_y += tex1Dfetch( texref_m_Asp_04, JointIndex ) * vec_component;
						reg_m_a_z += tex1Dfetch( texref_m_Asp_05, JointIndex ) * vec_component;
					}

					JointTripleNum = ptr_m_NodesJointsNum_02[i];
					JointNodeOffset = ptr_m_NodesJointsOffset_02[i];
					for (int j = 0; j < JointTripleNum; ++j)
					{
						JointIndex = tex1Dfetch(texref_m_NodesJoints_02, JointNodeOffset + j);
						vec_component = ptr_m_lambda[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
						vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

						reg_m_a_x += tex1Dfetch( texref_m_Asp_06, JointIndex ) * vec_component;
						reg_m_a_y += tex1Dfetch( texref_m_Asp_07, JointIndex ) * vec_component; 
						reg_m_a_z += tex1Dfetch( texref_m_Asp_08, JointIndex ) * vec_component;
					}

					JointTripleNum = ptr_m_NodesJointsNum_03[i];
					JointNodeOffset = ptr_m_NodesJointsOffset_03[i];
					for (int j = 0; j < JointTripleNum; ++j)
					{
						JointIndex = tex1Dfetch(texref_m_NodesJoints_03, JointNodeOffset + j);
						vec_component = ptr_m_lambda[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
						vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

						reg_m_a_x += tex1Dfetch( texref_m_Asp_09, JointIndex ) * vec_component;
						reg_m_a_y += tex1Dfetch( texref_m_Asp_10, JointIndex ) * vec_component; 
						reg_m_a_z += tex1Dfetch( texref_m_Asp_11, JointIndex ) * vec_component;
					}

					JointTripleNum = ptr_m_NodesJointsNum_04[i];
					JointNodeOffset = ptr_m_NodesJointsOffset_04[i];
					for (int j = 0; j < JointTripleNum; ++j)
					{
						JointIndex = tex1Dfetch(texref_m_NodesJoints_04, JointNodeOffset + j);
						vec_component = ptr_m_lambda[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
						vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

						reg_m_a_x += tex1Dfetch( texref_m_Asp_12, JointIndex ) * vec_component;
						reg_m_a_y += tex1Dfetch( texref_m_Asp_13, JointIndex ) * vec_component; 
						reg_m_a_z += tex1Dfetch( texref_m_Asp_14, JointIndex ) * vec_component;
					}

					JointTripleNum = ptr_m_NodesJointsNum_05[i];
					JointNodeOffset = ptr_m_NodesJointsOffset_05[i];
					for (int j = 0; j < JointTripleNum; ++j)
					{
						JointIndex = tex1Dfetch(texref_m_NodesJoints_05, JointNodeOffset + j);
						vec_component = ptr_m_lambda[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
						vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

						reg_m_a_x += tex1Dfetch( texref_m_Asp_15, JointIndex ) * vec_component;
						reg_m_a_y += tex1Dfetch( texref_m_Asp_16, JointIndex ) * vec_component; 
						reg_m_a_z += tex1Dfetch( texref_m_Asp_17, JointIndex ) * vec_component;
					}

					ptr_m_a_x[i] = reg_m_a_x;
					ptr_m_a_y[i] = reg_m_a_y;
					ptr_m_a_z[i] = reg_m_a_z;
				}

				// Grid-wide barrier: the a = J * lambda refresh above must be
				// complete in every block before the gather below reads ptr_m_a_*.
				SYNCALL_BEGIN();
				SYNCALL_END();

				// Calculate second Half-Step of double-step proportion:
				//		g = A * x_k+1 - b;
				//		p = phi(x_k+1);
				//		recalculate BetaNorm, Phi_phi

				cutArray_PhiPhi[threadIdx.x] = 0.0f;
				cutArray_BetaNorm[threadIdx.x] = 0.0f;
				cutArray_GradNorm[threadIdx.x] = 0.0f;

				// Reduce huge array into several (known number) fixed-size subarrays
				for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumJoints; i += blockDim.x * gridDim.x)
				{
					float interm_lambda = ptr_m_lambda[i];

					unsigned int t0Idx = ptr_m_JtdNodes_00[i];
					unsigned int t1Idx = ptr_m_JtdNodes_01[i];
					unsigned int t2Idx = ptr_m_JtdNodes_02[i];
					unsigned int t3Idx = ptr_m_JtdNodes_03[i];
					unsigned int t4Idx = ptr_m_JtdNodes_04[i];
					unsigned int t5Idx = ptr_m_JtdNodes_05[i];

					// Recompute the gradient from scratch: g = J*a + CFM*lambda - RHS.
					float reg_m_g = ptr_m_J_00[i] * ptr_m_a_x[t0Idx] + ptr_m_J_01[i] * ptr_m_a_y[t0Idx] + ptr_m_J_02[i] * ptr_m_a_z[t0Idx] +
									ptr_m_J_03[i] * ptr_m_a_x[t1Idx] + ptr_m_J_04[i] * ptr_m_a_y[t1Idx] + ptr_m_J_05[i] * ptr_m_a_z[t1Idx] +
									ptr_m_J_06[i] * ptr_m_a_x[t2Idx] + ptr_m_J_07[i] * ptr_m_a_y[t2Idx] + ptr_m_J_08[i] * ptr_m_a_z[t2Idx] +
									ptr_m_J_09[i] * ptr_m_a_x[t3Idx] + ptr_m_J_10[i] * ptr_m_a_y[t3Idx] + ptr_m_J_11[i] * ptr_m_a_z[t3Idx] +
									ptr_m_J_12[i] * ptr_m_a_x[t4Idx] + ptr_m_J_13[i] * ptr_m_a_y[t4Idx] + ptr_m_J_14[i] * ptr_m_a_z[t4Idx] +
									ptr_m_J_15[i] * ptr_m_a_x[t5Idx] + ptr_m_J_16[i] * ptr_m_a_y[t5Idx] + ptr_m_J_17[i] * ptr_m_a_z[t5Idx];

					reg_m_g += ptr_m_CFM[i] * interm_lambda;
					reg_m_g -= ptr_m_RHS[i];

#if (LCPCG_PRECONDITIONER_LEFTJACOBI == 1)
					// Preconditioning
					reg_m_g *= ptr_m_invDiag[i];
#endif

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
					reg_m_g *= ptr_m_invDiag[i];
#endif

					ptr_m_g[i] = reg_m_g;

#if (LCPCG_PRECONDITIONER_FACEJACOBI == 1)
					// Preconditioner in face:
					// p = z
					float reg_m_p = (ptr_m_invDiag[i] * reg_m_g);
#else
					// p starts as the full gradient and is zeroed below on the
					// active set (free gradient phi).
					float reg_m_p = reg_m_g;
#endif


					float interm_Lo = ptr_m_Lo[i];
					float interm_Hi = ptr_m_Hi[i];
					// Signed distances to the lower / upper bound.
					float tmp_float2 = interm_lambda - interm_Lo;
					float tmp_float3 = interm_lambda - interm_Hi;
					
					if (abs(tmp_float2) < ACT_SET_THRESHOLD)
					{
						// Active at the lower bound: free gradient is zero here.
						reg_m_p = 0.0f;

						if (reg_m_g < 0.0f)
						{
#if (PROPORTIONING_DOUBLE_STEP == 1)

							// ||Beta|| contribution, capped by the step that would
							// reach the opposite bound (tmp_float3 / alpha).
							if (tmp_float3 / sh_alpha_fucked > reg_m_g)
							{
								cutArray_BetaNorm[threadIdx.x] += reg_m_g * tmp_float3 / sh_alpha_fucked;
							}
							else
							{
								cutArray_BetaNorm[threadIdx.x] += reg_m_g * reg_m_g;
							}

#else

							cutArray_BetaNorm[threadIdx.x] += reg_m_g * reg_m_g;

#endif

							cutArray_GradNorm[threadIdx.x] += reg_m_g * reg_m_g;
						}
					}
					else if (abs(tmp_float3) < ACT_SET_THRESHOLD)
					{
						// Active at the upper bound: symmetric case.
						reg_m_p = 0.0f;

						if (reg_m_g > 0.0f)
						{
#if (PROPORTIONING_DOUBLE_STEP == 1)

							if (tmp_float2 / sh_alpha_fucked < reg_m_g)
							{
								cutArray_BetaNorm[threadIdx.x] += reg_m_g * tmp_float2 / sh_alpha_fucked;
							}
							else
							{
								cutArray_BetaNorm[threadIdx.x] += reg_m_g * reg_m_g;
							}

#else

							cutArray_BetaNorm[threadIdx.x] += reg_m_g * reg_m_g;

#endif

							cutArray_GradNorm[threadIdx.x] += reg_m_g * reg_m_g;
						}
					}
					else
					{
						// Free component: full gradient counts toward ||g||, and the
						// Phi dot Phi~ term is the gradient against its bound-limited
						// counterpart (capped by distance-to-bound over alpha).
						cutArray_GradNorm[threadIdx.x] += reg_m_g * reg_m_g;

						if (reg_m_g > 0.0f)
						{
							if ((tmp_float2 / sh_alpha_fucked) < reg_m_g)
							{
								cutArray_PhiPhi[threadIdx.x] += reg_m_g * tmp_float2 / sh_alpha_fucked;
							}
							else
							{
								cutArray_PhiPhi[threadIdx.x] += reg_m_g * reg_m_g;
							}
						}
						else if (reg_m_g < 0.0f)
						{
							if ((tmp_float3 / sh_alpha_fucked) > reg_m_g)
							{
								cutArray_PhiPhi[threadIdx.x] += reg_m_g * tmp_float3 / sh_alpha_fucked;
							}
							else
							{
								cutArray_PhiPhi[threadIdx.x] += reg_m_g * reg_m_g;
							}
						}
					}

					ptr_m_p[i] = reg_m_p;
				}


				// Reduce 32 elements inside one warp
				if (threadInWarp < 16)
				{
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x + 16];
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  8];
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  4];
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  2];
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  1];

					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x + 16];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  8];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  4];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  2];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  1];

					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x + 16];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  8];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  4];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  2];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  1];
				}
				__syncthreads();

				// Reduce elements inside block (warps per block: 4)
				if (threadIdx.x < warpsPerBlock)
				{
					warpReduceRes_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x * WARP_SIZE];
					warpReduceRes_BetaNorm[threadIdx.x] = cutArray_BetaNorm[threadIdx.x * WARP_SIZE];
					warpReduceRes_GradNorm[threadIdx.x] = cutArray_GradNorm[threadIdx.x * WARP_SIZE];
				}

				////////////////////////////////////////////////////////////////////////////
				// Reduce 4 elements
				// (WARNING! garbage in warpReduceRes can ruin your life
				// check twice number of warps per block
				////////////////////////////////////////////////////////////////////////////
				// Compiler should resolve this if compile-time
				if (warpsPerBlock == 8)
				{
					if (threadIdx.x < 4)
					{
						REDUCE_4_BLOCK(warpReduceRes_PhiPhi);
						REDUCE_4_BLOCK(warpReduceRes_BetaNorm);
						REDUCE_4_BLOCK(warpReduceRes_GradNorm);
					}
				}
				else
				{
					if (threadIdx.x < 2)
					{
						REDUCE_2_BLOCK(warpReduceRes_PhiPhi);
						REDUCE_2_BLOCK(warpReduceRes_BetaNorm);
						REDUCE_2_BLOCK(warpReduceRes_GradNorm);
					}
				}

				// Write result of in-block reduce into global memory
				if (threadIdx.x == 0)
				{
					ptr_internal_results[blockIdx.x] = warpReduceRes_PhiPhi[0];
					ptr_internal_results_1[blockIdx.x] = warpReduceRes_BetaNorm[0];
					ptr_internal_results_2[blockIdx.x] = warpReduceRes_GradNorm[0];

					__threadfence();
				}

				SYNCALL_BEGIN()

					// ACTIONS INSIDE BLOCK 0/"CONTROL" HERE

					if (threadIdx.x < WARP_SIZE)
					{
						cutArray_PhiPhi[threadIdx.x] = 0.0f;
						cutArray_BetaNorm[threadIdx.x] = 0.0f;
						cutArray_GradNorm[threadIdx.x] = 0.0f;
					}
					
					// Fetch from global to shared memory (speed boost)
					if (threadIdx.x < gridDim.x)
					{
						cutArray_PhiPhi[threadIdx.x] = ptr_internal_results[threadIdx.x];
						cutArray_BetaNorm[threadIdx.x] = ptr_internal_results_1[threadIdx.x];
						cutArray_GradNorm[threadIdx.x] = ptr_internal_results_2[threadIdx.x];
					}

					// Reduce (number of multiprocessor is less than 32)
					// and empty cells in shared tmp are initialized to zero
					// so full-reduce can be done
					if (threadIdx.x < 16)
					{
						cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x + 16];
						cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  8];
						cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  4];
						cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  2];
						cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  1];

						cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x + 16];
						cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  8];
						cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  4];
						cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  2];
						cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  1];

						cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x + 16];
						cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  8];
						cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  4];
						cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  2];
						cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  1];

						// write result in thread 0, block 0
						if (threadIdx.x == 0)
						{
							BetaNorm = cutArray_BetaNorm[0];
							PhiPhi = cutArray_PhiPhi[0];
				
							GradNorm = cutArray_GradNorm[0];

							__threadfence();
						}
					}
					
				SYNCALL_END();

				// Transfer calculated values from __device__ to __shared__ memory [speed boost]
				if (threadIdx.x == 0)
				{
					sh_BetaNorm = BetaNorm;
					sh_PhiPhi = PhiPhi;

					sh_GradNorm = GradNorm;
				}
				__syncthreads();			
			}
			else
			{
				cutArray_PhiPhi[threadIdx.x] = 0.0f;
				cutArray_BetaNorm[threadIdx.x] = 0.0f;
				cutArray_GradNorm[threadIdx.x] = 0.0f;

				// Calculate:
				// lambda -= alpha_cg * d;
				// g -= alpha_cg * (A * d);
				// p = phi(x^k);
				// "cut arrays" for ||Beta||, Phi dot Phi~, ||gradient||
				for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumJoints; i += blockDim.x * gridDim.x)
				{
					float interm_lambda;
					interm_lambda = ptr_m_lambda[i] - sh_alpha_cg * ptr_m_d[i];
					ptr_m_lambda[i] = interm_lambda;

					float reg_m_g;
					reg_m_g = ptr_m_g[i] - sh_alpha_cg * ptr_m_ad_vec[i];
					ptr_m_g[i] = reg_m_g;
					

#if (LCPCG_PRECONDITIONER_FACEJACOBI == 1)
					// Preconditioner in face:
					// p = z
					float reg_m_p = (ptr_m_invDiag[i] * reg_m_g);
#else
					float reg_m_p = reg_m_g;
#endif


					float interm_Lo = ptr_m_Lo[i];
					float interm_Hi = ptr_m_Hi[i];
					float tmp_float2 = interm_lambda - interm_Lo;
					float tmp_float3 = interm_lambda - interm_Hi;
					
					if (abs(tmp_float2) < ACT_SET_THRESHOLD)
					{
						reg_m_p = 0.0f;

						if (reg_m_g < 0.0f)
						{
							if (tmp_float3 / sh_alpha_fucked > reg_m_g)
							{
								cutArray_BetaNorm[threadIdx.x] += reg_m_g * tmp_float3 / sh_alpha_fucked;
							}
							else
							{
								cutArray_BetaNorm[threadIdx.x] += reg_m_g * reg_m_g;
							}

							cutArray_GradNorm[threadIdx.x] += reg_m_g * reg_m_g;
						}
					}
					else if (abs(tmp_float3) < ACT_SET_THRESHOLD)
					{
						reg_m_p = 0.0f;

						if (reg_m_g > 0.0f)
						{
							if (tmp_float2 / sh_alpha_fucked < reg_m_g)
							{
								cutArray_BetaNorm[threadIdx.x] += reg_m_g * tmp_float2 / sh_alpha_fucked;
							}
							else
							{
								cutArray_BetaNorm[threadIdx.x] += reg_m_g * reg_m_g;
							}

							cutArray_GradNorm[threadIdx.x] += reg_m_g * reg_m_g;
						}
					}
					else
					{
						cutArray_GradNorm[threadIdx.x] += reg_m_g * reg_m_g;

						if (reg_m_g > 0.0f)
						{
							if ((tmp_float2 / sh_alpha_fucked) < reg_m_g)
							{
								cutArray_PhiPhi[threadIdx.x] += reg_m_g * tmp_float2 / sh_alpha_fucked;
							}
							else
							{
								cutArray_PhiPhi[threadIdx.x] += reg_m_g * reg_m_g;
							}
						}
						else if (reg_m_g < 0.0f)
						{
							if ((tmp_float3 / sh_alpha_fucked) > reg_m_g)
							{
								cutArray_PhiPhi[threadIdx.x] += reg_m_g * tmp_float3 / sh_alpha_fucked;
							}
							else
							{
								cutArray_PhiPhi[threadIdx.x] += reg_m_g * reg_m_g;
							}
						}
					}

					ptr_m_p[i] = reg_m_p;
				}

				// Reduce 32 elements inside one warp
				if (threadInWarp < 16)
				{
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x + 16];
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  8];
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  4];
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  2];
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  1];

					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x + 16];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  8];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  4];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  2];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  1];

					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x + 16];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  8];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  4];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  2];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  1];
				}
				__syncthreads();

				// Reduce elements inside block (warps per block: 4)
				if (threadIdx.x < warpsPerBlock)
				{
					warpReduceRes_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x * WARP_SIZE];
					warpReduceRes_BetaNorm[threadIdx.x] = cutArray_BetaNorm[threadIdx.x * WARP_SIZE];
					warpReduceRes_GradNorm[threadIdx.x] = cutArray_GradNorm[threadIdx.x * WARP_SIZE];
				}

				////////////////////////////////////////////////////////////////////////////
				// Reduce 4 elements
				// (WARNING! garbage in warpReduceRes can ruin your life
				// check twice number of warps per block
				////////////////////////////////////////////////////////////////////////////
				// Compiler should resolve this if compile-time
				if (warpsPerBlock == 8)
				{
					if (threadIdx.x < 4)
					{
						REDUCE_4_BLOCK(warpReduceRes_PhiPhi);
						REDUCE_4_BLOCK(warpReduceRes_BetaNorm);
						REDUCE_4_BLOCK(warpReduceRes_GradNorm);
					}
				}
				else
				{
					if (threadIdx.x < 2)
					{
						REDUCE_2_BLOCK(warpReduceRes_PhiPhi);
						REDUCE_2_BLOCK(warpReduceRes_BetaNorm);
						REDUCE_2_BLOCK(warpReduceRes_GradNorm);
					}
				}

				// Write result of in-block reduce into global memory
				if (threadIdx.x == 0)
				{
					ptr_internal_results[blockIdx.x] = warpReduceRes_PhiPhi[0];
					ptr_internal_results_1[blockIdx.x] = warpReduceRes_BetaNorm[0];
					ptr_internal_results_2[blockIdx.x] = warpReduceRes_GradNorm[0];

					__threadfence();
				}

				SYNCALL_BEGIN()

					// ACTIONS INSIDE BLOCK 0/"CONTROL" HERE

					if (threadIdx.x < WARP_SIZE)
					{
						cutArray_PhiPhi[threadIdx.x] = 0.0f;
						cutArray_BetaNorm[threadIdx.x] = 0.0f;
						cutArray_GradNorm[threadIdx.x] = 0.0f;
					}
					
					// Fetch from global to shared memory (speed boost)
					if (threadIdx.x < gridDim.x)
					{
						cutArray_PhiPhi[threadIdx.x] = ptr_internal_results[threadIdx.x];
						cutArray_BetaNorm[threadIdx.x] = ptr_internal_results_1[threadIdx.x];
						cutArray_GradNorm[threadIdx.x] = ptr_internal_results_2[threadIdx.x];
					}

					// Reduce (number of multiprocessor is less than 32)
					// and empty cells in shared tmp are initialized to zero
					// so full-reduce can be done
					if (threadIdx.x < 16)
					{
						cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x + 16];
						cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  8];
						cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  4];
						cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  2];
						cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  1];

						cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x + 16];
						cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  8];
						cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  4];
						cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  2];
						cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  1];

						cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x + 16];
						cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  8];
						cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  4];
						cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  2];
						cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  1];

						// write result in thread 0, block 0
						if (threadIdx.x == 0)
						{
							BetaNorm = cutArray_BetaNorm[0];
							PhiPhi = cutArray_PhiPhi[0];
				
							GradNorm = cutArray_GradNorm[0];

							__threadfence();
						}
					}
					
				SYNCALL_END();

				// Transfer calculated values from __device__ to __shared__ memory [speed boost]
				if (threadIdx.x == 0)
				{
					sh_BetaNorm = BetaNorm;
					sh_PhiPhi = PhiPhi;

					sh_GradNorm = GradNorm;
				}
				__syncthreads();
			}

// Alternate branch of an #if that begins above this view — presumably the
// proportioning-step path (vs. the CG-step path above); confirm against the
// matching #if.
#else

			// For alpha_feasible
			// Per-thread running MINIMUM of feasible step-length candidates.
			cutArray_PhiPhi[threadIdx.x] = CUDA_FLT_MAX;

			// Find vector d = Beta(x^k);
			// d is the "chopped" gradient: nonzero only on components sitting on a
			// bound whose gradient points out of the feasible box.
			for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumJoints; i += blockDim.x * gridDim.x)
			{
				ptr_m_d[i] = 0.0f;

				// Signed distances to the lower / upper bound.
				float tmp_float2 = ptr_m_lambda[i] - ptr_m_Lo[i];
				float tmp_float3 = ptr_m_lambda[i] - ptr_m_Hi[i];

				if (abs(tmp_float2) < ACT_SET_THRESHOLD)
				{
					// Component on the lower bound.
					float reg_m_g = ptr_m_g[i];
					if (reg_m_g > 0.0f)
					{
						reg_m_g = 0.0f;
					}
					else
					{
						ptr_m_d[i] = reg_m_g;

						// Candidate max step before hitting the opposite (upper)
						// bound: (lambda - Hi) / g, both factors non-positive.
						// NOTE(review): if reg_m_g == 0.0f this path divides by
						// zero (inf/NaN candidate) — confirm g == 0 cannot reach
						// here, or that the min-reduce tolerates it.
						if (tmp_float3 / reg_m_g < cutArray_PhiPhi[threadIdx.x])
						{
							cutArray_PhiPhi[threadIdx.x] = tmp_float3 / reg_m_g;
						}
					}
				}
				else if (abs(tmp_float3) < ACT_SET_THRESHOLD)
				{
					// Component on the upper bound: symmetric case.
					float reg_m_g = ptr_m_g[i];
					if (reg_m_g < 0.0f)
					{
						reg_m_g = 0.0f;
					}
					else
					{
						ptr_m_d[i] = reg_m_g;

						if (tmp_float2 / reg_m_g < cutArray_PhiPhi[threadIdx.x])
						{
							cutArray_PhiPhi[threadIdx.x] = tmp_float2 / reg_m_g;
						}
					}
				}
			}

			// Grid-wide barrier: d must be fully written before the SpMV below
			// reads it from every block.
			SYNCALL_BEGIN();
			SYNCALL_END();

			float vec_component;

			// Calculate a = A * d
			for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumNodes; i += blockDim.x * gridDim.x)
			{
				float reg_m_a_x = 0.0f;
				float reg_m_a_y = 0.0f;
				float reg_m_a_z = 0.0f;

				unsigned int JointTripleNum, JointNodeOffset;
				unsigned int JointIndex;

				JointTripleNum = ptr_m_NodesJointsNum_00[i];
				JointNodeOffset = ptr_m_NodesJointsOffset_00[i];
				for (int j = 0; j < JointTripleNum; ++j)
				{
					JointIndex = tex1Dfetch( texref_m_NodesJoints_00, JointNodeOffset + j );
					vec_component = ptr_m_d[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
					vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

					reg_m_a_x += tex1Dfetch( texref_m_Asp_00, JointIndex ) * vec_component;
					reg_m_a_y += tex1Dfetch( texref_m_Asp_01, JointIndex ) * vec_component; 
					reg_m_a_z += tex1Dfetch( texref_m_Asp_02, JointIndex ) * vec_component;
				}

				JointTripleNum = ptr_m_NodesJointsNum_01[i];
				JointNodeOffset = ptr_m_NodesJointsOffset_01[i];
				for (int j = 0; j < JointTripleNum; ++j)
				{
					JointIndex = tex1Dfetch(texref_m_NodesJoints_01, JointNodeOffset + j);
					vec_component = ptr_m_d[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
					vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

					reg_m_a_x += tex1Dfetch( texref_m_Asp_03, JointIndex ) * vec_component;
					reg_m_a_y += tex1Dfetch( texref_m_Asp_04, JointIndex ) * vec_component;
					reg_m_a_z += tex1Dfetch( texref_m_Asp_05, JointIndex ) * vec_component;
				}

				JointTripleNum = ptr_m_NodesJointsNum_02[i];
				JointNodeOffset = ptr_m_NodesJointsOffset_02[i];
				for (int j = 0; j < JointTripleNum; ++j)
				{
					JointIndex = tex1Dfetch(texref_m_NodesJoints_02, JointNodeOffset + j);
					vec_component = ptr_m_d[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
					vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

					reg_m_a_x += tex1Dfetch( texref_m_Asp_06, JointIndex ) * vec_component;
					reg_m_a_y += tex1Dfetch( texref_m_Asp_07, JointIndex ) * vec_component; 
					reg_m_a_z += tex1Dfetch( texref_m_Asp_08, JointIndex ) * vec_component;
				}

				JointTripleNum = ptr_m_NodesJointsNum_03[i];
				JointNodeOffset = ptr_m_NodesJointsOffset_03[i];
				for (int j = 0; j < JointTripleNum; ++j)
				{
					JointIndex = tex1Dfetch(texref_m_NodesJoints_03, JointNodeOffset + j);
					vec_component = ptr_m_d[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
					vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

					reg_m_a_x += tex1Dfetch( texref_m_Asp_09, JointIndex ) * vec_component;
					reg_m_a_y += tex1Dfetch( texref_m_Asp_10, JointIndex ) * vec_component; 
					reg_m_a_z += tex1Dfetch( texref_m_Asp_11, JointIndex ) * vec_component;
				}

				JointTripleNum = ptr_m_NodesJointsNum_04[i];
				JointNodeOffset = ptr_m_NodesJointsOffset_04[i];
				for (int j = 0; j < JointTripleNum; ++j)
				{
					JointIndex = tex1Dfetch(texref_m_NodesJoints_04, JointNodeOffset + j);
					vec_component = ptr_m_d[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
					vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

					reg_m_a_x += tex1Dfetch( texref_m_Asp_12, JointIndex ) * vec_component;
					reg_m_a_y += tex1Dfetch( texref_m_Asp_13, JointIndex ) * vec_component; 
					reg_m_a_z += tex1Dfetch( texref_m_Asp_14, JointIndex ) * vec_component;
				}

				JointTripleNum = ptr_m_NodesJointsNum_05[i];
				JointNodeOffset = ptr_m_NodesJointsOffset_05[i];
				for (int j = 0; j < JointTripleNum; ++j)
				{
					JointIndex = tex1Dfetch(texref_m_NodesJoints_05, JointNodeOffset + j);
					vec_component = ptr_m_d[JointIndex];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
					vec_component *= tex1Dfetch( texref_m_invDiag, JointIndex );
#endif

					reg_m_a_x += tex1Dfetch( texref_m_Asp_15, JointIndex ) * vec_component;
					reg_m_a_y += tex1Dfetch( texref_m_Asp_16, JointIndex ) * vec_component; 
					reg_m_a_z += tex1Dfetch( texref_m_Asp_17, JointIndex ) * vec_component;
				}

				ptr_m_a_x[i] = reg_m_a_x;
				ptr_m_a_y[i] = reg_m_a_y;
				ptr_m_a_z[i] = reg_m_a_z;
			}

			SYNCALL_BEGIN();
			SYNCALL_END();

			cutArray_BetaNorm[threadIdx.x] = 0.0f;
			cutArray_GradNorm[threadIdx.x] = 0.0f;
			
			// Calculate ad_vec = A * d = J * a
			// Calculate "cut arrays" for dot_dAd, dot_gd
			// Reduce huge array into several (known number) fixed-size subarrays
			for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumJoints; i += blockDim.x * gridDim.x)
			{
				float reg_m_d = ptr_m_d[i];

				unsigned int t0Idx = ptr_m_JtdNodes_00[i];
				unsigned int t1Idx = ptr_m_JtdNodes_01[i];
				unsigned int t2Idx = ptr_m_JtdNodes_02[i];
				unsigned int t3Idx = ptr_m_JtdNodes_03[i];
				unsigned int t4Idx = ptr_m_JtdNodes_04[i];
				unsigned int t5Idx = ptr_m_JtdNodes_05[i];

				float reg_m_ad_vec =
					ptr_m_J_00[i] * ptr_m_a_x[t0Idx] + ptr_m_J_01[i] * ptr_m_a_y[t0Idx] + ptr_m_J_02[i] * ptr_m_a_z[t0Idx] +
					ptr_m_J_03[i] * ptr_m_a_x[t1Idx] + ptr_m_J_04[i] * ptr_m_a_y[t1Idx] + ptr_m_J_05[i] * ptr_m_a_z[t1Idx] +
					ptr_m_J_06[i] * ptr_m_a_x[t2Idx] + ptr_m_J_07[i] * ptr_m_a_y[t2Idx] + ptr_m_J_08[i] * ptr_m_a_z[t2Idx] +
					ptr_m_J_09[i] * ptr_m_a_x[t3Idx] + ptr_m_J_10[i] * ptr_m_a_y[t3Idx] + ptr_m_J_11[i] * ptr_m_a_z[t3Idx] +
					ptr_m_J_12[i] * ptr_m_a_x[t4Idx] + ptr_m_J_13[i] * ptr_m_a_y[t4Idx] + ptr_m_J_14[i] * ptr_m_a_z[t4Idx] +
					ptr_m_J_15[i] * ptr_m_a_x[t5Idx] + ptr_m_J_16[i] * ptr_m_a_y[t5Idx] + ptr_m_J_17[i] * ptr_m_a_z[t5Idx];

				reg_m_ad_vec += ptr_m_CFM[i] * reg_m_d;

#if (LCPCG_PRECONDITIONER_LEFTJACOBI == 1)
				// Preconditioning
				reg_m_ad_vec *= ptr_m_invDiag[i];
#endif

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)
				reg_m_ad_vec *= ptr_m_invDiag[i];
#endif

				ptr_m_ad_vec[i] = reg_m_ad_vec;

				// dot_dAd
				cutArray_GradNorm[threadIdx.x] += reg_m_d * reg_m_ad_vec;
				// dot_gd
				cutArray_BetaNorm[threadIdx.x] += ptr_m_g[i] * reg_m_d;
			}


			// Reduce 32 elements inside one warp
			if (threadInWarp < 16)
			{
				cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x + 16];
				cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  8];
				cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  4];
				cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  2];
				cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  1];

				cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x + 16];
				cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  8];
				cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  4];
				cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  2];
				cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  1];

				// MINIMUM
				if (cutArray_PhiPhi[threadIdx.x + 16] < cutArray_PhiPhi[threadIdx.x])
				{
					cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x + 16];
				}
				if (cutArray_PhiPhi[threadIdx.x +  8] < cutArray_PhiPhi[threadIdx.x])
				{
					cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  8];
				}
				if (cutArray_PhiPhi[threadIdx.x +  4] < cutArray_PhiPhi[threadIdx.x])
				{
					cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  4];
				}
				if (cutArray_PhiPhi[threadIdx.x +  2] < cutArray_PhiPhi[threadIdx.x])
				{
					cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  2];
				}
				if (cutArray_PhiPhi[threadIdx.x +  1] < cutArray_PhiPhi[threadIdx.x])
				{
					cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  1];
				}
			}
			__syncthreads();

			// Reduce elements inside block (warps per block: 4)
			if (threadIdx.x < warpsPerBlock)
			{
				warpReduceRes_BetaNorm[threadIdx.x] = cutArray_BetaNorm[threadIdx.x * WARP_SIZE];
				warpReduceRes_GradNorm[threadIdx.x] = cutArray_GradNorm[threadIdx.x * WARP_SIZE];
				warpReduceRes_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x * WARP_SIZE];
			}

			////////////////////////////////////////////////////////////////////////////
			// Reduce 4 elements
			// (WARNING! garbage in warpReduceRes can ruin your life
			// check twice number of warps per block
			////////////////////////////////////////////////////////////////////////////
			// Compiler should resolve this if compile-time
			if (warpsPerBlock == 8)
			{
				if (threadIdx.x < 4)
				{
					REDUCE_4_BLOCK(warpReduceRes_BetaNorm);
					REDUCE_4_BLOCK(warpReduceRes_GradNorm);

					REDUCE_MIN_4_BLOCK(warpReduceRes_PhiPhi);
				}
			}
			else
			{
				if (threadIdx.x < 2)
				{
					REDUCE_2_BLOCK(warpReduceRes_BetaNorm);
					REDUCE_2_BLOCK(warpReduceRes_GradNorm);

					REDUCE_MIN_2_BLOCK(warpReduceRes_PhiPhi);
				}
			}

			// Write result of in-block reduce into global memory
			if (threadIdx.x == 0)
			{
				ptr_internal_results[blockIdx.x] = warpReduceRes_BetaNorm[0];
				ptr_internal_results_1[blockIdx.x] = warpReduceRes_GradNorm[0];
				ptr_internal_results_2[blockIdx.x] = warpReduceRes_PhiPhi[0];

				__threadfence();
			}

			SYNCALL_BEGIN()

				// ACTIONS INSIDE BLOCK 0/"CONTROL" HERE

				if (threadIdx.x < WARP_SIZE)
				{
					cutArray_BetaNorm[threadIdx.x] = 0.0f;
					cutArray_GradNorm[threadIdx.x] = 0.0f;
					
					cutArray_PhiPhi[threadIdx.x] = CUDA_FLT_MAX;
				}
				
				// Fetch from global to shared memory (speed boost)
				if (threadIdx.x < gridDim.x)
				{
					cutArray_BetaNorm[threadIdx.x] = ptr_internal_results[threadIdx.x];
					cutArray_GradNorm[threadIdx.x] = ptr_internal_results_1[threadIdx.x];

					cutArray_PhiPhi[threadIdx.x] = ptr_internal_results_2[threadIdx.x];
				}

				// Reduce (number of multiprocessor is less than 32)
				// and empty cells in shared tmp are initialized to zero
				// so full-reduce can be done
				if (threadIdx.x < 16)
				{
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x + 16];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  8];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  4];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  2];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  1];

					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x + 16];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  8];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  4];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  2];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  1];

					// MINIMUM
					if (cutArray_PhiPhi[threadIdx.x + 16] < cutArray_PhiPhi[threadIdx.x])
					{
						cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x + 16];
					}
					if (cutArray_PhiPhi[threadIdx.x +  8] < cutArray_PhiPhi[threadIdx.x])
					{
						cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  8];
					}
					if (cutArray_PhiPhi[threadIdx.x +  4] < cutArray_PhiPhi[threadIdx.x])
					{
						cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  4];
					}
					if (cutArray_PhiPhi[threadIdx.x +  2] < cutArray_PhiPhi[threadIdx.x])
					{
						cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  2];
					}
					if (cutArray_PhiPhi[threadIdx.x +  1] < cutArray_PhiPhi[threadIdx.x])
					{
						cutArray_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x +  1];
					}

					// write result in thread 0, block 0
					if (threadIdx.x == 0)
					{
//						alpha_cg = dot_gd / dot_dAd;
						alpha_cg = cutArray_BetaNorm[0] / cutArray_GradNorm[0];

						// Set alpha to alpha_feasible if needed
						if (cutArray_PhiPhi[0] < alpha_cg)
						{
							alpha_cg = cutArray_PhiPhi[0];
						}

						__threadfence();
					}
				}
				
			SYNCALL_END();

			// Transfer calculated alpha_cg from __device__ to __shared__ [speed boost]
			if (threadIdx.x == 0)
			{
				sh_alpha_cg = alpha_cg;
			}
			__syncthreads();

			cutArray_PhiPhi[threadIdx.x] = 0.0f;
			cutArray_BetaNorm[threadIdx.x] = 0.0f;
			cutArray_GradNorm[threadIdx.x] = 0.0f;

			// Calculate:
			// lambda -= alpha_cg * d;
			// g -= alpha_cg * (A * d);
			// p = phi(x^k);
			// "cut arrays" for ||Beta||, Phi dot Phi~, ||gradient||
			for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumJoints; i += blockDim.x * gridDim.x)
			{
				float interm_lambda;
				interm_lambda = ptr_m_lambda[i] - sh_alpha_cg * ptr_m_d[i];
				ptr_m_lambda[i] = interm_lambda;

				float reg_m_g;
				reg_m_g = ptr_m_g[i] - sh_alpha_cg * ptr_m_ad_vec[i];
				ptr_m_g[i] = reg_m_g;
				

#if (LCPCG_PRECONDITIONER_FACEJACOBI == 1)
				// Preconditioner in face:
				// p = z
				float reg_m_p = (ptr_m_invDiag[i] * reg_m_g);
#else
				float reg_m_p = reg_m_g;
#endif


				float interm_Lo = ptr_m_Lo[i];
				float interm_Hi = ptr_m_Hi[i];
				float tmp_float2 = interm_lambda - interm_Lo;
				float tmp_float3 = interm_lambda - interm_Hi;
				
				if (abs(tmp_float2) < ACT_SET_THRESHOLD)
				{
					reg_m_p = 0.0f;

					if (reg_m_g < 0.0f)
					{
						cutArray_GradNorm[threadIdx.x] += reg_m_g * reg_m_g;
						cutArray_BetaNorm[threadIdx.x] += reg_m_g * reg_m_g;
					}
				}
				else if (abs(tmp_float3) < ACT_SET_THRESHOLD)
				{
					reg_m_p = 0.0f;

					if (reg_m_g > 0.0f)
					{
						cutArray_BetaNorm[threadIdx.x] += reg_m_g * reg_m_g;
						cutArray_GradNorm[threadIdx.x] += reg_m_g * reg_m_g;
					}
				}
				else
				{
					cutArray_GradNorm[threadIdx.x] += reg_m_g * reg_m_g;

					if (reg_m_g > 0.0f)
					{
						if ((tmp_float2 / sh_alpha_fucked) < reg_m_g)
						{
							cutArray_PhiPhi[threadIdx.x] += reg_m_g * tmp_float2 / sh_alpha_fucked;
						}
						else
						{
							cutArray_PhiPhi[threadIdx.x] += reg_m_g * reg_m_g;
						}
					}
					else if (reg_m_g < 0.0f)
					{
						if ((tmp_float3 / sh_alpha_fucked) > reg_m_g)
						{
							cutArray_PhiPhi[threadIdx.x] += reg_m_g * tmp_float3 / sh_alpha_fucked;
						}
						else
						{
							cutArray_PhiPhi[threadIdx.x] += reg_m_g * reg_m_g;
						}
					}
				}

				ptr_m_p[i] = reg_m_p;
			}

			// Reduce 32 elements inside one warp
			if (threadInWarp < 16)
			{
				cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x + 16];
				cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  8];
				cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  4];
				cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  2];
				cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  1];

				cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x + 16];
				cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  8];
				cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  4];
				cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  2];
				cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  1];

				cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x + 16];
				cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  8];
				cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  4];
				cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  2];
				cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  1];
			}
			__syncthreads();

			// Reduce elements inside block (warps per block: 4)
			if (threadIdx.x < warpsPerBlock)
			{
				warpReduceRes_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x * WARP_SIZE];
				warpReduceRes_BetaNorm[threadIdx.x] = cutArray_BetaNorm[threadIdx.x * WARP_SIZE];
				warpReduceRes_GradNorm[threadIdx.x] = cutArray_GradNorm[threadIdx.x * WARP_SIZE];
			}

			////////////////////////////////////////////////////////////////////////////
			// Reduce 4 elements
			// (WARNING! garbage in warpReduceRes can ruin your life
			// check twice number of warps per block
			////////////////////////////////////////////////////////////////////////////
			// Compiler should resolve this if compile-time
			if (warpsperBlock == 8)
			{
				if (threadIdx.x < 4)
				{
					REDUCE_4_BLOCK(warpReduceRes_PhiPhi);
					REDUCE_4_BLOCK(warpReduceRes_BetaNorm);
					REDUCE_4_BLOCK(warpReduceRes_GradNorm);
				}
			}
			else
			{
				if (threadIdx.x < 2)
				{
					REDUCE_2_BLOCK(warpReduceRes_PhiPhi);
					REDUCE_2_BLOCK(warpReduceRes_BetaNorm);
					REDUCE_2_BLOCK(warpReduceRes_GradNorm);
				}
			}

			// Write result of in-block reduce into global memory
			if (threadIdx.x == 0)
			{
				ptr_internal_results[blockIdx.x] = warpReduceRes_PhiPhi[0];
				ptr_internal_results_1[blockIdx.x] = warpReduceRes_BetaNorm[0];
				ptr_internal_results_2[blockIdx.x] = warpReduceRes_GradNorm[0];

				__threadfence();
			}

			SYNCALL_BEGIN()

				// ACTIONS INSIDE BLOCK 0/"CONTROL" HERE

				if (threadIdx.x < WARP_SIZE)
				{
					cutArray_PhiPhi[threadIdx.x] = 0.0f;
					cutArray_BetaNorm[threadIdx.x] = 0.0f;
					cutArray_GradNorm[threadIdx.x] = 0.0f;
				}
				
				// Fetch from global to shared memory (speed boost)
				if (threadIdx.x < gridDim.x)
				{
					cutArray_PhiPhi[threadIdx.x] = ptr_internal_results[threadIdx.x];
					cutArray_BetaNorm[threadIdx.x] = ptr_internal_results_1[threadIdx.x];
					cutArray_GradNorm[threadIdx.x] = ptr_internal_results_2[threadIdx.x];
				}

				// Reduce (number of multiprocessor is less than 32)
				// and empty cells in shared tmp are initialized to zero
				// so full-reduce can be done
				if (threadIdx.x < 16)
				{
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x + 16];
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  8];
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  4];
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  2];
					cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  1];

					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x + 16];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  8];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  4];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  2];
					cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  1];

					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x + 16];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  8];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  4];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  2];
					cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  1];

					// write result in thread 0, block 0
					if (threadIdx.x == 0)
					{
						BetaNorm = cutArray_BetaNorm[0];
						PhiPhi = cutArray_PhiPhi[0];
			
						GradNorm = cutArray_GradNorm[0];

						__threadfence();
					}
				}
				
			SYNCALL_END();

			// Transfer calculated values from __device__ to __shared__ memory [speed boost]
			if (threadIdx.x == 0)
			{
				sh_BetaNorm = BetaNorm;
				sh_PhiPhi = PhiPhi;

				sh_GradNorm = GradNorm;
			}
			__syncthreads();

#endif
		}


		if (threadIdx.x == 0)
		{
			// Stopping criteria
			if (GradNorm / m_NumJoints < m_Precision)
			{
				sh_Iterations = 0;
			}
		}
		__syncthreads();
	}

	if (blockIdx.x == 0 && threadIdx.x == 0)
	{
		tmp_output_f = GradNorm;
		m_EffectiveIterations = k;
		__threadfence();
	}




// 	m_DotLambdaGrad = 0.0f;
	cutArray_PhiPhi[threadIdx.x] = 0.0f;

// 	m_LambdaNormSq = 0.0f;
	cutArray_BetaNorm[threadIdx.x] = 0.0f;

// 	m_GradNormSq = 0.0f;
	cutArray_GradNorm[threadIdx.x] = 0.0f;


	for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumJoints; i += blockDim.x * gridDim.x)
	{
		float reg_m_g = ptr_m_g[i];
		float reg_m_lambda = ptr_m_lambda[i];

#if (LCPCG_PRECONDITIONER_DOUBLEJACOBI == 1)

		ptr_m_lambda[i] = ptr_m_invDiag[i] * reg_m_lambda;

#endif

		cutArray_PhiPhi[threadIdx.x] += reg_m_lambda * reg_m_g;
		cutArray_BetaNorm[threadIdx.x] += reg_m_lambda * reg_m_lambda;
		cutArray_GradNorm[threadIdx.x] += reg_m_g * reg_m_g;
	}

	// Reduce 32 elements inside one warp
	if (threadInWarp < 16)
	{
		cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x + 16];
		cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  8];
		cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  4];
		cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  2];
		cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  1];

		cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x + 16];
		cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  8];
		cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  4];
		cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  2];
		cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  1];

		cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x + 16];
		cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  8];
		cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  4];
		cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  2];
		cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  1];
	}
	__syncthreads();

	// Reduce elements inside block (warps per block: 4)
	if (threadIdx.x < warpsPerBlock)
	{
		warpReduceRes_PhiPhi[threadIdx.x] = cutArray_PhiPhi[threadIdx.x * WARP_SIZE];
		warpReduceRes_BetaNorm[threadIdx.x] = cutArray_BetaNorm[threadIdx.x * WARP_SIZE];
		warpReduceRes_GradNorm[threadIdx.x] = cutArray_GradNorm[threadIdx.x * WARP_SIZE];
	}

	////////////////////////////////////////////////////////////////////////////
	// Reduce 4 elements
	// (WARNING! garbage in warpReduceRes can ruin your life
	// check twice number of warps per block
	////////////////////////////////////////////////////////////////////////////
	// Compiler should resolve this if compile-time
	if (warpsPerBlock == 8)
	{
		if (threadIdx.x < 4)
		{
			REDUCE_4_BLOCK(warpReduceRes_PhiPhi);
			REDUCE_4_BLOCK(warpReduceRes_BetaNorm);
			REDUCE_4_BLOCK(warpReduceRes_GradNorm);
		}
	}
	else
	{
		if (threadIdx.x < 2)
		{
			REDUCE_2_BLOCK(warpReduceRes_PhiPhi);
			REDUCE_2_BLOCK(warpReduceRes_BetaNorm);
			REDUCE_2_BLOCK(warpReduceRes_GradNorm);
		}
	}

	// Write result of in-block reduce into global memory
	if (threadIdx.x == 0)
	{
		ptr_internal_results[blockIdx.x] = warpReduceRes_PhiPhi[0];
		ptr_internal_results_1[blockIdx.x] = warpReduceRes_BetaNorm[0];
		ptr_internal_results_2[blockIdx.x] = warpReduceRes_GradNorm[0];

		__threadfence();
	}

	SYNCALL_BEGIN()

		// ACTIONS INSIDE BLOCK 0/"CONTROL" HERE

		if (threadIdx.x < WARP_SIZE)
		{
			cutArray_PhiPhi[threadIdx.x] = 0.0f;
			cutArray_BetaNorm[threadIdx.x] = 0.0f;
			cutArray_GradNorm[threadIdx.x] = 0.0f;
		}
		
		// Fetch from global to shared memory (speed boost)
		if (threadIdx.x < gridDim.x)
		{
			cutArray_PhiPhi[threadIdx.x] = ptr_internal_results[threadIdx.x];
			cutArray_BetaNorm[threadIdx.x] = ptr_internal_results_1[threadIdx.x];
			cutArray_GradNorm[threadIdx.x] = ptr_internal_results_2[threadIdx.x];
		}

		// Reduce (number of multiprocessor is less than 32)
		// and empty cells in shared tmp are initialized to zero
		// so full-reduce can be done
		if (threadIdx.x < 16)
		{
			cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x + 16];
			cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  8];
			cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  4];
			cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  2];
			cutArray_PhiPhi[threadIdx.x] += cutArray_PhiPhi[threadIdx.x +  1];

			cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x + 16];
			cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  8];
			cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  4];
			cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  2];
			cutArray_BetaNorm[threadIdx.x] += cutArray_BetaNorm[threadIdx.x +  1];

			cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x + 16];
			cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  8];
			cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  4];
			cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  2];
			cutArray_GradNorm[threadIdx.x] += cutArray_GradNorm[threadIdx.x +  1];

			// write result in thread 0, block 0
			if (threadIdx.x == 0)
			{
				m_LambdaNormSq = cutArray_BetaNorm[0];
				m_DotLambdaGrad = cutArray_PhiPhi[0];
				m_GradNormSq = cutArray_GradNorm[0];
	
				__threadfence();
			}
		}
		
	SYNCALL_END();

	for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumNodes; i += blockDim.x * gridDim.x)
	{
		float reg_m_a_x = 0.0f;
		float reg_m_a_y = 0.0f;
		float reg_m_a_z = 0.0f;

		unsigned int JointTripleNum, JointNodeOffset;
		unsigned int JointIndex;

		JointTripleNum = ptr_m_NodesJointsNum_00[i];
		JointNodeOffset = ptr_m_NodesJointsOffset_00[i];
		for (int j = 0; j < JointTripleNum; ++j)
		{
			JointIndex = tex1Dfetch( texref_m_NodesJoints_00, JointNodeOffset + j );
			vec_component = ptr_m_lambda[JointIndex];
			reg_m_a_x += tex1Dfetch( texref_m_Asp_00, JointIndex ) * vec_component;
			reg_m_a_y += tex1Dfetch( texref_m_Asp_01, JointIndex ) * vec_component; 
			reg_m_a_z += tex1Dfetch( texref_m_Asp_02, JointIndex ) * vec_component;
		}

		JointTripleNum = ptr_m_NodesJointsNum_01[i];
		JointNodeOffset = ptr_m_NodesJointsOffset_01[i];
		for (int j = 0; j < JointTripleNum; ++j)
		{
			JointIndex = tex1Dfetch(texref_m_NodesJoints_01, JointNodeOffset + j);
			vec_component = ptr_m_lambda[JointIndex];
			reg_m_a_x += tex1Dfetch( texref_m_Asp_03, JointIndex ) * vec_component;
			reg_m_a_y += tex1Dfetch( texref_m_Asp_04, JointIndex ) * vec_component;
			reg_m_a_z += tex1Dfetch( texref_m_Asp_05, JointIndex ) * vec_component;
		}

		JointTripleNum = ptr_m_NodesJointsNum_02[i];
		JointNodeOffset = ptr_m_NodesJointsOffset_02[i];
		for (int j = 0; j < JointTripleNum; ++j)
		{
			JointIndex = tex1Dfetch(texref_m_NodesJoints_02, JointNodeOffset + j);
			vec_component = ptr_m_lambda[JointIndex];
			reg_m_a_x += tex1Dfetch( texref_m_Asp_06, JointIndex ) * vec_component;
			reg_m_a_y += tex1Dfetch( texref_m_Asp_07, JointIndex ) * vec_component; 
			reg_m_a_z += tex1Dfetch( texref_m_Asp_08, JointIndex ) * vec_component;
		}

		JointTripleNum = ptr_m_NodesJointsNum_03[i];
		JointNodeOffset = ptr_m_NodesJointsOffset_03[i];
		for (int j = 0; j < JointTripleNum; ++j)
		{
			JointIndex = tex1Dfetch(texref_m_NodesJoints_03, JointNodeOffset + j);
			vec_component = ptr_m_lambda[JointIndex];
			reg_m_a_x += tex1Dfetch( texref_m_Asp_09, JointIndex ) * vec_component;
			reg_m_a_y += tex1Dfetch( texref_m_Asp_10, JointIndex ) * vec_component; 
			reg_m_a_z += tex1Dfetch( texref_m_Asp_11, JointIndex ) * vec_component;
		}

		JointTripleNum = ptr_m_NodesJointsNum_04[i];
		JointNodeOffset = ptr_m_NodesJointsOffset_04[i];
		for (int j = 0; j < JointTripleNum; ++j)
		{
			JointIndex = tex1Dfetch(texref_m_NodesJoints_04, JointNodeOffset + j);
			vec_component = ptr_m_lambda[JointIndex];
			reg_m_a_x += tex1Dfetch( texref_m_Asp_12, JointIndex ) * vec_component;
			reg_m_a_y += tex1Dfetch( texref_m_Asp_13, JointIndex ) * vec_component; 
			reg_m_a_z += tex1Dfetch( texref_m_Asp_14, JointIndex ) * vec_component;
		}

		JointTripleNum = ptr_m_NodesJointsNum_05[i];
		JointNodeOffset = ptr_m_NodesJointsOffset_05[i];
		for (int j = 0; j < JointTripleNum; ++j)
		{
			JointIndex = tex1Dfetch(texref_m_NodesJoints_05, JointNodeOffset + j);
			vec_component = ptr_m_lambda[JointIndex];
			reg_m_a_x += tex1Dfetch( texref_m_Asp_15, JointIndex ) * vec_component;
			reg_m_a_y += tex1Dfetch( texref_m_Asp_16, JointIndex ) * vec_component; 
			reg_m_a_z += tex1Dfetch( texref_m_Asp_17, JointIndex ) * vec_component;
		}

		ptr_m_a_x[i] = reg_m_a_x;
		ptr_m_a_y[i] = reg_m_a_y;
		ptr_m_a_z[i] = reg_m_a_z;
	}

	SYNCALL_BEGIN();
	SYNCALL_END();
}

extern "C"
__global__ void Integrate( unsigned int m_NumNodes, float dt )
{
	int threadInWarp = threadIdx.x & (WARP_SIZE - 1);
	int warpNum = threadIdx.x / WARP_SIZE;

	for (int i = threadInWarp + (blockIdx.x + warpNum * gridDim.x) * WARP_SIZE; i < m_NumNodes; i += blockDim.x * gridDim.x)
	{
		ptr_m_NodeVel_x[i] = ptr_m_a_x[i] + ptr_m_Ftot_x[i];
		ptr_m_NodeVel_y[i] = ptr_m_a_y[i] + ptr_m_Ftot_y[i];
		ptr_m_NodeVel_z[i] = ptr_m_a_z[i] + ptr_m_Ftot_z[i];

		if (ptr_m_IsRotational[i])
		{
			float orient_x, orient_y, orient_z, orient_w;
			float reg_temp = dt * 0.5f;

			float vel_x, vel_y, vel_z;
			vel_x = ptr_m_NodeVel_x[i];
			vel_y = ptr_m_NodeVel_y[i];
			vel_z = ptr_m_NodeVel_z[i];

			float old_orient_w, old_orient_x, old_orient_y, old_orient_z;

			old_orient_w = ptr_m_NodePosRot_w[i];
			old_orient_x = ptr_m_NodePosRot_x[i];
			old_orient_y = ptr_m_NodePosRot_y[i];
			old_orient_z = ptr_m_NodePosRot_z[i];

			// orient += (1/2) * dt * (velocity * orient)
			orient_w = old_orient_w + reg_temp *
				(-vel_x * old_orient_x -
				  vel_y * old_orient_y -
				  vel_z * old_orient_z);

			orient_x = old_orient_x + reg_temp *
				( vel_x * old_orient_w +
				  vel_y * old_orient_z -
				  vel_z * old_orient_y);

			orient_y = old_orient_y + reg_temp *
				( vel_y * old_orient_w +
				  vel_z * old_orient_x -
				  vel_x * old_orient_z);

			orient_z = old_orient_z + reg_temp *
				( vel_z * old_orient_w +
				  vel_x * old_orient_y -
				  vel_y * old_orient_x);

			// Magnitude inversed
			reg_temp = 1.0f / sqrtf(orient_w*orient_w + orient_x*orient_x +
									orient_y*orient_y + orient_z*orient_z);

			orient_w *= reg_temp;
			orient_x *= reg_temp;
			orient_y *= reg_temp;
			orient_z *= reg_temp;

			// Normalized Quaternion
			ptr_m_NodePosRot_w[i] = orient_w;
			ptr_m_NodePosRot_x[i] = orient_x;
			ptr_m_NodePosRot_y[i] = orient_y;
			ptr_m_NodePosRot_z[i] = orient_z;

			// Rotational matrix
			float	Rot_00, Rot_01, Rot_02,
					Rot_10, Rot_11, Rot_12,
					Rot_20, Rot_21, Rot_22;

			// Qoaternion to matrix
			float dx  = 2.0f * orient_x;
			float dy  = 2.0f * orient_y;
			float dz  = 2.0f * orient_z;

			float dwx = dx * orient_w;
			float dwy = dy * orient_w;
			float dwz = dz * orient_w;

			float dxy = dx * orient_y;
			float dxz = dx * orient_z;
			float dyz = dy * orient_z;

			float dxx = dx * orient_x;
			float dyy = dy * orient_y;
			float dzz = dz * orient_z;

			Rot_00 = 1.0f - dyy - dzz;
			Rot_01 = dxy  - dwz;
			Rot_02 = dxz  + dwy;

			Rot_10 = dxy  + dwz;
			Rot_11 = 1.0f - dxx - dzz;
			Rot_12 = dyz  - dwx;

			Rot_20 = dxz  - dwy;
			Rot_21 = dyz  + dwx;
			Rot_22 = 1.0f - dxx - dyy;



			// Calculate rotated tensor

			// 1st ROW
			ptr_m_NodeInvMass_00[i] = 
				(Rot_00 * ptr_m_NodeInvMass0_00[i] + Rot_01 * ptr_m_NodeInvMass0_10[i] + Rot_02 * ptr_m_NodeInvMass0_20[i]) * Rot_00 + 
				(Rot_00 * ptr_m_NodeInvMass0_01[i] + Rot_01 * ptr_m_NodeInvMass0_11[i] + Rot_02 * ptr_m_NodeInvMass0_21[i]) * Rot_01 + 
				(Rot_00 * ptr_m_NodeInvMass0_02[i] + Rot_01 * ptr_m_NodeInvMass0_12[i] + Rot_02 * ptr_m_NodeInvMass0_22[i]) * Rot_02;

			ptr_m_NodeInvMass_01[i] = 
				(Rot_00 * ptr_m_NodeInvMass0_00[i] + Rot_01 * ptr_m_NodeInvMass0_10[i] + Rot_02 * ptr_m_NodeInvMass0_20[i]) * Rot_10 + 
				(Rot_00 * ptr_m_NodeInvMass0_01[i] + Rot_01 * ptr_m_NodeInvMass0_11[i] + Rot_02 * ptr_m_NodeInvMass0_21[i]) * Rot_11 + 
				(Rot_00 * ptr_m_NodeInvMass0_02[i] + Rot_01 * ptr_m_NodeInvMass0_12[i] + Rot_02 * ptr_m_NodeInvMass0_22[i]) * Rot_12;

			ptr_m_NodeInvMass_02[i] = 
				(Rot_00 * ptr_m_NodeInvMass0_00[i] + Rot_01 * ptr_m_NodeInvMass0_10[i] + Rot_02 * ptr_m_NodeInvMass0_20[i]) * Rot_20 + 
				(Rot_00 * ptr_m_NodeInvMass0_01[i] + Rot_01 * ptr_m_NodeInvMass0_11[i] + Rot_02 * ptr_m_NodeInvMass0_21[i]) * Rot_21 + 
				(Rot_00 * ptr_m_NodeInvMass0_02[i] + Rot_01 * ptr_m_NodeInvMass0_12[i] + Rot_02 * ptr_m_NodeInvMass0_22[i]) * Rot_22;


			// 2nd ROW
			ptr_m_NodeInvMass_10[i] =
				(Rot_10 * ptr_m_NodeInvMass0_00[i] + Rot_11 * ptr_m_NodeInvMass0_10[i] + Rot_12 * ptr_m_NodeInvMass0_20[i]) * Rot_00 + 
				(Rot_10 * ptr_m_NodeInvMass0_01[i] + Rot_11 * ptr_m_NodeInvMass0_11[i] + Rot_12 * ptr_m_NodeInvMass0_21[i]) * Rot_01 + 
				(Rot_10 * ptr_m_NodeInvMass0_02[i] + Rot_11 * ptr_m_NodeInvMass0_12[i] + Rot_12 * ptr_m_NodeInvMass0_22[i]) * Rot_02;

			ptr_m_NodeInvMass_11[i] =
				(Rot_10 * ptr_m_NodeInvMass0_00[i] + Rot_11 * ptr_m_NodeInvMass0_10[i] + Rot_12 * ptr_m_NodeInvMass0_20[i]) * Rot_10 + 
				(Rot_10 * ptr_m_NodeInvMass0_01[i] + Rot_11 * ptr_m_NodeInvMass0_11[i] + Rot_12 * ptr_m_NodeInvMass0_21[i]) * Rot_11 + 
				(Rot_10 * ptr_m_NodeInvMass0_02[i] + Rot_11 * ptr_m_NodeInvMass0_12[i] + Rot_12 * ptr_m_NodeInvMass0_22[i]) * Rot_12;

			ptr_m_NodeInvMass_12[i] =
				(Rot_10 * ptr_m_NodeInvMass0_00[i] + Rot_11 * ptr_m_NodeInvMass0_10[i] + Rot_12 * ptr_m_NodeInvMass0_20[i]) * Rot_20 + 
				(Rot_10 * ptr_m_NodeInvMass0_01[i] + Rot_11 * ptr_m_NodeInvMass0_11[i] + Rot_12 * ptr_m_NodeInvMass0_21[i]) * Rot_21 + 
				(Rot_10 * ptr_m_NodeInvMass0_02[i] + Rot_11 * ptr_m_NodeInvMass0_12[i] + Rot_12 * ptr_m_NodeInvMass0_22[i]) * Rot_22;


			// 3rd ROW
			ptr_m_NodeInvMass_20[i] =
				(Rot_20 * ptr_m_NodeInvMass0_00[i] + Rot_21 * ptr_m_NodeInvMass0_10[i] + Rot_22 * ptr_m_NodeInvMass0_20[i]) * Rot_00 + 
				(Rot_20 * ptr_m_NodeInvMass0_01[i] + Rot_21 * ptr_m_NodeInvMass0_11[i] + Rot_22 * ptr_m_NodeInvMass0_21[i]) * Rot_01 + 
				(Rot_20 * ptr_m_NodeInvMass0_02[i] + Rot_21 * ptr_m_NodeInvMass0_12[i] + Rot_22 * ptr_m_NodeInvMass0_22[i]) * Rot_02;

			ptr_m_NodeInvMass_21[i] =
				(Rot_20 * ptr_m_NodeInvMass0_00[i] + Rot_21 * ptr_m_NodeInvMass0_10[i] + Rot_22 * ptr_m_NodeInvMass0_20[i]) * Rot_10 + 
				(Rot_20 * ptr_m_NodeInvMass0_01[i] + Rot_21 * ptr_m_NodeInvMass0_11[i] + Rot_22 * ptr_m_NodeInvMass0_21[i]) * Rot_11 + 
				(Rot_20 * ptr_m_NodeInvMass0_02[i] + Rot_21 * ptr_m_NodeInvMass0_12[i] + Rot_22 * ptr_m_NodeInvMass0_22[i]) * Rot_12;

			ptr_m_NodeInvMass_22[i] =
				(Rot_20 * ptr_m_NodeInvMass0_00[i] + Rot_21 * ptr_m_NodeInvMass0_10[i] + Rot_22 * ptr_m_NodeInvMass0_20[i]) * Rot_20 + 
				(Rot_20 * ptr_m_NodeInvMass0_01[i] + Rot_21 * ptr_m_NodeInvMass0_11[i] + Rot_22 * ptr_m_NodeInvMass0_21[i]) * Rot_21 + 
				(Rot_20 * ptr_m_NodeInvMass0_02[i] + Rot_21 * ptr_m_NodeInvMass0_12[i] + Rot_22 * ptr_m_NodeInvMass0_22[i]) * Rot_22;
		}
		else
		{
			float reg_pos;

			reg_pos = ptr_m_NodePosRot_x[i] + ptr_m_NodeVel_x[i] * dt;
			ptr_m_NodePosRot_x[i] = reg_pos;

			reg_pos = ptr_m_NodePosRot_y[i] + ptr_m_NodeVel_y[i] * dt;
			ptr_m_NodePosRot_y[i] = reg_pos;

			reg_pos = ptr_m_NodePosRot_z[i] + ptr_m_NodeVel_z[i] * dt;
			ptr_m_NodePosRot_z[i] = reg_pos;

			ptr_m_NodeInvMass_00[i] = ptr_m_NodeInvMass0_00[i];
			ptr_m_NodeInvMass_01[i] = ptr_m_NodeInvMass0_01[i];
			ptr_m_NodeInvMass_02[i] = ptr_m_NodeInvMass0_02[i];

			ptr_m_NodeInvMass_10[i] = ptr_m_NodeInvMass0_10[i];
			ptr_m_NodeInvMass_11[i] = ptr_m_NodeInvMass0_11[i];
			ptr_m_NodeInvMass_12[i] = ptr_m_NodeInvMass0_12[i];

			ptr_m_NodeInvMass_20[i] = ptr_m_NodeInvMass0_20[i];
			ptr_m_NodeInvMass_21[i] = ptr_m_NodeInvMass0_21[i];
			ptr_m_NodeInvMass_22[i] = ptr_m_NodeInvMass0_22[i];
		}
	}
}






////////////////////////////////////////////////////////////////////////////
// Template wrappers
////////////////////////////////////////////////////////////////////////////
extern "C"
__global__ void SolverKernel_4Warps( unsigned int m_NumNodes, unsigned int m_NumJoints, unsigned int m_Iterations, float m_Precision )
{
	SolverKernelT<4>(m_NumNodes, m_NumJoints, m_Iterations, m_Precision);
}

extern "C"
__global__ void SolverKernel_8Warps( unsigned int m_NumNodes, unsigned int m_NumJoints, unsigned int m_Iterations, float m_Precision )
{
	SolverKernelT<8>(m_NumNodes, m_NumJoints, m_Iterations, m_Precision);
}

extern "C"
__global__ void ComputeFEMJacobians_4Warps( unsigned int m_Num_FEM_Joints, float dt )
{
	ComputeFEMJacobiansT<4>(m_Num_FEM_Joints, dt);
}

extern "C"
__global__ void ComputeFEMJacobians_8Warps( unsigned int m_Num_FEM_Joints, float dt )
{
	ComputeFEMJacobiansT<8>(m_Num_FEM_Joints, dt);
}

#endif
