/*
 * bi is particle i, which in turn is the particle where "I am"
 * (the particle whose force this thread accumulates).
 */

#include <stdio.h>

#include <cuda.h>
#include "cuda_ffdot.h"

/*
 * Pairwise interaction: accumulate into frfd the softened gravitational
 * force and its time derivative exerted by particle j on particle i.
 *
 *   bi, bj : positions in .x/.y/.z, mass in .w
 *   vi, vj : velocities
 *   frfd   : running accumulator (fr = force, fd = d(force)/dt)
 *   eps2   : softening length squared; must be > 0 so the self term
 *            (i == j, DR = DV = 0) stays finite and contributes zero.
 *
 * Returns the updated accumulator by value.
 */
__device__ FD
ffdot_ij( float4 bi, float4 bj, float4 vi, float4 vj, FD frfd, float eps2 )
{
	
	float3 DR;
	float3 DV;
	
	float RIJ2,DR3I,DRDV;
	
	DR.x = bj.x - bi.x; DR.y = bj.y - bi.y; DR.z = bj.z - bi.z;
	DV.x = vj.x - vi.x; DV.y = vj.y - vi.y; DV.z = vj.z - vi.z;
	
	/* softened |r_ij|^2 and m_j / |r_ij|^3 */
	RIJ2 = DR.x*DR.x + DR.y*DR.y + DR.z*DR.z + eps2; 
	DR3I = bj.w/( RIJ2*sqrtf(RIJ2) );
	
	/* 3 (r.v)/r^2; float literal avoids a silent promotion to double */
	DRDV = 3.0f*(DR.x*DV.x + DR.y*DV.y + DR.z*DV.z )/RIJ2;
	
	frfd.fr.x += DR.x*DR3I; 
	frfd.fr.y += DR.y*DR3I; 
	frfd.fr.z += DR.z*DR3I; 
	
	frfd.fd.x += (DV.x - DR.x*DRDV)*DR3I;
	frfd.fd.y += (DV.y - DR.y*DRDV)*DR3I;
	frfd.fd.z += (DV.z - DR.z*DRDV)*DR3I;
    
    return frfd;
}

/*
 * Accumulate into frfd the contribution of one tile of blockDim.x
 * particles, previously staged in shared memory by the caller, to the
 * force and force-derivative acting on particle i (bi, vi).
 *
 * Contains no __syncthreads(): the caller owns the barriers around the
 * shared-memory fill, so this routine may be called divergently.
 */
__device__ FD
ffdot_tile( float4 bi, float4 vi, FD frfd, float eps2 )
{
	extern __shared__ RV sh_rv[];
	
	for( unsigned int j = 0; j < blockDim.x; j++ )
		frfd = ffdot_ij( bi, sh_rv[j].R, vi, sh_rv[j].V, frfd, eps2 );
	
	return frfd;
}

/*
 * Compute softened gravitational force (F) and its time derivative
 * (FDOT) for all Np particles, tiling the O(N^2) pairwise sum through
 * shared memory.
 *
 * Launch layout: 1-D grid of 1-D blocks of TPB threads, with
 * TPB * sizeof(RV) bytes of dynamic shared memory (see cuda_ffdot()).
 * Assumes eps2 > 0 so the self-interaction term (DR = 0) stays finite.
 *
 * Bounds handling: when Np is not a multiple of the block size, the
 * tail threads/tiles previously read and wrote out of bounds.  Tail
 * threads now load a dummy particle (but still reach every
 * __syncthreads()), partial tiles are padded with zero-mass particles
 * (whose contribution is exactly zero since DR3I = bj.w/... = 0), and
 * results are only written for gtid < Np.
 */
__global__ void
ffdot_grid( float4 *X, float4 *V, float3 *F, float3 *FDOT, int *NEXT, float eps2, long Np )
{
	extern __shared__ RV sh_rv[];
	
	int gtid = blockIdx.x * blockDim.x + threadIdx.x;
	int active = ( gtid < Np );
	
	float4 bi = active ? X[gtid] : make_float4( 0.0f, 0.0f, 0.0f, 0.0f );
	float4 vi = active ? V[gtid] : make_float4( 0.0f, 0.0f, 0.0f, 0.0f );
	
	FD frfd;
	frfd.fr.x = 0.0f; frfd.fr.y = 0.0f; frfd.fr.z = 0.0f;
	frfd.fd.x = 0.0f; frfd.fd.y = 0.0f; frfd.fd.z = 0.0f;
	
	int i,tile;
	for( i=0, tile=0; i<Np; i+=TPB, tile++ )
	{
		int idx = tile * blockDim.x + threadIdx.x;
		
		/* Pad partial tiles with zero-mass particles instead of
		 * reading past the end of X/V. */
		if( idx < Np )
		{
			sh_rv[threadIdx.x].R = X[idx];
			sh_rv[threadIdx.x].V = V[idx];
		}
		else
		{
			sh_rv[threadIdx.x].R = make_float4( 0.0f, 0.0f, 0.0f, 0.0f );
			sh_rv[threadIdx.x].V = make_float4( 0.0f, 0.0f, 0.0f, 0.0f );
		}
		
		__syncthreads();
		
		/*
		 * We only need to calculate F and FDOT for particles that
		 * will be updated (NEXT[gtid] == 1).
		 * By not calling this routine, we save ~100 arithmetical
		 * operations.  ffdot_tile() contains no __syncthreads(), so
		 * this divergent branch does not break the barrier scheme:
		 * every thread still reaches the two barriers below/above.
		 */
		if( active && NEXT[gtid] == 1 )
			frfd = ffdot_tile( bi, vi, frfd, eps2 );
		
		__syncthreads();
	}
	
	if( active )
	{
		F[gtid]    = frfd.fr;
		FDOT[gtid] = frfd.fd;
	}
}
/*
 * Host wrapper: copy positions, velocities and the NEXT flags to the
 * GPU, launch ffdot_grid over all Np particles, and copy the resulting
 * force (F) and force-derivative (FDOT) arrays back to host memory.
 *
 * X/V are host arrays laid out as float4 per particle (w = mass for X);
 * F/FDOT are host arrays of 3 floats per particle.  The *_d pointers
 * are preallocated device buffers of at least Np elements each.
 *
 * The final device-to-host cudaMemcpy calls synchronize with the
 * kernel, so F/FDOT are valid on return.
 */
extern "C" void
			/*These variables are stored in host (CPU) RAM*/
cuda_ffdot ( float *X, float *V, float *F, float *FDOT, int *NEXT, float eps2, long Np,
            /*These variables are stored in GPU RAM*/
            float4 *X_d, float4 *V_d, float3 *F_d, float3 *FDOT_d, int *NEXT_d )

{
	cudaError_t err;
	
	/*
	 * First, copy input arrays from CPU RAM to GPU RAM.
	 */
	cudaMemcpy( X_d, (float4 *)X  , sizeof(float4)*Np, cudaMemcpyHostToDevice );
	cudaMemcpy( V_d, (float4 *)V  , sizeof(float4)*Np, cudaMemcpyHostToDevice );
	
	cudaMemcpy( NEXT_d, NEXT      , sizeof(   int)*Np, cudaMemcpyHostToDevice );
	
	dim3 threads(TPB,1,1);
	dim3 grid((Np + (TPB-1))/TPB, 1, 1);           /* ceil(Np / TPB) blocks */
	int sharedMemSize = TPB * sizeof(RV);          /* one RV tile per block */
	
	ffdot_grid <<< grid, threads, sharedMemSize >>> ( X_d, V_d, F_d, FDOT_d, NEXT_d, eps2, Np );
	
	/* Kernel launches return no status; check for bad-config errors here. */
	err = cudaGetLastError();
	if( err != cudaSuccess )
		fprintf( stderr, "cuda_ffdot: kernel launch failed: %s\n",
		         cudaGetErrorString( err ) );
	
	/* These blocking copies also synchronize with the kernel. */
	cudaMemcpy( F     , (float  *)F_d   , sizeof(float)*Np*3, cudaMemcpyDeviceToHost);
	err = cudaMemcpy( FDOT  , (float  *)FDOT_d, sizeof(float)*Np*3, cudaMemcpyDeviceToHost);
	if( err != cudaSuccess )
		fprintf( stderr, "cuda_ffdot: device->host copy failed: %s\n",
		         cudaGetErrorString( err ) );
}
