#include "integrate2.h"
#include "common.h"
#include "common_cuda.h"

#define BLOCK_WIDTH 256

#define MAX_NUM_STARS 33553920

/****************************************************************************************************
for calculate accelerations
****************************************************************************************************/

//__device__ tile
//__device__ accumulate gravity contributions from one shared-memory tile
// Adds the pull of every valid body staged in shPosition onto *accel and
// returns the updated value. On the final tile (tileId == gridDim.x-1) only
// restNum entries of shPosition are valid when N is not a multiple of the
// block width, so the loop is shortened accordingly.
// NOTE(review): assumes the launch uses gridDim.x == number of tiles, as
// updateVel2 does — confirm before reusing elsewhere.
__device__ float3
tile_calculation(float4* myPosition, float4* shPosition, float3* accel, unsigned int tileId, unsigned int restNum)
{
	// Number of valid bodies staged in this tile.
	const bool partialTile = (restNum != 0) && (tileId == gridDim.x - 1);
	const int bodies = partialTile ? (int)restNum : blockDim.x;

	for(int j = 0; j < bodies; j++)
		*accel += calcGravityAccel(myPosition, &shPosition[j]);

	return *accel;
}

//__global__ do the calculation
//__global__ accumulate the gravitational acceleration of all N bodies into globalA
// Launch layout: 1D grid of 1D blocks with blockDim.x == BLOCK_WIDTH and
// gridDim.x == ceil(N / BLOCK_WIDTH); restNum == N % BLOCK_WIDTH.
// Uses BLOCK_WIDTH * sizeof(float4) bytes of static shared memory.
__global__ void 
updateVel2(float4* globalX, float4* globalA, int N, unsigned int restNum)
{
    __shared__ float4 shPosition[BLOCK_WIDTH];
    
    int i = 0;
    int tile = 0;
    int gtid = blockIdx.x*blockDim.x + threadIdx.x;
    float3 acc = make_float3(0.0f);
    // BUGFIX: guard the global read — tail threads (gtid >= N) previously
    // read out of bounds. They still run the loop so they can help stage
    // tiles and reach the barriers; their dummy position is never stored.
    float4 myPosition = (gtid < N) ? globalX[gtid]
                                   : make_float4(0.0f, 0.0f, 0.0f, 0.0f);

	for(i = 0, tile = 0; i < N; i += blockDim.x, tile++){
        
        // BUGFIX: the staging load must be guarded only by (idx < N), not by
        // (gtid < N). In the last block, threads with gtid >= N still own
        // valid idx values for earlier tiles; skipping their loads left
        // shPosition entries uninitialized and produced garbage accelerations.
        int idx = tile*blockDim.x + threadIdx.x;
        if(idx < N){
            shPosition[threadIdx.x] = globalX[idx];
        }
        
        // Barriers stay outside divergent branches so every thread reaches them.
        __syncthreads();
         
         if(gtid < N){
             acc = tile_calculation(&myPosition, shPosition, &acc, tile, restNum);
         }
        
        __syncthreads();
    }
    //save the result in global memory for the integration step
    if(gtid < N){
        globalA[gtid] += make_float4(acc);
    }
}

// Run <iterations> integration steps on device arrays d_pos/d_vel holding
// <size> bodies with timestep dt. Blocks until all launched work is done.
// Both pointers must be device memory of at least size * sizeof(float4).
void integrate2(float4* d_pos, float4* d_vel, int size, int iterations, float dt)
{
    //cpu
    unsigned int restNum = 0;    
    
    //calculate grid width and see if there needs an extra block
	unsigned int GRID_WIDTH = size/BLOCK_WIDTH;
	restNum = size%BLOCK_WIDTH;
	if(restNum!=0)
		GRID_WIDTH += 1;
    
    //determine the grid and block size
    dim3 dimb(BLOCK_WIDTH, 1);
    dim3 dimg(GRID_WIDTH, 1);
    
    //call the kernels to do the calculation
    for (int i = 0; i != iterations; ++i)
    {
        // Kernels launched on the same (default) stream execute in order, so
        // no host-side synchronization is needed between the two launches or
        // between iterations.
        updateVel2<<<dimg, dimb>>>(d_pos, d_vel, size, restNum);
        updatePos<<<dimg, dimb>>>(d_pos, d_vel, size, dt);
    }
    // cudaThreadSynchronize() is deprecated; one device-wide sync after the
    // loop is enough to make the results visible to the caller.
    cudaDeviceSynchronize();
}

// Copy the body list to the GPU, integrate for <iterations> steps with
// timestep dt, then copy the resulting positions and velocities back.
void integrate2(bodyList* bl, int iterations, float dt)
{   
	const size_t bytes = bl->size * sizeof(float4);

	// allocate device buffers and upload the initial state
	float4* d_pos = (float4*) safe_cudaMalloc(bytes);
	float4* d_vel = (float4*) safe_cudaMalloc(bytes);
	cudaMemcpy(d_pos, bl->pos, bytes, cudaMemcpyHostToDevice);
	cudaMemcpy(d_vel, bl->vel, bytes, cudaMemcpyHostToDevice);

	// run the simulation entirely on the device
	integrate2(d_pos, d_vel, bl->size, iterations, dt);

	// download the final state
	cudaMemcpy(bl->pos, d_pos, bytes, cudaMemcpyDeviceToHost);
	cudaMemcpy(bl->vel, d_vel, bytes, cudaMemcpyDeviceToHost);

	// release device buffers
	cudaFree(d_pos);
	cudaFree(d_vel);
}
