#include "integrate3.h"
#include "common.h"
#include "common_cuda.h"

#define BLOCK_SIZE 2

// Computes, for every body x, the gravity contribution of body x on body
// `body`, scaled by dt, into acc[x]. acc[body] is zeroed (a body exerts no
// force on itself) so a subsequent sum-reduction of acc over [0, n) yields
// body's total dt-scaled acceleration.
// Expected launch: 1D grid of 1D blocks covering at least n threads.
// NOTE(review): acc is pre-multiplied by dt here AND multiplied by dt again
// in updateVel3 (vel += dt*acc), giving an effective dt^2 factor — verify
// against the semantics of calcGravityAccel whether this is intended.
__global__ void updateAcc3(float4* pos, float4* acc, int body, int n, float dt)
{
	// Use blockDim.x rather than the BLOCK_SIZE macro so the kernel stays
	// correct even if the host changes the launch configuration.
	int x = blockIdx.x*blockDim.x + threadIdx.x;

	if (x < n)
	{
		if (x != body)
		{
			float4 pos_a = pos[body];
			float4 pos_b = pos[x];
			acc[x] = dt*make_float4(calcGravityAccel(&pos_a, &pos_b));
		}
		else
		{
			// Zero contribution keeps the later reduction valid.
			acc[x] = make_float4(0.0f);
		}
	}
}

/*
template <class T>
__global__ void foldSum(T* f, int n, int pivot)
{
	int x = blockIdx.x*BLOCK_SIZE + threadIdx.x;

	if (x < n && x >= pivot)
		f[x - pivot] += f[x];
}

template <class T>
void sum(T* a, int size)
{
	dim3 dimb(BLOCK_SIZE, 1);
	dim3 dimg((size-1)/dimb.x + 1, 1);

	int pivot = msb32(size-1);
	int curr_size = size;

	int i;
	for (i = pivot; i > 0; i >>= 1)
	{
		foldSum<<<dimg, dimb>>>(a, curr_size, i);
		curr_size = i;
	}
}
*/

// Explicit-Euler velocity update: vel[x] += dt * acc[x] for x in [0, n).
// Expected launch: 1D grid of 1D blocks covering at least n threads.
__global__ void updateVel3(float4* vel, float4* acc, int n, float dt)
{
	// blockDim.x instead of the BLOCK_SIZE macro: keeps the index correct
	// for any block size chosen at launch time.
	int x = blockIdx.x*blockDim.x + threadIdx.x;

	if (x < n)
		vel[x] += dt*acc[x];	// 3 flops
}

// Explicit-Euler position update: pos[x] += dt * vel[x] for x in [0, n).
// Expected launch: 1D grid of 1D blocks covering at least n threads.
__global__ void updatePos3(float4* pos, float4* vel, int n, float dt)
{
	// blockDim.x instead of the BLOCK_SIZE macro: keeps the index correct
	// for any block size chosen at launch time.
	int x = blockIdx.x*blockDim.x + threadIdx.x;

	if (x < n)
		pos[x] += dt*vel[x];	// 3 flops
}

// Advances the n-body system stored in device buffers d_pos/d_vel by
// `iterations` explicit-Euler steps of size dt.
//
// Per step: for each body i, a kernel writes every other body's dt-scaled
// gravity contribution into acc, parallelSum reduces acc in place (total in
// acc[0]), and the total is copied into total_acc[i]; then velocities and
// positions are updated from total_acc.
//
// Fixes vs. previous revision:
//  - `iterations` was accepted but ignored (exactly one step ran); it now
//    drives the outer loop.
//  - cudaThreadSynchronize() is deprecated (removed in CUDA 12); replaced
//    with cudaDeviceSynchronize().
//  - dropped the redundant sync after cudaMemcpy: a default-stream
//    cudaMemcpy is blocking with respect to the host.
void integrate3(float4* d_pos, float4* d_vel, int size, int iterations, float dt)
{
	dim3 dimb(BLOCK_SIZE, 1);
	dim3 dimg((size-1)/dimb.x + 1, 1);	// ceil-div: cover all `size` bodies

	// Scratch buffers allocated once and reused across all iterations.
	float4* acc = (float4*) safe_cudaMalloc(size*sizeof(float4));
	float4* total_acc = (float4*) safe_cudaMalloc(size*sizeof(float4));

	for (int it = 0; it < iterations; ++it)
	{
		// Accumulate the total (dt-scaled) acceleration acting on each body.
		for (int i = 0; i != size; ++i)
		{
			updateAcc3<<<dimg, dimb>>>(d_pos, acc, i, size, dt);
			cudaDeviceSynchronize();
			parallelSum(acc, size);		// in-place reduction; result in acc[0]
			cudaDeviceSynchronize();
			cudaMemcpy(total_acc + i, acc, sizeof(float4), cudaMemcpyDeviceToDevice);
		}

		// Update all velocities before any position moves, so every body's
		// acceleration was computed against the same position snapshot.
		updateVel3<<<dimg, dimb>>>(d_vel, total_acc, size, dt);
		cudaDeviceSynchronize();
		updatePos3<<<dimg, dimb>>>(d_pos, d_vel, size, dt);
		cudaDeviceSynchronize();
	}

	cudaFree(acc);
	cudaFree(total_acc);
}

// Host entry point: integrates the bodies in `bl` on the GPU for the given
// number of iterations with time step dt, writing the final positions and
// velocities back into `bl`.
void integrate3(bodyList* bl, int iterations, float dt)
{
	const size_t bytes = bl->size*sizeof(float4);

	// Stage positions and velocities in device memory.
	float4* d_pos = (float4*) safe_cudaMalloc(bytes);
	float4* d_vel = (float4*) safe_cudaMalloc(bytes);
	cudaMemcpy(d_pos, bl->pos, bytes, cudaMemcpyHostToDevice);
	cudaMemcpy(d_vel, bl->vel, bytes, cudaMemcpyHostToDevice);

	// Run the integration entirely on the device buffers.
	integrate3(d_pos, d_vel, bl->size, iterations, dt);

	// Retrieve the updated state.
	cudaMemcpy(bl->pos, d_pos, bytes, cudaMemcpyDeviceToHost);
	cudaMemcpy(bl->vel, d_vel, bytes, cudaMemcpyDeviceToHost);

	// Release device memory.
	cudaFree(d_pos);
	cudaFree(d_vel);
}
