#include <cstdio>

#include "energy.h"
#include "common.h"
#include "common_cuda.h"

#define BLOCK_SIZE 128

/*
 * Per-body gravitational potential: each thread x sums the pair potential
 * between body x and every body i into w[x].
 *
 * Expects a 1-D launch with at least n threads total. The i == x self term
 * is included in the loop — presumably calcGravityPot handles coincident
 * positions (softening or a zero result); verify in common_cuda.h.
 *
 * pos: device array of n positions (the .w component presumably holds mass)
 * vel: unused here; kept so calcPot and calcKin share one signature
 * w:   device output, one potential value per body
 * n:   number of bodies
 */
__global__ void calcPot(float4* pos, float4* vel, float* w, int n)
{
	// Use blockDim.x rather than the BLOCK_SIZE macro so the index stays
	// correct even if the host launches with a different block dimension.
	int x = blockIdx.x*blockDim.x + threadIdx.x;

	if (x < n)
	{
		float4 pos_a = pos[x];	// cache own position in a register
		float pot = 0.0f;

		for (int i = 0; i != n; ++i)
		{
			float4 pos_b = pos[i];
			pot += calcGravityPot(&pos_a, &pos_b);
		}

		w[x] = pot;
	}
}

/*
 * Per-body kinetic energy: k[x] = 0.5 * m * |v|^2, with the mass read from
 * pos[x].w.
 *
 * Expects a 1-D launch with at least n threads total.
 *
 * pos: device array of n positions/masses (mass in .w)
 * vel: device array of n velocities
 * k:   device output, one kinetic-energy value per body
 * n:   number of bodies
 */
__global__ void calcKin(float4* pos, float4* vel, float* k, int n)
{
	// Use blockDim.x rather than the BLOCK_SIZE macro so the index stays
	// correct even if the host launches with a different block dimension.
	int x = blockIdx.x*blockDim.x + threadIdx.x;

	if (x < n)
	{
		float v_len2 = dot(vel[x], vel[x]);	// |v|^2 via project dot() helper
		k[x] = 0.5f*pos[x].w*v_len2;
	}
}

/*
void energy(float4* d_pos, float4* d_vel, int size, int iterations, float dt)
{
	dim3 dimb(BLOCK_SIZE, 1);
	dim3 dimg((size-1)/dimb.x + 1, 1);

	for (int i = 0; i != iterations; ++i)
	{
		updateVel<<<dimg, dimb>>>(d_pos, d_vel, size, dt);
		cudaThreadSynchronize();
		updatePos<<<dimg, dimb>>>(d_pos, d_vel, size, dt);
		cudaThreadSynchronize();
	}
}
*/

/*
 * Compute the system's total kinetic (K) and potential (W) energy on the GPU.
 *
 * Copies positions/velocities to the device, runs calcPot/calcKin, reduces
 * each per-body array with parallelSum, and copies the two scalar results
 * back into *K and *W.
 *
 * bl: host-side body list (size, pos, vel)
 * K:  out — total kinetic energy
 * W:  out — total potential energy
 */
void energy(bodyList* bl, float* K, float* W)
{
	// Guard the empty system: dimb.x is unsigned, so bl->size - 1 below
	// would wrap to a huge value when size == 0 and request an absurd grid.
	if (bl->size <= 0)
	{
		*K = 0.0f;
		*W = 0.0f;
		return;
	}

	dim3 dimb(BLOCK_SIZE, 1);
	dim3 dimg((bl->size-1)/dimb.x + 1, 1);	// ceil(size / BLOCK_SIZE)

	float4* d_pos;
	float4* d_vel;
	float* d_k;
	float* d_w;

	// allocate memory on gpu and copy data
	d_pos = (float4*) safe_cudaMalloc(bl->size*sizeof(float4));
	d_vel = (float4*) safe_cudaMalloc(bl->size*sizeof(float4));
	d_k = (float*) safe_cudaMalloc(bl->size*sizeof(float));
	d_w = (float*) safe_cudaMalloc(bl->size*sizeof(float));

	cudaError_t err;
	err = cudaMemcpy(d_pos, bl->pos, bl->size*sizeof(float4), cudaMemcpyHostToDevice);
	if (err != cudaSuccess)
		fprintf(stderr, "energy: pos H2D copy failed: %s\n", cudaGetErrorString(err));
	err = cudaMemcpy(d_vel, bl->vel, bl->size*sizeof(float4), cudaMemcpyHostToDevice);
	if (err != cudaSuccess)
		fprintf(stderr, "energy: vel H2D copy failed: %s\n", cudaGetErrorString(err));

	// Kernels and reductions all run on the default stream, so they are
	// implicitly ordered; no explicit synchronization is needed between them.
	calcPot<<<dimg, dimb>>>(d_pos, d_vel, d_w, bl->size);
	err = cudaGetLastError();	// launch-configuration errors surface here
	if (err != cudaSuccess)
		fprintf(stderr, "energy: calcPot launch failed: %s\n", cudaGetErrorString(err));
	parallelSum(d_w, bl->size);	// in-place reduction; total read from d_w[0] below
	calcKin<<<dimg, dimb>>>(d_pos, d_vel, d_k, bl->size);
	err = cudaGetLastError();
	if (err != cudaSuccess)
		fprintf(stderr, "energy: calcKin launch failed: %s\n", cudaGetErrorString(err));
	parallelSum(d_k, bl->size);

	// copy results back (blocking cudaMemcpy also synchronizes with the
	// preceding default-stream work)
	cudaMemcpy(W, d_w, sizeof(float), cudaMemcpyDeviceToHost);
	cudaMemcpy(K, d_k, sizeof(float), cudaMemcpyDeviceToHost);

	// free memory
	cudaFree(d_pos);
	cudaFree(d_vel);
	cudaFree(d_w);
	cudaFree(d_k);
}
