#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <cmath>
#include <cuda.h>
#include "es-bvs-bl-common.h"

#define NUM_THREADS 256
#define MAX_BIN 20

//#define DEBUG

extern double size;

//
//  benchmarking program
//

// Point each of the numBins bin slots at its fixed-capacity (MAX_BIN)
// slice of the flat pointer pool binArray. Launch with at least numBins
// total threads; run once at startup.
__global__ void setBinPointers(particle_t** binArray, particle_t*** bins, int numBins)
{
  const int bin = blockIdx.x * blockDim.x + threadIdx.x;
  if(bin < numBins)
    bins[bin] = &binArray[bin * MAX_BIN];
}
  
// Accumulate into particle.ax/ay the short-range repulsive force that
// `neighbor` exerts on `particle`. Pairs farther apart than the cutoff
// contribute nothing; `neighbor` is not modified.
__device__ void apply_force_gpu(particle_t &particle, particle_t &neighbor)
{
  const double dx = neighbor.x - particle.x;
  const double dy = neighbor.y - particle.y;
  double r2 = dx * dx + dy * dy;

  // Outside the interaction radius: no contribution.
  if( r2 > cutoff*cutoff )
      return;

  // Clamp the squared distance so nearly-overlapping particles do not
  // produce an unbounded force.
  if( r2 < min_r*min_r )
      r2 = min_r*min_r;
  const double r = sqrt( r2 );

  //
  //  very simple short-range repulsive force
  //
  const double coef = ( 1 - cutoff / r ) / r2 / mass;
  particle.ax += coef * dx;
  particle.ay += coef * dy;
}

// Set every element of the device array `array` (of `length` ints) to 0.
// One thread per element; surplus threads fall through.
__global__ void zero_gpu(int *array, int length)
{
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if(i < length)
    array[i] = 0;
}

// Bin particle `tid` by position: atomically reserve a slot in the cell
// containing it and store a pointer to the particle there.
//
// Preconditions:
//   - num_particles_in_bin must be zeroed before this is called.
//   - Launch with at least n total threads (one thread per particle).
__global__ void compute_bins_gpu(particle_t *particles, particle_t*** bins, 
				 int *num_particles_in_bin, double cell_size, int bins_dim, int n)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if(tid >= n) return;

  int bin_x = (int)(particles[tid].x/cell_size);
  int bin_y = (int)(particles[tid].y/cell_size);
  // BUG FIX: clamp so a particle sitting exactly on the domain edge (or
  // marginally outside after a rounding error) cannot index a cell outside
  // the bins_dim x bins_dim grid.
  if(bin_x < 0) bin_x = 0; else if(bin_x >= bins_dim) bin_x = bins_dim - 1;
  if(bin_y < 0) bin_y = 0; else if(bin_y >= bins_dim) bin_y = bins_dim - 1;
  int bin = bin_y*bins_dim+bin_x;

  int index = atomicAdd(num_particles_in_bin+bin, 1);
  // BUG FIX: the original wrote unconditionally, so a cell holding more
  // than MAX_BIN particles corrupted the next bin's pointer slice. Drop
  // overflowing particles and roll the counter back so readers never see
  // a count above the bin's capacity (kernel-exit sync makes the final
  // value exactly min(actual, MAX_BIN)).
  if(index < MAX_BIN)
    bins[bin][index] = particles+tid;
  else
    atomicSub(num_particles_in_bin+bin, 1);
}

// Host-side reference: recount how many particles land in each cell of the
// bins_dim x bins_dim grid (used to cross-check the GPU binning).
// Zeros num_particles_in_bin itself before counting.
void compute_bins_cpu(particle_t *particles,  int *num_particles_in_bin, double cell_size, int bins_dim, int n)
{
  const int num_cells = bins_dim * bins_dim;
  for(int c = 0; c < num_cells; c++)
    num_particles_in_bin[c] = 0;

  for(int i = 0; i < n; i++)
    {
      const int cx = (int)(particles[i].x / cell_size);
      const int cy = (int)(particles[i].y / cell_size);
      num_particles_in_bin[cy * bins_dim + cx]++;
    }
}
// Compute forces: one thread per bin; each particle in the bin interacts
// with every particle in the 3x3 neighborhood of bins around it.
// interact_neighbors holds the 9 linear index offsets of that stencil.
// Launch with at least bins_dim*bins_dim total threads.
__global__ void compute_forces_gpu(particle_t*** bins, int* num_particles_in_bin, int bins_dim, int *interact_neighbors, int n)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  int numBins = bins_dim*bins_dim;
  if(tid >= numBins) return;

  int bin_x = tid % bins_dim;  // column of this bin, for the edge check below

  // Defensive clamp: the pointer slices hold at most MAX_BIN entries.
  int count = num_particles_in_bin[tid];
  if( count > MAX_BIN ) count = MAX_BIN;

  for( int i =0; i<count; i++ ) 
  {
    particle_t& main_part = *(bins[tid][i]);
    main_part.ax = 0;
    main_part.ay = 0;
    for( int bin_to = 0; bin_to < 9; bin_to++ )
    {
      int inter_bin = tid + interact_neighbors[bin_to];
      if ( inter_bin < 0 || inter_bin >= numBins )
	continue;
      // BUG FIX: a purely linear range check lets the stencil wrap around
      // row edges (e.g. column 0's "left neighbor" became the rightmost
      // cell of the previous row). Reject neighbors whose column is not
      // adjacent to this bin's column.
      int col_delta = (inter_bin % bins_dim) - bin_x;
      if ( col_delta < -1 || col_delta > 1 )
	continue;
      int inter_count = num_particles_in_bin[inter_bin];
      if( inter_count > MAX_BIN ) inter_count = MAX_BIN;
      for( int j =0; j<inter_count; j++ )
      {
        particle_t& inter_part = *(bins[inter_bin][j]);
	apply_force_gpu( main_part, inter_part );
      }
    }
  }
}

// Advance particle `tid` by one time step using a slightly simplified
// Velocity Verlet integration (conserves energy better than explicit
// Euler), then reflect it off the walls of the [0, size]^2 domain.
// Launch with at least n total threads.
__global__ void move_gpu (particle_t * particles, int n, double size)
{
  const int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if(tid >= n) return;

  particle_t &p = particles[tid];

  // Integrate velocity, then position.
  p.vx += p.ax * dt;
  p.vy += p.ay * dt;
  p.x  += p.vx * dt;
  p.y  += p.vy * dt;

  // Bounce from walls; the loop handles overshoot past either wall by
  // more than one domain width.
  while( p.x < 0 || p.x > size )
  {
    p.x  = (p.x < 0) ? -p.x : 2*size - p.x;
    p.vx = -p.vx;
  }
  while( p.y < 0 || p.y > size )
  {
    p.y  = (p.y < 0) ? -p.y : 2*size - p.y;
    p.vy = -p.vy;
  }
}


// Benchmark driver: parse options, build the GPU binning structures, run
// NSTEPS simulation steps, and report copy/simulation timings.
int main( int argc, char **argv )
{    
    // Initializing the CUDA runtime takes a few seconds; trigger it up
    // front so it is not charged to the timed sections below.
    // (cudaThreadSynchronize is deprecated; use cudaDeviceSynchronize.)
    cudaDeviceSynchronize(); 

    if( find_option( argc, argv, "-h" ) >= 0 )
    {
        printf( "Options:\n" );
        printf( "-h to see this help\n" );
        printf( "-n <int> to set the number of particles\n" );
        printf( "-o <filename> to specify the output file name\n" );
	printf( "-i <filename> to specify the intial condition output file name\n" );
        return 0;
    }
    
    int n = read_int( argc, argv, "-n", 1000 );

    char *savename = read_string( argc, argv, "-o", NULL );
    
    FILE *fsave = savename ? fopen( savename, "w" ) : NULL;
    particle_t *particles = (particle_t*) malloc( n * sizeof(particle_t) );

    // GPU particle data structure
    particle_t * d_particles;
    cudaMalloc((void **) &d_particles, n * sizeof(particle_t));

    set_size( n );
    const double cell_size = MIN_CELL_SIZE;
    const int bins_dim = (int)((size+cell_size)/cell_size);

    // GPU bin data structures: a flat pool of MAX_BIN particle pointers per
    // bin (binArray), a per-bin pointer into that pool (bins), and a per-bin
    // occupancy counter (num_particles_in_bin).
    particle_t** binArray;
    particle_t*** bins;
    int *num_particles_in_bin;
    int blks_particles = (n + NUM_THREADS - 1) / NUM_THREADS;
    int blks_bins = (bins_dim*bins_dim + NUM_THREADS - 1) / NUM_THREADS;
    int numBins = bins_dim*bins_dim;

    printf("blks_particles, blks_bins, numBins, %d %d %d \n",blks_particles, blks_bins, numBins);

    cudaMalloc((void ***) &binArray, MAX_BIN*numBins* sizeof(particle_t*));
    cudaMalloc((void ****) &bins, numBins* sizeof(particle_t**));
    cudaMalloc((void **) &num_particles_in_bin, numBins * sizeof(int));

    // Linear index offsets of the 3x3 neighborhood around a bin.
    int cpu_interact_neighbors[9] = {(-bins_dim-1), (-bins_dim), (-bins_dim+1), -1, 0, 1, (bins_dim-1), (bins_dim), (bins_dim+1)};
    int *interact_neighbors;
    cudaMalloc((void **) &interact_neighbors, 9*sizeof(int));
    cudaMemcpy(interact_neighbors, cpu_interact_neighbors, 9 * sizeof(int), cudaMemcpyHostToDevice);

    init_particles( n, particles );

    // Optionally dump the initial condition.
    char *initname = read_string( argc, argv, "-i", NULL );   
    FILE *isave = initname ? fopen( initname, "w" ) : NULL;
    if( isave ) {
      save( isave, n, particles);
      fclose( isave );  // BUG FIX: was leaked (never closed)
    }

    cudaDeviceSynchronize();
    double copy_time = read_timer( );

    // Copy the particles to the GPU
    cudaMemcpy(d_particles, particles, n * sizeof(particle_t), cudaMemcpyHostToDevice);

    cudaDeviceSynchronize();
    copy_time = read_timer( ) - copy_time;

    // Point each bin at its slice of the pointer pool (needed only once).
    setBinPointers <<< blks_bins, NUM_THREADS >>>(binArray, bins, numBins);

    //
    //  simulate a number of time steps
    //
    cudaDeviceSynchronize();
    double simulation_time = read_timer( );
    for( int step = 1; step < NSTEPS+1; step++ )
    {
	// Kernels below run in the default stream and therefore serialize
	// with each other; no per-kernel host synchronization is needed.

	// Zero out the per-bin occupancy counters.
	zero_gpu <<< blks_bins, NUM_THREADS >>> (num_particles_in_bin, numBins);
	cudaError_t err = cudaGetLastError();
	if ( err != cudaSuccess )
	  printf( "Error: %s\n", cudaGetErrorString(err) );

	// Rebuild the bins from the current particle positions.
	// BUG FIX: this kernel is one-thread-per-particle, so it must be
	// launched with enough blocks to cover n particles; the original
	// used blks_bins, which only covered n by accident when
	// numBins >= n.
	compute_bins_gpu <<< blks_particles, NUM_THREADS >>> (d_particles, bins, num_particles_in_bin, cell_size, bins_dim, n);
	err = cudaGetLastError();
	if ( err != cudaSuccess )
	  printf( "Error: %s\n", cudaGetErrorString(err) );

        //
        //  compute forces (one thread per bin, 3x3 neighborhood)
        //
	compute_forces_gpu <<< blks_bins, NUM_THREADS >>> (bins, num_particles_in_bin, bins_dim, interact_neighbors, n);
	err = cudaGetLastError();
	if ( err != cudaSuccess )
	  printf( "Error: %s\n", cudaGetErrorString(err) );

        //
        //  move particles
        //
	move_gpu <<< blks_particles, NUM_THREADS >>> (d_particles, n, size);
	err = cudaGetLastError();
	if ( err != cudaSuccess )
	  printf( "Error: %s\n", cudaGetErrorString(err) );

        //
        //  save if necessary (the blocking memcpy also synchronizes)
        //
        if( fsave && (step%SAVEFREQ) == 0 ) {
	  printf("saving %d\n", step);
	  // Copy the particles back to the CPU
	  cudaMemcpy(particles, d_particles, n * sizeof(particle_t), cudaMemcpyDeviceToHost);
	  save( fsave, n, particles);
	}
    }
    cudaDeviceSynchronize();
    simulation_time = read_timer( ) - simulation_time;
    
    printf( "CPU-GPU copy time = %g seconds\n", copy_time);
    printf( "n = %d, simulation time = %g seconds\n", n, simulation_time );
    
    free( particles );
    cudaFree(d_particles);
    // BUG FIX: the bin structures and neighbor table were leaked.
    cudaFree(binArray);
    cudaFree(bins);
    cudaFree(num_particles_in_bin);
    cudaFree(interact_neighbors);
    if( fsave )
        fclose( fsave );
    
    return 0;
}
