#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <cuda.h>
#include "es-bvs-bl-common.h"

#define NUM_THREADS 	256
#define NUM_BLOCKS 	30
#define WARP_SIZE	32

//#define DEBUG

extern double size;

//
//  benchmarking program
//

// Accumulate into `particle` the short-range repulsive force exerted by
// `neighbor`.  Adds to particle.ax/ay; neighbor is not modified.
// cutoff, min_r, mass come from es-bvs-bl-common.h.
__device__ void apply_force_gpu(particle_t &particle, particle_t &neighbor)
{
  // Displacement from this particle to its neighbor.
  double sep_x = neighbor.x - particle.x;
  double sep_y = neighbor.y - particle.y;
  double dist_sq = sep_x * sep_x + sep_y * sep_y;

  // Outside the interaction radius: no contribution.
  if( dist_sq > cutoff * cutoff )
    return;

  // Clamp to min_r to avoid the singularity at zero separation.
  if( dist_sq < min_r * min_r )
    dist_sq = min_r * min_r;
  double dist = sqrt( dist_sq );

  //
  //  very simple short-range repulsive force
  //
  double scale = ( 1 - cutoff / dist ) / dist_sq / mass;
  particle.ax += scale * sep_x;
  particle.ay += scale * sep_y;
}

// Zero out a device integer array of `length` elements, one thread per
// element.  (Not referenced in this file; the main loop uses cudaMemset.)
__global__ void zero_gpu(int *array, int length)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if(idx < length)
    array[idx] = 0;
}

// num_particles_in_bin must be zeroed before this is called
__global__ void compute_bins_gpu(particle_t *particles, int *particle_offsets_in_bins, int *num_particles_in_bin, double cell_size, int bins_dim, int n)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if(tid >= n) return;
  
  int bin_x = (int)(particles[tid].x/cell_size);
  int bin_y = (int)(particles[tid].y/cell_size);
  int bin = bin_y*bins_dim+bin_x;
  particles[tid].bin = bin;
  particle_offsets_in_bins[tid] = atomicAdd(num_particles_in_bin+bin, 1);
}

// One thread per particle: zero its acceleration, then accumulate forces
// from every particle in the 3x3 bin neighborhood around its own bin.
// Preconditions: particles are sorted by bin; bin_starting_points holds the
// prefix-summed start index of each bin (bins_dim*bins_dim+1 entries, so
// entry [b+1] is where bin b ends); interact_neighbors holds the nine
// relative bin offsets of the 3x3 neighborhood, row by row:
// {-dim-1, -dim, -dim+1, -1, 0, 1, dim-1, dim, dim+1}.
__global__ void compute_forces_gpu(particle_t *particles, int *bin_starting_points, int bins_dim, int *interact_neighbors, int n)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if(tid >= n) return;

  
  particle_t *main_part = particles + tid;
  main_part->ax = 0;
  main_part->ay = 0;
  // Fast path: all nine neighbor bin indices are in range, so each neighbor
  // row's three bins are contiguous in the sorted particle order and can be
  // swept with one loop per row (ending at bin_starting_points[inter_bin+3]).
  if( main_part->bin + interact_neighbors[0] >= 0 && main_part->bin + interact_neighbors[8] < bins_dim*bins_dim )
  {
    for( int bin_to = 0; bin_to < 9; bin_to+=3 )
    {
      // interact_neighbors[0/3/6] are the left-most bins of the three rows.
      int inter_bin = main_part->bin + interact_neighbors[bin_to];
      // NOTE(review): for particles in the left/right-most bin column this
      // range wraps into an adjacent row; the extra candidates appear to be
      // rejected by the cutoff test in apply_force_gpu — confirm the domain
      // is always wider than cutoff.
      for( int j = bin_starting_points[inter_bin]; j < bin_starting_points[inter_bin+3]; j++ )
        if( j != tid )   // skip self-interaction
          apply_force_gpu( *main_part, *(particles+j) );
    }
  } else
  {
    // Slow path (bins near the top/bottom boundary): visit the nine neighbor
    // bins one at a time, skipping those that fall outside the bin array.
    for( int bin_to = 0; bin_to < 9; bin_to++ )
    {
      int inter_bin = main_part->bin + interact_neighbors[bin_to];
      if ( inter_bin < 0 || inter_bin >= bins_dim*bins_dim )
         continue;
      for( int j = bin_starting_points[inter_bin]; j < bin_starting_points[inter_bin+1]; j++ )
        if( j != tid )   // skip self-interaction
          apply_force_gpu( *main_part, *(particles+j) );
    }
  }
}


// Scatter particles into bin-sorted order: particle i is written to the
// start offset of its bin (bin_positions[p.bin]) plus the slot it reserved
// within that bin (particle_offsets[i]).  old_particles is left untouched.
__global__ void sort_particles_gpu (particle_t *old_particles, particle_t *new_particles, int n, int *bin_positions, int *particle_offsets)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if(idx >= n)
    return;

  particle_t part = old_particles[idx];
  int dest = bin_positions[part.bin] + particle_offsets[idx];
  new_particles[dest] = part;
}

// Advance each particle one time step (dt from es-bvs-bl-common.h) and
// reflect it back into the [0, size] x [0, size] domain.
__global__ void move_gpu (particle_t * particles, int n, double size)
{
  // Get thread (particle) ID
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if(idx >= n) return;

  particle_t &p = particles[idx];

  //
  //  slightly simplified Velocity Verlet integration
  //  conserves energy better than explicit Euler method
  //
  p.vx += p.ax * dt;
  p.vy += p.ay * dt;
  p.x  += p.vx * dt;
  p.y  += p.vy * dt;

  //
  //  bounce from walls (loop in case a single reflection is not enough)
  //
  while( p.x < 0 || p.x > size )
  {
    p.x  = ( p.x < 0 ) ? -p.x : 2*size - p.x;
    p.vx = -p.vx;
  }
  while( p.y < 0 || p.y > size )
  {
    p.y  = ( p.y < 0 ) ? -p.y : 2*size - p.y;
    p.vy = -p.vy;
  }
}

// One step of a naive Hillis-Steele scan: new_array[i] = old_array[i] +
// old_array[i-min], or a plain copy where i < min.  Kept for the
// commented-out reference implementation in prefix_sum(); not used by the
// two-level block scan.
__global__ void prefix_sum_helper_gpu(int *old_array, int *new_array, int min, int size)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if(idx >= size)
    return;
  int sum = old_array[idx];
  if(idx >= min)
    sum += old_array[idx - min];
  new_array[idx] = sum;
}

// In-place inclusive prefix sum of each block's contiguous chunk of `array`.
// Each NUM_THREADS-sized tile is scanned in shared memory (Hillis-Steele),
// and the running chunk total is carried across tiles in offset_sum.  If
// rec_block_sum == 1, thread 0 records the chunk total (last scanned
// element) in block_sums[blockIdx.x] so a second pass can combine chunks.
// Must be launched with exactly NUM_THREADS threads per block.
__global__ void block_prefix_sum(int *array, int *block_sums, int size, int rec_block_sum){
  __shared__ int pref_buf[NUM_THREADS];
  __shared__ int offset_sum[1];
  int tid = threadIdx.x;
  int i, j;
  int val;
  int start = (size/gridDim.x)*blockIdx.x;
  int finish = (blockIdx.x == gridDim.x-1) ? size : (size/gridDim.x)*(blockIdx.x+1);
  // Round the chunk length up to a multiple of NUM_THREADS so every thread
  // runs the same number of tile iterations; this keeps the __syncthreads()
  // calls below uniform across the block.
  int pad_size = (finish-start) + (NUM_THREADS - ((finish-start)%NUM_THREADS));
  if (tid == NUM_THREADS-1)
    offset_sum[0] = 0;
  for (i = start+tid; i < start+pad_size; i+=blockDim.x){
    pref_buf[tid] = (i < finish) ? array[i] : 0;   // padding lanes contribute 0
    // Hillis-Steele inclusive scan of the tile; the double barrier separates
    // the read of pref_buf[tid-j] from the write of pref_buf[tid].
    for (j = 1; j < NUM_THREADS; j=j*2){
      __syncthreads();
      if (tid >= j) val = pref_buf[tid-j];
      __syncthreads();
      if (tid >= j) pref_buf[tid]+=val;    
    }
    if (i < finish) array[i] = pref_buf[tid]+offset_sum[0];    
    // BUG FIX: barrier so every thread finishes reading offset_sum[0] above
    // before the last thread overwrites it with the new running total.
    // Without it, warps that lag behind could pick up the already-updated
    // offset and produce a corrupted scan (cross-warp race on offset_sum).
    __syncthreads();
    if (tid == NUM_THREADS-1)
      offset_sum[0] = pref_buf[tid]+offset_sum[0];
  }
  __syncthreads();
  // Chunk total = last scanned element of this block's range.
  if (tid == 0 && rec_block_sum == 1)
    block_sums[blockIdx.x] = array[finish-1];
}

// Second pass of the two-level scan: add the running total of all preceding
// chunks (block_sums, already scanned in place) to every element of this
// block's chunk.  Block 0 has no preceding chunks and returns immediately.
__global__ void block_offset_prefix_sum(int *array, int *block_sums, int size){
  if (blockIdx.x == 0) return;
  int offset = block_sums[blockIdx.x - 1];
  int chunk = size / gridDim.x;
  int start = chunk * blockIdx.x;
  int finish = (blockIdx.x == gridDim.x - 1) ? size : chunk * (blockIdx.x + 1);
  for (int i = start + threadIdx.x; i < finish; i += blockDim.x)
    array[i] += offset;
}

// array_copy doesn't have to have the same data in it, it is just temp space of the same size
void prefix_sum( int *array, int *array_copy, int size )
{
  /*int blks = (size + NUM_THREADS - 1) / NUM_THREADS;
  for( int i = 0; i < log(size)/log(2.0); i+=2 ) {
	prefix_sum_helper_gpu <<< blks, NUM_THREADS >>> (array, array_copy, 1 << i, size);
	prefix_sum_helper_gpu <<< blks, NUM_THREADS >>> (array_copy, array, 2 << i, size);
  }*/
  int blks = (size + NUM_THREADS - 1) / NUM_THREADS;
  if (blks > NUM_BLOCKS) blks = NUM_BLOCKS;
  block_prefix_sum <<< blks, NUM_THREADS >>> (array, array_copy, size, 1);
  block_prefix_sum <<< 1, NUM_THREADS >>> (array_copy, array_copy, blks, 0);
  block_offset_prefix_sum <<< blks, NUM_THREADS >>> (array, array_copy, size);
  
}

int main( int argc, char **argv )
{
    // Touch the runtime first: context creation takes a few seconds and
    // should not be charged to the timed sections below.
    // (cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
    // drop-in replacement.)
    cudaDeviceSynchronize();

    if( find_option( argc, argv, "-h" ) >= 0 )
    {
        printf( "Options:\n" );
        printf( "-h to see this help\n" );
        printf( "-n <int> to set the number of particles\n" );
        printf( "-o <filename> to specify the output file name\n" );
        return 0;
    }

    int n = read_int( argc, argv, "-n", 1000 );

    char *savename = read_string( argc, argv, "-o", NULL );

    FILE *fsave = savename ? fopen( savename, "w" ) : NULL;
    particle_t *particles = (particle_t*) malloc( n * sizeof(particle_t) );

    // GPU particle data structure: double-buffered, since the bin sort
    // scatters from one buffer into the other before swapping pointers.
    particle_t * d_particles;
    cudaMalloc((void **) &d_particles, n * sizeof(particle_t));
    particle_t * d_particles_new;
    cudaMalloc((void **) &d_particles_new, n * sizeof(particle_t));

    set_size( n );
    const double cell_size = MIN_CELL_SIZE;
    const int bins_dim = (int)((size+cell_size)/cell_size);

    // GPU bin data structures
    int *particle_offsets_in_bins;    // each particle's reserved slot within its bin
    cudaMalloc((void **) &particle_offsets_in_bins, n * sizeof(int));
    int *num_particles_in_bin;        // per-bin counts; prefix-summed into bin start offsets
    cudaMalloc((void **) &num_particles_in_bin, (bins_dim * bins_dim +1) * sizeof(int));
    int *num_particles_in_bin_copy;   // scratch for prefix_sum (per-block partial sums)
    cudaMalloc((void **) &num_particles_in_bin_copy, NUM_BLOCKS * sizeof(int));

    // Relative bin indices of the 3x3 neighborhood, row by row.
    int cpu_interact_neighbors[9] = {(-bins_dim-1), (-bins_dim), (-bins_dim+1), -1, 0, 1, (bins_dim-1), (bins_dim), (bins_dim+1)};
    int *interact_neighbors;
    cudaMalloc((void **) &interact_neighbors, 9*sizeof(int));
    cudaMemcpy(interact_neighbors, cpu_interact_neighbors, 9 * sizeof(int), cudaMemcpyHostToDevice);

    init_particles( n, particles );

#ifdef DEBUG
    printf("Cell size %f, bins_dim %d\n", cell_size, bins_dim);
    printf("Initial particle positions\n");
    for( int i = 0; i < n; i++)
        printf("%1.4f %1.4f\n", particles[i].x, particles[i].y);
#endif

    cudaDeviceSynchronize();
    double copy_time = read_timer( );

    // Copy the particles to the GPU
    cudaMemcpy(d_particles, particles, n * sizeof(particle_t), cudaMemcpyHostToDevice);

    cudaDeviceSynchronize();
    copy_time = read_timer( ) - copy_time;

    //
    //  simulate a number of time steps
    //
    cudaDeviceSynchronize();
    double simulation_time = read_timer( );
    int blks = (n + NUM_THREADS - 1) / NUM_THREADS;

    for( int step = 0; step < NSTEPS; step++ )
    {
        // Zero out num_particles_in_bin
        cudaMemset(num_particles_in_bin, 0, (bins_dim*bins_dim+1)*sizeof(int));

        // Step 1 of binning particles: count per bin (shifted one entry so
        // the prefix sum below yields exclusive start offsets) and record
        // each particle's slot within its bin.
        compute_bins_gpu <<< blks, NUM_THREADS >>> (d_particles, particle_offsets_in_bins, num_particles_in_bin+1, cell_size, bins_dim, n);
        cudaDeviceSynchronize();

#ifdef DEBUG
        printf("compute_bins_gpu complete in time-step %d\n", step);
        int cpu_num[bins_dim*bins_dim+1];
        cudaMemcpy(cpu_num, num_particles_in_bin, (bins_dim*bins_dim+1)*sizeof(int), cudaMemcpyDeviceToHost);
        for( int i = 0; i < bins_dim*bins_dim+1; i++ )
          printf("%d ", cpu_num[i]);
        printf("\n");
        cudaMemcpy(particles, d_particles, n * sizeof(particle_t), cudaMemcpyDeviceToHost);
        for( int i = 0; i < n; i++ )
          printf("%d %2.4f %2.4f %2.4f %2.4f %2.4f %2.4f %d\n", i, particles[i].x, particles[i].y, particles[i].vx, particles[i].vy, particles[i].ax, particles[i].ay, particles[i].bin);
#endif

        // Compute the prefix sums of num_particles_in_bin
        prefix_sum (num_particles_in_bin, num_particles_in_bin_copy, bins_dim*bins_dim+1);
        cudaDeviceSynchronize();
        // Now num_particles_in_bin is rather the index in d_particles (if it
        // were sorted) of the starting point of each bin.  For convenience it
        // also has a last entry of n.
        // Now we need to sort d_particles.  The position of particle i should
        // be num_particles_in_bin[d_particles[i].bin]+particle_offsets_in_bins[i].
        sort_particles_gpu <<< blks, NUM_THREADS >>> (d_particles, d_particles_new, n, num_particles_in_bin, particle_offsets_in_bins);
        // No sync needed here: kernels on the default stream run in order.
        particle_t *temp = d_particles;
        d_particles = d_particles_new;
        d_particles_new = temp;

#ifdef DEBUG
        cudaDeviceSynchronize();
        printf("sorting complete in time-step %d\n", step);
        cudaMemcpy(cpu_num, num_particles_in_bin, (bins_dim*bins_dim+1)*sizeof(int), cudaMemcpyDeviceToHost);
        for( int i = 0; i < bins_dim*bins_dim+1; i++ )
          printf("%d ", cpu_num[i]);
        printf("\n");
        cudaMemcpy(particles, d_particles, n * sizeof(particle_t), cudaMemcpyDeviceToHost);
        for( int i = 0; i < n; i++ )
          printf("%d %2.4f %2.4f %2.4f %2.4f %2.4f %2.4f %d\n", i, particles[i].x, particles[i].y, particles[i].vx, particles[i].vy, particles[i].ax, particles[i].ay, particles[i].bin);
#endif

        //
        //  compute forces
        //
        compute_forces_gpu <<< blks, NUM_THREADS >>> (d_particles, num_particles_in_bin, bins_dim, interact_neighbors, n);

#ifdef DEBUG
        cudaDeviceSynchronize();
        printf("compute_forces_gpu complete in time-step %d\n", step);
        cudaMemcpy(particles, d_particles, n * sizeof(particle_t), cudaMemcpyDeviceToHost);
        for( int i = 0; i < n; i++ )
          printf("%d %2.4f %2.4f %2.4f %2.4f %2.4f %2.4f %d\n", i, particles[i].x, particles[i].y, particles[i].vx, particles[i].vy, particles[i].ax, particles[i].ay, particles[i].bin);
#endif
        //
        //  move particles
        //
        move_gpu <<< blks, NUM_THREADS >>> (d_particles, n, size);

#ifdef DEBUG
        printf("move_gpu complete in time-step %d\n", step);
        exit(0);
#endif
        //
        //  save if necessary
        //
        if( fsave && (step%SAVEFREQ) == 0 ) {
            // Copy the particles back to the CPU (blocking copy also
            // synchronizes with the kernels above)
            cudaMemcpy(particles, d_particles, n * sizeof(particle_t), cudaMemcpyDeviceToHost);
            qsort( particles, n, sizeof(particle_t), order_particles );
            save( fsave, n, particles);
        }
    }
    cudaDeviceSynchronize();
    simulation_time = read_timer( ) - simulation_time;

    printf( "CPU-GPU copy time = %g seconds\n", copy_time);
    printf( "n = %d, simulation time = %g seconds\n", n, simulation_time );

    free( particles );
    cudaFree(d_particles);
    cudaFree(d_particles_new);
    cudaFree(particle_offsets_in_bins);
    cudaFree(num_particles_in_bin);
    cudaFree(num_particles_in_bin_copy);
    cudaFree(interact_neighbors);   // was leaked before: allocated above but never freed
    if( fsave )
        fclose( fsave );

    return 0;
}
