#include <limits.h>
#include <math.h>
#include <mpi.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>
#include <cassert>
#include <fstream>
#include <iostream>
#include <numeric>
#include <string>
#include <vector>

#include "OptionParser.h"
#include "ResultDatabase.h"
#include "Timer.h"


using namespace std;

template <class Key, class Val>
void RunTest(string testName, ResultDatabase &resultDB, OptionParser &op);

// ****************************************************************************
// Function: addBenchmarkSpecOptions
//
// Purpose:
//   Add benchmark specific options parsing
//
// Arguments:
//   op: the options parser / parameter database
//
// Returns:  nothing
//
// Programmer: Kyle Spafford
// Creation: August 13, 2009
//
// Modifications:
//
// ****************************************************************************
void
addBenchmarkSpecOptions(OptionParser &op)
{
    // Optional override of the problem size, in megabytes of key data;
    // 0 means "use the preset selected by --size".
    op.addOption("mb", OPT_INT, "0", "data size (in MB)");

    // Oversampling factor: the fraction of its own keys each rank samples
    // when choosing splitters.  E.g. .0025 means size * 0.0025 = 0.25%
    // of the keys are sampled; 0 selects the built-in default rate.
    op.addOption("samples", OPT_FLOAT, "0.00", "number of samples");
}




// ****************************************************************************
// Function: RunBenchmark
//
// Purpose:
//   Driver for the generic benchmark. Templated for testing in different 
//   precision.
//
// Arguments:
//   resultDB: results from the benchmark are stored in this db
//   op: the options parser / parameter database
//
// Returns:  nothing
//
// Programmer: Kyle Spafford
// Creation: August 13, 2009
//
// Modifications:
//
// ****************************************************************************
void
RunBenchmark(ResultDatabase &resultDB, OptionParser &op)
{
    // The benchmark sorts 64-bit key/value pairs; run the single
    // supported instantiation.
    typedef long long sort_type;
    RunTest<sort_type, sort_type>("long", resultDB, op);
}

void
exclusive_scan (int n, int* in, int* result)
{
	// Serial exclusive prefix sum over n ints:
	//   result[0] = 0, result[i] = in[0] + ... + in[i-1].
	// Delegates to the C++17 standard-library implementation.
	std::exclusive_scan (in, in + n, result, 0);
}

/* ------------------------------------------------------------------------
 */

/* Scatter keys/vals into keys_t/vals_t grouped by bucket, where bucket j
 * holds the keys below splitters[j] (and at/above splitters[j-1]).
 * splitters must be ascending; the caller is expected to end it with a
 * maximal sentinel so every key falls below some splitter.
 *   size: number of key/val pairs; nb: number of buckets/splitters.
 * Returns 0. */
template <class Key, class Val>
int
bin_by_key (Key* keys, Val* vals, Key* keys_t, Val* vals_t, Key* splitters, int size, int nb)
{
	int num_threads = 1;

#pragma omp parallel
	{
#pragma omp single
		num_threads = omp_get_num_threads();
	}

	/* Per-thread histograms (counts) and cross-thread running sums
	 * (t_counts), laid out as num_threads stripes of nb+3 ints each. */
	int* counts   = (int*) malloc ((nb+3) * num_threads * sizeof (int));
	int* t_counts = (int*) malloc ((nb+3) * num_threads * sizeof (int));

#pragma omp parallel
	{
		int tid = omp_get_thread_num();
		int nthreads = omp_get_num_threads();
		int bi = tid * (nb + 3);
		int beg = tid * size/nthreads;
		int end = (tid == nthreads-1) ? size : (tid+1) * size/nthreads;

		/* Clear this thread's bucket counts */
		for (int i = 0; i < nb+3; i++) {
			counts[bi + i] = 0;
		}

		/* Compute histogram: bucket = index of first splitter above the
		 * key.  idx defaults to the last bucket so a key that is >= every
		 * splitter stays in range (previously idx was left uninitialized
		 * in that case). */
		for (int i = beg; i < end; i++) {
			int idx = nb - 1;
			for (int j = 0; j < nb; j++) {
				if (keys[i] < splitters[j]) {
					idx = j;
					break;
				}
			}
			counts[bi + idx]++;
		}
#pragma omp barrier

		/* Compute prefix-sums across threads for each bucket slot.  The
		 * slot range [0, nb+3) is split over threads; the last *thread*
		 * takes the remainder (the original tested "tid == nb-1", which
		 * left slots unprocessed whenever nb != num_threads). */
		int b = tid * ((nb+3)/nthreads);
		int e = (tid == nthreads-1) ? (nb+3) : (tid+1) * ((nb+3)/nthreads);
		for (int i = b; i < e; i++) {
			int last = t_counts[i] = counts[i];
			for (int j = 1; j < nthreads; j++) {
				last = t_counts[j*(nb+3) + i] = last + counts[j*(nb+3) + i];
			}
		}
#pragma omp barrier

		/* Turn counts[bi+i] into this thread's first write offset for
		 * bucket i: (exclusive prefix within the bucket across earlier
		 * threads) + (grand total of all earlier buckets). */
		int offset = 0;
		for (int i = 0; i < nb; i++) {
			counts[i+bi] = (t_counts[i+bi] - counts[i+bi]) + offset;
			offset += t_counts[(nthreads-1) * (nb+3) + i];
		}
#pragma omp barrier

		/* Write keys/vals into their bucket positions in the temp arrays */
		for (int i = beg; i < end; i++) {
			int idx = nb - 1;
			for (int j = 0; j < nb; j++) {
				if (keys[i] < splitters[j]) {
					idx = j;
					break;
				}
			}
			keys_t[counts[bi+idx]] = keys[i];
			vals_t[counts[bi+idx]] = vals[i];
			counts[bi+idx]++;
		}
	}

	/* Fix: these scratch arrays were previously leaked on every call */
	free (counts);
	free (t_counts);
	return 0;
}

/* ------------------------------------------------------------------------
 */
/* Parallel LSD radix sort of size key/val pairs in place, 16 bits per
 * pass.  Each pass ping-pongs between the caller's arrays and a temp
 * buffer; after the final pass the sorted data is guaranteed to be back
 * in keys/vals (a copy-back handles an odd pass count).  Returns 0. */
template <class Key, class Val>
int
sort_by_key (Key* keys, Val* vals, int size)
{
	const int num_bits = sizeof(Key) * 8; // number of bits per key
	const int num_radix_bits = 16;        // digit width per pass

	// Number of possible digit values (exact power of two; the original
	// pow(2.0, bits) round-trip through double was unnecessary)
	const int num_buckets = 1 << num_radix_bits;

	int num_threads = 1;

#pragma omp parallel
	{
#pragma omp single
		num_threads = omp_get_num_threads();
	}

	Key* keys_t = (Key*) malloc (size * sizeof (Key));
	Val* vals_t = (Val*) malloc (size * sizeof (Val));

	/* Per-thread histograms and cross-thread prefix sums, one stripe of
	 * num_buckets+3 ints per thread. */
	int* my_buckets = (int*) malloc ((num_buckets+3) * num_threads * sizeof (int));
	int* buckets    = (int*) malloc ((num_buckets+3) * num_threads * sizeof (int));

	/* src/dst ping-pong pointers replace the original duplicated
	 * if(swap)/else branches; the shared bool was also written by every
	 * thread inside the parallel region (a benign but real data race). */
	Key* src_k = keys;   Val* src_v = vals;
	Key* dst_k = keys_t; Val* dst_v = vals_t;

	for (int shift = 0; shift < num_bits; shift += num_radix_bits) {
		const long mask = static_cast<long>((1 << num_radix_bits) - 1);
#pragma omp parallel
		{
			int tid = omp_get_thread_num();
			int nthreads = omp_get_num_threads();
			int bi = tid * (num_buckets + 3);
			int beg = tid * size/nthreads;
			int end = (tid == nthreads-1) ? size : (tid+1) * size/nthreads;

			/* Clear this thread's bucket counts */
			for (int i = 0; i < num_buckets+3; i++) {
				my_buckets[bi + i] = 0;
			}

			/* Histogram of the current 16-bit digit */
			for (int i = beg; i < end; i++) {
				my_buckets[bi + ((src_k[i] >> shift) & mask)]++;
			}
#pragma omp barrier

			/* Prefix-sums across threads for each bucket slot */
			int b = tid * ((num_buckets+3)/nthreads);
			int e = (tid == nthreads-1) ? (num_buckets+3) : (tid+1) * ((num_buckets+3)/nthreads);
			for (int i = b; i < e; i++) {
				int last = buckets[i] = my_buckets[i];
				for (int j = 1; j < nthreads; j++) {
					last = buckets[j*(num_buckets+3) + i] = last + my_buckets[j*(num_buckets+3) + i];
				}
			}
#pragma omp barrier

			/* Convert to this thread's starting write offset per bucket */
			int offset = 0;
			for (int i = 0; i < num_buckets+3; i++) {
				my_buckets[i+bi] = (buckets[i+bi] - my_buckets[i+bi]) + offset;
				offset += buckets[(nthreads-1) * (num_buckets+3) + i];
			}
#pragma omp barrier

			/* Scatter keys/vals into digit order in the destination */
			for (int i = beg; i < end; i++) {
				int j = (src_k[i] >> shift) & mask;
				dst_k[my_buckets[bi+j]] = src_k[i];
				dst_v[my_buckets[bi+j]] = src_v[i];
				my_buckets[bi+j]++;
			}
		}
		/* Flip buffers once, outside the parallel region */
		std::swap (src_k, dst_k);
		std::swap (src_v, dst_v);
	}

	/* If an odd number of passes ran, the sorted data ended up in the
	 * temp buffers; copy it back so callers always get it in keys/vals.
	 * (The original silently relied on num_bits/num_radix_bits being
	 * even and would have returned unsorted data otherwise.) */
	if (src_k != keys) {
		memcpy (keys, src_k, size * sizeof (Key));
		memcpy (vals, src_v, size * sizeof (Val));
	}

	/* Fix: all four scratch arrays were previously leaked */
	free (keys_t);
	free (vals_t);
	free (my_buckets);
	free (buckets);
	return 0;
}

// ****************************************************************************
// Function: RunTest
//
// Purpose:
//   Primary method for the benchmark
//
// Arguments:
//   testName: the name of the test currently being executed (specifying SP or
//             DP)
//   resultDB: results from the benchmark are stored in this db
//   op: the options parser / parameter database
//
// Returns:  nothing
//
// Programmer: Kyle Spafford
// Creation: August 13, 2009
//
// Modifications:
//
// ****************************************************************************
template <class Key, class Val>
void RunTest(string testName, ResultDatabase &resultDB, OptionParser &op)
{
	int mpi_size, mpi_rank;
	MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
	MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

	// Problem Size Initialization
	// Number of Keys/Vals to sort (in MB)
	int probSizes[4] = { 1, 8, 16, 32 };
	int size = probSizes[op.getOptionInt("size")-1];

	// Convert MB into a key count
	size = (size * 1000 * 1000) / sizeof(Key);

	// If the user specified a custom size, use that instead
	if (op.getOptionInt("mb") != 0)
	{
		size = op.getOptionInt("mb") * 1000 * 1000 / sizeof(Key);
	}

	// Seed the generator actually used below.  The original called
	// srand48(), which seeds the unrelated drand48() family, so rand()
	// ran from its default seed and every rank drew the same sample map.
	srand(mpi_rank * 1234);

	// The number of samples per node.
	// Default to .01% of the number of keys
	int num_samples = (int)((float)size * (0.0001f)) / mpi_size;
	// But use a custom argument if set
	if (op.getOptionFloat("samples") != 0.0f) {
		num_samples = (int)((float)size * op.getOptionFloat("samples"))
		              / mpi_size;
	}

	assert(num_samples < size);

	if (mpi_rank == 0) {
		cout << "Number of Samples: " << num_samples << endl;
	}

	// Select random sample positions within this rank's key array
	int* h_random_map = (int*) malloc (sizeof(int) * num_samples);
	for (int i = 0; i < num_samples; i++) {
		h_random_map[i] = rand() % size;
	}

	// Data initialization
	Key* h_keys   = (Key*) malloc (sizeof(Key) * size);
	Val* h_vals   = (Val*) malloc (sizeof(Val) * size);
	Key* h_keys_t = (Key*) malloc (sizeof(Key) * size);
	Val* h_vals_t = (Val*) malloc (sizeof(Val) * size);

	// Imbalance factor -- if the number of samples is low, a rank may receive
	// many more keys/vals than it starts with.  This "factor" adjusts how large
	// your buffers are (2 = twice the size).
	double imb_fac = 3.0;
	int capacity = (int)(size * imb_fac);

	Key* h_recvd_keys = (Key*) malloc (sizeof(Key) * capacity);
	Val* h_recvd_vals = (Val*) malloc (sizeof(Val) * capacity);

	// h_samples holds this rank's num_samples samples and is later reused
	// to receive the mpi_size broadcast splitters, so size it for both
	// uses.  (Previously it held only num_samples entries and overflowed
	// whenever mpi_size > num_samples.)
	int samples_buf_len = (num_samples > mpi_size) ? num_samples : mpi_size;
	Key* h_samples = (Key*) malloc (sizeof(Key) * samples_buf_len);

	// Gather buffer for every rank's samples; only meaningful on the root.
	// Initialized to NULL so non-root ranks pass a determinate (ignored)
	// pointer to MPI_Gather and free() below is unconditional.
	Key* h_all_samples = NULL;
	if (mpi_rank == 0) { // Only allocate this space on the root node
		h_all_samples = (Key*) malloc (sizeof(Key) * mpi_size * num_samples);
	}

	// Counts and displacements (used later by MPI_Alltoall and
	// MPI_Alltoallv; they specify how many items go to each MPI rank)
	int* h_send_counts = (int*) malloc (sizeof(int) * mpi_size);
	int* h_send_displs = (int*) malloc (sizeof(int) * mpi_size);
	int* h_recv_counts = (int*) malloc (sizeof(int) * mpi_size);
	int* h_recv_displs = (int*) malloc (sizeof(int) * mpi_size);

	// Perform multiple passes to get a good measurement
	for (int pass = 0; pass < 3; pass++) {
		// Timing accumulators (seconds)
		int timer_handle        = 0;
		double compute_time     = 0.;
		double mpi_atoa_time    = 0.;
		double smpl_time        = 0.;
		double bin_time         = 0.;
		double sort_time        = 0.;

		for (int i = 0; i < mpi_size; i++) {
			h_send_counts[i] = 0;
			h_recv_counts[i] = 0;
		}

		for (int i = 0; i < capacity; i++) {
			h_recvd_keys[i] = 0;
		}

		// Vals carry the key's original index
		for (int i = 0; i < size; i++)
			h_vals[i] = i;

		// Initialize random 64-bit keys from two 31-bit rand() halves
		srand (mpi_rank * 23);
		for (int i = 0; i < size; i++) {
			h_keys[i] = (static_cast<Key> (rand()) << 32) | rand();
		}

		// Sync after initialization
		MPI_Barrier(MPI_COMM_WORLD);

		// First thing to do is collect samples from the data.
		timer_handle = Timer::Start();
		for (int i = 0; i < num_samples; i++) {
			h_samples[i] = h_keys[h_random_map[i]];
		}
		compute_time += Timer::Stop(timer_handle, "cpu compute");

		MPI_Barrier(MPI_COMM_WORLD);

		// Gather all the samples on the root node.
		// NOTE: MPI_LONG_LONG_INT matches the only instantiation used
		// (Key = long long); other Key types would need another datatype.
		timer_handle = Timer::Start();
		MPI_Gather(h_samples,     num_samples, MPI_LONG_LONG_INT,
				h_all_samples, num_samples, MPI_LONG_LONG_INT,
				0, MPI_COMM_WORLD);

		// If I am the root process, sort all the samples and pick
		// mpi_size-1 splitters plus a maximal sentinel.
		if (mpi_rank == 0) {
			sort(h_all_samples, h_all_samples+(num_samples*mpi_size));
			for (int i = 0; i < mpi_size-1; i++) {
				h_samples[i] = h_all_samples[((i+1) * num_samples) - 1];
			}
			// Sentinel so every key falls below the last splitter.
			// LLONG_MAX, not LONG_MAX: keys are long long, and LONG_MAX
			// is only 32 bits on LLP64 platforms.
			h_samples[mpi_size-1] = LLONG_MAX;
		}

		// Broadcast the splitters that form the keyspace partitioning:
		// one entry per rank s.t. all keys below splitter N (and at or
		// above splitter N-1) belong to rank N.
		MPI_Bcast(h_samples, mpi_size, MPI_LONG_LONG_INT, 0, MPI_COMM_WORLD);
		smpl_time += Timer::Stop(timer_handle, "sampling_time");

		/* Bin keys/vals based on selected splitters */
		timer_handle = Timer::Start();
		bin_by_key (h_keys, h_vals, h_keys_t, h_vals_t, h_samples, size, mpi_size);
		bin_time += Timer::Stop(timer_handle, "bin time");

		// Count how many keys go to each destination rank.  h_keys_t is
		// in bucket order, so walk the splitters forward; the while loop
		// (rather than a single increment) correctly skips over buckets
		// that received no keys, which previously miscounted.
		timer_handle = Timer::Start();
		int dest = 0;
		for (int i = 0; i < size; i++) {
			while (dest < mpi_size-1 && !(h_keys_t[i] < h_samples[dest])) {
				dest++;
			}
			h_send_counts[dest]++;
		}
		compute_time += Timer::Stop(timer_handle, "cpu compute");

		MPI_Barrier(MPI_COMM_WORLD); // Sync before All to Alls

		// AlltoAll on send counts
		timer_handle = Timer::Start();
		MPI_Alltoall(h_send_counts, 1, MPI_INT,
				h_recv_counts, 1, MPI_INT, MPI_COMM_WORLD);

		mpi_atoa_time += Timer::Stop(timer_handle, "mpi_atoa_time");

		// Make sure there is enough room to receive all the keys
		timer_handle = Timer::Start();
		long total_n = 0;
		for (int i = 0; i < mpi_size; i++) {
			total_n += h_recv_counts[i];
		}
		// Fail if there isn't enough room to receive keys
		if (total_n >= capacity) {
			cout << "Total_n: " << total_n << " and capacity: " << capacity << endl;
			assert(total_n < capacity);
		}
		// Calculate displacements (for mpi_alltoallv)
		exclusive_scan(mpi_size, h_send_counts, h_send_displs);
		exclusive_scan(mpi_size, h_recv_counts, h_recv_displs);

		compute_time += Timer::Stop(timer_handle, "cpu_compute_time");

		// Execute the All to allv's
		timer_handle = Timer::Start();

		MPI_Alltoallv(h_keys_t,
				h_send_counts,
				h_send_displs,
				MPI_LONG_LONG_INT,
				h_recvd_keys,
				h_recv_counts,
				h_recv_displs,
				MPI_LONG_LONG_INT,
				MPI_COMM_WORLD);

		MPI_Alltoallv(h_vals_t,
				h_send_counts,
				h_send_displs,
				MPI_LONG_LONG_INT,
				h_recvd_vals,
				h_recv_counts,
				h_recv_displs,
				MPI_LONG_LONG_INT,
				MPI_COMM_WORLD);

		mpi_atoa_time += Timer::Stop(timer_handle, "mpi_atoa_time");

		// Locally sort the received partition
		timer_handle = Timer::Start();
		sort_by_key (h_recvd_keys, h_recvd_vals, (int) total_n);
		sort_time += Timer::Stop(timer_handle, "sort");

		// Check that this rank's partition is sorted (NOTE: this does not
		// validate the global cross-rank ordering)
		bool sorted = true;
		for (int i = 0; i < total_n-1; i++) {
			if (h_recvd_keys[i] > h_recvd_keys[i+1]) {
				sorted = false;
			}
		}

		if (!sorted)
		{
			cout << "Error: rank " << mpi_rank << " is not sorted!" << endl;
		} else {
			if (mpi_rank == 0) {
				cout << "Test Passed" << endl;
			}
		}

		// Report timings to results database
		char atts[1024];
		double total_time = compute_time + mpi_atoa_time + smpl_time + sort_time + bin_time;
		snprintf(atts, sizeof(atts), "%d items", size);
		resultDB.AddResult("-overall",     atts, "s",    total_time);
		resultDB.AddResult("-mpi-atoa",    atts, "s",    mpi_atoa_time);
		resultDB.AddResult("-smpl",        atts, "s",    smpl_time);
		resultDB.AddResult("-sort",        atts, "s",    sort_time);
		resultDB.AddResult("-bin", 	       atts, "s",    bin_time);
		resultDB.AddResult("-cpu_compute", atts, "s",    compute_time);

	} // for (passes)

	// Free host memory
	free (h_keys);
	free (h_vals);
	free (h_keys_t);
	free (h_vals_t);
	free (h_recvd_keys);
	free (h_recvd_vals);
	free (h_samples);
	free (h_all_samples); // NULL on non-root ranks; free(NULL) is a no-op
	free (h_send_counts);
	free (h_send_displs);
	free (h_recv_counts);
	free (h_recv_displs);
	free (h_random_map);
}
