#include "cudacommon.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <mpi.h>

#include <cassert>
#include <iostream>
#include <vector>

#include "OptionParser.h"
#include "ResultDatabase.h"
#include "Timer.h"

#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>

using namespace std;

// Register the command line options specific to this benchmark.
//
// "mb"      - optional override of the per-rank data size in MB
//             (0 means: use the standard problem-size table).
// "samples" - per-rank oversampling rate for splitter selection,
//             expressed as a fraction of the rank's key count.
//             E.g. 0.0025 -> size * 0.0025 = 0.25% sampling rate.
//             0 means: use the built-in default rate.
void addBenchmarkSpecOptions(OptionParser &op) 
{
    op.addOption("mb", OPT_INT, "0", "data size (in MB)");
    // NOTE: the old help text ("number of samples") was misleading --
    // the value is a sampling rate (fraction), not an absolute count.
    op.addOption("samples", OPT_FLOAT, "0.00",
                 "sampling rate as a fraction of local keys");
}

// Distributed sample sort benchmark.
//
// Each rank: (1) initializes a local key/value array, (2) draws random
// samples of its keys, (3) gathers all samples on rank 0, which sorts them
// and derives one splitter per rank, (4) broadcasts the splitters,
// (5) counts/exchanges keys and values with MPI_Alltoall(v) so that rank N
// receives exactly the keys in its splitter interval, and (6) sorts the
// received range locally.  Per-phase timings are reported to resultDB.
//
// Arguments:
//   testName - prefix used for every result database entry
//   resultDB - sink for the timing/throughput measurements
//   op       - parsed options ("size", "mb", "samples", "passes")
//
// NOTE(review): all MPI transfers hardcode MPI_LONG_LONG_INT, so this
// template is only correct when Key and Val are 64-bit integers, as in
// the single instantiation in RunBenchmark -- confirm before adding more.
template <typename Key, typename Val>
void RunTest(string testName, ResultDatabase &resultDB, OptionParser &op)
{
    // Collect basic MPI information
    int mpi_size, mpi_rank;
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    srand48(mpi_rank * 1234); // Per-rank seed so ranks sample differently

    // Problem Size Initialization
    // Number of Keys/Vals to sort (in MB)
    int probSizes[4] = { 1, 8, 16, 32 };
    int size = probSizes[op.getOptionInt("size")-1];

    // Convert from MB to an element count
    size = (size * 1000 * 1000) / sizeof(Key);

    // If the user specified a custom size, use that instead
    if (op.getOptionInt("mb") != 0)
    {
        size = op.getOptionInt("mb") * 1000 * 1000 / sizeof(Key);
    }

    // The number of samples per node.
    // Default to 0.01% of the number of keys (0.0001), split across ranks.
    // (The previous comment claimed 1%, which did not match the constant.)
    int num_samples = (int)((float)size * (0.0001f)) / mpi_size;
    // But use a custom sampling rate if one was given
    if (op.getOptionFloat("samples") != 0.0f) {
        num_samples = (int)((float)size * op.getOptionFloat("samples"))
                      / mpi_size;
    }
    assert(num_samples < size);
    // h_samples doubles as the splitter buffer: rank 0 writes mpi_size
    // entries into it and MPI_Bcast ships mpi_size entries out of it.
    // Without this check, mpi_size > num_samples silently overflows.
    assert(num_samples >= mpi_size);
    if (mpi_rank == 0) {
        cout << "Number of Samples: " << num_samples << endl;
    }

    // Pre-generate a vector of random indices into the key array, reused
    // for the sampling step of every pass.
    thrust::host_vector<int> h_random_map(num_samples);
    for (int i = 0; i < num_samples; i++) {
        h_random_map[i] = (long)(drand48() * (size-1));
    }

    // Imbalance factor -- if the number of samples is low, a rank may receive
    // many more keys/vals than it starts with.  This "factor" adjusts how large
    // your buffers are (2 = twice the size).
    double imb_fac = 3.0;
    int capacity = (int)(size * imb_fac);

    // Host Data Initialization
    // We have two copies of the keys and values, since MPI all to allv does
    // not support an in place option.
    thrust::host_vector<Key> h_keys(size);
    thrust::host_vector<Key> h_recv_keys(capacity);

    // Values use the Val template parameter.  (The original declared these
    // as Key, which only worked because Key == Val in every instantiation.)
    thrust::host_vector<Val> h_vals(size);
    thrust::host_vector<Val> h_recv_vals(capacity);

    thrust::host_vector<Key> h_samples(num_samples);
    thrust::host_vector<Key> h_all_samples(mpi_size * num_samples);

    // Counts and displacements (used later for MPI AtA and AtAv; entry i
    // specifies how many items to send to / receive from MPI rank i)
    thrust::host_vector<int> h_send_counts(mpi_size);
    thrust::host_vector<int> h_recv_counts(mpi_size);
    thrust::host_vector<int> h_send_displs(mpi_size);
    thrust::host_vector<int> h_recv_displs(mpi_size);

    // Perform multiple passes to get a good measurement
    for (int pass=0; pass < op.getOptionInt("passes"); pass++) {

        // Aggregate timing buckets
        double cpu_compute_time = 0.0;
        double mpi_atoa_time    = 0.0;
        double smpl_time        = 0.0;

        // Per-phase timing breakdown
        double temp_time;
        double cpu_gather_samples_time    = 0.0;
        double cpu_initial_sort_time      = 0.0;
        double mpi_gather_samples_time    = 0.0;
        double cpu_sort_samples_time      = 0.0;
        double mpi_broadcast_samples_time = 0.0;
        double cpu_global_indexing_time   = 0.0;
        double mpi_atoa_counts_time       = 0.0;
        double cpu_displacements_time     = 0.0;
        double mpi_atoa_keys_vals_time    = 0.0;
        double cpu_partial_sort_time      = 0.0;

        int timer_handle;

        // Initialize all host memory to zero
        thrust::fill(h_keys.begin(),        h_keys.end(),        0);
        thrust::fill(h_vals.begin(),        h_vals.end(),        0);
        thrust::fill(h_recv_keys.begin(),   h_recv_keys.end(),   0);
        thrust::fill(h_recv_vals.begin(),   h_recv_vals.end(),   0);
        thrust::fill(h_send_counts.begin(), h_send_counts.end(), 0);
        thrust::fill(h_send_displs.begin(), h_send_displs.end(), 0);
        thrust::fill(h_recv_counts.begin(), h_recv_counts.end(), 0);
        thrust::fill(h_recv_displs.begin(), h_recv_displs.end(), 0);

        thrust::sequence(h_vals.begin(), h_vals.end());

        // Initialize random data
        //for (int i = 0; i < size; i++) {
        //    h_keys[i] = (Key)(lrand48() << 32L) | lrand48();
        //}

        // Initialize already sorted data
        thrust::sequence(h_keys.begin(), h_keys.end(),
                         mpi_rank * size);

        // Initialize cyclic sorted data
        //thrust::sequence(h_keys.begin(), h_keys.end());

        // Initialize reverse data
        //for (int i = 0; i < size; i++) {
        //    // Need to do this in 64 bit
        //    h_keys[i] = ((long)size * (long)mpi_size) -
        //    (long)(mpi_size - mpi_rank)*(long)size - (long)i;
        //}

        // Initialize cyclic reverse data
        //for (int i = 0; i < size; i++) {
        //    h_keys[i] = size - i;
        //}

        // Sync after initialization
        MPI_Barrier(MPI_COMM_WORLD);

//CPU gather samples///////////////////////////////////////////////////////////////////////////
        // First thing to do is collect random samples from the data,
        // based on the pre-generated random map.
        timer_handle = Timer::Start();
        thrust::gather(h_random_map.begin(), h_random_map.end(),
                       h_keys.begin(),
                       h_samples.begin());
        temp_time = Timer::Stop(timer_handle, "cpu compute");
        cpu_gather_samples_time += temp_time;
        cpu_compute_time += temp_time;
///////////////////////////////////////////////////////////////////////////////////////////////

//CPU Initial Sorting//////////////////////////////////////////////////////////////////////////
        // Local sort (heavyweight, but preps for MPI atoav)
        timer_handle = Timer::Start();
        thrust::stable_sort_by_key(h_keys.begin(), h_keys.end(),
                                   h_vals.begin());
        temp_time = Timer::Stop(timer_handle, "cpu compute");
        cpu_initial_sort_time += temp_time;
        cpu_compute_time += temp_time;
///////////////////////////////////////////////////////////////////////////////////////////////

//MPI gather samples///////////////////////////////////////////////////////////////////////////
        // Gather all the samples on root node
        timer_handle = Timer::Start();
        MPI_Gather(&(h_samples[0]),     num_samples, MPI_LONG_LONG_INT,
                   &(h_all_samples[0]), num_samples, MPI_LONG_LONG_INT,
                   0, MPI_COMM_WORLD);
        temp_time = Timer::Stop(timer_handle, "sampling");
        mpi_gather_samples_time += temp_time;
        smpl_time += temp_time;
///////////////////////////////////////////////////////////////////////////////////////////////

//CPU Sort Samples/////////////////////////////////////////////////////////////////////////////
        timer_handle = Timer::Start();
        // If I am the root process, sort all the samples
        if (mpi_rank == 0) {
            thrust::sort(h_all_samples.begin(), h_all_samples.end());
            // Next, construct the keyspace partitioning from the sorted samples
            // This basically reconciles the differences between number of nodes
            // and total number of samples.  Since we use a constant number of
            // samples per node, this is basically just a packing operation.
            for (int i = 0; i < mpi_size-1; i++) {
                h_samples[i] = h_all_samples[((i+1) * num_samples) - 1];
            }
            // This may be a little confusing, but we set the last element to
            // LONG_MAX.  This is because there may be keys that are larger than
            // any sample, and if we used the highest sample value, those large
            // keys would not get assigned to a destination node.
            h_samples[mpi_size-1] = LONG_MAX;
        }
        temp_time = Timer::Stop(timer_handle, "sampling");
        cpu_sort_samples_time += temp_time;
        smpl_time += temp_time;
////////////////////////////////////////////////////////////////////////////////////////////////

//MPI Broadcast Samples/////////////////////////////////////////////////////////////////////////
        timer_handle = Timer::Start();
        // Broadcast the samples that form the keyspace partitioning.  This is
        // an array with one entry per node s.t. for node N, all vals that
        // should be sent to that node are <= entry[N]
        MPI_Bcast(&(h_samples[0]), mpi_size, MPI_LONG_LONG_INT, 0, MPI_COMM_WORLD);
        temp_time = Timer::Stop(timer_handle, "sampling");
        mpi_broadcast_samples_time += temp_time;
        smpl_time += temp_time;
////////////////////////////////////////////////////////////////////////////////////////////////

//CPU Calculate Global Index////////////////////////////////////////////////////////////////////
        // Count how many elements will go to each rank.  Both h_keys and
        // h_samples are sorted, so the destination only moves forward.
        // BUGFIX: advance past *every* splitter the current key exceeds.
        // The previous code incremented dest at most once per key, so a key
        // that jumped more than one splitter boundary was misrouted.
        // KS: Note that the previous version which used host thrust calls
        // was quite slow compared to the following:
        timer_handle = Timer::Start();
        int dest = 0;
        for (int i = 0; i < size; i++) {
            while (dest < mpi_size - 1 && h_keys[i] >= h_samples[dest]) {
                dest++;
            }
            h_send_counts[dest]++;
        }
        temp_time = Timer::Stop(timer_handle, "cpu compute");
        cpu_global_indexing_time += temp_time;
        cpu_compute_time += temp_time;
/////////////////////////////////////////////////////////////////////////////////////////////////

        MPI_Barrier(MPI_COMM_WORLD); // Sync before All to Alls

//MPI All-to-All counts//////////////////////////////////////////////////////////////////////////
        // AlltoAll on send counts
        timer_handle = Timer::Start();
        MPI_Alltoall(&(h_send_counts[0]), 1, MPI_INT,
                     &(h_recv_counts[0]), 1, MPI_INT, MPI_COMM_WORLD);
        temp_time = Timer::Stop(timer_handle, "mpi atoa");
        mpi_atoa_counts_time += temp_time;
        mpi_atoa_time += temp_time;
/////////////////////////////////////////////////////////////////////////////////////////////////

//CPU Calculate Displacements////////////////////////////////////////////////////////////////////
        // Make sure there is enough room to receive all the keys
        timer_handle = Timer::Start();
        long total_n = 0;
        for (int i = 0; i < mpi_size; i++) {
            total_n += h_recv_counts[i];
        }
        // Fail if there isn't enough room to receive keys
        if (total_n >= capacity) {
            cout << "Total_n: " << total_n << " and capacity: " << capacity << endl;
            assert(total_n < capacity);
        }
        // Calculate displacements (for mpi_alltoallv)
        thrust::exclusive_scan(h_send_counts.begin(), h_send_counts.end(),
            h_send_displs.begin());
        thrust::exclusive_scan(h_recv_counts.begin(), h_recv_counts.end(),
            h_recv_displs.begin());
        temp_time = Timer::Stop(timer_handle, "cpu compute");
        cpu_displacements_time += temp_time;
        cpu_compute_time += temp_time;
//////////////////////////////////////////////////////////////////////////////////////////////////

//MPI All-to-All keys and values//////////////////////////////////////////////////////////////////
        // Execute the All to allv's
        timer_handle = Timer::Start();
        MPI_Alltoallv(&(h_keys[0]),
                      &(h_send_counts[0]),
                      &(h_send_displs[0]),
                      MPI_LONG_LONG_INT,
                      &(h_recv_keys[0]),
                      &(h_recv_counts[0]),
                      &(h_recv_displs[0]),
                      MPI_LONG_LONG_INT,
                      MPI_COMM_WORLD);

        MPI_Alltoallv(&(h_vals[0]),
                      &(h_send_counts[0]),
                      &(h_send_displs[0]),
                      MPI_LONG_LONG_INT,
                      &(h_recv_vals[0]),
                      &(h_recv_counts[0]),
                      &(h_recv_displs[0]),
                      MPI_LONG_LONG_INT,
                      MPI_COMM_WORLD);

        temp_time = Timer::Stop(timer_handle, "mpi atoa");
        mpi_atoa_keys_vals_time += temp_time;
        mpi_atoa_time += temp_time;
//////////////////////////////////////////////////////////////////////////////////////////////////

//CPU Local Partial Sort//////////////////////////////////////////////////////////////////////////
        // Only the first total_n received entries are valid
        timer_handle = Timer::Start();
        thrust::stable_sort_by_key(h_recv_keys.begin(),
                                   h_recv_keys.begin()+total_n,
                                   h_recv_vals.begin());

        temp_time = Timer::Stop(timer_handle, "cpu compute");
        cpu_partial_sort_time += temp_time;
        cpu_compute_time += temp_time;
//////////////////////////////////////////////////////////////////////////////////////////////////

        // Verify the local range ended up sorted
        bool sorted = thrust::is_sorted(h_recv_keys.begin(), h_recv_keys.begin()+total_n);
        if (!sorted)
        {
            cout << "Error: rank " << mpi_rank << " is not sorted!" << endl;
        } else {
            if (mpi_rank == 0) {
                cout << "Test Passed" << endl;
            }
        }

        // Report timings to results database
        char atts[1024];
        double gbs = (double) (2. * mpi_size * size * sizeof(Key)) /
                     (1000. * 1000. * 1000.);
        double total_time = cpu_compute_time + mpi_atoa_time + smpl_time;

        // snprintf instead of sprintf: bounded write into atts
        snprintf(atts, sizeof(atts), "%d items", size);
        resultDB.AddResult(testName+"-overall-tp",  atts, "GB/s", gbs / total_time);
        resultDB.AddResult(testName+"-overall",     atts, "s",    total_time);
        resultDB.AddResult(testName+"-mpi-atoa",    atts, "s",    mpi_atoa_time);
        resultDB.AddResult(testName+"-smpl",        atts, "s",    smpl_time);
        resultDB.AddResult(testName+"-cpu_compute", atts, "s",    cpu_compute_time);
        // Per-phase breakdown
        resultDB.AddResult(testName+"-cpu-gather-samples", atts, "s",    cpu_gather_samples_time);
        resultDB.AddResult(testName+"-cpu-initial-sort", atts, "s",    cpu_initial_sort_time);
        resultDB.AddResult(testName+"-mpi-gather-samples", atts, "s",    mpi_gather_samples_time);
        resultDB.AddResult(testName+"-cpu-sort-samples", atts, "s",    cpu_sort_samples_time);
        resultDB.AddResult(testName+"-mpi-broadcast-samples", atts, "s",    mpi_broadcast_samples_time);
        resultDB.AddResult(testName+"-cpu-global-indexing", atts, "s",    cpu_global_indexing_time);
        resultDB.AddResult(testName+"-mpi-atoa-counts", atts, "s",    mpi_atoa_counts_time);
        resultDB.AddResult(testName+"-cpu-displacements", atts, "s",    cpu_displacements_time);
        resultDB.AddResult(testName+"-mpi-atoa-keys-vals", atts, "s",    mpi_atoa_keys_vals_time);
        resultDB.AddResult(testName+"-cpu-partial-sort-time", atts, "s",    cpu_partial_sort_time);
    } // for (passes)

}

// Driver entry point: run the distributed sort benchmark with 64-bit
// integer keys and values, reporting results under the "long" prefix.
void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) {
    typedef long long element_t;
    RunTest<element_t, element_t>("long", resultDB, op);
}
