#include "cudacommon.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <mpi.h>

#include <cassert>
#include <iostream>
#include <vector>

#include "Functors.h"
#include "OptionParser.h"
#include "ResultDatabase.h"
#include "Timer.h"

#include <thrust/adjacent_difference.h>
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/generate.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/random.h>

using namespace std;

// Register command-line options specific to this benchmark.
//
// Adds the "mb" option, which lets the user override the amount of data
// to sort (in MB).  The default of 0 means "not set", in which case the
// problem size is derived from the standard "size" option instead.
void addBenchmarkSpecOptions(OptionParser &op)
{
    op.addOption("mb", OPT_INT, "0", "data size (in MB)");
}

template <typename T>
void RunTest(string testName, ResultDatabase &resultDB, OptionParser &op)
{
    // Distributed least-significant-digit radix sort of 64-bit key/value
    // pairs across MPI ranks, with the per-node sorting and bucketing done
    // on the host via Thrust.  Each radix pass:
    //   1. locally stable-sorts by the current digit,
    //   2. computes global destination indices for every digit value via
    //      MPI_Exscan + MPI_Bcast,
    //   3. exchanges keys and values with MPI_Alltoallv,
    //   4. re-sorts the received chunk on the same digit to restore
    //      stability (the all-to-all interleaves data from many ranks).
    // Per-phase timings are accumulated and reported to resultDB.
    //
    // NOTE: the MPI transfers below use MPI_LONG for the key/value
    // payload, so this routine assumes T == long (as instantiated by
    // RunBenchmark).
    int mpi_size, mpi_rank;
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    // Seed the drand48 generator family differently on each rank so that
    // every node sorts distinct data.  BUGFIX: the key-generation loop
    // below now draws from lrand48(); it previously used rand(), which is
    // seeded by srand() -- so this seed never took effect and every rank
    // produced an identical key stream.
    srand48(mpi_rank * 1234);

    // Number of elements to sort, chosen from preset sizes (in MB)
    int probSizes[4] = { 1, 8, 16, 32 };
    int size = probSizes[op.getOptionInt("size")-1];

    // Convert MB to number of elements to be sorted
    size = (size * 1000 * 1000) / sizeof(T);

    // A nonzero "mb" option overrides the preset problem size.
    if (op.getOptionInt("mb") != 0)
    {
        size = op.getOptionInt("mb") * 1000 * 1000 / sizeof(T);
    }

    int num_bits = sizeof(T) * 8; // number of bits per key

    int num_radix_bits = 16; // TODO: Make it a parameter

    // Number of possible digit values, given the number of radix bits.
    // (Exact integer arithmetic; pow() on doubles was both inexact in
    // principle and unnecessary here.)
    int num_digits = 1 << num_radix_bits;

    // Data Initialization
    thrust::host_vector<T> h_keys (size);
    thrust::host_vector<T> h_packed_keys (size);
    thrust::host_vector<T> h_vals (size);
    thrust::host_vector<T> h_packed_vals (size);

    // These arrays are used in the indexing calculation.
    // How many keys with a given digit are present on this node:
    thrust::host_vector<long> h_counts (num_digits);
    // How many keys with a given digit are present on "lower" ranks:
    long* h_scanned_counts;
    // How many keys with strictly lower digits are present on all ranks:
    long* h_totaled_counts;

    // So, roughly speaking, the index calculation for a key with digit i is:
    //   index = totaled_counts[i] + scanned_counts[i] + local offset
    // counts we can figure out locally, but scanned_counts requires an
    // MPI_Exscan and totaled_counts requires an MPI_Bcast (after the
    // Exscan) from the highest rank.
    h_scanned_counts = (long*) malloc (num_digits * sizeof(long));
    assert (h_scanned_counts);
    h_totaled_counts = (long*) malloc (num_digits * sizeof(long));
    assert (h_totaled_counts);

    // Counts and displacements used later by MPI_Alltoall / MPI_Alltoallv;
    // they specify how many items to send to / receive from each MPI rank.
    int* h_send_counts;
    int* h_recv_counts;
    int* h_send_displs;
    int* h_recv_displs;

    h_send_counts = (int*) malloc (mpi_size * sizeof(int));
    assert (h_send_counts);
    h_recv_counts = (int*) malloc (mpi_size * sizeof(int));
    assert (h_recv_counts);
    h_send_displs = (int*) malloc (mpi_size * sizeof(int));
    assert (h_send_displs);
    h_recv_displs = (int*) malloc (mpi_size * sizeof(int));
    assert (h_recv_displs);

    // Per-element current-digit values (the transformed sort key).
    thrust::host_vector<int> h_trans_keys (size);

    // The sorted list of all possible digit values [0, num_digits), used
    // as search keys for the vectorized binary search below.
    thrust::host_vector<int> h_digits (num_digits);
    thrust::sequence(h_digits.begin(), h_digits.end(), 0);

    for (int pass=0; pass < op.getOptionInt("passes"); pass++) {
        // Initialize all host memory to zero
        thrust::fill(h_keys.begin(),        h_keys.end(),        0);
        thrust::fill(h_packed_keys.begin(), h_packed_keys.end(), 0);
        thrust::fill(h_vals.begin(),        h_vals.end(),        0);
        thrust::fill(h_packed_vals.begin(), h_packed_vals.end(), 0);
        thrust::fill(h_counts.begin(),      h_counts.end(),      0);
        thrust::fill(h_scanned_counts, h_scanned_counts+num_digits, 0);
        thrust::fill(h_totaled_counts, h_totaled_counts+num_digits, 0);
        thrust::fill(h_send_counts,    h_send_counts+mpi_size,      0);
        thrust::fill(h_send_displs,    h_send_displs+mpi_size,      0);
        thrust::fill(h_recv_counts,    h_recv_counts+mpi_size,      0);
        thrust::fill(h_recv_displs,    h_recv_displs+mpi_size,      0);

        // Initialize values; each value records the element's original
        // global-ish position so stability can be checked externally.
        thrust::sequence(h_packed_vals.begin(), h_packed_vals.end());

        // Initialize uniform random 63-bit keys.  lrand48() returns a
        // non-negative long in [0, 2^31), so two draws fill the key.
        // (BUGFIX: was rand(), which ignores the srand48() seed above.)
        for (int i = 0; i < size; i++) {
            h_packed_keys[i] = ((long)lrand48() << 32) | (long)lrand48();
        }

        // Timing accumulators (seconds) for each phase of the sort.
        double cpu_compute_time=0.0;
        double mpi_idx_time=0.0;
        double mpi_atoa_time=0.0;

        double temp_time;
        double cpu_sort_bucket_time=0.0;
        double cpu_global_indexing_time=0.0;
        double mpi_atoa_counts_time=0.0;
        double cpu_displacements_time=0.0;
        double mpi_atoa_keys_vals_time=0.0;
        double cpu_partial_sort_time=0.0;

        int timer_handle;

        MPI_Barrier(MPI_COMM_WORLD);

        // Begin main sorting loop: one iteration per radix digit,
        // from least to most significant.
        for (int shift = 0; shift < num_bits; shift += num_radix_bits)
        {
//CPU sorting and bucketing//////////////////////////////////////////////////////////////////////
            timer_handle = Timer::Start();
            // Apply a bitmask, extracting the current num_radix_bits digit
            // from each 64-bit key
            thrust::transform(h_packed_keys.begin(), h_packed_keys.end(),
                              h_trans_keys.begin(),
                              bitmask_plain(shift));

            // Sort based on the transformed key (treat the original k-v pair
            // as the value); stable_sort preserves lower-digit ordering.
            thrust::stable_sort_by_key(h_trans_keys.begin(), h_trans_keys.end(),
                                       thrust::make_zip_iterator(
                                       thrust::make_tuple(h_packed_keys.begin(),
                                                          h_packed_vals.begin())));

            // Count the occurrence of each of the num_digits possible digit
            // values using a vectorized binary search (cumulative counts)
            // followed by an adjacent difference (per-digit counts).
            thrust::upper_bound(h_trans_keys.begin(), h_trans_keys.end(),
                                h_digits.begin(), h_digits.end(),
                                h_counts.begin());
            thrust::adjacent_difference(h_counts.begin(), h_counts.end(),
                                        h_counts.begin());
            temp_time = Timer::Stop(timer_handle, "cpu compute");
            cpu_sort_bucket_time += temp_time;
            cpu_compute_time += temp_time;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////

            // Now that data has been locally sorted (according to the current
            // radix) perform the indexing calculation to prep for the exchange

//MPI Indexing////////////////////////////////////////////////////////////////////////////////////////////////////
            // Execute MPI exclusive scan (prefix sum excluding the calling
            // rank) on the per-digit counts.  Buffers are C 'long', so the
            // matching MPI datatype is MPI_LONG (BUGFIX: was
            // MPI_LONG_LONG_INT, a type mismatch on non-LP64 platforms).
            timer_handle = Timer::Start();
            MPI_Exscan(&h_counts[0], h_scanned_counts, num_digits,
                MPI_LONG, MPI_SUM, MPI_COMM_WORLD);

            // Per the MPI standard, MPI_Exscan leaves the receive buffer
            // undefined on rank 0; force it to zero so the indexing math
            // below is well-defined there on every iteration.
            if (mpi_rank == 0)
            {
                thrust::fill(h_scanned_counts, h_scanned_counts+num_digits, 0);
            }

            // The last rank needs to populate the totals array
            // then broadcast that information.
            if (mpi_rank == mpi_size-1)
            {
                // Calculate totals
                long accum = 0;
                for (int k = 1; k < num_digits; k++)
                {
                    accum += h_counts[k-1] + h_scanned_counts[k-1];
                    h_totaled_counts[k] = accum;
                }
                h_totaled_counts[0] = 0;
            }

            // Broadcast results to all ranks
            MPI_Bcast(h_totaled_counts, num_digits, MPI_LONG, mpi_size-1, MPI_COMM_WORLD);
            mpi_idx_time += Timer::Stop(timer_handle, "mpi_idx_time");
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////

//CPU Calculate Global Index//////////////////////////////////////////////////////////////////////////////////////
            timer_handle = Timer::Start();
            // Clear old counts
            thrust::fill(h_send_counts, h_send_counts+mpi_size, 0);

            // Calculate, for each locally held key, which rank its global
            // index (base + local offset) lands on, and tally those into
            // per-destination send counts.
            for (int i = 0; i < num_digits; i++)
            {
                long base = h_scanned_counts[i] + h_totaled_counts[i];
                for (int j = 0; j < h_counts[i]; j++) {
                    h_send_counts[(base+j)/size]++;
                }
            }
            temp_time = Timer::Stop(timer_handle, "cpu_compute");
            cpu_global_indexing_time += temp_time;
            cpu_compute_time += temp_time;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
            MPI_Barrier(MPI_COMM_WORLD); // Sync before All to Alls

//MPI All-to-All counts////////////////////////////////////////////////////////////////////////////////////////////
            // AlltoAll on send counts, so each rank learns how much it
            // will receive from every other rank.
            timer_handle = Timer::Start();
            MPI_Alltoall( h_send_counts, 1, MPI_INT,
                          h_recv_counts, 1, MPI_INT, MPI_COMM_WORLD );
            temp_time = Timer::Stop(timer_handle, "mpi_atoa");
            mpi_atoa_counts_time += temp_time;
            mpi_atoa_time += temp_time;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////

//CPU Calculate displacements//////////////////////////////////////////////////////////////////////////////////////
            // Calculate displacements (for mpi_alltoallv)
            timer_handle = Timer::Start();
            thrust::exclusive_scan(h_send_counts, h_send_counts+mpi_size, h_send_displs);
            thrust::exclusive_scan(h_recv_counts, h_recv_counts+mpi_size, h_recv_displs);
            temp_time = Timer::Stop(timer_handle, "cpu_compute");
            cpu_displacements_time += temp_time;
            cpu_compute_time += temp_time;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////

//MPI All-to-All keys and values///////////////////////////////////////////////////////////////////////////////////
            // Execute the All to all v
            // One might consider fusing these two
            // all-to-allvs, although such an optimization
            // would require more elaborate packing.
            // Keys
            timer_handle = Timer::Start();

            MPI_Alltoallv( &h_packed_keys[0],
                           h_send_counts,
                           h_send_displs,
                           MPI_LONG,
                           &h_keys[0],
                           h_recv_counts,
                           h_recv_displs,
                           MPI_LONG,
                           MPI_COMM_WORLD );

            // Values
            MPI_Alltoallv( &h_packed_vals[0],
                           h_send_counts,
                           h_send_displs,
                           MPI_LONG,
                           &h_vals[0],
                           h_recv_counts,
                           h_recv_displs,
                           MPI_LONG,
                           MPI_COMM_WORLD );
            temp_time = Timer::Stop(timer_handle, "mpi_atoa");
            mpi_atoa_keys_vals_time += temp_time;
            mpi_atoa_time += temp_time;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

//CPU Local Partial Sort////////////////////////////////////////////////////////////////////////////////////////////
            // Local Partial Sort on current radix (values become unsorted from
            // the all to all). Have to sort them again to maintain stability.

            // Apply a bitmask, trimming 64 key down to num_radix_bits bits
            timer_handle = Timer::Start();
            thrust::copy (h_keys.begin(), h_keys.end(), h_packed_keys.begin());
            thrust::copy (h_vals.begin(), h_vals.end(), h_packed_vals.begin());

            thrust::transform(h_packed_keys.begin(), h_packed_keys.end(), h_trans_keys.begin(),
                              bitmask_plain(shift));

            // Sort based on the transformed key (treat the original
            // k-v pair as the value)
            thrust::stable_sort_by_key(h_trans_keys.begin(), h_trans_keys.end(),
                                       thrust::make_zip_iterator(
                                           thrust::make_tuple(h_packed_keys.begin(),
                                                              h_packed_vals.begin())));

            temp_time = Timer::Stop(timer_handle,"cpu_compute_time");
            cpu_partial_sort_time += temp_time;
            cpu_compute_time += temp_time;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

        }
        MPI_Barrier(MPI_COMM_WORLD);

        // Check if this rank's chunk ended up locally sorted (global order
        // across ranks is implied by the index calculation).
        bool sorted = thrust::is_sorted(h_packed_keys.begin(), h_packed_keys.end());
        if (!sorted)
        {
            cout << "Error: rank " << mpi_rank << " is not sorted!" << endl;
        }

        // Report timings to results database.  Throughput counts each
        // element twice (key + value).  Compute the byte count in double
        // to avoid int overflow for large size * mpi_size products.
        char atts[1024];
        double gbs = (2.0 * mpi_size * (double)size * sizeof(T)) / (1000. * 1000. * 1000.);
        double total_time = cpu_compute_time + mpi_atoa_time + mpi_idx_time;

        snprintf(atts, sizeof(atts), "%d items", size);
        resultDB.AddResult(testName+"-overall-tp",  atts, "GB/s", gbs / total_time);
        resultDB.AddResult(testName+"-overall",     atts, "s",    total_time);
        resultDB.AddResult(testName+"-mpi-atoa",    atts, "s",    mpi_atoa_time);
        resultDB.AddResult(testName+"-mpi-idx",     atts, "s",    mpi_idx_time);
        resultDB.AddResult(testName+"-cpu_compute", atts, "s",    cpu_compute_time);
        // Per-phase breakdowns
        resultDB.AddResult(testName+"-cpu-sort-bucket",     atts, "s", cpu_sort_bucket_time);
        resultDB.AddResult(testName+"-cpu-global-indexing", atts, "s", cpu_global_indexing_time);
        resultDB.AddResult(testName+"-mpi-atoa-counts",     atts, "s", mpi_atoa_counts_time);
        resultDB.AddResult(testName+"-cpu-displacements",   atts, "s", cpu_displacements_time);
        resultDB.AddResult(testName+"-mpi-atoa-keys-vals",  atts, "s", mpi_atoa_keys_vals_time);
        resultDB.AddResult(testName+"-cpu-partial-sort",    atts, "s", cpu_partial_sort_time);

    }

    // Free memory
    free (h_send_counts);
    free (h_send_displs);
    free (h_recv_counts);
    free (h_recv_displs);
    free (h_scanned_counts);
    free (h_totaled_counts);
}


// Benchmark entry point: run the distributed sort over 64-bit ("long")
// key/value pairs and record all timings in the result database.
void RunBenchmark(ResultDatabase &resultDB, OptionParser &op)
{
    RunTest<long>("long", resultDB, op);
}
