#include "cudacommon.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <mpi.h>

#include <cassert>
#include <iostream>
#include <vector>

#include "Functors.h"
#include "OptionParser.h"
#include "ResultDatabase.h"
#include "Timer.h"

#include <thrust/adjacent_difference.h>
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/generate.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/random.h>


using namespace std;

// Register benchmark-specific command-line options.
// "mb": problem size in MB per rank; 0 (the default) means "use the
// standard --size presets" (see probSizes[] in RunTest).
void addBenchmarkSpecOptions(OptionParser &op) 
{
    op.addOption("mb", OPT_INT, "0", "data size (in MB)");
}

// Distributed GPU radix sort benchmark.
//
// Each MPI rank holds `size` key/value pairs in GPU memory. The sort
// proceeds one radix digit (num_radix_bits wide) at a time:
//   1. Locally stable-sort pairs on the GPU by the current digit (Thrust).
//   2. Count digit occurrences on the GPU, copy counts to the host.
//   3. Compute each key's global destination index via MPI_Exscan +
//      MPI_Bcast of per-digit totals.
//   4. Exchange pairs with MPI_Alltoallv, copy back to the GPU, and
//      re-sort locally on the same digit to restore stability.
//
// Timing is split into gpu compute, cpu compute, PCIe transfer, and the
// two MPI phases (index calculation and all-to-all), and reported to
// resultDB.
//
// NOTE: the MPI datatype for keys/values is hardcoded to
// MPI_LONG_LONG_INT, and the key generator composes two lrand48() draws
// with a 32-bit shift, so T must be a 64-bit integer type (see
// RunBenchmark, which only instantiates T = long).
template <typename T>
void RunTest(string testName, ResultDatabase &resultDB, OptionParser &op)
{
    
    // Collect basic MPI information
    int mpi_size, mpi_rank;
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    
    ///
    // Problem Size Initialization
    //
    
    // Number of elements to sort (in MB), selected by the standard
    // --size option (1-4).
    int probSizes[4] = { 1, 8, 16, 32 };
    int size = probSizes[op.getOptionInt("size")-1];
    
    // Convert MB to an element count
    size = (size * 1000 * 1000) / sizeof(T);
    
    // An explicit --mb value overrides the preset
    if (op.getOptionInt("mb") != 0)
    {
        size = op.getOptionInt("mb") * 1000 * 1000 / sizeof(T);
    }
    
    // We're going to sort these numbers n bits at a time.
    // num_radix_bits holds that value.  Increasing it means less
    // communication, but greater requirements for storage and 
    // a greater number of kernel launches.
    int num_bits = sizeof(T) * 8; // number of bits per key
                                  // assumes 8 bits per byte
    // The number of bits in a radix digit.
    // Note that if you change this, you need to update the
    // bitmask functors in Functors.h as well. 
    int num_radix_bits = 16;
    
    // Number of possible digits, given the number of radix bits.
    // (Integer shift instead of pow(): exact, no float round-trip.)
    int num_digits = 1 << num_radix_bits;

    ///
    // Host Data Initialization
    ///
    
    // Allocate pinned memory for host problem data
    // These arrays hold the key-value pairs. Note that two copies
    // arrays are instantiated.  This is because MPI AlltoAll does
    // not support in-place operation.
    T* h_keys;
    T* h_packed_keys;
    T* h_vals;
    T* h_packed_vals;
    CUDA_SAFE_CALL(cudaMallocHost((void**)&h_keys,        size * sizeof(T)));
    CUDA_SAFE_CALL(cudaMallocHost((void**)&h_packed_keys, size * sizeof(T)));
    CUDA_SAFE_CALL(cudaMallocHost((void**)&h_vals,        size * sizeof(T)));
    CUDA_SAFE_CALL(cudaMallocHost((void**)&h_packed_vals, size * sizeof(T)));
    
    // These arrays are used in the indexing calculation.
    long*         h_counts; // how many keys with a given digit are present 
                            // on this node
    long* h_scanned_counts; // how many keys with a given digit are present 
                            // on "lower" nodes
    long* h_totaled_counts; // how many keys with lower digits are present 
                            // on all nodes

    // So, roughly speaking, the idx calc will be:
    // index = totaled_counts[i] + scanned counts[i] + counts[i]
    // Counts we can figure out locally, but scanned_counts requires an 
    // MPI_ExScan and totaled counts requires a MPI_Broadcast 
    // (after the ExScan) from the highest node
    CUDA_SAFE_CALL(cudaMallocHost((void**)&h_counts,         
                                  num_digits * sizeof(long)));
    CUDA_SAFE_CALL(cudaMallocHost((void**)&h_scanned_counts, 
                                  num_digits * sizeof(long)));
    CUDA_SAFE_CALL(cudaMallocHost((void**)&h_totaled_counts, 
                                  num_digits * sizeof(long)));

    // Counts and displacements (This will be used later for MPI AtA and AtAv
    // It specifies how many items to send to a given MPI rank)
    int* h_send_counts;
    int* h_recv_counts; 
    int* h_send_displs;
    int* h_recv_displs; 
    CUDA_SAFE_CALL(cudaMallocHost((void**)&h_send_counts, 
                                  mpi_size * sizeof(int)));
    CUDA_SAFE_CALL(cudaMallocHost((void**)&h_recv_counts, 
                                  mpi_size * sizeof(int)));
    CUDA_SAFE_CALL(cudaMallocHost((void**)&h_send_displs, 
                                  mpi_size * sizeof(int)));
    CUDA_SAFE_CALL(cudaMallocHost((void**)&h_recv_displs, 
                                  mpi_size * sizeof(int)));
    
    ///
    // Device Data Initialization
    ///

    // Allocate device problem data
    thrust::device_vector<T> d_keys(size);
    thrust::device_vector<T> d_vals(size);
    // Get raw pointer for memcpys, etc.
    T* d_raw_keys = thrust::raw_pointer_cast(&d_keys[0]);
    T* d_raw_vals = thrust::raw_pointer_cast(&d_vals[0]);
    
    // d_digits = [0, 1, ..., num_digits-1]; used as search keys for the
    // vectorized upper_bound histogram below.
    thrust::device_vector<int> d_digits(num_digits);
    thrust::sequence(d_digits.begin(), d_digits.end(), 0);

    thrust::device_vector<long> d_counts(num_digits);
    thrust::device_vector<long> d_lscanned_counts(num_digits);
    long* d_raw_counts = thrust::raw_pointer_cast(&d_counts[0]);
    
    for (int pass=0; pass < op.getOptionInt("passes"); pass++) {
        // Initialize all host memory to zero.  Zeroing
        // h_scanned_counts matters for correctness: MPI_Exscan leaves
        // rank 0's receive buffer undefined, so it must start at 0.
        thrust::fill(h_keys,           h_keys+size,                 0);
        thrust::fill(h_packed_keys,    h_packed_keys+size,          0);
        thrust::fill(h_vals,           h_vals+size,                 0);
        thrust::fill(h_packed_vals,    h_packed_vals+size,          0);
        thrust::fill(h_counts,         h_counts+num_digits,         0);
        thrust::fill(h_scanned_counts, h_scanned_counts+num_digits, 0);
        thrust::fill(h_totaled_counts, h_totaled_counts+num_digits, 0);
        thrust::fill(h_send_counts,    h_send_counts+mpi_size,      0);
        thrust::fill(h_send_displs,    h_send_displs+mpi_size,      0);
        thrust::fill(h_recv_counts,    h_recv_counts+mpi_size,      0);
        thrust::fill(h_recv_displs,    h_recv_displs+mpi_size,      0);
        
        // Initialize the values sequentially
        thrust::sequence(d_vals.begin(), d_vals.end());
        
        // Constrain digits to first radix
        //thrust::transform(d_keys.begin(), d_keys.end(), d_keys.begin(), 
        //  modulo_functor(16));
    
        // Initialize random data: compose two 31-bit lrand48() draws
        // into one 64-bit key (assumes T is 64 bits wide).
        for (int i = 0; i < size; i++) {
            h_keys[i] = (T)(lrand48() << 32L) | lrand48();
        }
        
        // Initialize already sorted data
        //thrust::sequence(d_keys.begin(), d_keys.begin()+size, 
        //                 mpi_rank * size);
        
        // Initialize cyclic sorted data
        //thrust::sequence(d_keys.begin(), d_keys.begin()+size); 
        
        // Initialize reverse data
        //for (int i = 0; i < size; i++) {
        //    // Need to do this in 64 bit
        //    h_keys[i] = ((long)size * (long)mpi_size) - 
        //    (long)(mpi_size - mpi_rank)*(long)size - (long)i;
        //}
        
        // Initialize cyclic reverse data
        //for (int i = 0; i < size; i++) {
        //    h_keys[i] = size - i;                        
        //}

        CUDA_SAFE_CALL(cudaMemcpy(d_raw_keys, h_keys,
            size * sizeof(T), cudaMemcpyHostToDevice));

        
        // Timing variables
        double cpu_compute_time=0., gpu_compute_time=0.;
        double mpi_idx_time=0., mpi_atoa_time=0., pcie_time=0.;
        int timer_handle;
    
        MPI_Barrier(MPI_COMM_WORLD);

        ///
        // Begin main sorting loop
        ///
        for (int shift = 0; shift < num_bits; shift += num_radix_bits) {
            
            timer_handle = Timer::Start();            
            // The transform iterator applies a bitmask, leaving only the 
            // relevant bits for this iteration.
            // Sort based on this transformed key (treat the original k-v pair 
            // as the value).
            thrust::stable_sort_by_key(
                thrust::make_transform_iterator(d_keys.begin(), bitmask_plain(shift)),
                thrust::make_transform_iterator(d_keys.end(),   bitmask_plain(shift)),
                thrust::make_zip_iterator(thrust::make_tuple(d_keys.begin(),
                                                             d_vals.begin())));
            
            // Determine occurrences of each digit using a vectorized binary 
            // search and parallel difference
            thrust::upper_bound(
                thrust::make_transform_iterator(d_keys.begin(), bitmask_plain(shift)),
                thrust::make_transform_iterator(d_keys.end(),   bitmask_plain(shift)),
                d_digits.begin(), d_digits.end(), d_counts.begin());
            
            thrust::adjacent_difference(d_counts.begin(), d_counts.end(),
                                        d_counts.begin());
        
            gpu_compute_time += Timer::Stop(timer_handle, "gpu compute");
       
            // Now that data has been locally sorted (according to the current 
            // radix) perform the indexing calculation to prep for the exchange

            // Copy counts back to host
            timer_handle = Timer::Start();
            CUDA_SAFE_CALL(cudaMemcpy(h_counts, d_raw_counts, 
                           num_digits * sizeof(long), cudaMemcpyDeviceToHost));
        
            // Kickoff async data transfer of key/value pairs 
            // (hopefully overlapping with MPI and indexing calculation)
            // Keys
            CUDA_SAFE_CALL(cudaMemcpyAsync(h_packed_keys, d_raw_keys, 
                size * sizeof(T), cudaMemcpyDeviceToHost));
            // Vals       
            CUDA_SAFE_CALL(cudaMemcpyAsync(h_packed_vals, d_raw_vals, 
                size * sizeof(T), cudaMemcpyDeviceToHost));
            pcie_time += Timer::Stop(timer_handle, "pcie_time");

            // Execute MPI exclusive scan (prefix sum) on counts        
            timer_handle = Timer::Start();
            MPI_Exscan(h_counts, h_scanned_counts, num_digits, 
                MPI_LONG_LONG_INT, MPI_SUM, MPI_COMM_WORLD);
        
            // The last rank needs to populate the totals array
            // then broadcast that information.
            if (mpi_rank == mpi_size-1)
            {
                // Calculate totals: on the last rank,
                // h_counts[k] + h_scanned_counts[k] is the global count of
                // digit k, so the running sum gives the global starting
                // index for each digit.
                long accum = 0; 
                for (int k = 1; k < num_digits; k++) 
                {
                    accum += h_counts[k-1] + h_scanned_counts[k-1];
                    h_totaled_counts[k] = accum;
                }
                h_totaled_counts[0] = 0;
            }

            // Broadcast results to all ranks
            MPI_Bcast(h_totaled_counts, num_digits, MPI_LONG_LONG_INT, 
                mpi_size-1, MPI_COMM_WORLD);
            mpi_idx_time += Timer::Stop(timer_handle, "mpi_idx_time");
        
            timer_handle = Timer::Start();
            
            // Count how many elements will go to each rank
            // Clear old counts
            thrust::fill(h_send_counts, h_send_counts+mpi_size, 0);
            
            // Calculate new ones: each local key's global destination index
            // is base + its offset within this digit; destination rank is
            // that index divided by the (uniform) per-rank partition size.
            for (int i = 0; i < num_digits; i++) 
            {
                long base = h_scanned_counts[i] + h_totaled_counts[i];
                for (int j = 0; j < h_counts[i]; j++) {
                    h_send_counts[(base+j)/size]++;
                }
            }
            cpu_compute_time += Timer::Stop(timer_handle, "cpu_compute");
    

            MPI_Barrier(MPI_COMM_WORLD); // Sync before All to Alls

            // AlltoAll on send counts
            timer_handle = Timer::Start();
            MPI_Alltoall( h_send_counts, 1, MPI_INT, 
                          h_recv_counts, 1, MPI_INT, MPI_COMM_WORLD );
            mpi_atoa_time += Timer::Stop(timer_handle, "mpi_atoa");
        
            // Prepare for the AlltoAllv with actual data
            timer_handle = Timer::Start();
            // Make sure the async key/value transfers are finished; any
            // extra time we have to wait is counted as PCIe.
            // (cudaDeviceSynchronize replaces deprecated
            // cudaThreadSynchronize.)
            CUDA_SAFE_CALL(cudaDeviceSynchronize());
            pcie_time += Timer::Stop(timer_handle, "pcie_time");

            // Calculate displacements (for mpi_alltoallv)
            timer_handle = Timer::Start();
            thrust::exclusive_scan(h_send_counts, h_send_counts+mpi_size, 
                                   h_send_displs);
            thrust::exclusive_scan(h_recv_counts, h_recv_counts+mpi_size, 
                                   h_recv_displs);
            cpu_compute_time += Timer::Stop(timer_handle, "cpu_compute");
    
            // Execute the All to all v
            timer_handle = Timer::Start();
            // One might consider fusing these two all-to-allvs, although 
            // such an optimization would require more elaborate packing.
            MPI_Alltoallv( h_packed_keys, 
                           h_send_counts, 
                           h_send_displs, 
                           MPI_LONG_LONG_INT,
                           h_keys,        
                           h_recv_counts,
                           h_recv_displs,
                           MPI_LONG_LONG_INT,
                           MPI_COMM_WORLD );
        
            MPI_Alltoallv( h_packed_vals, 
                           h_send_counts, 
                           h_send_displs, 
                           MPI_LONG_LONG_INT,
                           h_vals,        
                           h_recv_counts,
                           h_recv_displs,
                           MPI_LONG_LONG_INT,
                           MPI_COMM_WORLD );
        
            mpi_atoa_time += Timer::Stop(timer_handle, "mpi_atoa");

            // Copy data back to GPU
            timer_handle = Timer::Start();
            CUDA_SAFE_CALL(cudaMemcpy(d_raw_keys, h_keys, size * sizeof(T), 
                cudaMemcpyHostToDevice));
            CUDA_SAFE_CALL(cudaMemcpy(d_raw_vals, h_vals, size * sizeof(T), 
                cudaMemcpyHostToDevice));
            pcie_time += Timer::Stop(timer_handle, "pcie_time");
    
            // Local Partial Sort on current radix (values become unsorted from 
            // the all to all). Have to sort them again to maintain stability.
            timer_handle = Timer::Start();
            
            thrust::stable_sort_by_key(
                thrust::make_transform_iterator(d_keys.begin(), bitmask_plain(shift)),
                thrust::make_transform_iterator(d_keys.end(),   bitmask_plain(shift)),
                thrust::make_zip_iterator(thrust::make_tuple(d_keys.begin(),
                                                             d_vals.begin())));

            gpu_compute_time += Timer::Stop(timer_handle,"gpu_compute_time");
        }
    
        MPI_Barrier(MPI_COMM_WORLD);
    
        // Check if sorted (local check only; global order across ranks is
        // implied by the destination-index calculation)
        bool sorted = thrust::is_sorted(d_keys.begin(), d_keys.end());
        if (!sorted) 
        {
            cout << "Error: rank " << mpi_rank << " is not sorted!" << endl;
        }

        // Report timings to results database
        char atts[1024];
        // Factor of 2: each pair crosses the network as a key and a value.
        // Computed in double from the start to avoid int overflow for
        // large rank counts / problem sizes.
        double gbs = 2.0 * mpi_size * (double)size * sizeof(T) / 
                              (1000. * 1000. * 1000.);
        double total_time = gpu_compute_time + cpu_compute_time + 
                            pcie_time + mpi_atoa_time + mpi_idx_time;
                       
        sprintf(atts, "%d items", size);
        resultDB.AddResult(testName+"-overall-tp",  atts, "GB/s", gbs / total_time);
        resultDB.AddResult(testName+"-overall",     atts, "s",    total_time);
        resultDB.AddResult(testName+"-mpi-atoa",    atts, "s",    mpi_atoa_time);
        resultDB.AddResult(testName+"-mpi-idx",     atts, "s",    mpi_idx_time);
        resultDB.AddResult(testName+"-pcie",        atts, "s",    pcie_time);
        resultDB.AddResult(testName+"-gpu_compute", atts, "s",    gpu_compute_time);
        resultDB.AddResult(testName+"-cpu_compute", atts, "s",    cpu_compute_time);
    }

    // Free host pinned memory
    CUDA_SAFE_CALL(cudaFreeHost(h_send_counts));
    CUDA_SAFE_CALL(cudaFreeHost(h_send_displs));
    CUDA_SAFE_CALL(cudaFreeHost(h_recv_counts));
    CUDA_SAFE_CALL(cudaFreeHost(h_recv_displs));
    CUDA_SAFE_CALL(cudaFreeHost(h_keys));
    CUDA_SAFE_CALL(cudaFreeHost(h_packed_keys));
    CUDA_SAFE_CALL(cudaFreeHost(h_vals));
    CUDA_SAFE_CALL(cudaFreeHost(h_packed_vals)); // was leaked in original
    CUDA_SAFE_CALL(cudaFreeHost(h_counts));
    CUDA_SAFE_CALL(cudaFreeHost(h_scanned_counts));
    CUDA_SAFE_CALL(cudaFreeHost(h_totaled_counts));
}


// Benchmark entry point.  Only the 64-bit `long` instantiation is run:
// RunTest hardcodes MPI_LONG_LONG_INT for the key/value exchanges and
// builds keys from two lrand48() draws shifted by 32 bits, so T must be
// a 64-bit integer type.
void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) {
    RunTest<long>("long", resultDB, op);
}
