#include "cudacommon.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <limits.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <mpi.h>

#include <cassert>
#include <iostream>
#include <vector>

#include "OptionParser.h"
#include "ResultDatabase.h"
#include "Timer.h"

#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/gather.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>

using namespace std;

// Register the command-line options specific to this benchmark.
void addBenchmarkSpecOptions(OptionParser &op)
{
    // Optional override of the problem size, in millions of bytes.
    op.addOption("mb", OPT_INT, "0", "data size (in MB)");

    // Oversampling factor: the fraction of each rank's keys that are
    // drawn as splitter samples.
    // Eg. .0025 = size * 0.0025 = 0.25% sampling rate
    op.addOption("samples", OPT_FLOAT, "0.00", "number of samples");
}

// Distributed GPU sample sort:
//   1) every rank fills a buffer with random 64-bit keys,
//   2) random samples are gathered on the root, sorted, and mpi_size
//      splitters are broadcast to partition the keyspace,
//   3) keys/vals are routed to their destination ranks with
//      MPI_Alltoallv and each rank finishes with a local GPU
//      thrust::stable_sort_by_key.
// Per-phase timings (GPU compute, CPU compute, PCIe, MPI) are recorded
// in resultDB under testName-*.
// NOTE: the MPI transfers hard-code MPI_LONG_LONG_INT, so this template
// is only correct for Key == Val == long long (see RunBenchmark).
template <typename Key, typename Val>
void RunTest(string testName, ResultDatabase &resultDB, OptionParser &op)
{
    // Collect basic MPI information
    int mpi_size, mpi_rank;
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    srand48(mpi_rank * 1234); // Per-rank seed so ranks draw different keys
    
    // Problem Size Initialization
    // Number of Keys\Vals to sort (in MB)
    int probSizes[4] = { 1, 8, 16, 32 };
    int size = probSizes[op.getOptionInt("size")-1];
    
    // Convert MB to an element count
    size = (size * 1000 * 1000) / sizeof(Key);
    
    // If the user specified a custom size, use that instead
    if (op.getOptionInt("mb") != 0)
    {
        size = op.getOptionInt("mb") * 1000 * 1000 / sizeof(Key);
    }

    // The number of samples per node. 
    // Default to .01% of the number of keys
    int num_samples = (int)((float)size * (0.0001f)) / mpi_size;
    // But use a custom argument if set
    if (op.getOptionFloat("samples") != 0.0f) {
        num_samples = (int)((float)size * op.getOptionFloat("samples")) 
                      / mpi_size;
    }
    assert(num_samples < size); 
    if (mpi_rank == 0) {
        cout << "Number of Samples: " << num_samples << endl;
    }

    // Device vector for selecting the random samples
    // The idea is to pre-generate a vector of random
    // indices into the key array to use for all sampling passes
    thrust::host_vector<int> h_random_map(num_samples);
    for (int i = 0; i < num_samples; i++) {
        h_random_map[i] = (long)(drand48() * (size-1));
    }
    thrust::device_vector<int> d_random_map(num_samples);
    d_random_map = h_random_map;
    
    // Host Data Initialization
    // Allocate pinned memory for host problem data (pinned memory is
    // required for the async PCIe transfers below)
    Key* h_keys, *h_recvd_keys;
    Val* h_vals, *h_recvd_vals;
    
    // Imbalance factor -- if the number of samples is low, a rank may receive
    // many more keys/vals than it starts with.  This "factor" adjusts how large
    // your buffers are (2 = twice the size).
    double imb_fac = 2.0;
    int capacity = (int)(size * imb_fac);
    CUDA_SAFE_CALL(cudaMallocHost((void**)&h_keys, size * sizeof(Key)));
    CUDA_SAFE_CALL(cudaMallocHost((void**)&h_vals, size * sizeof(Val)));
    CUDA_SAFE_CALL(cudaMallocHost((void**)&h_recvd_keys, 
                                  capacity * sizeof(Key)));
    CUDA_SAFE_CALL(cudaMallocHost((void**)&h_recvd_vals, 
                                  capacity * sizeof(Val)));
    
    Key* h_samples;            // storage for this rank's samples
    Key* h_all_samples = NULL; // all samples; allocated on the root only
                               // (NULL elsewhere so MPI_Gather never sees
                               // an uninitialized pointer)
    // h_samples is reused after the sample sort to hold the mpi_size
    // splitters, so it must be large enough for BOTH uses.  (Allocating
    // only num_samples entries overflows when num_samples < mpi_size.)
    int sample_buf_len = (num_samples > mpi_size) ? num_samples : mpi_size;
    CUDA_SAFE_CALL(cudaMallocHost((void**)&h_samples, 
                                  sample_buf_len * sizeof(Key)));
    
    if (mpi_rank == 0) { // Only allocate this space on the root node
        CUDA_SAFE_CALL(cudaMallocHost((void**)&h_all_samples, 
                       mpi_size * num_samples * sizeof(Key)));
    }
    thrust::device_vector<Key> d_samples(num_samples);
    Key* d_raw_samples = thrust::raw_pointer_cast(&d_samples[0]);

    // Counts and displacements (This will be used later for MPI AtA and AtAv
    // It specifies how many items to send to a given MPI rank)
    int* h_send_counts, *h_recv_counts; 
    int* h_send_displs, *h_recv_displs; 
    CUDA_SAFE_CALL(cudaMallocHost((void**)&h_send_counts, 
                                  mpi_size * sizeof(int)));
    CUDA_SAFE_CALL(cudaMallocHost((void**)&h_recv_counts, 
                                  mpi_size * sizeof(int)));
    CUDA_SAFE_CALL(cudaMallocHost((void**)&h_send_displs, 
                                  mpi_size * sizeof(int)));
    CUDA_SAFE_CALL(cudaMallocHost((void**)&h_recv_displs, 
                                  mpi_size * sizeof(int)));
    
    // Device Data Initialization
    // Allocate device problem data (sized to capacity so received keys fit)
    thrust::device_vector<Key> d_keys(capacity);
    thrust::device_vector<Val> d_vals(capacity);
    
    // Get raw pointers for memcpys, etc.
    Key* d_raw_keys = thrust::raw_pointer_cast(&d_keys[0]);
    Val* d_raw_vals = thrust::raw_pointer_cast(&d_vals[0]);
    
    // Perform multiple passes to get a good measurement
    for (int pass=0; pass < op.getOptionInt("passes"); pass++) {
        
        // Timing variables
        int timer_handle        = 0;
        double gpu_compute_time = 0.;
        double cpu_compute_time = 0.;
        double pcie_time        = 0.;
        double mpi_atoa_time    = 0.;
        double smpl_time        = 0.; // Time to sort the samples, includes 
                                      // some MPI and some CPU compute

        // Initialize all host memory to zero so every pass starts from
        // the same state
        thrust::fill(h_keys,        h_keys        + size,     0);
        thrust::fill(h_vals,        h_vals        + size,     0);
        thrust::fill(h_recvd_keys,  h_recvd_keys  + capacity, 0);
        thrust::fill(h_recvd_vals,  h_recvd_vals  + capacity, 0);
        thrust::fill(h_send_counts, h_send_counts + mpi_size, 0);
        thrust::fill(h_send_displs, h_send_displs + mpi_size, 0);
        thrust::fill(h_recv_counts, h_recv_counts + mpi_size, 0);
        thrust::fill(h_recv_displs, h_recv_displs + mpi_size, 0);

        // Values are just the original position of each key
        thrust::sequence(d_vals.begin(), d_vals.begin()+size);

        // Initialize random data: build 62 random bits from two 31-bit
        // lrand48() draws.  Shift in the (64-bit) Key type so this is
        // well-defined even on platforms where long is 32 bits.
        for (int i = 0; i < size; i++) {
            h_keys[i] = ((Key)lrand48() << 32) | (Key)lrand48();
        }

        CUDA_SAFE_CALL(cudaMemcpy(d_raw_keys, h_keys,
            size * sizeof(Key), cudaMemcpyHostToDevice));

        // Sync after initialization
        MPI_Barrier(MPI_COMM_WORLD);
        
        // First thing to do is collect samples from the data.
        // Fill device sample array with random keys using the random map.
        timer_handle = Timer::Start();
        thrust::gather(d_random_map.begin(), d_random_map.end(),
                       d_keys.begin(),
                       d_samples.begin());
        gpu_compute_time += Timer::Stop(timer_handle, "gpu compute");

        // Copy samples back to host for MPI transfer
        timer_handle = Timer::Start();
        CUDA_SAFE_CALL(cudaMemcpyAsync(h_samples, d_raw_samples,
            num_samples * sizeof(Key), cudaMemcpyDeviceToHost));
        pcie_time += Timer::Stop(timer_handle, "pcie_time");
        
        // Start preparing for the global key exchange.
        // Sort keys/vals locally (somewhat heavyweight, but this way they 
        // are packed for the all_to_allv exchange)
        // Ideally this would be asynchronous and overlap with the sampling
        // process. However, Thrust doesn't have very good support for
        // asynchronicity.  Best option is probably to spawn a separate thread,
        // and have that kick off a Thrust sort.
        timer_handle = Timer::Start();
        thrust::stable_sort_by_key(d_keys.begin(), d_keys.begin()+size,
                                   d_vals.begin());
        gpu_compute_time += Timer::Stop(timer_handle, "gpu compute");

        // Kickoff async read of sorted keys/vals back from device 
        timer_handle = Timer::Start();
        CUDA_SAFE_CALL(cudaMemcpyAsync(h_keys, d_raw_keys, 
            size * sizeof(Key), cudaMemcpyDeviceToHost));
        CUDA_SAFE_CALL(cudaMemcpyAsync(h_vals, d_raw_vals, 
            size * sizeof(Val), cudaMemcpyDeviceToHost));
        pcie_time += Timer::Stop(timer_handle, "pcie_time");

        // Gather all the samples on root node
        timer_handle = Timer::Start();
        MPI_Gather(h_samples,     num_samples, MPI_LONG_LONG_INT, 
                   h_all_samples, num_samples, MPI_LONG_LONG_INT,
                   0, MPI_COMM_WORLD);
        
        // If I am the root process, sort all the samples
        if (mpi_rank == 0) {
            // NOTE: this local sort takes place on the CPU; sample counts
            // are small enough that PCIe overhead would likely dominate a
            // GPU sort.
            thrust::sort(h_all_samples, h_all_samples+(num_samples*mpi_size));
            // Construct the keyspace partitioning from the sorted samples
            // by picking every num_samples-th sample as a splitter.  Since
            // we use a constant number of samples per node, this is
            // basically just a packing operation.
            for (int i = 0; i < mpi_size-1; i++) {
                h_samples[i] = h_all_samples[((i+1) * num_samples) - 1];
            }
            // The last splitter is LONG_MAX: there may be keys larger than
            // any sample, and those must still route to the final rank.
            h_samples[mpi_size-1] = LONG_MAX;
        }
        // Broadcast the splitters that form the keyspace partitioning:
        // one entry per node s.t. all keys destined for node N are
        // strictly less than entry[N].
        MPI_Bcast(h_samples, mpi_size, MPI_LONG_LONG_INT, 0, MPI_COMM_WORLD);
        smpl_time += Timer::Stop(timer_handle, "sampling_time");
      
        // Make sure the async transfer of keys/vals is finished.
        // In most cases it should be, since MPI sampling time is generally
        // more than PCIe.  (cudaThreadSynchronize() is deprecated in favor
        // of cudaDeviceSynchronize().)
        timer_handle = Timer::Start();
        cudaDeviceSynchronize();
        pcie_time += Timer::Stop(timer_handle, "pcie_time");
        
        // Count how many elements will go to each rank.  The keys are
        // sorted, so a single forward sweep over the splitters suffices.
        timer_handle = Timer::Start();
        int dest = 0;
        for (int i = 0; i < size; i++) {
            // Advance past EVERY splitter this key meets or exceeds: one
            // key may skip several (empty) buckets at once.  An if/else
            // that advances only one bucket per key misroutes keys
            // whenever some bucket receives nothing.  The last splitter is
            // LONG_MAX, but guard dest anyway for safety.
            while (dest < mpi_size - 1 && h_keys[i] >= h_samples[dest]) {
                dest++;
            }
            h_send_counts[dest]++;
        }
        cpu_compute_time += Timer::Stop(timer_handle, "cpu compute");
        
        MPI_Barrier(MPI_COMM_WORLD); // Sync before All to Alls
        
        // AlltoAll on send counts
        timer_handle = Timer::Start(); 
        MPI_Alltoall(h_send_counts, 1, MPI_INT, 
                     h_recv_counts, 1, MPI_INT, MPI_COMM_WORLD);
        mpi_atoa_time += Timer::Stop(timer_handle, "mpi_atoa_time");
        
        // Make sure there is enough room to receive all the keys
        timer_handle = Timer::Start();
        long total_n = 0;
        for (int i = 0; i < mpi_size; i++) {
            total_n += h_recv_counts[i];
        }
        // Fail loudly if the imbalance factor was too small
        if (total_n >= capacity) {
            cout << "Total_n: " << total_n << " and capacity: " << capacity << endl;
            assert(total_n < capacity);
        }
        // Calculate displacements (for mpi_alltoallv)
        thrust::exclusive_scan(h_send_counts, h_send_counts+mpi_size, h_send_displs);
        thrust::exclusive_scan(h_recv_counts, h_recv_counts+mpi_size, h_recv_displs);        
        cpu_compute_time += Timer::Stop(timer_handle, "cpu_compute_time");

        // Execute the all-to-allv's: route keys and vals to their
        // destination ranks
        timer_handle = Timer::Start();
        MPI_Alltoallv(h_keys, 
                      h_send_counts, 
                      h_send_displs, 
                      MPI_LONG_LONG_INT,
                      h_recvd_keys,        
                      h_recv_counts,
                      h_recv_displs,
                      MPI_LONG_LONG_INT,
                      MPI_COMM_WORLD);

        MPI_Alltoallv(h_vals, 
                      h_send_counts, 
                      h_send_displs, 
                      MPI_LONG_LONG_INT,
                      h_recvd_vals,        
                      h_recv_counts,
                      h_recv_displs,
                      MPI_LONG_LONG_INT,
                      MPI_COMM_WORLD);
        mpi_atoa_time += Timer::Stop(timer_handle, "mpi_atoa_time");
        
        // Copy received data back to GPU for the final local sort
        timer_handle = Timer::Start();
        CUDA_SAFE_CALL(cudaMemcpy(d_raw_keys, h_recvd_keys, total_n * sizeof(Key), 
            cudaMemcpyHostToDevice));
        CUDA_SAFE_CALL(cudaMemcpy(d_raw_vals, h_recvd_vals, total_n * sizeof(Val), 
            cudaMemcpyHostToDevice));
        pcie_time += Timer::Stop(timer_handle, "pcie_time");
        
        timer_handle = Timer::Start();
        thrust::stable_sort_by_key(d_keys.begin(), d_keys.begin()+total_n,
                                   d_vals.begin());
        gpu_compute_time += Timer::Stop(timer_handle, "gpu_compute_time");
        
        // Check if locally sorted (global order across ranks follows from
        // the splitter-based routing)
        bool sorted = thrust::is_sorted(d_keys.begin(), d_keys.begin()+total_n);
        if (!sorted) 
        {
            cout << "Error: rank " << mpi_rank << " is not sorted!" << endl; 
        } else {
            if (mpi_rank == 0) {
                cout << "Test Passed" << endl;
            }
        }
        
        // Report timings to results database
        char atts[1024];
        double gbs = (double) (2. * mpi_size * size * sizeof(Key)) / 
                     (1000. * 1000. * 1000.);
        double total_time = gpu_compute_time + cpu_compute_time + pcie_time +
                            mpi_atoa_time    + smpl_time;
                       
        sprintf(atts, "%d items", size);
        resultDB.AddResult(testName+"-overall-tp",  atts, "GB/s", gbs / total_time);
        resultDB.AddResult(testName+"-overall",     atts, "s",    total_time);
        resultDB.AddResult(testName+"-mpi-atoa",    atts, "s",    mpi_atoa_time);
        resultDB.AddResult(testName+"-smpl",        atts, "s",    smpl_time);
        resultDB.AddResult(testName+"-pcie",        atts, "s",    pcie_time);
        resultDB.AddResult(testName+"-gpu_compute", atts, "s",    gpu_compute_time);
        resultDB.AddResult(testName+"-cpu_compute", atts, "s",    cpu_compute_time);        
        
    } // for (passes)

    // Free pinned host memory
    CUDA_SAFE_CALL(cudaFreeHost(h_send_counts));
    CUDA_SAFE_CALL(cudaFreeHost(h_send_displs));
    CUDA_SAFE_CALL(cudaFreeHost(h_recv_counts));
    CUDA_SAFE_CALL(cudaFreeHost(h_recv_displs));
    CUDA_SAFE_CALL(cudaFreeHost(h_keys));
    CUDA_SAFE_CALL(cudaFreeHost(h_vals));
    CUDA_SAFE_CALL(cudaFreeHost(h_recvd_keys));
    CUDA_SAFE_CALL(cudaFreeHost(h_recvd_vals));
    CUDA_SAFE_CALL(cudaFreeHost(h_samples));
    if (mpi_rank == 0) {
        CUDA_SAFE_CALL(cudaFreeHost(h_all_samples));
    }
}

// Benchmark entry point: run the distributed sample sort with 64-bit
// keys and 64-bit values (the only instantiation the MPI datatypes
// inside RunTest support).
void RunBenchmark(ResultDatabase &resultDB, OptionParser &op)
{
    RunTest<long long, long long>("long", resultDB, op);
}
