/*
 *  Copyright 2008-2011 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */


/*! \file bitonic_sort.inl
 *  \brief Inline file for bitonic_sort.h.
 *  \note This algorithm is based on the one described
 *        in "Fast in-place, comparison-based sorting with CUDA: 
 *        a study with Bitonic Sort", 
 *        Concurrency and Computation: Practice and Experience 23(7) 
 *        by Hagen Peters, Ole Schulz-Hildebrandt and Norbert Luttenberger.
 * 
 *        Shibdas Bandyopadhyay   07/13/2011
 */

// do not attempt to compile this file with any other compiler
#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC

#include <cmath>
#include <thrust/iterator/iterator_traits.h>
#include <thrust/swap.h>
#include <thrust/detail/backend/dereference.h>
#include <thrust/detail/mpl/math.h> // for log2<N>
#include <thrust/detail/backend/cuda/arch.h>
#include <thrust/detail/backend/cuda/block/copy.h>
#include <thrust/detail/backend/cuda/detail/launch_closure.h>
#include <thrust/detail/backend/cuda/extern_shared_ptr.h>

__THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_BEGIN

namespace thrust
{
namespace detail
{
namespace backend
{
namespace cuda
{
namespace detail
{
namespace bitonic_sort_dev_namespace
{

// Does normalized conditional swap while bitonic merging sequences of size "stride"
// Performs one "normalized" conditional swap for the bitonic merge of
// sequences of size "stride": instead of pairing a key with the element
// 'stride' positions ahead (the regular bitonic conjugate), the conjugate is
// read mirrored from the back of its sequence, so that every sequence can be
// kept sorted in the same direction.
// NOTE(review): the '>> phase' shortcut below assumes stride == 2^phase;
// the visible callers maintain this invariant (stride = size/2 with
// size = 2^(phase+1)) -- confirm before reusing this helper elsewhere.
template<typename Size,
         typename RandomAccessIterator,
         typename StrictWeakOrdering>
static __forceinline__ __device__ void normalized_conditional_swap(Size keys_idx,
                                                                   Size stride,
                                                                   Size phase,
                                                                   Size n,
                                                                   RandomAccessIterator keys,
                                                                   StrictWeakOrdering comp)
{
    typedef typename thrust::iterator_value<RandomAccessIterator>::type KeyType;
    // index into the first sequence to be merged, we can use '&' in place of '%' as stride is a power of 2
    Size this_idx      = 2 * keys_idx - (keys_idx & (stride - 1));
    // For regular bitonic merge, conjugate is at (this_idx + stride) but for normalized 
    // bitonic merge, the sequences is sorted in reverse direction, so read from the back.
    Size seq_no        = (this_idx + stride) >> phase; // same as (this_idx + stride) / stride, improves perf
    Size offset        = (this_idx + stride) - seq_no * stride;
    Size end_seq       = (seq_no + 1) * stride - 1; // end = start of next seq - 1
    // Set the offset from the end 
    Size this_swap_idx = end_seq - offset; 
    
    // Skipping the swap when the mirrored conjugate falls past the end of the
    // data is what makes non-power-of-2 input sizes work.
    if(this_swap_idx < n)
    {
        RandomAccessIterator a = keys + this_idx;
        RandomAccessIterator b = keys + this_swap_idx;
            
        // read both keys, order them with comp, write both back
        KeyType n1 = thrust::detail::backend::dereference(a);
        KeyType n2 = thrust::detail::backend::dereference(b);
            
        thrust::detail::conditional_swap(n1, n2, comp);
            
        thrust::detail::backend::dereference(a) = n1;
        thrust::detail::backend::dereference(b) = n2;
    }
}

// Offset between the keys in a k-multistep partition -- 2^(step_no - (k - 1))
template<int multistep,
         typename Size>
__forceinline__ __device__ unsigned int get_multistep_interval(Size step_no)
{
    // The interval between consecutive keys of one multistep partition is
    // 2^(step_no - (multistep - 1)).
    const Size shift_amount = step_no - (multistep - 1);
    return (1 << shift_amount);
}

// Find out the starting key position for nth multistep partition 
template<int multistep,
         typename Size>
__forceinline__ __device__ unsigned int get_multistep_idx(Size multistep_partition_idx, Size step_no)
{
    // Index of the bitonic merge group this partition belongs to.
    const unsigned int group_no   = multistep_partition_idx >> (step_no - (multistep - 1));
    // Each merge group spans 2^(step_no + 1) keys.
    const unsigned int group_base = group_no * (1 << (step_no + 1));
    // Position of this partition's first key within its group.
    const unsigned int local_idx  = multistep_partition_idx & (get_multistep_interval<multistep>(step_no) - 1);
    return group_base + local_idx;
}

// Heuristic for finding out the multistep size which does not cause any spill
// Heuristic mapping key size to the multistep count that avoids register
// spills: 1 step for keys of 16+ bytes, 2 for 12..15 bytes, 3 otherwise.
template<typename KeyType>
struct choose_number_of_multisteps
{
    enum {value = sizeof(KeyType) >= 16 ? 1
                : sizeof(KeyType) >= 12 ? 2
                : 3};
};

// CTA size used by the global bitonic merge kernels.  The architecture
// argument is currently ignored; 128 threads was found to work best on
// Fermi-class hardware.
__inline__ size_t compute_merge_cta_size(size_t /*cuda_arch*/)
{
    return 128;
}

template<typename Size>
__inline__ size_t floor_log2(Size t)
{
    return (std::floor(log2(static_cast<double>(t))));
}

// Number of CTAs needed so that every thread covers num_keys_per_thread
// keys, rounded up for a partial tail and capped at max_num_blocks.
template<int num_keys_per_thread>
__inline__ size_t compute_num_blocks(size_t num_elements, 
                                     size_t cta_size,
                                     size_t max_num_blocks)
{
    const size_t keys_per_block = num_keys_per_thread * cta_size;
    size_t blocks = num_elements / keys_per_block;
    // one extra block covers the final partial tile, if any
    if(num_elements % keys_per_block != 0)
    {
        ++blocks;
    }
    return thrust::min<size_t>(blocks, max_num_blocks);
}

// Chooses the CTA size (threads per CTA) for the shared-memory sort kernels:
// as many threads as can each own two keys within a shared-memory budget,
// rounded down to a power of two and capped by the device limit.
template<typename KeyType>
__inline__ size_t compute_cta_size(const cudaDeviceProp &props)
{
    // budget only half of the shared memory and half of the max CTA size
    // (presumably to allow multiple resident CTAs -- not stated here)
    size_t max_smem_usage = props.sharedMemPerBlock / 2;
    size_t max_threads_per_cta = props.maxThreadsPerBlock / 2;
    // how many keys fit in the shared-memory budget
    size_t max_num_keys      = max_smem_usage / sizeof(KeyType);
    // each thread owns two keys, so candidate thread count is keys / 2
    size_t candidate_threads = max_num_keys / 2;
    
    // Round off to nearest power of 2
    candidate_threads = 1 << (static_cast<size_t>(floor_log2(candidate_threads)));
    max_threads_per_cta = 1 << (static_cast<size_t>(floor_log2(max_threads_per_cta)));
    
    return thrust::min<size_t>(candidate_threads, max_threads_per_cta);
}

// Number of ints required to hold N bytes (ceiling division).
template<size_t N>
  struct align_size_to_int
{
  static const size_t value = (N + sizeof(int) - 1) / sizeof(int);
};

// This template meta-function unrolls to produce the correct order of conditional swaps (cs) 
// between keys in a k-multistep partition.
// In general, a cs during a bitonic merge of two sequences of size "stride" is performed 
// between a key at positions "n" and its "conjugate" at position "(n + stride)".
// A k-multistep does 2^(k-1) cs involving 2^k elements each for "k" consecutive 
// steps in a bitonic merge network in a fixed order and without any sync.
// So, essentially it performs multiple steps of bitonic merge on a partition of keys, 
// all in registers.   
// Denote a cs between key ki and its conjugate by 
// cs(ki, c_ki) => cs(keys[keys1_position], keys[keys1_position + stride])
// For 1-multistep -- cs(k1, c_k1)
// For 2-multistep -- cs(k1, c_k1); // 1-multistep
//                    cs(k2, c_k2); // 1-multistep
//                    cs(k1, k2); cs(c_k1, c_k2); // cs keys and conjugates
// For 3-multistep -- 2-multistep(k1, k3); 2-multistep(k2, k4);
//                    cs(k1, k2); cs(k3, k4); cs(c_k1, c_k2); cs(c_k3, c_k4);
// A 'k' multistep on an array consisting (key, conjugate) pair can be represented as two 
//'k-1' multisteps involving alternate pairs followed by a set of cs between those alternate keys and conjugates
// e.g. for k = 3 if we have following mapping between array indices and keys 
// array[<0,1>,<2,3>,<4,5>,<6,7>] => array[<k1, c_k1>, <k2, c_k2>, <k3, c_k3>, <k4, c_k4>] then
// multistep_3(array[<0,1>,<2,3>,<4,5><6,7>]) = multistep_2(array[<0,1>,<4,5>]); 
//                                              multistep_2(array[<2,3>,<6,7>]); 
//                                              cs(0,2); cs(4,6); cs(1,3); cs(5,7)
// Initially, the array is populated by the keys at proper locations in the memory
// and for each cs we check to see if we are within limit.
// Keys (in even indices) are read depending on key_idx and interval calculated
// for this multistep partition and conjugates (odd indices) are 'stride' apart from their
// corresponding key positions e.g.
// for k = 2, array[0] = mem[keys_idx]; array[2] = mem[keys_idx + interval] 
// while array[1] = keys_idx + stride; array[3] = keys_idx + interval + stride; 
// Recursive case of the multistep swap network (step >= 2; step == 1 is
// specialized below).  Expands at compile time into two (step-1)-multisteps
// on alternate register pairs followed by the cross conditional swaps, per
// the derivation in the comment above.
template<unsigned int step>
struct conditional_swap_recurse
{
    // keys_idx    : global index of the first key of this thread's partition
    // n           : total number of valid keys (guards the boundary checks)
    // interval    : distance in memory between consecutive keys of the partition
    // stride      : distance between a key and its conjugate
    // local_keys  : register-resident array holding <key, conjugate> pairs
    // global_keys : iterator into the keys being merged (loads happen in the base case)
    template<unsigned int pivot_idx,
             unsigned int offset,
             typename Size,
             typename KeyType,
             typename RandomAccessIterator,
             typename StrictWeakOrdering>
    static __forceinline__ __device__ void bitonic_merge_multistep_cs(Size keys_idx,
                                                                      Size n,
                                                                      Size interval,
                                                                      Size stride,
                                                                      KeyType *local_keys,
                                                                      RandomAccessIterator global_keys,
                                                                      StrictWeakOrdering comp)
    {
        // Two 'k-1' multisteps on alternate elements
        conditional_swap_recurse<step - 1>::template bitonic_merge_multistep_cs<pivot_idx, 2 * offset>(keys_idx, n, interval, stride, local_keys, global_keys, comp);
        conditional_swap_recurse<step - 1>::template bitonic_merge_multistep_cs<pivot_idx + offset, 2 * offset>(keys_idx, n, interval, stride, local_keys, global_keys, comp);
        
        // We also need to keep track of the position of the keys in the memory to check for limits
        unsigned int cur_idx = pivot_idx;
        Size cur_offset = ((pivot_idx + offset) / 2) * interval;
        
        // Perform conditional swaps on keys and conjugates cs(0,2) and cs(1,3)
        const unsigned int num_pairs = 1 << (step - 2);
        #pragma unroll
        for(int i = 0; i < num_pairs; i++)
        {
            // Conditional swap keys if within limit
            if((keys_idx + cur_offset) < n)
            {
                thrust::detail::conditional_swap(local_keys[cur_idx], local_keys[cur_idx + offset], comp);
            }
            
            // Conditional swap (keys + stride)s if within limits
            if((keys_idx + cur_offset + stride) < n)
            {
                thrust::detail::conditional_swap(local_keys[cur_idx + 1], local_keys[cur_idx + offset + 1], comp);
            }
            
            // Move on to the next pair
            cur_idx    += (offset * 2);
            cur_offset += (offset * interval);
        }
    }
};

// Base case of the multi-step recursion
// Base case of the multi-step recursion: loads a <key, conjugate> pair from
// global memory into the register array and performs their conditional swap.
// NOTE(review): the non-type template parameters here are 'int' while the
// primary template uses 'unsigned int'; callers pass compile-time constants
// so this appears benign, but aligning the types would be tidier -- confirm.
template<>
struct conditional_swap_recurse<1>
{
    template<int pivot_idx,
             int offset,
             typename Size,
             typename KeyType,
             typename RandomAccessIterator,
             typename StrictWeakOrdering>
    static __forceinline__ __device__ void bitonic_merge_multistep_cs(Size keys_idx,
                                                                      Size n,
                                                                      Size interval,
                                                                      Size stride,
                                                                      KeyType *local_keys,
                                                                      RandomAccessIterator global_keys,
                                                                      StrictWeakOrdering comp)
    {
        // memory position of this pair's key within the multistep partition
        Size this_idx = keys_idx + (pivot_idx / 2) * interval;
        
        // load the key and conjugate to local_keys[pivot_pos] and local_keys[pivot_pos + 1]
        if(this_idx < n) 
        { 
            RandomAccessIterator temp  = global_keys + this_idx; 
            local_keys[pivot_idx] = thrust::detail::backend::dereference(temp);
        
        }

        // The condition below and the following cs should be nested inside the previous "if"
        // but keeping them this way gives a much better perf
        Size this_swap_idx = this_idx + stride;
        if(this_swap_idx < n) 
        {    
            RandomAccessIterator temp = global_keys + this_swap_idx; 
            local_keys[pivot_idx + 1] = thrust::detail::backend::dereference(temp);
        }
        
        // Base case is a cs between key and conjugate
        if(this_swap_idx < n) thrust::detail::conditional_swap(local_keys[pivot_idx], local_keys[pivot_idx + 1], comp);
    }
};

template<unsigned int step>
struct multistep_store
{
    // Writes the 2^step register-resident keys of one multistep partition
    // back to global memory.  Even register slots hold keys, odd slots hold
    // their conjugates ('stride' positions away); positions beyond 'n' are
    // skipped so partial (non-power-of-2) inputs stay intact.
    template<typename Size,
             typename KeyType,
             typename RandomAccessIterator>
    static __forceinline__ __device__ void bitonic_merge_multistep_store(Size keys_idx,
                                                                         Size n,
                                                                         Size interval,
                                                                         Size stride,
                                                                         KeyType *local_keys, 
                                                                         RandomAccessIterator global_keys)
    {
        unsigned int num_keys = (1 << step);
        Size write_idx = keys_idx; 
        
        #pragma unroll
        for(int pair = 0; pair < num_keys; pair += 2)
        {
            // store the key, if within range
            if(write_idx < n) 
            {
                RandomAccessIterator out = global_keys + write_idx;
                thrust::detail::backend::dereference(out) = local_keys[pair];
            }
            
            // store its conjugate, which lives 'stride' positions away
            if((write_idx + stride) < n)
            {
                RandomAccessIterator out = global_keys + write_idx + stride;
                thrust::detail::backend::dereference(out) = local_keys[pair + 1];
            }
            
            // advance to the next <key, conjugate> pair in memory
            write_idx += interval; 
        }
    }
};

// bitonic_merge_multistep performs a k-multistep on a partition by loading
// all the keys in a register array and performing a set of conditional swaps
// on them and finally writing the registers back to the memory. There should 
// not be any spilling over to the local store. So, 'k' should be chosen such that
// the array is always stored in register (determined from ptxas output)
// bitonic_merge_multistep performs a k-multistep on a partition by loading
// all the keys in a register array and performing a set of conditional swaps
// on them and finally writing the registers back to the memory. There should 
// not be any spilling over to the local store. So, 'k' should be chosen such that
// the array is always stored in register (determined from ptxas output)
template<unsigned int step,
         typename Size,
         typename RandomAccessIterator,
         typename StrictWeakOrdering>
__forceinline__ __device__ void bitonic_merge_multistep(Size keys_idx,
                                                        Size n,
                                                        Size interval,
                                                        Size stride,
                                                        RandomAccessIterator keys,
                                                        StrictWeakOrdering comp)
{
    typedef typename thrust::iterator_value<RandomAccessIterator>::type KeyType;
    
    // A k-multistep partition has 2^(k) elements
    const int num_keys = 1 << (step);
    
    // Allocate a register array large enough to hold all the keys for this multi-step.
    // Backing storage is declared as int and reinterpreted as KeyType so the
    // size can be expressed in whole ints (see align_size_to_int).
    int cur_keys_int[align_size_to_int<num_keys * sizeof(KeyType)>::value];
    KeyType *cur_keys = reinterpret_cast<KeyType*>(cur_keys_int);
    
    // call template meta-function to produce the correct order of conditional swaps at compile time
    // (it also performs the loads from global memory in its base case)
    conditional_swap_recurse<step>::template bitonic_merge_multistep_cs<0, 2>(keys_idx, n, interval, stride, cur_keys, keys, comp);
    
    // Write the keys in registers back to memory 
    multistep_store<step>::template bitonic_merge_multistep_store(keys_idx, n, interval, stride, cur_keys, keys);
}

// Runs consecutive 'multistep'-sized merge steps for one thread's partitions
// until the remaining stride is too small for this multistep width.
// 'stride' and 'step_no' are in-out parameters: each loop iteration consumes
// 'multistep' merge steps (stride >>= multistep, step_no -= multistep), so the
// caller can chain calls with progressively smaller multistep widths.
template<unsigned int multistep,
         unsigned int num_cs_per_thread,
         typename Size,
         typename RandomAccessIterator,
         typename StrictWeakOrdering>
__forceinline__ __device__ void bitonic_tile_merge_multistep(RandomAccessIterator keys,
                                                             StrictWeakOrdering comp,
                                                             Size partition_idx,
                                                             Size &stride,
                                                             Size &step_no,
                                                             Size tile_elements)
{
    // each k-multistep covers 2^(k-1) of this thread's conditional swaps
    const unsigned int num_groups    = num_cs_per_thread / (1 << (multistep - 1));
    const unsigned int lowest_stride = (1 << (multistep - 1));
    
    for(;stride >= lowest_stride; stride >>= multistep, step_no -= multistep)
    {
        Size interval = get_multistep_interval<multistep>(step_no);
        Size keys_idx = get_multistep_idx<multistep>(partition_idx, step_no);
        
        // Don't loop if we are doing multistep for only one group (improves perf)
        if(num_groups == 1)
        {
            bitonic_merge_multistep<multistep>(keys_idx, tile_elements, interval, stride, keys, comp);
        }
        else
        {
            // successive groups for this thread are blockDim.x partitions apart
            Size group_idx = partition_idx;
            #pragma unroll
            for(Size i = 0; i < num_groups; i++)
            {
                bitonic_merge_multistep<multistep>(keys_idx, tile_elements, interval, stride, keys, comp);
                group_idx += blockDim.x;
                keys_idx   = get_multistep_idx<multistep>(group_idx, step_no);
            }
        }
        // tile lives in shared memory at the call sites; all threads must
        // finish this step before anyone starts the next
        __syncthreads();
    }
}

// Implements a bitonic multistep merge network in shared memory
// Implements a bitonic multistep merge network in shared memory.
// Cascades 3-, 2-, then 1-multistep passes: each pass consumes as many merge
// steps as its width allows (stride and step_no are updated by reference
// inside bitonic_tile_merge_multistep) and hands the remainder onward, so the
// final 1-multistep pass always finishes the merge down to stride 1.
template<unsigned int multistep,
         unsigned int num_cs_per_thread,
         typename Size,
         typename RandomAccessIterator,
         typename StrictWeakOrdering>
__forceinline__ __device__ void bitonic_merge(RandomAccessIterator keys,
                                              StrictWeakOrdering comp,
                                              Size tile_elements,
                                              Size step_no,
                                              Size stride)
{
    // 3-multistep requires roughly 60 registers, don't go beyond for fermi
    if(multistep >= 3) bitonic_tile_merge_multistep<3, num_cs_per_thread>(keys, comp, threadIdx.x, stride, step_no, tile_elements);
    if(multistep >= 2) bitonic_tile_merge_multistep<2, num_cs_per_thread>(keys, comp, threadIdx.x, stride, step_no, tile_elements);
    if(multistep >= 1) bitonic_tile_merge_multistep<1, num_cs_per_thread>(keys, comp, threadIdx.x, stride, step_no, tile_elements);
} // end bitonic_merge()

// Normalized bitonic sort sorts a shared memory load of keys
// Normalized bitonic sort sorts a shared memory load of keys.
// 'tile_size' is the (power-of-2) capacity of the tile; 'tile_elements' is
// the number of valid keys actually present (may be smaller for the last tile).
template<unsigned int multistep,
         unsigned int num_cs_per_thread,
         typename Size,
         typename RandomAccessIterator,
         typename StrictWeakOrdering>
__forceinline__ __device__ void normalized_bitonic_tile_sort_multistep(RandomAccessIterator keys,
                                                                       Size tile_elements,
                                                                       Size tile_size,
                                                                       StrictWeakOrdering comp)
{
    // log2(k) phases of bitonic sort
    for(Size size = 2, phase = 0; size <= tile_size; size *= 2, phase++)
    {
        // In normalized bitonic sort, all sequences are sorted in one direction but 
        // regular bitonic network expects it to be in alternate directions. So, we 
        // change the first step of each phase to do "normalized" conditional swaps
        // where we read in the reverse order for every alternate sequence ensuring 
        // rest of the bitonic network remains unchanged
        Size stride = size / 2;
        Size    tid = threadIdx.x;
        
        // each thread performs num_cs_per_thread swaps, blockDim.x apart
        #pragma unroll
        for(int i = 0; i < num_cs_per_thread; i++)
        {
            normalized_conditional_swap(tid, stride, phase, tile_elements, keys, comp);
            tid += blockDim.x;
        }
        // all swaps of the first step must land before the merge steps below
        __syncthreads();
        
        stride /= 2;
        Size step_no = phase - 1;
        
        // Do rest of the bitonic merge
        if(stride > 0) bitonic_merge<multistep, num_cs_per_thread>(keys, comp, tile_elements, step_no, stride);
    }
} // end normalized_bitonic_tile_sort_multistep() 

// Kernel closure for the first ("normalized") step of one global bitonic
// merge phase: each thread grid-strides over conditional-swap indices
// [0, limit) and performs one normalized conditional swap per index.
// 'n' is the total key count (bounds the swaps); 'limit' is the number of
// swap positions (n_power2 / 2 at the call site).
template<typename RandomAccessIterator,
         typename StrictWeakOrdering,
         typename Size>
  struct normalized_bitonic_global_merge_first_step_closure
{
    RandomAccessIterator keys_first;
    StrictWeakOrdering comp;
    Size stride, phase, n, limit;

    // Fix: member initializers are now listed in declaration order
    // (stride, phase, n, limit).  Members are always initialized in
    // declaration order regardless of the list, so the old out-of-order
    // list was harmless here but triggered -Wreorder-style warnings and
    // invited bugs if initializers ever depended on each other.
    normalized_bitonic_global_merge_first_step_closure(RandomAccessIterator keys_,
                                                       StrictWeakOrdering comp_,
                                                       Size stride_,
                                                       Size phase_,
                                                       Size n_,
                                                       Size limit_)
    : keys_first(keys_),
      comp(comp_),
      stride(stride_),
      phase(phase_),
      n(n_),
      limit(limit_)
    {}
  
    __device__
    void operator ()(void)
    {
        // grid-stride loop over all swap positions
        const Size grid_size  = gridDim.x * blockDim.x;
        Size       global_idx = blockIdx.x * blockDim.x + threadIdx.x;
    
        for(; global_idx < limit; global_idx += grid_size)
        {
            normalized_conditional_swap(global_idx, stride, phase, n, keys_first, comp);
        }
    }
};


// Kernel closure performing one 'multistep'-wide global bitonic merge step:
// each thread grid-strides over multistep partitions [0, limit) and runs a
// register-resident multistep merge on each.  'n' bounds the valid keys;
// 'limit' is the number of partitions (n_power2 >> multistep at call site).
template<unsigned int multistep,
         typename RandomAccessIterator,
         typename StrictWeakOrdering,
         typename Size>
  struct bitonic_global_merge_multistep_closure
{
    RandomAccessIterator keys_first;
    StrictWeakOrdering comp;
    Size step_no, stride, n, limit;

    // Fix: member initializers now follow declaration order
    // (step_no, stride, n, limit).  Members are initialized in declaration
    // order regardless of the list, so the old order was harmless but drew
    // -Wreorder-style warnings.
    bitonic_global_merge_multistep_closure(RandomAccessIterator keys_,
                                           StrictWeakOrdering comp_,
                                           Size step_no_,
                                           Size stride_,
                                           Size n_,
                                           Size limit_)
    : keys_first(keys_),
      comp(comp_),
      step_no(step_no_),
      stride(stride_),
      n(n_),
      limit(limit_)
    {}
  
    __device__
    void operator ()(void)
    {
        const Size grid_size = gridDim.x * blockDim.x;
        Size global_idx      = blockIdx.x * blockDim.x + threadIdx.x;
        // key spacing within a partition is fixed for this step
        Size interval  = get_multistep_interval<multistep>(step_no);
    
        // grid-stride loop over multistep partitions
        for(; global_idx < limit; global_idx += grid_size)
        {
            Size keys_idx = get_multistep_idx<multistep>(global_idx, step_no);
            bitonic_merge_multistep<multistep>(keys_idx, n, interval, stride, keys_first, comp);
        }
    }
};

// Kernel closure that finishes the last few merge steps of a phase in shared
// memory: each CTA loads one tile, runs the multistep bitonic merge on it,
// and writes it back.  Requires tile_size * sizeof(KeyType) bytes of dynamic
// shared memory at launch.
template<unsigned int multistep,
         typename RandomAccessIterator,
         typename StrictWeakOrdering,
         typename Size>
  struct bitonic_tile_multistep_merge_closure
{
    RandomAccessIterator keys_first;
    StrictWeakOrdering comp;
    Size n, step_no;

    // Fix: member initializers now follow declaration order (n before
    // step_no).  Initialization always happens in declaration order, so the
    // old list compiled identically but triggered -Wreorder-style warnings.
    bitonic_tile_multistep_merge_closure(RandomAccessIterator keys_,
                                         StrictWeakOrdering comp_,
                                         Size step_no_,
                                         Size n_)
    : keys_first(keys_),
      comp(comp_),
      n(n_),
      step_no(step_no_)
    {}

  __device__
  void operator()(void)
  {
    typedef typename thrust::iterator_value<RandomAccessIterator>::type KeyType;
    
    // each thread does 2^(multistep-1) conditional swaps over 2 keys each
    const Size num_cs_per_thread = 1 << (multistep - 1);
    const Size tile_size = 2 * blockDim.x * num_cs_per_thread;
    
    // dynamically-sized shared memory tile
    thrust::detail::backend::cuda::extern_shared_ptr<KeyType> shared_ptr;
    KeyType *s_keys = shared_ptr;

    const Size grid_size = gridDim.x * tile_size;
    Size tile_offset     = blockIdx.x * tile_size;
    
    keys_first += tile_offset;
    for(; tile_offset < n; tile_offset  += grid_size, keys_first += grid_size)
    {
        // the last tile may be partial
        Size tile_elements  = thrust::min<Size>((n- tile_offset), tile_size);
        
        thrust::detail::backend::cuda::block::copy(keys_first, keys_first + tile_elements, s_keys);
        __syncthreads();
        
        // 'stride' starts from 'tile_size / 2' as we are merging two 'tile_size/2' sequences
        Size stride = tile_size / 2;
        bitonic_merge<multistep, num_cs_per_thread>(s_keys, comp, tile_elements, step_no, stride);
        
        thrust::detail::backend::cuda::block::copy(s_keys, s_keys + tile_elements, keys_first);
        __syncthreads();
    }
  }
}; // end bitonic_tile_multistep_merge_closure

// Kernel closure for the first phases of the sort: each CTA loads one tile
// into shared memory, fully sorts it with the normalized bitonic multistep
// sort, and writes it back.  Requires tile_size * sizeof(KeyType) bytes of
// dynamic shared memory at launch.
template<unsigned int multistep,
         typename RandomAccessIterator,
         typename StrictWeakOrdering,
         typename Size>
  struct bitonic_tile_multistep_sort_closure
{
    RandomAccessIterator keys_first;
    StrictWeakOrdering comp;
    Size n;
    bitonic_tile_multistep_sort_closure(RandomAccessIterator keys_,
                                        StrictWeakOrdering comp_,
                                        Size n_)
    : keys_first(keys_),
      comp(comp_),
      n(n_)
    {}

    __device__
    void operator()(void)
    { 
        typedef typename iterator_value<RandomAccessIterator>::type KeyType;
        // each thread does 2^(multistep-1) conditional swaps over 2 keys each
        const Size num_cs_per_thread = 1 << (multistep - 1);
        Size tile_size = 2 * blockDim.x * num_cs_per_thread;
        Size grid_size = gridDim.x * tile_size;
        Size tile_offset     = blockIdx.x * tile_size;
        
        // dynamically-sized shared memory tile
        thrust::detail::backend::cuda::extern_shared_ptr<KeyType> shared_ptr;
        KeyType *s_keys = shared_ptr;
        
        // Advance the iterators to the start of the block
        keys_first += tile_offset;
         
        // If we have more number tiles than the thread blocks launched
        for(; tile_offset < n; tile_offset  += grid_size, keys_first += grid_size)
        {
            // Take care of last non-full block
            Size tile_elements  = thrust::min<Size>((n- tile_offset), tile_size);
            
            // Load the tile from global memory to shared memory 
            thrust::detail::backend::cuda::block::copy(keys_first, keys_first + tile_elements, s_keys);
            __syncthreads();
            
            normalized_bitonic_tile_sort_multistep<multistep, num_cs_per_thread>(s_keys, tile_elements, tile_size, comp);

            // write the result back to the global memory
            thrust::detail::backend::cuda::block::copy(s_keys, s_keys + tile_elements, keys_first);
            __syncthreads();
        }
    }
}; // end bitonic_tile_multistep_sort_closure

// Launches 'multistep'-wide global merge kernels while the remaining stride
// is large enough that the step cannot be finished inside a shared-memory
// tile.  'stride' and 'step_no' are in-out: each launch consumes 'multistep'
// merge steps, so the caller can chain calls with smaller multistep widths
// and finally fall back to the shared-memory tile merge.
template<int multistep,
         typename Size,
         typename RandomAccessIterator,
         typename StrictWeakOrdering>
__inline void global_multistep_merge(RandomAccessIterator keys,
                                     StrictWeakOrdering comp,
                                     Size &stride,
                                     Size &step_no,
                                     Size n,
                                     Size n_power2,
                                     unsigned int tile_size,
                                     unsigned int merge_cta_size,
                                     unsigned int max_num_blocks)
{
    // each thread handles 2^multistep keys in a k-multistep merge
    unsigned int num_blocks = bitonic_sort_dev_namespace::compute_num_blocks<(1 << multistep)>(n_power2, merge_cta_size, max_num_blocks); 
    for(;stride >= (tile_size << (multistep - 1)); stride >>= multistep, 
                                                   step_no -= multistep)
    {
        // limit = number of multistep partitions = n_power2 / 2^multistep
        bitonic_sort_dev_namespace::bitonic_global_merge_multistep_closure<multistep, RandomAccessIterator, StrictWeakOrdering, Size> closure(keys, comp, step_no,
                                                                                                                                              stride, n,
                                                                                                                                              (n_power2) >> multistep);
        launch_closure(closure, num_blocks, merge_cta_size, 0);
    }
}

} // end bitonic_sort_dev_namespace

// bitonic_sort() does a normalized bitonic sort on the keys. The normalized 
// variant of the bitonic sort does all conditional swaps in one direction. Hence
// it is divergence free and we can do non-power-of-2 sorting by simply ignoring
// the conditional swaps which go beyond the range.
// Recall, in bitonic sort there are log(n) phases and each phase i has 'i' steps.
// Sequences of increasing sizes are merged in each phase. Hence, first few
// phases can be done within the shared memory. Also, during each step of every phase
// sequences of gradually decreasing sizes are merged and hence last few steps of 
// each phase (after the initial few phases) can also be done in shared memory.
// The optimization in this implementation is due to loading a bunch of keys
// in registers and do multiple steps on them without going back and forth to 
// the global memory. We call them multisteps and they help to reduce the number
// of trips made to the global memory or shared memory in cases when we are merging
// within the shared memory.
// Normalized_bitonic_sort(keys) 
// {
//     Normalized_bitonic_sort_sm(keys); // Do first few phases in shared memory
//     for(ph = firstPhases + 1; ph <= log(n); ph++)
//     {
//          int step = ph;
//          for(; step >= lastSteps + 4; step -= 4)
//              bitonic_merge_4multistep(keys);
//          for(; step >= lastSteps + 3; step -= 3)
//              bitonic_merge_3multistep(keys);
//          for(; step >= lastSteps + 2; step -= 2)
//              bitonic_merge_2multistep(keys);
//          for(; step >= lastSteps + 1; step--)
//              bitonic_merge_1multistep(keys);
//          bitonic_merge_sm(keys); // perform last few steps in shared memory
//      }
//  }
       
// Entry point: sorts [keys_first, keys_last) in place on the device using the
// normalized bitonic sort with multistep merges described above.  comp is a
// strict weak ordering.  See the pseudo-code sketch in the comment above for
// the overall phase structure.
template<typename RandomAccessIterator,
         typename StrictWeakOrdering>
void bitonic_sort(RandomAccessIterator keys_first,
                  RandomAccessIterator keys_last,
                  StrictWeakOrdering comp)
{
    typedef typename thrust::iterator_value<RandomAccessIterator>::type KeyType;
    typedef typename thrust::iterator_difference<RandomAccessIterator>::type difference_type;
    
    const difference_type n = keys_last - keys_first;
    
    // don't launch an empty kernel
    if(n == 0) return;
    using namespace bitonic_sort_dev_namespace;
    
    const cudaDeviceProp &props = thrust::detail::backend::cuda::arch::device_properties();
    const size_t max_num_blocks = props.maxGridSize[0];
    // Compute CTA size depending on the size of the key and shared memory size
    unsigned int cta_size      = compute_cta_size<KeyType>(props);
    // set number of blocks to launch assuming each thread does one operation with 2 keys (for 1-multistep)
    unsigned int num_blocks    = compute_num_blocks<2>(n, cta_size, max_num_blocks);
    // Set the number of keys that can be sorted in shared memory by 'cta_size' threads
    unsigned int tile_size     = cta_size * 2;
    // Round off shared memory 'tile_size' to nearest multiple of integers
    unsigned int tile_size_int = ((tile_size * sizeof(KeyType)) / sizeof(int)) + (((tile_size * sizeof(KeyType)) % sizeof(int)) ? 1 : 0);
    unsigned int lg_tile_size  = static_cast<unsigned int>(log2(static_cast<double>(tile_size)));
    
    // Round off n to next power of 2
    // NOTE(review): '1 <<' shifts a plain int, which overflows once the shift
    // count reaches 31 (n around 2^30 and up); difference_type(1) << ... would
    // be safer for very large inputs -- confirm intended supported range.
    difference_type n_power2 = 1 << (static_cast<difference_type>(std::ceil(log2(static_cast<double>(n)))));
    
    // set up the CTA size for the bitonic merge phase
    size_t  merge_cta_size  = compute_merge_cta_size(thrust::detail::backend::cuda::arch::compute_capability(props));

    // Each thread does one conditional swap (for 1-multistep) and hence works on 2 keys
    size_t merge_num_blocks = compute_num_blocks<2>(n_power2, cta_size, max_num_blocks);
    
    // For a n-multistep, 2^n keys are read and put into registers and each thread does 1 << (n - 1)
    // conditional swaps on them. So, set multistep and cta_size based on size of the key
    const size_t multistep = choose_number_of_multisteps<KeyType>::value;
    const size_t cta_size_launch  = cta_size / ( 1 << (multistep - 1));
    
    // Launch kernel to sort the tiles in shared memory using bitonic sort with multistep merges
    // XXX TODO: dynamically choose between unsigned int & difference_type depending on n
    bitonic_tile_multistep_sort_closure<multistep, RandomAccessIterator, StrictWeakOrdering, unsigned int> closure(keys_first, comp, n);
    launch_closure(closure, num_blocks, cta_size_launch, tile_size_int * sizeof(int));
    
    // Carry out rest of the bitonic merge steps
    for(difference_type size = 2 * tile_size, phase = lg_tile_size; size <= n_power2; size *= 2, phase++)
    {
        unsigned int stride = size / 2;
        // First step in normalized bitonic merge is handled differently
        // XXX TODO: dynamically choose between unsigned int & difference_type depending on n
        normalized_bitonic_global_merge_first_step_closure<RandomAccessIterator, StrictWeakOrdering, unsigned int> closure(keys_first, comp, stride,
                                                                                                                           phase, n, n_power2 / 2);
        launch_closure(closure, merge_num_blocks, merge_cta_size, 0);
       
        // Set the stride for the next step and do multisteps merges 
        stride = stride / 2;
        // step_no decreases with each call to do_global_multistep_merge
        unsigned int step_no = phase - 1;
        
        // 4-multistep merge kernel requires roughly 60 registers, don't go beyond for Fermi
        // The calls below form a cascade: each consumes as many merge steps as
        // its stride range allows (stride and step_no are updated by reference)
        // and hands the remainder to the next-smaller multistep width.
        if(sizeof(KeyType) <= 4)  global_multistep_merge<4, unsigned int>(keys_first, comp, stride, step_no, n, n_power2, tile_size, merge_cta_size, max_num_blocks);
        if(sizeof(KeyType) <= 8)  global_multistep_merge<3, unsigned int>(keys_first, comp, stride, step_no, n, n_power2, tile_size, merge_cta_size, max_num_blocks);
        if(sizeof(KeyType) <= 16) global_multistep_merge<2, unsigned int>(keys_first, comp, stride, step_no, n, n_power2, tile_size, merge_cta_size, max_num_blocks);
        global_multistep_merge<1, unsigned int>(keys_first, comp, stride, step_no, n, n_power2, tile_size, merge_cta_size, max_num_blocks);
        
        // Perform rest of the merge steps in shared memory
        // XXX TODO: dynamically choose between unsigned int & difference_type depending on n
        bitonic_tile_multistep_merge_closure<multistep, RandomAccessIterator, StrictWeakOrdering, unsigned int> closure1(keys_first, comp, step_no, n);
        launch_closure(closure1, num_blocks, cta_size_launch, tile_size_int * sizeof(int));
    }
}
} // end namespace detail
} // end namespace cuda
} // end namespace backend
} // end namespace detail
} // end namespace thrust


__THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_END

#endif // THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC

