
#include <cuda.h>
#include <omp.h>
#include <stdio.h>

#include "cuda_util.h"
#include "cush_types.h"
#include "cush_alltoall.h"
#include "cush_util.h"

/**
 * All-to-all exchange across the devices described by src_buff/dst_buff.
 *
 * Each source device splits its buffer into ndevices equal chunks; chunk j
 * goes to device j, landing at offset (source_index * chunk_size) in that
 * device's destination buffer.  One OpenMP thread drives each source device
 * and walks the peers in ring order, starting with itself.
 *
 * Runtime-checked preconditions (violations print to stderr and exit):
 *   - src_buff and dst_buff cover the same number of devices
 *   - each per-device buffer size divides evenly by ndevices
 *   - corresponding src/dst buffers are equal in size
 *
 * NOTE(review): copies are enqueued with the *Async memcpy variants and this
 * function returns without an explicit device/stream synchronize — presumably
 * the caller (or checkCUDAError) waits for completion; confirm via cush_util.
 */
void cush_alltoall(cush_dbuff_t* src_buff, cush_dbuff_t* dst_buff){

    if(src_buff->ndevices != dst_buff->ndevices){
        fprintf(stderr, "Unequal number of devices involved in cush_all-to-all\n");
        exit(EXIT_FAILURE);
    }

    int ndevices = src_buff->ndevices;

    /* One thread per source device: every device sends a chunk to every
     * other device (MPI realizes all-to-all as an extended all_gather). */
    #pragma omp parallel num_threads(src_buff->ndevices)
    {
        int tid    = omp_get_thread_num();
        int my_dev = src_buff->id[tid];   /* CUDA device id owning the source */

        /* Uniform chunk size: every peer receives size/ndevices bytes. */
        int chunk = src_buff->size[tid]/ndevices;
        if(src_buff->size[tid]%ndevices != 0){
             fprintf(stderr, "Message size is not uniform across devices in cush_all-to-all\n");
             exit(EXIT_FAILURE);
        }

        for(int step = 0; step < ndevices; step++){
            /* Ring schedule: slot = self on step 0, then cycle upward. */
            int peer_slot = (tid + step) % ndevices;
            int peer_dev  = dst_buff->id[peer_slot];

            if(src_buff->size[tid] != dst_buff->size[peer_slot]){
                fprintf(stderr, "Size mismatch in cush_all-to-all\n");
                exit(EXIT_FAILURE);
            }

            /* Make the destination device current before enqueueing. */
            cudaSetDevice(peer_dev);
            checkCUDAError("Setting all-to-all device failed", my_dev);

            char* dst = ((char*)(dst_buff->ptr[peer_slot])) + tid*chunk;
            char* src = ((char*)(src_buff->ptr[tid]))       + peer_slot*chunk;

            if(my_dev == peer_dev){
                /* Same physical device: plain device-to-device copy. */
                cudaMemcpyAsync((void*)dst, (void*)src, chunk,
                                cudaMemcpyDeviceToDevice);
                checkCUDAError("all-to-all self copy failed", my_dev);
            }
            else{
                /* Distinct devices: the peer-copy API is required. */
                cudaMemcpyPeerAsync((void*)dst, peer_dev,
                                    (void*)src, my_dev,
                                    chunk);
                checkCUDAError("all-to-all peer copy failed", my_dev, peer_dev);
            }
        }
    } /* end omp parallel */
}

/* vim: set sw=4 sts=4 et foldmethod=syntax syntax=c : */
