#include <stdio.h>
#include <stdlib.h>

#include "cuda.h"
#include "nccl.h"
#include "cuda_runtime_api.h"
#include "cuda_runtime.h"

// Abort-on-error wrapper for CUDA runtime calls.
// Prints file/line plus the human-readable error string to stderr
// (diagnostics do not belong on stdout) and terminates the process:
// errors in this sample are unrecoverable.
#define CUDACHECK(cmd) do {                         \
  cudaError_t err = cmd;                            \
  if (err != cudaSuccess) {                         \
    fprintf(stderr,                                 \
        "Failed: Cuda error %s:%d '%s'\n",          \
        __FILE__,__LINE__,cudaGetErrorString(err)); \
    exit(EXIT_FAILURE);                             \
  }                                                 \
} while (0)


// Abort-on-error wrapper for NCCL calls.
// Prints file/line plus ncclGetErrorString() to stderr
// (diagnostics do not belong on stdout) and terminates the process:
// errors in this sample are unrecoverable.
#define NCCLCHECK(cmd) do {                         \
  ncclResult_t res = cmd;                           \
  if (res != ncclSuccess) {                         \
    fprintf(stderr,                                 \
        "Failed, NCCL error %s:%d '%s'\n",          \
        __FILE__,__LINE__,ncclGetErrorString(res)); \
    exit(EXIT_FAILURE);                             \
  }                                                 \
} while (0)


// Debug helper: each thread prints one element of `data` (1D launch).
// Threads whose global index falls past `num` exit early, so any grid
// that covers `num` is valid. Device-side printf serializes — debug only.
__global__ void data_show(float* data, int num) {
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= num) return;
    printf("data[%2d] = %f\n", gid, data[gid]);
}

// Fill kernel: sets every element of `data` to 1.0f.
// Written as a grid-stride loop so correctness no longer depends on the
// host launching exactly ceil(num/blockDim) blocks — any 1D grid works.
__global__ void data_set(float* data, int num) {
    int stride = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num; i += stride) {
        data[i] = 1.0f;  // float literal, not (float)1.0: avoid a double constant
    }
}

// Single-process (single-thread), multi-device NCCL example.
// Each GPU fills its send buffer with 1.0f, then all devices perform an
// all-reduce (sum) into their receive buffers, so every element of every
// receive buffer should end up equal to ndev (2.0 here).
int main(int argc, char** argv) {
    ncclComm_t comms[2];                // this box has only 2 GPUs (A10)
    int ndev    = 2;
    int nums    = 32 * 1024 * 1024;     // element count per buffer
    int devs[2] = {0, 1};


    //! 1. Allocate per-device send/recv buffers and one stream per device
    //!    (independent streams let each GPU's fill + collective be issued
    //!    without serializing against the other device's work).
    typedef float T;
    T** sendbuff = (T**)malloc(ndev * sizeof(T*));
    T** recvbuff = (T**)malloc(ndev * sizeof(T*));
    cudaStream_t* streams  = (cudaStream_t*)malloc(sizeof(cudaStream_t) * ndev);

    const int threads = 1024;
    const int blocks  = (nums + threads - 1) / threads;  // ceil-div: grid derived from nums, not hard-coded
    for (int i = 0; i < ndev; i++) {
        CUDACHECK(cudaSetDevice(i));
        CUDACHECK(cudaMalloc((void**)(sendbuff + i), nums * sizeof(T)));
        CUDACHECK(cudaMalloc((void**)(recvbuff + i), nums * sizeof(T)));
        CUDACHECK(cudaStreamCreate(streams + i));
        // cudaMemset is byte-wise and cannot write float 1.0f — use a fill kernel.
        data_set<<<blocks, threads, 0, streams[i]>>>(sendbuff[i], nums);
        CUDACHECK(cudaGetLastError());   // surface launch-configuration errors
        CUDACHECK(cudaMemset(recvbuff[i], 0, nums * sizeof(T)));
    }

    //! 2. Create the NCCL communicators, one per device.
    // ncclResult_t  ncclCommInitAll(ncclComm_t* comm, int ndev, const int* devlist);
    NCCLCHECK(ncclCommInitAll(comms, ndev, devs));
    NCCLCHECK(ncclGroupStart());   // Group API is mandatory when one thread drives multiple devices
    for (int i = 0; i < ndev; i++) {
        // ncclResult_t  ncclAllReduce(const void* sendbuff, void* recvbuff, size_t count,
        //                              ncclDataType_t datatype, ncclRedOp_t op,
        //                              ncclComm_t comm, cudaStream_t stream);
        NCCLCHECK(ncclAllReduce((const void*)sendbuff[i], (void*)recvbuff[i],
                                nums,  ncclFloat, ncclSum, comms[i], streams[i]));
    }
    NCCLCHECK(ncclGroupEnd());      // Group End API

    //! 3. Synchronize each stream so the collective has completed, then
    //!    print the first 32 reduced elements of each device for inspection.
    for (int i = 0; i < ndev; ++i) {
        CUDACHECK(cudaSetDevice(i));
        CUDACHECK(cudaStreamSynchronize(streams[i]));

        data_show<<<1, 32, 0, streams[i]>>>(recvbuff[i], 32);
        CUDACHECK(cudaGetLastError());   // surface launch-configuration errors
        CUDACHECK(cudaStreamSynchronize(streams[i]));
        printf("=================================\n");
    }

    //! 4. Release device memory and streams (streams were previously leaked).
    for (int i = 0; i < ndev; ++i) {
        CUDACHECK(cudaSetDevice(i));
        CUDACHECK(cudaFree(sendbuff[i]));
        CUDACHECK(cudaFree(recvbuff[i]));
        CUDACHECK(cudaStreamDestroy(streams[i]));
    }

    for(int i = 0; i < ndev; ++i)
      NCCLCHECK(ncclCommDestroy(comms[i]));

    // Host-side pointer arrays (were previously leaked).
    free(sendbuff);
    free(recvbuff);
    free(streams);

    printf("ncclAllReduce 集合通信操作完成...\n");
    return 0;
}

