#include "collectives.h"
//#include "timer.h"

#include <mpi.h>

#include <cassert>
#include <cmath>
#include <iostream>
#include <stdexcept>
#include <vector>

#define NO_DEVICE -1

// Run the CPU ring-allreduce benchmark: for each (size, iterations) pair,
// allreduce a buffer of ones `iterations` times, verify every element equals
// the communicator size, and have rank 0 report the mean time per iteration
// and the spread across iterations.
//
// Parameters:
//   sizes      - element counts (floats) to benchmark, paired index-wise with
//                `iterations`; both vectors are assumed to be the same length.
//   iterations - number of allreduce repetitions for each size.
//   argc/argv  - forwarded to InitCollectives for MPI initialization.
//
// Throws std::runtime_error if an MPI query fails. Returns early (after
// printing to stderr) if an allreduce result is incorrect.
void TestCollectivesCPU(std::vector<size_t>& sizes, std::vector<size_t>& iterations, int argc, char* argv[]) {
    // Initialize on CPU (no GPU device ID).
    InitCollectives(NO_DEVICE, argc, argv);

    // Get the MPI size and rank.
    int mpi_size;
    if(MPI_Comm_size(MPI_COMM_WORLD, &mpi_size) != MPI_SUCCESS)
        throw std::runtime_error("MPI_Comm_size failed with an error");
    std::cout << "mpi_size:" << mpi_size <<std::endl ;

    int mpi_rank;
    if(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank) != MPI_SUCCESS)
        throw std::runtime_error("MPI_Comm_rank failed with an error");
    std::cout << "mpi_rank:" << mpi_rank <<std::endl ;

    // Per-iteration timings, kept so rank 0 can compute the spread of the
    // samples around the mean.
    std::vector<float> vec_seconds;

    for(size_t i = 0; i < sizes.size(); i++) {
        size_t size = sizes[i];
        size_t iters = iterations[i];

        // Own the input buffer with a vector so it is released on every exit
        // path, including the early return below (the raw new[]/delete[] in
        // the original leaked on verification failure).
        std::vector<float> data(size);
        float seconds = 0.0f;
        vec_seconds.clear();

        for(size_t iter = 0; iter < iters; iter++) {
            // Initialize data as a block of ones, which makes it easy to check for correctness.
            for(size_t j = 0; j < size; j++) {
                data[j] = 1.0f;
            }

            float* output;
            // MPI_Wtime() returns double seconds since an arbitrary epoch;
            // keep the timestamp in double so the subtraction does not lose
            // precision, and sample the clock exactly once per iteration so
            // `seconds` and `vec_seconds` describe the same measurement.
            double start_time = MPI_Wtime();
            RingAllreduce(data.data(), size, &output);
            float elapsed = (float)(MPI_Wtime() - start_time);
            seconds += elapsed;
            vec_seconds.push_back(elapsed);

            // Check that we get the expected result: an allreduce of all-ones
            // buffers must yield mpi_size in every slot.
            for(size_t j = 0; j < size; j++) {
                if(output[j] != (float) mpi_size) {
                    // Report the value that actually failed the check (the
                    // output, not the untouched input buffer).
                    std::cerr << "Unexpected result from allreduce: " << output[j] << std::endl;
                    delete[] output;
                    return;
                }
            }
            delete[] output;
        }

        // Guard against iters == 0 to avoid dividing by zero below.
        if(mpi_rank == 0 && iters > 0) {
            float mean = seconds / iters;
            // Note: this is the standard deviation (sqrt of the variance),
            // even though the printed label says "variance".
            float stddev = 0.0f;
            assert(vec_seconds.size() == iters);
            for(size_t k = 0; k < vec_seconds.size(); k++)
            {
                stddev += (vec_seconds[k] - mean) * (vec_seconds[k] - mean);
            }
            stddev = sqrt(stddev / iters);
            std::cout << "Verified allreduce for size "
                << size
                << " ("
                << mean
                << " s per iteration)" 
                << " variance(s):" << stddev
                << std::endl;
        }
    }
}


// Test program for baidu-allreduce collectives, should be run using `mpirun`.
// Test program for baidu-allreduce collectives, should be run using `mpirun`.
// Builds the (buffer size, iteration count) tables, runs the CPU benchmark,
// and finalizes MPI. The CLI device argument is currently disabled and the
// device is forced to "cpu".
int main(int argc, char** argv) {
    //if(argc != 2) {
    //    std::cerr << "Usage: ./allreduce-test (cpu|gpu)" << std::endl;
     //   return 1;
    //}
    //std::string input(argv[1]);
    std::string input("cpu");

    // Buffer sizes used for tests, paired index-wise with tmp_iter below.
    static const size_t tmp_size[] = {
        0, 
        64, // 256 B
        256, // 1024 B, 1 KB
        1024, // 4 KB 
        4096, // 16 KB 
        16384, // 64 KB

        65536, // 256 KB 
        262144, // 1024 KB, 1MB
        1048576, // 4096KB, 4MB
        4194304,// 16 MB,
        8388608, // 32 MB,
        16777216, // 64 MB 

        33554432, // 128 MB
        67108864, // 256 MB
        134217728 // 512 MB
        }; 
    // Iteration counts: fewer repetitions for the larger (slower) buffers.
    static const size_t tmp_iter[] = {500, 500, 500, 500, 500, 500,/**/ 200, 100, 100, 50, 50, 50, /**/ 20, 10,10};
    // The two tables are indexed in lockstep; a length mismatch would read
    // out of bounds, so reject it at compile time instead of deriving the
    // shared length from only one of them.
    static_assert(sizeof(tmp_size) / sizeof(tmp_size[0]) == sizeof(tmp_iter) / sizeof(tmp_iter[0]),
                  "tmp_size and tmp_iter must have the same number of entries");
    const int tmp_len = (int)(sizeof(tmp_iter) / sizeof(tmp_iter[0]));
    std::cout << "test data num:" << tmp_len << std::endl;
    std::cout<<"size of 1f:" << sizeof(float) <<  std::endl;

    // Range-construct the vectors directly from the tables.
    std::vector<size_t> buffer_sizes(tmp_size, tmp_size + tmp_len);
    std::vector<size_t> iterations(tmp_iter, tmp_iter + tmp_len);

    // Test on either CPU or GPU (GPU path currently disabled).
    if(input == "cpu") {
        TestCollectivesCPU(buffer_sizes, iterations, argc, argv);
    // } else if(input == "gpu") {
    //     TestCollectivesGPU(buffer_sizes, iterations);
    } else {
        std::cerr << "Unknown device type: " << input << std::endl
                  << "Usage: ./allreduce-test (cpu|gpu)" << std::endl;
        return 1;
    }

    // Finalize to avoid any MPI errors on shutdown.
    MPI_Finalize();

    return 0;
}
