#include "ParallelMPI.h"

#include <cmath>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <iostream>
#include <sstream>

#include "Log/Log.h"


using namespace std;

/// Initialize the MPI runtime and cache communicator, size and rank.
///
/// Requests MPI_THREAD_FUNNELED so the process may run additional threads
/// (e.g. OpenMP workers) as long as only the main thread makes MPI calls.
///
/// @param argc  pointer to main()'s argc, forwarded to MPI_Init_thread
/// @param argv  pointer to main()'s argv, forwarded to MPI_Init_thread
ParallelMPI::ParallelMPI(int* argc, char*** argv)
{
    int mpi_provided;
    MPI_Init_thread( argc, argv, MPI_THREAD_FUNNELED, &mpi_provided );

    // The thread-support levels form an ordered scale
    // (SINGLE < FUNNELED < SERIALIZED < MULTIPLE), so compare with '<'
    // rather than testing equality against MPI_THREAD_SINGLE.
    if (mpi_provided < MPI_THREAD_FUNNELED)
    {
        // NOTE: this reports the MPI library's thread-support level, not
        // whether OpenMP itself is available.
        cout<<"MPI thread support below MPI_THREAD_FUNNELED; "
              "hybrid MPI + threads (e.g. OpenMP) is not supported"<<endl;
    }

    // Cache the world communicator, its size and this process' rank.
    PMPI_COMM_WORLD = MPI_COMM_WORLD;
    MPI_Comm_size(PMPI_COMM_WORLD, &pic_sz);
    MPI_Comm_rank(PMPI_COMM_WORLD, &pic_rk);

}


/// Destructor is intentionally empty: MPI shutdown is performed explicitly
/// via finish(), not tied to this object's lifetime (the destructor may run
/// after MPI has already been finalized, where MPI calls would be invalid).
ParallelMPI::~ParallelMPI()
{

}

/// Seed the C PRNG with a rank-dependent value so every MPI process draws
/// a different random stream.
///
/// The per-rank offset pic_rk * pic_sz * 10 is distinct on every rank
/// (pic_sz is identical across ranks). The arithmetic is kept entirely in
/// unsigned integers: the original expression multiplied by 10.0, which
/// promoted the whole seed to double and relied on an implicit narrowing
/// conversion back to unsigned in the srand call.
void ParallelMPI::init()
{
    //make the random seed different on every rank
    const unsigned seed = static_cast<unsigned>(time(NULL))
                        + static_cast<unsigned>(pic_rk)
                        * static_cast<unsigned>(pic_sz) * 10u;
    srand(seed);

    //log_info<<"first random number: "<<(double)rand() / RAND_MAX;
}

/// Shut down the MPI runtime, tolerating the case where some other
/// component has already finalized it (double-finalize is an MPI error).
void ParallelMPI::finish()
{
    int already_finalized = 0;
    MPI_Finalized(&already_finalized);
    if (already_finalized)
        return;
    MPI_Finalize();
}


/// Element-wise sum of n doubles across all ranks; the result is written
/// to des on root rank 0 only (MPI_Reduce leaves des untouched elsewhere).
/// src and des must each point to at least n doubles.
void ParallelMPI::reduce_sum_double(double* src, double* des, int n)
{
    MPI_Reduce(src, des, n, MPI_DOUBLE, MPI_SUM, 0, PMPI_COMM_WORLD);
}

/// Element-wise sum of n doubles across all ranks; unlike reduce_sum_double,
/// the result is delivered to des on every rank.
/// src and des must each point to at least n doubles.
void ParallelMPI::all_reduce_sum_double(double* src, double* des, int n)
{
    MPI_Allreduce(src, des, n, MPI_DOUBLE, MPI_SUM, PMPI_COMM_WORLD);
}


/// Element-wise sum of n ints across all ranks; the result is written
/// to des on root rank 0 only (MPI_Reduce leaves des untouched elsewhere).
/// src and des must each point to at least n ints.
void ParallelMPI::reduce_sum_int(int* src, int* des, int n)
{
    MPI_Reduce(src, des, n, MPI_INT, MPI_SUM, 0, PMPI_COMM_WORLD);
}

/// Element-wise sum of n ints across all ranks; unlike reduce_sum_int,
/// the result is delivered to des on every rank.
/// src and des must each point to at least n ints.
void ParallelMPI::all_reduce_sum_int(int* src, int* des, int n)
{
    MPI_Allreduce(src, des, n, MPI_INT, MPI_SUM, PMPI_COMM_WORLD);
}

/// Gather variable-length double buffers from all ranks onto rank `root`.
///
/// @param src        this rank's send buffer (sendcount doubles)
/// @param sendcount  number of doubles this rank contributes
/// @param des        receive buffer; significant only on root
/// @param recvcounts per-rank element counts; significant only on root
/// @param displs     per-rank offsets into des; significant only on root
/// @param root       rank that receives the gathered data
void ParallelMPI::gatherv_double(double* src, int sendcount, double* des, int* recvcounts, int* displs, int root)
{
    MPI_Gatherv(src, sendcount, MPI_DOUBLE, des, recvcounts, displs, MPI_DOUBLE, root, PMPI_COMM_WORLD);
}

/// Gather variable-length int buffers from all ranks onto rank `root`.
///
/// @param src        this rank's send buffer (sendcount ints)
/// @param sendcount  number of ints this rank contributes
/// @param des        receive buffer; significant only on root
/// @param recvcounts per-rank element counts; significant only on root
/// @param displs     per-rank offsets into des; significant only on root
/// @param root       rank that receives the gathered data
void ParallelMPI::gatherv_int(int* src, int sendcount, int* des, int* recvcounts, int* displs, int root)
{
    MPI_Gatherv(src, sendcount, MPI_INT, des, recvcounts, displs, MPI_INT, root, PMPI_COMM_WORLD);
}
