#include "../Parallel/ParallelMPI.h"

#include <cmath>
#include <cstring>
#include <ctime>

#include <iostream>
#include <sstream>
#include <vector>

#include "../Input/PicParams.h"
#include "../Tool/Macros.h"

#include "../PhysicalField/PhysicalField.h"
#include "../Field/Field.h"
#include "../Species/Species.h"
#include "../Grid/Grid.h"
#include "../IO/IO.h"

using namespace std;

// Primary constructor: initializes the MPI runtime and caches the size
// and rank of this process within MPI_COMM_WORLD.
ParallelMPI::ParallelMPI(int* argc, char*** argv)
{
    // Request FUNNELED support so the master thread may make MPI calls
    // while other (OpenMP) threads exist.
    int provided = 0;
    MPI_Init_thread( argc, argv, MPI_THREAD_FUNNELED, &provided );

    // MPI_THREAD_SINGLE means the library granted no thread support at all.
    if (provided == MPI_THREAD_SINGLE)
    {
        cout<<"openMP not supported"<<endl;
    }

    // Work on a class-owned copy of the world communicator.
    PMPI_COMM_WORLD = MPI_COMM_WORLD;
    MPI_Comm_size( PMPI_COMM_WORLD, &pic_sz );
    MPI_Comm_rank( PMPI_COMM_WORLD, &pic_rk );
}

// Secondary constructor: inherits the communicator, size and rank from an
// already-initialized ParallelMPI instance (no MPI_Init here), then builds
// the domain-decomposition state via init().
ParallelMPI::ParallelMPI(PicParams* params_, ParallelMPI* pmpi_) :
    params(params_),
    PMPI_COMM_WORLD(pmpi_->PMPI_COMM_WORLD),
    pic_sz(pmpi_->pic_sz),
    pic_rk(pmpi_->pic_rk)
{
    // size/rank are copied from pmpi_ rather than queried again with
    // MPI_Comm_size / MPI_Comm_rank — same values, one fewer MPI call each.
    init();
}

// Destructor: shuts down the MPI runtime exactly once.
// NOTE(review): the arrays allocated in init() (number_of_procs, coords_,
// periods_) are never delete[]'d here — they cannot be freed unconditionally
// because the argc/argv constructor never calls init(); consider std::vector.
ParallelMPI::~ParallelMPI()
{
    // Only finalize if no one has finalized MPI already.
    int finalized = 0;
    MPI_Finalized( &finalized );
    if (finalized == 0)
    {
        MPI_Finalize();
    }
}

// Build the (not yet populated) Cartesian-topology bookkeeping for the
// domain decomposition, and seed the C random generator per rank.
void ParallelMPI::init()
{
    ndims_ = params->n_dim_field;

    // NOTE(review): these arrays are never delete[]'d (the destructor only
    // finalizes MPI) — they leak; consider std::vector members instead.
    number_of_procs  = new int[ndims_];
    coords_  = new int[ndims_];
    periods_  = new int[ndims_];

    reorder_ = 0;
    nbNeighbors_ = 2;   // one neighbor on each side of a dimension

    // Zero all per-dimension topology descriptors in a single pass
    // (was three identical loops): non-periodic, origin coords, 0 procs.
    for(int i = 0; i < ndims_; i++)
    {
        periods_[i] = 0;
        coords_[i] = 0;
        number_of_procs[i] = 0;
    }

    // No neighbors assigned yet: mark every slot as MPI_PROC_NULL so that
    // sends/receives to them are no-ops.
    for(int i_dim = 0; i_dim < 3; i_dim++)
    {
        for(int i = 0; i < nbNeighbors_; i++)
        {
            neighbor_[i_dim][i] = MPI_PROC_NULL;
        }
    }

    // Make the random seed different on every rank. The offset is exact in
    // integer arithmetic (the original computed it in double for no reason).
    srand( (unsigned)time(NULL) + (unsigned)(pic_rk * pic_sz * 10) );

    log_info<<"first random number: "<<(double)rand() / RAND_MAX;

}

// Broadcast a std::string from the master rank (rank 0) to all other ranks.
// On return, `val` on every rank equals the master's string.
void ParallelMPI::bcast( string& val )
{
    // First broadcast the length (including the NUL terminator).
    int charSize=0;
    if (isMaster()) charSize = (int)val.size()+1;
    MPI_Bcast(&charSize, 1, MPI_INT, 0, PMPI_COMM_WORLD);

    // Fixed two defects of the original:
    //  - `char tmp[charSize]` was a VLA, which is not standard C++;
    //  - every rank strcpy'd its OWN `val` into the buffer, so a non-master
    //    rank whose `val` was longer than the master's overflowed the buffer.
    std::vector<char> tmp(charSize, '\0');
    if (isMaster()) strcpy(tmp.data(), val.c_str());
    MPI_Bcast(tmp.data(), charSize, MPI_CHAR, 0, PMPI_COMM_WORLD);

    if (!isMaster()) val = tmp.data();

}



// Placeholder for summing charge density across MPI subdomains.
// Currently a no-op — not implemented yet.
void ParallelMPI::sumRho( PhysicalField* fields )
{


}

// Placeholder for summing charge density and currents across MPI subdomains.
// Currently a no-op — not implemented yet.
void ParallelMPI::sumRhoJ( PhysicalField* fields )
{

}
// Placeholder for summing the per-species (ispec) density — and currents,
// when `currents` is true — across MPI subdomains.
// Currently a no-op — not implemented yet.
void ParallelMPI::sumRhoJs( PhysicalField* fields, int ispec, bool currents )
{

}

// Total number of particles of `species` summed over all MPI ranks.
// NOTE: this uses MPI_Reduce with root 0, so only rank 0 receives the true
// global count — every other rank returns 0 (the initial value of nParticles).
int ParallelMPI::globalNbrParticles(Species* species)
{
    int nParticles(0);
    int locNbrParticles(0);
    locNbrParticles = species->get_number_of_particles();
    MPI_Reduce(&locNbrParticles, &nParticles, 1, MPI_INT, MPI_SUM, 0, PMPI_COMM_WORLD);
    return nParticles;
}


// Broadcast N doubles in `buffer` from rank `root_rank` to every rank
// (thin wrapper over MPI_Bcast on the class communicator).
void ParallelMPI::bcast_double(double* buffer, int N, int root_rank)
{
    MPI_Bcast(buffer, N, MPI_DOUBLE, root_rank, PMPI_COMM_WORLD);
}

// Element-wise sum of n ints from all ranks into `des` on rank 0 only
// (thin wrapper over MPI_Reduce; `des` is undefined on other ranks).
void ParallelMPI::reduce_sum_int(int* src, int* des, int n)
{
    MPI_Reduce(src, des, n, MPI_INT, MPI_SUM, 0, PMPI_COMM_WORLD);
}

// Element-wise sum of n ints from all ranks into `des` on EVERY rank
// (thin wrapper over MPI_Allreduce).
void ParallelMPI::all_reduce_sum_int(int* src, int* des, int n)
{
    MPI_Allreduce(src, des, n, MPI_INT, MPI_SUM, PMPI_COMM_WORLD);
}

// Element-wise sum of n doubles from all ranks into `des` on rank 0 only
// (thin wrapper over MPI_Reduce; `des` is undefined on other ranks).
void ParallelMPI::reduce_sum_double(double* src, double* des, int n)
{
    MPI_Reduce(src, des, n, MPI_DOUBLE, MPI_SUM, 0, PMPI_COMM_WORLD);
}

// Element-wise sum of n doubles from all ranks into `des` on EVERY rank
// (thin wrapper over MPI_Allreduce).
void ParallelMPI::all_reduce_sum_double(double* src, double* des, int n)
{
    MPI_Allreduce(src, des, n, MPI_DOUBLE, MPI_SUM, PMPI_COMM_WORLD);
}

// Element-wise sum of field_send over all ranks into field_recv on rank 0
// (MPI_Reduce to root 0; field_recv is undefined on other ranks).
// Both fields must have the same global element count and dimensionality.
void ParallelMPI::reduce_sum_field(Field<double>* field_send, Field<double>* field_recv)
{
    int count1, count2;
    count1 = field_send->dims_global_;
    count2 = field_recv->dims_global_;
    if(count1 != count2 || (field_send->dims_).size() != (field_recv->dims_).size())
    {
        log_error<<"reduce_sum_field error in ParallelMPI!!!";
        // Bug fix: previously fell through and called MPI_Reduce anyway,
        // reducing `count1` elements into a mismatched receive buffer.
        return;
    }
    MPI_Reduce(field_send->data_, field_recv->data_, count1, MPI_DOUBLE, MPI_SUM, 0, PMPI_COMM_WORLD);
}
