#ifndef PARALLELMPI_H
#define PARALLELMPI_H

#include <string>
#include <vector>

#include <mpi.h>

#include "../Input/PicParams.h"
#include "../Tool/Macros.h"
#include "../Species/CellParticles.h"
#include "../Tool/Log.h"

class PicParams;
class Species;
class PhysicalField;
class Grid;
class IO;

template<class Type>
class Field;


// --------------------------------------------------------------------------------------------------------------------
//Class ParallelMPI
// --------------------------------------------------------------------------------------------------------------------
//Base class wrapping the MPI environment for the PIC code.
//Holds the global communicator, rank/size bookkeeping, and the buffers used
//for gather/scatter of grids and fields. Dimension-specific behavior
//(topology creation, particle exchange, field gather/scatter) is provided by
//child classes overriding the virtual hooks below.
class ParallelMPI {

public:

    //Construct from the command-line arguments (top-level MPI setup)
    ParallelMPI(int* argc, char*** argv);
    //Construct from an existing ParallelMPI instance and the run parameters
    ParallelMPI(PicParams* params_, ParallelMPI* pmpi_);

    virtual ~ParallelMPI();

    void init();

    //Create MPI communicator (topology); no-op in the base class,
    //overridden by dimension-specific child classes
    virtual void create_topology( PicParams* params )
    {

    }

    //Exchange particles crossing MPI-domain boundaries along direction iDim;
    //no-op in the base class
    virtual void exchange_particles(Species* species, PicParams* params, int iDim){}

    //Sum rho and densities on 2 x oversize[]
    void sumRho( PhysicalField* fields );
    //Sum rho and all J on the shared domain between processors
    //2 x oversize + 1 ( + 1 if direction is dual )
    void sumRhoJ( PhysicalField* fields );
    //Sum rho_s and all J_s on the shared domain between processors
    void sumRhoJs( PhysicalField* fields, int ispec, bool currents );

    //Method to identify the rank 0 MPI process
    inline bool isMaster() const {
        return (pic_rk==0);
    }
    //Method to synchronize MPI process in the current MPI communicator
    inline void barrier() const {
        MPI_Barrier( PMPI_COMM_WORLD );
    }
    //Return MPI_Comm_rank
    inline int getRank() const {
        return pic_rk;
    }
    //Return MPI_Comm_size
    inline int getSize() const {
        return pic_sz;
    }
    //Return global starting (including oversize, ex : rank 0 returns -oversize) index for direction i
    //\param i direction
    //@see cell_starting_global_index
    inline int    getCellStartingGlobalIndex(int i) const {
        return params->cell_starting_global_index[i];
    }
    //Return real (excluding oversize) min coordinates (ex : rank 0 returns 0.) for direction i
    //@see local_min
    inline double getDomainLocalMin(int i) const {
        return params->local_min[i];
    }
    //Return real (excluding oversize) max coordinates for direction i
    //@see local_max
    inline double getDomainLocalMax(int i) const {
        return params->local_max[i];
    }

    //Should be pure virtual, see child classes
    virtual bool isEastern(){log_warning<<"Problem";return false;}
    //Should be pure virtual, see child classes
    virtual bool isWestern(){log_warning<<"Problem";return false;}
    //Should be pure virtual, see child classes
    virtual bool isSouthern(){log_warning<<"Problem";return false;}
    //Should be pure virtual, see child classes
    virtual bool isNorthern(){log_warning<<"Problem";return false;}


    //Number of MPI processes in the current communicator (@see getSize)
    int pic_sz;
    //Rank of this MPI process in the current communicator (@see getRank)
    int pic_rk;

    //Total number of particles of this species over all MPI processes
    int globalNbrParticles(Species* species);

    //Scatter the global grid to the local processes; no-op in the base class
    virtual void scatterGrid( Grid* grid ){}

    //gatherField0: values at the nodes, like electric field, electric potential
    virtual void gatherField0( Field<double>* field_global ,Field<double>* field  ){}
    //gatherField1: values at the nodes, like density
    virtual void gatherField1( Field<double>* field_global ,Field<double>* field  ){}
    //gatherField2: average values in the cells, like temperature, velocity
    virtual void gatherField2( Field<double>* field_global ,Field<double>* field  ){}
    //gatherField1_all: gather fields to all processes
    virtual void gatherField1_all( Field<double>* field_global ,Field<double>* field  ){}

    //Scatter a global field to the per-process local fields; no-op in the base class
    virtual void scatterField( Field<double>* field_global ,Field<double>* field ){}


    //broadcast a string in current communicator
    void bcast(std::string& val);
    //broadcast N doubles from root_rank to all processes
    void bcast_double(double* buffer, int N, int root_rank);
    //Element-wise sum of n ints, result on the root process only
    void reduce_sum_int(int* src, int* des, int n);
    //Element-wise sum of n ints, result on all processes
    void all_reduce_sum_int(int* src, int* des, int n);
    //Element-wise sum of n doubles, result on the root process only
    void reduce_sum_double(double* src, double* des, int n);
    //Element-wise sum of n doubles, result on all processes
    void all_reduce_sum_double(double* src, double* des, int n);
    //Element-wise sum of two fields, result in field_recv
    void reduce_sum_field(Field<double>* field_send, Field<double>* field_recv);


    //global dimensions (including ghost grid points) of each process stored in ROOT process for gathering
    //and scattering, only dims_global_gather[0] is used for 1d.
	int dims_global_gather[3];

    //variables for MPI_Gatherv and MPI_Scatterv (grid, rho, phi)
	int* grid_global_gather;
	double* field_global_gather;

	//local dimensions of each process stored in ROOT process for gathering and scattering
    //can not use 2d vector, because the memory of data is not contiguous for MPI
	int* dims_gather;
	int* dims_gather_temp;

    //counts/displacements for the variable-size MPI gather/scatter calls
    std::vector <int> recv_cnt, recv_disp;
    std::vector <int> send_cnt, send_disp;
    //counts/displacements used when gathering velocity distribution functions (VDF)
    std::vector <int> recv_cnt_VDF, recv_disp_VDF, recv_cnt_VDF_temp;
    std::vector <int> send_cnt_VDF;

    //Run parameters (non-owning; set externally)
    PicParams* params;
    //I/O handler (non-owning; set externally)
    IO* io;

protected:
    //Global MPI Communicator
    MPI_Comm PMPI_COMM_WORLD;

    //Number of dimensions
    int ndims_;
    //Number of MPI process per direction in the cartesian topology
    int* number_of_procs;

    //Array of coordinates in the cartesian topology
    int* coords_;
    //Periodicity of the geometry
    int* periods_;
    //Reorder MPI rank (not)
    int reorder_;
    //Number of neighbors per directions (=2)
    int nbNeighbors_;
    //Id of neighbors, per direction (up to 3), per side (2)
    int neighbor_[3][2];

    //Derived datatype describing one particle for MPI particle exchange
    MPI_Datatype mpi_particle_type;

    int test_number;
};

#endif
