#include "../Parallel/ParallelMPI1D.h"

#include <cmath>
#include <cstring>
#include <algorithm>
#include <string>
#include <fstream>

#include <mpi.h>
#include "../Species/Species.h"
#include "../Species/Species1D.h"

#include "../PhysicalField/PhysicalField.h"
#include "../Field/Field1D.h"

#include "../Tool/Macros.h"

#include "../Field/Field1D.h"

using namespace std;

//---------------------------------------------------------------------------------------------------------------------
//ParallelMPI1D: creator for PIC MPI environment in 1D cartesian
//---------------------------------------------------------------------------------------------------------------------
ParallelMPI1D::ParallelMPI1D(PicParams* params_, ParallelMPI* pmpi_) :
    ParallelMPI(params_, pmpi_)
{
    // Build the 1D Cartesian topology (and the particle MPI datatype) right away,
    // using the `params` member set by the ParallelMPI base-class constructor.
    create_topology(params);
}


//---------------------------------------------------------------------------------------------------------------------
//ParallelMPI1D: destructor for PIC MPI environment in 1D cartesian
//---------------------------------------------------------------------------------------------------------------------
ParallelMPI1D::~ParallelMPI1D()
{
    // Topology description arrays.
    delete [] number_of_procs;
    delete [] periods_;
    delete [] coords_;

    // Gather/scatter buffers allocated with `new` in create_topology().
    // NOTE(review): these were previously leaked — confirm the base class
    // does not also free them.
    delete [] field_global_gather;
    delete [] dims_gather;
    delete [] dims_gather_temp;

    // Release the MPI datatype committed in create_particle_type(); freeing the
    // communicator here already assumed MPI_Finalize has not yet run, so freeing
    // the datatype under the same assumption is safe.
    if ( mpi_particle_type != MPI_DATATYPE_NULL ) MPI_Type_free(&mpi_particle_type);

    if ( PMPI_COMM_1D != MPI_COMM_NULL) MPI_Comm_free(&PMPI_COMM_1D);

}
//---------------------------------------------------------------------------------------------------------------------
//ParallelMPI1D: create the topology for PIC MPI environment in 1D cartesian
//---------------------------------------------------------------------------------------------------------------------
// Build the 1D Cartesian MPI topology and derive all per-rank grid bookkeeping:
// global/local cell counts, ghost-cell ("oversize") extents, neighbor ranks,
// gather/scatter counts and displacements, and the ranks at the two box ends.
// Collective: every rank in PMPI_COMM_WORLD must call this.
void ParallelMPI1D::create_topology(PicParams* params)
{
    // The MPI struct datatype for particles is needed by exchange_particles().
    create_particle_type();

    for (int i=0 ; i<params->n_dim_field ; i++)
    {
        // Number of cells is the rounded ratio length/dx; field nodes = cells + 1.
        params->dims_space_global[i] = round(params->sim_length[i]/params->cell_length[i]);
        params->dims_field_global[i] = params->dims_space_global[i] + 1;
        //correct sim_length so it is an exact integer multiple of the cell length
        params->sim_length[i] = params->dims_space_global[i] * params->cell_length[i];

        log_info<<"Total number of cells in direction " << i << ": " << params->dims_space_global[i];
    }

    // 1D decomposition: every process lines up along direction 0.
    number_of_procs[0] = pic_sz;

    log_info<<"MPI Domain decomposition : " << pic_sz;

    //Geometry periodic in x
    if ( (params->bc_em_type_x[0]=="periodic") || (params->bc_em_type_x[1]=="periodic") ) {
        periods_[0] = 1;
        log_info<<"Periodic geometry in x-direction";
    }

    // Create the Cartesian communicator and find this rank's coordinate in it.
    MPI_Cart_create( PMPI_COMM_WORLD, ndims_, number_of_procs, periods_, reorder_, &PMPI_COMM_1D );
    MPI_Cart_coords( PMPI_COMM_1D, pic_rk, ndims_, coords_ );


    // neighbor_[iDim][0] = lower neighbor, neighbor_[iDim][1] = upper neighbor
    // (MPI_PROC_NULL at non-periodic boundaries).
    for (int iDim=0 ; iDim<ndims_ ; iDim++)
    {
        MPI_Cart_shift( PMPI_COMM_1D, iDim, 1, &(neighbor_[iDim][0]), &(neighbor_[iDim][1]) );
    }


    for (int i=0 ; i<params->n_dim_field ; i++)
    {
        // Even split of the global cells; the starting index uses the same
        // quotient so all ranks agree on the partition boundaries.
        params->dims_space[i] = params->dims_space_global[i] / number_of_procs[i];
        params->cell_starting_global_index[i] = coords_[i]*(params->dims_space_global[i] / number_of_procs[i]);

        // If the split is not exact, the last rank absorbs the remainder.
        if ( number_of_procs[i]*params->dims_space[i] != params->dims_space_global[i] )
        {
            if (coords_[i]==number_of_procs[i]-1)
            {
                params->dims_space[i] = params->dims_space_global[i] - params->dims_space[i]*(number_of_procs[i]-1);
            }
        }

        // A subdomain thinner than the ghost region cannot exchange correctly.
        if ( params->dims_space[i] <= 2*params->oversize[i] )
        {
            log_warning<<"dims_space < 2*params->oversize " << i;
        }

        // Physical extent of this rank's subdomain (before ghost-cell shift).
        params->local_min[i] = (params->cell_starting_global_index[i]                  )*params->cell_length[i];
        params->local_max[i] = (params->cell_starting_global_index[i]+params->dims_space[i])*params->cell_length[i];

        if (coords_[i]==number_of_procs[i]-1)
        {
            //log_info<<params->local_min[i]<<" "<<params->local_max[i]<<" "<<params->dims_space[i];
        }

        // Shift the starting index back so local index 0 is the first ghost cell.
        params->cell_starting_global_index[i] -= params->oversize[i];

    }

    // Local field size = interior nodes (+1 w.r.t. cells) plus ghosts on both sides.
    for(int i=0; i < params->n_dim_field; i++)
    {
        params->dims_field[i] = params->dims_space[i] + 1 + 2 * params->oversize[i];
    }


    //calculate nspace_global_gather to gather and scatter the Field and Grid data
    // Each rank contributes its interior plus (1 + 2*oversize) extra nodes, so the
    // gather buffer must hold the overlapping ghost regions of every rank.
    dims_global_gather[0] = params->dims_space_global[0]+(1+2*params->oversize[0])*number_of_procs[0];

    field_global_gather = new double[dims_global_gather[0]];

    // dims_gather[r] = number of field nodes rank r sends during a gather.
    // Built with an Allreduce over one-hot temp arrays so every rank knows all sizes.
    dims_gather         = new int[pic_sz];
    dims_gather_temp    = new int[pic_sz];
    for (int i=0;i< pic_sz ; i++)
    {
    	if(i==pic_rk){
    		dims_gather_temp[i]     = params->dims_space[0] + 1 + 2*params->oversize[0];
            //cout<<"dims_gather "<<params->dims_space[0]<<" "<<params->number_of_procs[0]<<endl;
    	}
    	else {
    		dims_gather_temp[i]     = 0;
    	}

    }

    MPI_Allreduce(dims_gather_temp, dims_gather, pic_sz, MPI_INT,MPI_SUM, PMPI_COMM_1D);

    // Counts and displacements for MPI_Gatherv / MPI_Scatterv (prefix sums).
    recv_disp.resize(pic_sz);
    recv_cnt.resize(pic_sz);
    send_disp.resize(pic_sz);
    send_cnt.resize(pic_sz);

    for(int i = 0; i < pic_sz; i++)
    {
        recv_cnt[i] = dims_gather[i];
        send_cnt[i] = dims_gather[i];
        if(i == 0){
            recv_disp[i] = 0;
            send_disp[i] = 0;
        }
        else{
            recv_disp[i] = recv_disp[i-1] + recv_cnt[i-1];
            send_disp[i] = send_disp[i-1] + send_cnt[i-1];
        }
    }

    //-------------------------------------------------------
    //Compute & store the ranks of processes dealing with the
    //corner of the simulation box
    //-------------------------------------------------------

    // Only the rank at coordinate 0 contributes a non-zero value, so the SUM
    // reduction yields exactly that rank's id (same trick for the far end).
    extrem_ranks[0][0] = 0;
    int rank_min =  0;
    if (coords_[0] == 0) {
        rank_min = pic_rk;
    }
    MPI_Allreduce(&rank_min, &extrem_ranks[0][0], 1, MPI_INT, MPI_SUM, PMPI_COMM_1D);
    extrem_ranks[0][1] = 0;
    int rank_max = 0;
    if (coords_[0]==number_of_procs[0]-1) {
        rank_max = pic_rk;
    }
    MPI_Allreduce(&rank_max, &extrem_ranks[0][1], 1, MPI_INT, MPI_SUM, PMPI_COMM_1D);


}

//be aware of struct padding problem
// Exchange particles that left this rank's subdomain along dimension iDim with
// the two Cartesian neighbors: (1) trade counts, (2) trade particle payloads
// using mpi_particle_type, (3) deposit received particles into their local cell.
// Collective over PMPI_COMM_1D: every rank must call this for the same iDim.
//be aware of struct padding problem
void ParallelMPI1D::exchange_particles(Species* species, PicParams* params, int iDim)
{
    Species1D* species1D = static_cast<Species1D*>(species);

    auto& particles = species1D->particles_;
    auto& exchange_particles_temp = species1D->exchange_particles_;

    int n_part_send = exchange_particles_temp.size();
    int n_part_recv;
    int buff_index_recv_sz[2];
    //cout<<"n_part_send  "<<iDim<<"  "<<n_part_send<<endl;

    // Particles that are outside along some other dimension than iDim
    // (meaningful in >1D sweeps; here they end up only counted, see end).
    vector<Particle<1>> diagonal_particles;
    diagonal_particles.resize(0);

    // [0] = going to the lower neighbor, [1] = going to the upper neighbor.
    vector< vector< Particle<1> > > part_vector_send;
    part_vector_send.resize(2);
    vector< vector< Particle<1> > > part_vector_recv;
    part_vector_recv.resize(2);

    // Sort outgoing particles by which side of the local box they crossed.
    for (int i = 0 ; i < n_part_send ; i++)
    {
        if ( exchange_particles_temp[i].position[iDim] < params->local_min[iDim])
        {
            part_vector_send[0].push_back(exchange_particles_temp[i]);
        }
        else if ( exchange_particles_temp[i].position[iDim] >= params->local_max[iDim])
        {
            part_vector_send[1].push_back(exchange_particles_temp[i]);
        }
        else
        {
            diagonal_particles.push_back(exchange_particles_temp[i]);
        }
    }


    MPI_Status sstat    [2];
    MPI_Status rstat    [2];
    MPI_Request srequest[2];
    MPI_Request rrequest[2];

    //======================================= exchange particles number =====================
    // Asymmetric pattern: non-blocking send of my count, blocking receive of the
    // opposite neighbor's count. The barriers keep all ranks in lock-step per
    // iteration — NOTE(review): they look stronger than required, but the exact
    // ordering is load-bearing with blocking receives; do not remove casually.
    for (int iNeighbor=0 ; iNeighbor<nbNeighbors_ ; iNeighbor++)
    {
        n_part_send = 0;
        if (neighbor_[iDim][iNeighbor]!=MPI_PROC_NULL)
        {
            n_part_send = (part_vector_send[iNeighbor]).size();
            MPI_Isend( &n_part_send, 1, MPI_INT, neighbor_[iDim][iNeighbor], 0, PMPI_COMM_1D, &(srequest[iNeighbor]) );
        }

        buff_index_recv_sz[(iNeighbor+1)%2] = 0;
        if (neighbor_[iDim][(iNeighbor+1)%2]!=MPI_PROC_NULL)
        {
            buff_index_recv_sz[(iNeighbor+1)%2] = 0;
            //MPI_Irecv( &(buff_index_recv_sz[(iNeighbor+1)%2]), 1, MPI_INT, neighbor_[iDim][(iNeighbor+1)%2], 0, PMPI_COMM_1D, &(rrequest[(iNeighbor+1)%2]) );
            MPI_Recv( &(buff_index_recv_sz[(iNeighbor+1)%2]), 1, MPI_INT, neighbor_[iDim][(iNeighbor+1)%2], 0, PMPI_COMM_1D, &(rstat[(iNeighbor+1)%2]) );
        }
        barrier();
    }
    barrier();

    //======================= Wait for end of communications over number of particles ===============
    // Complete the Isends, and size the receive buffers now that counts are known.
    for (int iNeighbor=0 ; iNeighbor<nbNeighbors_ ; iNeighbor++)
    {
        if (neighbor_[iDim][iNeighbor]!=MPI_PROC_NULL)
        {
            MPI_Wait( &(srequest[iNeighbor]), &(sstat[iNeighbor]) );
        }
        if (neighbor_[iDim][(iNeighbor+1)%2]!=MPI_PROC_NULL)
        {
            //MPI_Wait( &(rrequest[(iNeighbor+1)%2]), &(rstat[(iNeighbor+1)%2]) );
            if (buff_index_recv_sz[(iNeighbor+1)%2]!=0)
            {
              part_vector_recv[(iNeighbor+1)%2].resize(buff_index_recv_sz[(iNeighbor+1)%2]);
            }
        }
    }
    barrier();

    //=========================== exchange particles =============================


    for (int iNeighbor=0 ; iNeighbor<nbNeighbors_ ; iNeighbor++)
    {
        n_part_send = part_vector_send[iNeighbor].size();
        if ( (neighbor_[iDim][iNeighbor]!=MPI_PROC_NULL) && (n_part_send!=0) )
        {
            // Wrap positions across the periodic boundary *before* sending, so
            // the receiver sees coordinates inside the global box [0, x_max).
            double x_max = params->cell_length[iDim]*( params->dims_space_global[iDim] );
            for (int iPart=0 ; iPart<n_part_send ; iPart++) {
                if (periods_[iDim]==1) {
                    //Enabled periodicity
                    if ( ( iNeighbor==0 ) &&  (coords_[iDim] == 0 ) &&( part_vector_send[iNeighbor][iPart].position[iDim] < 0. ) )
                    {
                        part_vector_send[iNeighbor][iPart].position[iDim]     += x_max;
                    }
                    else if ( ( iNeighbor==1 ) &&  (coords_[iDim] == number_of_procs[iDim]-1 ) && ( part_vector_send[iNeighbor][iPart].position[iDim] >= x_max ) )
                    {
                        part_vector_send[iNeighbor][iPart].position[iDim]     -= x_max;
                    }
                }
            }

            MPI_Isend( &part_vector_send[iNeighbor][0], n_part_send, mpi_particle_type, neighbor_[iDim][iNeighbor], 0, PMPI_COMM_1D, &(srequest[iNeighbor]) );
        }

        n_part_recv = buff_index_recv_sz[(iNeighbor+1)%2];
        if ( (neighbor_[iDim][(iNeighbor+1)%2]!=MPI_PROC_NULL) && (n_part_recv!=0) )
        {
            //MPI_Irecv( &part_vector_recv[(iNeighbor+1)%2][0], n_part_recv, mpi_particle_type,  neighbor_[iDim][(iNeighbor+1)%2], 0, PMPI_COMM_1D, &(rrequest[(iNeighbor+1)%2]) );
            MPI_Recv( &part_vector_recv[(iNeighbor+1)%2][0], n_part_recv, mpi_particle_type,  neighbor_[iDim][(iNeighbor+1)%2], 0, PMPI_COMM_1D, &(rstat[(iNeighbor+1)%2]) );
        }
        barrier();
    }


    //===================== add recieved particles to their cells =========================
    int index[1];
    for (int iNeighbor=0 ; iNeighbor<nbNeighbors_ ; iNeighbor++)
    {
        n_part_send = part_vector_send[iNeighbor].size();
        n_part_recv = buff_index_recv_sz[(iNeighbor+1)%2];

        //cout<<"n_part_send and recv "<<n_part_send<<"  "<<n_part_recv<<endl;

        if ( (neighbor_[iDim][iNeighbor]!=MPI_PROC_NULL) && (n_part_send!=0) )
        {
            MPI_Wait( &(srequest[iNeighbor]), &(sstat[iNeighbor]) );
        }

        if ( (neighbor_[iDim][(iNeighbor+1)%2]!=MPI_PROC_NULL) && (n_part_recv!=0) )
        {
            for (int i_particle = 0 ; i_particle < n_part_recv; i_particle++ )
            {
                // Cell index from the particle's position relative to the local box.
                index[0] = floor( (part_vector_recv[(iNeighbor+1)%2][i_particle].position[0] - params->local_min[0]) * params->cell_length_inv[0] );
                if(index[0] >= 0 && index[0] < params->dims_space[0])
                {
                    //cout<<"index i,j "<<index[0]<<"  "<<index[1]<<endl;
                    particles[index[0]]->add_new_particle(part_vector_recv[(iNeighbor+1)%2][i_particle]);
                }
                else
                {
                    diagonal_particles.push_back(part_vector_recv[(iNeighbor+1)%2][i_particle]);
                }

            }
        }

    }
    barrier();

    // NOTE(review): diagonal particles are only reported, never re-injected —
    // the re-queueing below is commented out. In a pure 1D run this set should
    // be empty, but if the message ever prints, particles are being lost; confirm.
    //exchange_particles_temp = diagonal_particles;
    //diagonal_particles.clear();

    if(diagonal_particles.size() > 0)
    {
        cout<<"diagonal_particles number: "<<diagonal_particles.size()<<endl;
    }
}

void ParallelMPI1D::create_particle_type()
{
    Particle<1> particle;

    int blocklens_array[2];
    MPI_Aint displs_array[2];
    MPI_Datatype old_type_array[2];

    old_type_array[0] = MPI_DOUBLE;
    old_type_array[1] = MPI_DOUBLE;

    //blocklens_array[0]: n dims of position; blocklens_array[1]: n dims of velocity
    blocklens_array[0] = 1;
    blocklens_array[1] = 3;

    MPI_Get_address(&particle.position[0], &displs_array[0]);
    MPI_Get_address(&particle.velocity[0], &displs_array[1]);

    displs_array[1] = displs_array[1] - displs_array[0];
    displs_array[0] = 0;

    MPI_Type_create_struct(2, blocklens_array, displs_array, old_type_array, &mpi_particle_type);
    MPI_Type_commit(&mpi_particle_type);
}


// Gather a distributed 1D field onto the global field with OVERWRITE semantics
// (each node takes the value from the last rank that owns it), then make the
// two boundary nodes consistent: data[0] absorbs data[nx-1] and both are set
// equal. NOTE(review): MPI_Gatherv collects to rank 0 only, so the assembled
// global field is meaningful on rank 0 only — confirm callers respect this.
void ParallelMPI1D::gatherField0( Field<double>* field_global ,Field<double>* field  )
{

    int procs_rk;
    int iGlobal;          // global node index the gathered value maps to
    int iGlobal_gather;   // flat index into field_global_gather
    int nx;

    Field1D<double>* f1D =  static_cast<Field1D<double>*>(field);
    Field1D<double>* f1D_global =  static_cast<Field1D<double>*>(field_global);
    nx = f1D_global->dims_[0];
    f1D_global->set_value(0.0);
    // Every rank contributes its local nodes including oversize ghosts.
    MPI_Gatherv(f1D->data_, send_cnt[pic_rk], MPI_DOUBLE, field_global_gather, &recv_cnt[0], &recv_disp[0], MPI_DOUBLE, 0, PMPI_COMM_1D);

    for(int iProcs = 0; iProcs < number_of_procs[0]; iProcs++)
    {
        procs_rk = iProcs;
        for(int i = 0; i < dims_gather[procs_rk]; i++)
        {
            // Map local node i of rank iProcs to its global node index; each
            // rank's interior spans (dims_gather[0] - 2*oversize - 1) nodes.
            iGlobal = iProcs * (dims_gather[0] - 2*params->oversize[0] -1) + i -params->oversize[0];
            // Outermost ghost nodes fold back into the box (periodic reflection).
            if(iProcs == 0 && i < params->oversize[0] || iProcs == number_of_procs[0] -1 && i > dims_gather[procs_rk] - 1 - params->oversize[0]){
                iGlobal = abs((int)f1D_global->dims_[0] - abs(iGlobal) - 1);
            }

            iGlobal_gather = send_disp[procs_rk] + i;

            //the differance between gatherRho and gatherField exists only here
            f1D_global->data_[iGlobal] = field_global_gather[iGlobal_gather];
        }
    }


    // Tie the periodic end nodes together.
    f1D_global->data_[0] += f1D_global->data_[nx-1];
    f1D_global->data_[nx-1] = f1D_global->data_[0];

}

// Gather a distributed 1D field onto the global field with ACCUMULATE
// semantics (overlapping ghost nodes are summed), then double the two
// boundary nodes so the periodic end points carry the full contribution.
void ParallelMPI1D::gatherField1( Field<double>* field_global ,Field<double>* field  )
{
    Field1D<double>* local_field  = static_cast<Field1D<double>*>(field);
    Field1D<double>* global_field = static_cast<Field1D<double>*>(field_global);

    const int nx = field_global->dims_[0];
    global_field->set_value(0.0);

    // Collect every rank's local nodes (ghosts included) into the flat buffer.
    MPI_Gatherv(local_field->data_, send_cnt[pic_rk], MPI_DOUBLE, field_global_gather, &recv_cnt[0], &recv_disp[0], MPI_DOUBLE, 0, PMPI_COMM_1D);

    for(int rank = 0; rank < number_of_procs[0]; rank++)
    {
        const int n_local = dims_gather[rank];
        for(int i = 0; i < n_local; i++)
        {
            // Map local node i of this rank onto its global node index.
            int node = rank * (dims_gather[0] - 2*params->oversize[0] - 1) + i - params->oversize[0];

            // Outermost ghost nodes wrap back into the box.
            const bool left_ghost  = (rank == 0) && (i < params->oversize[0]);
            const bool right_ghost = (rank == number_of_procs[0] - 1)
                                     && (i > n_local - 1 - params->oversize[0]);
            if(left_ghost || right_ghost)
            {
                node = abs((int)global_field->dims_[0] - abs(node) - 1);
            }

            //the differance between gatherRho and gatherField exists only here
            global_field->data_[node] += field_global_gather[send_disp[rank] + i];
        }
    }

    global_field->data_[0]    *= 2.0;
    global_field->data_[nx-1] *= 2.0;
}

// Gather a distributed cell-averaged 1D quantity (e.g. temperature) onto the
// global field with ACCUMULATE semantics. Because the values are cell
// averages, there is one fewer meaningful value than nodes, so the last node
// is filled by copying its left neighbor for plotting.
void ParallelMPI1D::gatherField2( Field<double>* field_global ,Field<double>* field  )
{

    int procs_rk;
    int iGlobal;          // global node index the gathered value maps to
    int iGlobal_gather;   // flat index into field_global_gather
    int nx;

    Field1D<double>* f1D =  static_cast<Field1D<double>*>(field);

    Field1D<double>* f1D_global =  static_cast<Field1D<double>*>(field_global);
    nx = f1D_global->dims_[0];
    f1D_global->set_value(0.0);
    // Every rank contributes its local nodes including oversize ghosts.
    MPI_Gatherv(f1D->data_, send_cnt[pic_rk], MPI_DOUBLE, field_global_gather, &recv_cnt[0], &recv_disp[0], MPI_DOUBLE, 0, PMPI_COMM_1D);

    for(int iProcs = 0; iProcs < number_of_procs[0]; iProcs++)
    {
        procs_rk = iProcs;
        for(int i = 0; i < dims_gather[procs_rk]; i++)
        {
            // Map local node i of rank iProcs to its global node index.
            iGlobal = iProcs * (dims_gather[0] - 2*params->oversize[0] -1) + i -params->oversize[0];
            // Outermost ghost nodes fold back into the box.
            if(iProcs == 0 && i < params->oversize[0] || iProcs == number_of_procs[0] -1 && i > dims_gather[procs_rk] - 1 - params->oversize[0]){
                iGlobal = abs((int)f1D_global->dims_[0] - abs(iGlobal) - 1);
            }

            iGlobal_gather = send_disp[procs_rk] + i;

            //the differance between gatherRho and gatherField exists only here
            f1D_global->data_[iGlobal] += field_global_gather[iGlobal_gather];
        }
    }
    //for temperate, it is average values in the cells, so the number of cells is one less than the number of nodes
    //so to plot, fill the last value using its left value
    f1D_global->data_[nx-1] = f1D_global->data_[nx-2];

}

// Scatter a global 1D field back to the per-rank local fields: pack each
// rank's slice (interior plus oversize ghosts, with boundary ghosts wrapped
// into the box) into the flat buffer, then MPI_Scatterv from rank 0.
// Inverse operation of the gatherField* routines.
void ParallelMPI1D::scatterField( Field<double>* field_global ,Field<double>* field )
{
    int procs_rk;
    int iGlobal;          // global node index feeding this gather slot
    int iGlobal_gather;   // flat index into field_global_gather

    Field1D<double>* f1D =  static_cast<Field1D<double>*>(field);
    Field1D<double>* f1D_global =  static_cast<Field1D<double>*>(field_global);

    int ii;
    ii=f1D_global->dims_[0];

    iGlobal = 0;

    for(int iProcs = 0; iProcs < number_of_procs[0]; iProcs++)
    {
        procs_rk = iProcs;
        for(int i = 0; i < dims_gather[procs_rk]; i++)
        {
            // Same local-to-global index mapping as the gather routines.
            iGlobal = iProcs * (dims_gather[0] - 2*params->oversize[0] -1) + i -params->oversize[0];
            // Outermost ghost nodes read from the wrapped-around global node.
            if(iProcs == 0 && i < params->oversize[0] || iProcs == number_of_procs[0] -1 && i > dims_gather[procs_rk] - 1 - params->oversize[0]){
                iGlobal = abs((int)f1D_global->dims_[0] - abs(iGlobal) - 1);
            }

            iGlobal_gather = send_disp[procs_rk] + i;
            // Sanity check: the mapping must never leave the global field.
            if(iGlobal >= ii) cout<<"error "<<iGlobal<<" "<<iProcs<<" "<<dims_gather[0]<<" "<<params->oversize[0]<<endl;

            field_global_gather[iGlobal_gather] = f1D_global->data_[iGlobal];
        }
    }

    // Distribute each rank's packed slice into its local field buffer.
    MPI_Scatterv(field_global_gather, &send_cnt[0], &send_disp[0], MPI_DOUBLE, f1D->data_, recv_cnt[pic_rk], MPI_DOUBLE, 0, PMPI_COMM_1D);
}

