/* _NSPROJ_MPI_C_
 * 
 * SOLVES THREE DIMENSIONAL, INCOMPRESSIBLE NAVIER-STOKES
 * EQUATION WITH RECTANGULAR DOMAIN USING FINITE DIFFERENCING 
 * ON A STAGGERED GRID
 * UTILIZES THE PROJECTION METHOD.  THE CONSERVATIVE FORM
 * CONVECTION IS DISCRETIZED WITHOUT THE USE OF UPWIND TREATMENT; 
 * BOTH THE ADVECTION AND DIFFUSION TERMS ARE TREATED EXPLICITLY. 
 * THE CURRENT SETUP IS FOR A LID DRIVEN CAVITY PROBLEM WITH A WELL
 * KNOWN VELOCITY AT THE TOP AND NO-SLIP BOUNDARY CONDITIONS AT
 * ALL OTHER BOUNDARIES
 * 
 * ORIGINALLY WRITTEN BY DR. ZHI-GANG FENG: UNIVERSITY OF NORTH TEXAS
 * 
 * PORTED TO C AND 3D BY JOHN LEIDEL: GRADUATE STUDENT
 * 
 */


#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "nsproj.h"
#include "mpi.h"

//----------------------------------------NSMPI_GETPROCDISTRO
// NSMPI_GETPROCDISTRO
// CALCULATES PER THREAD MESH DISTRIBUTION IN X, Y and Z
// -- FOR MESHES WHERE SIZE == 1; ALL MESH ELEMENTS LIE IN 
//    RANK 0
// -- FOR MESHES WHERE SIZE ==2; MESH IS SPLIT EVENLY ACROSS
//    `X` DIMENSION
// -- FOR MESHES WHERE SIZE IS EVEN; MESH IS SPLIT INTO TWO 
//    HALVES IN `Y` DIMENSION; THEN SPLIT EVENLY IN `X` DIMENSION
// FOR MESHES WHERE SIZE IS ODD; MESH IS SPLIT EVENLY ACROSS
//    `X` DIMENSION
// PRE : ALL VARIABLES ARE NONZERO; SIZE > 0
// POST: PROCESSOR DISTRIBUTION IS SETUP AND SENT TO ALL RANKS
// RETURNS 0 ON SUCCESS, NONZERO OTHERWISE
//-----------------------------------------------------------
int nsmpi_getprocdistro( NSPROCS *procs, NSMESH mesh, int size, int rank )
{
    /* VARS */
    int ldx   = 0;   //-- X-EXTENT HANDED TO EACH RANK
    int ldy   = 0;   //-- Y INDEX WHERE THE MESH SPLITS INTO HALVES
    int curx  = 0;   //-- RUNNING X START INDEX
    int i     = 0;
    int rtn   = 0;
    int th    = 0;   //-- FIRST RANK OF THE TOP HALF OF THE MESH [x][y][*]
    int bh    = 0;   //-- NUMBER OF RANKS IN THE BOTTOM HALF [x][y][*]
    int err   = 0;   //-- SANITY CHECK RESULT; SHARED WITH ALL RANKS
    /* ---- */

    /* SANITY CHECK ON THE ROOT RANK.
     * THE RESULT IS BROADCAST SO THAT ON FAILURE EVERY RANK RETURNS.
     * (PREVIOUSLY ONLY RANK 0 RETURNED, LEAVING ALL OTHER RANKS
     * DEADLOCKED IN THE BARRIER/BCAST COLLECTIVES BELOW) */
    if( rank == 0 )
    {
	if( (procs == NULL) || (size<1) || (size>_MAX_MPI_THREADS_) )
	{
	    err = 1;
	}
	else if( (mesh.mx<=1) || (mesh.my<=1) || (mesh.mz<=1) )
	{
	    err = 1;
	}
    }

    MPI_Bcast( &err, 1, MPI_INT, 0, MPI_COMM_WORLD );

    if( err != 0 )
    {
	/* MPI_Finalize IS COLLECTIVE; ALL RANKS MUST CALL IT */
	MPI_Finalize();
	return -1;
    }

    MPI_Barrier( MPI_COMM_WORLD );

    if( rank == 0 )
    {

	if( size == 1 )
	{
	    /* RANK 0 GETS ENTIRE MESH */
	    procs->startx[0] = 0;
	    procs->endx[0] = mesh.nx-1;
	    procs->starty[0] = 0;
	    procs->endy[0] = mesh.ny-1;
	    procs->startz[0] = 0;
	    procs->endz[0] = mesh.nz-1;
	}
	else if( size == 2 )
	{
	    /* TWO RANKS :: DISTRIBUTED OVER X's */
	    /* INTEGER DIVISION ALREADY TRUNCATES; floor() WAS A NO-OP */
	    ldx = mesh.nx / size;
	    procs->startx[0] = 0;
	    procs->endx[0]   = ldx;
	    procs->starty[0] = 0;
	    procs->endy[0]   = mesh.ny-1;
	    procs->startz[0] = 0;
	    procs->endz[0]   = mesh.nz-1;

	    procs->startx[1] = ldx+1;
	    procs->endx[1]   = mesh.nx-1;
	    procs->starty[1] = 0;
	    procs->endy[1]   = mesh.ny-1;
	    procs->startz[1] = 0;
	    procs->endz[1]   = mesh.nz-1;
	}
	else if( (size%2) == 0 )
	{
	    /* FOUND EVEN NUMBER OF RANKS >= 4 */
	    /* DISTRIBUTE OVER X's AND Y's */
	    bh = (size/2);
	    th = bh;

	    ldx = (mesh.nx/2)/(size/2);   //-- DELINEATES `X` FOR HALF OF SIZE
	    ldy = mesh.ny/2;              //-- DELINEATES BOTTOM HALF OF MESH
	    curx = 0;

	    /* BOTTOM HALF */
	    for( i=0; i<bh; i++ )
	    {
		procs->startx[i] = curx;
		procs->endx[i]   = curx+ldx;
		procs->starty[i] = 0;
		procs->endy[i]   = ldy;
		procs->startz[i] = 0;
		procs->endz[i]   = mesh.nz-1;

		curx = procs->endx[i] + 1;
	    }

	    /* BUG FIX: CLAMP THE LAST RANK OF THE BOTTOM HALF TO THE MESH
	     * EDGE; OTHERWISE X CELLS BEYOND curx+ldx WERE NEVER ASSIGNED
	     * (E.G. nx=8, size=4 LEFT CELLS 6..7 UNCOVERED) */
	    procs->endx[bh-1] = mesh.nx-1;

	    curx = 0;

	    /* TOP HALF */
	    for( i=th; i<size; i++ )
	    {
		procs->startx[i] = curx;
		procs->endx[i]   = curx+ldx;
		procs->starty[i] = ldy+1;
		procs->endy[i]   = mesh.ny-1;
		procs->startz[i] = 0;
		procs->endz[i]   = mesh.nz-1;

		curx = procs->endx[i] + 1;
	    }

	    /* BUG FIX: SAME CLAMP FOR THE LAST RANK OF THE TOP HALF */
	    procs->endx[size-1] = mesh.nx-1;

	}
	else
	{
	    /* FOUND ODD NUMBER OF RANKS >= 3 */
	    /* DISTRIBUTE OVER X's */
	    ldx = mesh.nx / size;
	    curx = 0;

	    for( i=0; i<size; i++ )
	    {
		procs->startx[i] = curx;
		procs->starty[i] = 0;
		procs->startz[i] = 0;
		procs->endx[i]   = curx+ldx;
		procs->endy[i]   = mesh.ny-1;
		procs->endz[i]   = mesh.nz-1;

		curx = procs->endx[i]+1;
	    }

	    /* FIX FINAL RANK SO THE LAST SLAB REACHES THE MESH EDGE */
	    procs->endx[size-1] = mesh.nx-1;
	}
    }

    MPI_Barrier( MPI_COMM_WORLD );

    /* DISTRIBUTE THE ARRAYS */
    MPI_Bcast( procs->startx, size, MPI_INT, 0, MPI_COMM_WORLD );
    MPI_Bcast( procs->starty, size, MPI_INT, 0, MPI_COMM_WORLD );
    MPI_Bcast( procs->startz, size, MPI_INT, 0, MPI_COMM_WORLD );
    MPI_Bcast( procs->endx, size, MPI_INT, 0, MPI_COMM_WORLD );
    MPI_Bcast( procs->endy, size, MPI_INT, 0, MPI_COMM_WORLD );
    MPI_Bcast( procs->endz, size, MPI_INT, 0, MPI_COMM_WORLD );

    return rtn;
}

//----------------------------------------NSMPI_DISTRIBUTEDATA
// NSMPI_DISTRIBUTEDATA
// DISTRIBUTES RELEVANT CONFIGURATION DATA TO ALL MESH RANKS
// PRE : DATA IN NONZERO
// POST: DATA HAS BEEN SENT TO ALL RANKS
// RETURNS 0 ON SUCCESS, NONZERO OTHERWISE 
//------------------------------------------------------------
int nsmpi_distributedata( NSMESH *mesh, 
			  double *Re, 
			  double *dt, 
			  double *tf, 
			  double *nt,
			  double *lx, 
			  double *ly, 
			  double *lz,
			  double *dx,
			  double *dy, 
			  double *dz,
			  int rank )
{
    /* VARS */
    /* TABLES OF BROADCAST TARGETS; ORDER IS SIGNIFICANT AND MUST
     * MATCH ON EVERY RANK (ALL RANKS EXECUTE THESE SAME LOOPS) */
    double *params[] = { Re, dt, tf, lx, ly, lz, nt, dx, dy, dz };
    int    *dims[]   = { &mesh->nx, &mesh->ny, &mesh->nz,
			 &mesh->mx, &mesh->my, &mesh->mz };
    double *bcs[]    = { &mesh->uN, &mesh->uS, &mesh->vE,
			 &mesh->vW, &mesh->wF, &mesh->wB };
    int i = 0;
    /* ---- */

    /* NON-MESH SCALAR SIMULATION PARAMETERS */
    for( i=0; i<(int)(sizeof(params)/sizeof(params[0])); i++ )
    {
	MPI_Bcast( params[i], 1, MPI_DOUBLE, 0, MPI_COMM_WORLD );
    }

    /* MESH DIMENSIONS */
    for( i=0; i<(int)(sizeof(dims)/sizeof(dims[0])); i++ )
    {
	MPI_Bcast( dims[i], 1, MPI_INT, 0, MPI_COMM_WORLD );
    }

    /* MESH BOUNDARY VELOCITIES */
    for( i=0; i<(int)(sizeof(bcs)/sizeof(bcs[0])); i++ )
    {
	MPI_Bcast( bcs[i], 1, MPI_DOUBLE, 0, MPI_COMM_WORLD );
    }

    /* U, V, W, Ustar, Vstar, Wstar, P, Pnew are all local to each thread */

    return 0;
}


//----------------------------------------NSMPI_ALLOCLOCALMESH
// NSMPI_ALLOCLOCALMESH
// SETS UP RELEVANT LOCAL VARIABLES FOR MESH
// ALLOCATES LOCAL MESH MEMORY
// PRE : `PROCS` STRUCTURE HAS BEEN ALLOCATED 
// POST: LOCAL MESH IS ALLOCATED
// RETURNS 0 ON SUCCESS, NONZERO OTHERWISE 
//------------------------------------------------------------
//-----------------------------------------NSMPI_ALLOC3D (STATIC)
// ALLOCATES A JAGGED nx X ny X nz ARRAY OF DOUBLES
// POINTER TABLES ARE ZEROED (calloc) SO A PARTIAL ALLOCATION CAN
// BE WALKED SAFELY DURING CLEANUP
// RETURNS THE ARRAY ON SUCCESS, NULL (FULLY UNWOUND) OTHERWISE
//------------------------------------------------------------
static double ***nsmpi_alloc3d( int nx, int ny, int nz )
{
    int i = 0;
    int j = 0;
    double ***a = (double ***)calloc( nx, sizeof( double ** ) );

    if( a == NULL )
    {
	return NULL;
    }

    for( i=0; i<nx; i++ )
    {
	a[i] = (double **)calloc( ny, sizeof( double * ) );
	if( a[i] == NULL )
	{
	    goto fail;
	}
	for( j=0; j<ny; j++ )
	{
	    /* ELEMENT SIZE IS sizeof(double); THE ORIGINAL USED
	     * sizeof(double *), A HEAP OVERFLOW ON PLATFORMS WHERE
	     * POINTERS ARE SMALLER THAN DOUBLES */
	    a[i][j] = (double *)malloc( nz * sizeof( double ) );
	    if( a[i][j] == NULL )
	    {
		goto fail;
	    }
	}
    }

    return a;

fail:
    /* UNWIND EVERYTHING ALLOCATED SO FAR */
    for( i=0; i<nx; i++ )
    {
	if( a[i] != NULL )
	{
	    for( j=0; j<ny; j++ )
	    {
		free( a[i][j] );
	    }
	    free( a[i] );
	}
    }
    free( a );
    return NULL;
}

//----------------------------------------NSMPI_ALLOCLOCALMESH
// NSMPI_ALLOCLOCALMESH
// SETS UP RELEVANT LOCAL VARIABLES FOR MESH
// ALLOCATES LOCAL MESH MEMORY
// PRE : `PROCS` STRUCTURE HAS BEEN ALLOCATED
// POST: LOCAL MESH IS ALLOCATED
// RETURNS 0 ON SUCCESS, NONZERO OTHERWISE
// NOTE: THE ORIGINAL ALLOCATION LOOPS ITERATED THE OUTER
//       DIMENSION WITH ny AND THE MIDDLE WITH nz, WHICH
//       UNDER- OR OVER-RAN THE ARRAYS WHENEVER nx, ny, nz
//       DIFFERED; ALL BOUNDS ARE NOW CORRECT VIA NSMPI_ALLOC3D
//------------------------------------------------------------
int nsmpi_alloclocalmesh( NSMESH *mesh, NSPROCS procs, int rank )
{
    /* VARS */
    int lnx      = 0;   //-- LOCAL MESH->nX (INCLUDES GHOST CELLS)
    int lny      = 0;   //-- LOCAL MESH->nY (INCLUDES GHOST CELLS)
    int lnz      = 0;   //-- LOCAL MESH->nZ (INCLUDES GHOST CELLS)
    double ngrid = 0;   //-- BYTES IN ONE nx*ny*nz FIELD ARRAY
    /* ---- */

    /* SETUP LOCAL VERSIONS OF MX, MY, MZ */
    lnx = (procs.endx[rank] - procs.startx[rank])+1;
    lny = (procs.endy[rank] - procs.starty[rank])+1;
    lnz = (procs.endz[rank] - procs.startz[rank])+1;

    mesh->nx = lnx;
    mesh->ny = lny;
    mesh->nz = lnz;
    mesh->mx = lnx-2;
    mesh->my = lny-2;
    mesh->mz = lnz-2;

#ifdef _DEBUG_
    printf( "%d%s%d%s%d%s%d%s%d%s%d\n", procs.startx[rank], " ", procs.endx[rank], " ", procs.starty[rank], " ", procs.endy[rank], " ", procs.startz[rank], " ", procs.endz[rank]);
#endif
    printf( "%s%d%s%d%s%d%s%d\n", "....GRID POINT DIMENSIONS W/ GHOST CELLS @ RANK= ", rank, " : ", lnx, " X ", lny, " X ", lnz );

    ngrid = sizeof( double ) * mesh->nx * mesh->ny * mesh->nz;

    /* ALLOCATE THE EIGHT FIELD ARRAYS */
    mesh->U     = nsmpi_alloc3d( mesh->nx, mesh->ny, mesh->nz );
    mesh->V     = nsmpi_alloc3d( mesh->nx, mesh->ny, mesh->nz );
    mesh->W     = nsmpi_alloc3d( mesh->nx, mesh->ny, mesh->nz );
    mesh->Ustar = nsmpi_alloc3d( mesh->nx, mesh->ny, mesh->nz );
    mesh->Vstar = nsmpi_alloc3d( mesh->nx, mesh->ny, mesh->nz );
    mesh->Wstar = nsmpi_alloc3d( mesh->nx, mesh->ny, mesh->nz );
    mesh->P     = nsmpi_alloc3d( mesh->nx, mesh->ny, mesh->nz );
    mesh->Pnew  = nsmpi_alloc3d( mesh->nx, mesh->ny, mesh->nz );

    /* SANITY CHECK THE MEMORY */
    if( (mesh->U     == NULL) ||
        (mesh->V     == NULL) ||
        (mesh->W     == NULL) ||
        (mesh->Ustar == NULL) ||
        (mesh->Vstar == NULL) ||
        (mesh->Wstar == NULL) ||
        (mesh->P     == NULL) ||
        (mesh->Pnew  == NULL))
    {
        /* NOTE(review): assumes nsproj_meshfree tolerates NULL field
         * pointers -- confirm against its implementation */
        nsproj_meshfree( mesh );
        return -1;
    }

    /* ngrid IS ONE ARRAY; MULTIPLY BY THE 8 FIELD ARRAYS ALLOCATED */
    printf( "%s%d%s%f%s\n", 
	    "....RANK = ", 
	    rank, 
	    " ALLOCATED ", 
            ngrid*8,
            " BYTES OF MEMORY" );

    /* LET EVERYONE CATCH UP */
    MPI_Barrier( MPI_COMM_WORLD );

    return 0;
}


//----------------------------------------NSMPI_DISTRIBUTEMESH
// NSMPI_DISTRIBUTEMESH
// DISTRIBUTES MESH ELEMENTS FROM FULL COPY TO REMOTE THREADS
// PRE : ALL MESH DATA IS VALID
// POST: ALL REMOTE MESH DATA HAS BEEN UPDATED
// RETURNS 0 ON SUCCESS, NONZERO OTHERWISE
//------------------------------------------------------------
int nsmpi_distributemesh( NSMESH *mesh, NSMESH *lmesh, NSPROCS procs, int rank, int size )
{
    /* VARS */
    int c  = 0;   //-- DESTINATION RANK
    int i  = 0;
    int j  = 0;
    int k  = 0;
    MPI_Status status;
    /* ---- */

    /* FOR EACH REMOTE RANK, SEND ITS SUB-BLOCK OF THE GLOBAL MESH
     * ELEMENT BY ELEMENT; THE RECEIVER FILLS ITS LOCAL MESH IN THE
     * SAME i/j/k ORDER, SO SEND AND RECEIVE COUNTS MATCH EXACTLY */
    for( c=1; c<size; c++ )
    {
	if( rank == 0 )
	{
	    for( i=procs.startx[c]; i<=procs.endx[c]; i++ )
	    {
		for( j=procs.starty[c]; j<=procs.endy[c]; j++ )
		{
		    for( k=procs.startz[c]; k<=procs.endz[c]; k++ )
		    {   
			MPI_Send( &mesh->U[i][j][k], 1, MPI_DOUBLE, c, c, MPI_COMM_WORLD );
			MPI_Send( &mesh->V[i][j][k], 1, MPI_DOUBLE, c, c, MPI_COMM_WORLD );
			MPI_Send( &mesh->W[i][j][k], 1, MPI_DOUBLE, c, c, MPI_COMM_WORLD );
			MPI_Send( &mesh->P[i][j][k], 1, MPI_DOUBLE, c, c, MPI_COMM_WORLD );			
		    }
		}
	    }
	}
	else if( rank == c )
	{   
	    for( i=0; i<lmesh->nx; i++ )
	    {
		for( j=0;j<lmesh->ny; j++ )
		{
		    for( k=0; k<lmesh->nz; k++ )
		    {
			MPI_Recv( &lmesh->U[i][j][k], 1, MPI_DOUBLE, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status );
			MPI_Recv( &lmesh->V[i][j][k], 1, MPI_DOUBLE, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status );
			MPI_Recv( &lmesh->W[i][j][k], 1, MPI_DOUBLE, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status );
			MPI_Recv( &lmesh->P[i][j][k], 1, MPI_DOUBLE, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status );
		    }
		}
	    }
	}
	
	MPI_Barrier( MPI_COMM_WORLD );
    }
     
    /* FIX RANK = 0 :: COPY ITS OWN SUB-BLOCK DIRECTLY (NO MESSAGES) */
    if( rank == 0 )
    {
	for( i=0; i<lmesh->nx; i++ )
	{
	    for( j=0; j<lmesh->ny; j++ )
	    {
		for( k=0; k<lmesh->nz; k++ )
		{
		    /* BUG FIX: THE Z OFFSET PREVIOUSLY USED
		     * procs.starty[0] INSTEAD OF procs.startz[0] */
		    lmesh->U[i][j][k] = mesh->U[procs.startx[0]+i][procs.starty[0]+j][procs.startz[0]+k];
		    lmesh->V[i][j][k] = mesh->V[procs.startx[0]+i][procs.starty[0]+j][procs.startz[0]+k];
		    lmesh->W[i][j][k] = mesh->W[procs.startx[0]+i][procs.starty[0]+j][procs.startz[0]+k];
		    lmesh->P[i][j][k] = mesh->P[procs.startx[0]+i][procs.starty[0]+j][procs.startz[0]+k];
		}
	    }
	}
    }

    MPI_Barrier( MPI_COMM_WORLD );

    return 0;
}


//----------------------------------------NSMPI_COLLECTMESH
// NSMPI_COLLECTMESH 
// COLLECTS MESH ELEMENTS FROM REMOTE THREADS AND 
// UPDATES FULL COPY ON RANK 0
// PRE : ALL REMOTE MESH DATA IS VALID
// POST: FULL MESH HAS BEEN UPDATED ON RANK 0 
// RETURNS 0 ZERO ON SUCCESS NONZERO OTHERWISE
//---------------------------------------------------------
int nsmpi_collectmesh( NSMESH *mesh, NSMESH *lmesh, NSPROCS procs, int rank, int size )
{
    /* VARS */
    int i    = 0;
    int j    = 0;
    int k    = 0;
    int a    = 0;   //-- GLOBAL X INDEX FOR RANK 0's OWN SUB-BLOCK
    int b    = 0;   //-- GLOBAL Y INDEX FOR RANK 0's OWN SUB-BLOCK
    int c    = 0;   //-- SOURCE RANK; REUSED AS GLOBAL Z INDEX BELOW
    MPI_Status status[4];
    MPI_Request request[4];
    /* ----- */

    if( rank == 0 ){printf( "%s\n", "START_COLLECT" );}

    /* FOR EACH REMOTE RANK, RECEIVE ITS SUB-BLOCK INTO THE GLOBAL
     * MESH; SENDER AND RECEIVER WALK i/j/k IN THE SAME ORDER */
    for( c=1; c<size; c++ )
    {
	if( rank == 0 )
	{
	    for( i=procs.startx[c]; i<=procs.endx[c]; i++ )
	    {
		for( j=procs.starty[c]; j<=procs.endy[c]; j++ )
		{
		    for( k=procs.startz[c]; k<=procs.endz[c]; k++ )
		    {
			MPI_Irecv( &mesh->U[i][j][k], 1, MPI_DOUBLE, c, c, MPI_COMM_WORLD, &request[0] );
			MPI_Irecv( &mesh->V[i][j][k], 1, MPI_DOUBLE, c, c, MPI_COMM_WORLD, &request[1] );
			MPI_Irecv( &mesh->W[i][j][k], 1, MPI_DOUBLE, c, c, MPI_COMM_WORLD, &request[2] );
			MPI_Irecv( &mesh->P[i][j][k], 1, MPI_DOUBLE, c, c, MPI_COMM_WORLD, &request[3] );
			MPI_Waitall( 4, request, status );
		    }
		}
	    }
	}
	else if( rank == c )
	{
	    for( i=0; i<lmesh->nx; i++ )
	    {
		for( j=0; j<lmesh->ny; j++ )
		{
		    for( k=0; k<lmesh->nz; k++ )
		    {
			MPI_Isend( &lmesh->U[i][j][k], 1, MPI_DOUBLE, 0, rank, MPI_COMM_WORLD, &request[0] );
			MPI_Isend( &lmesh->V[i][j][k], 1, MPI_DOUBLE, 0, rank, MPI_COMM_WORLD, &request[1] );
			MPI_Isend( &lmesh->W[i][j][k], 1, MPI_DOUBLE, 0, rank, MPI_COMM_WORLD, &request[2] );
			MPI_Isend( &lmesh->P[i][j][k], 1, MPI_DOUBLE, 0, rank, MPI_COMM_WORLD, &request[3] );
			MPI_Waitall( 4, request, status );
		    }
		}
	    }
	}
	
	MPI_Barrier( MPI_COMM_WORLD );
    }

    /* BUG FIX: THE ORIGINAL READ `if( rank == 0 );` -- THE STRAY
     * SEMICOLON MADE THE COPY BLOCK RUN UNCONDITIONALLY ON EVERY
     * RANK, WRITING THROUGH THE GLOBAL MESH ARRAYS THAT ONLY
     * RANK 0 HOLDS VALID DATA FOR */
    if( rank == 0 )
    {
	/* RANK0 ==> RANK0 */
	for( i=0; i<lmesh->nx; i++ )
	{
	    for( j=0; j<lmesh->ny; j++ )
	    {
		for( k=0; k<lmesh->nz; k++ )
		{
		    a = procs.startx[0]+i;
		    b = procs.starty[0]+j;
		    c = procs.startz[0]+k;
		    
		    mesh->U[a][b][c] = lmesh->U[i][j][k];
		    mesh->V[a][b][c] = lmesh->V[i][j][k];
		    mesh->W[a][b][c] = lmesh->W[i][j][k];
		    mesh->P[a][b][c] = lmesh->P[i][j][k];
		}
	    }
	}
    }

    if( rank == 0 ){printf( "%s\n", "END_COLLECT" );}

    MPI_Barrier( MPI_COMM_WORLD );
    
    return 0;
}
