//
// To compile this example: make Test1D
// To run this example use: mpiexec -np 3 ./Test1D 3 1 1
// Input values for this example are in the file inputTest1D.
//
// Using 13 as the number of nodes and 3 subdomains, the program
// behaves as follows:
//
/*
   Original Domain:

   0    1    2    3    4    5    6    7    8    9   10   11   12
   |                   .                   .                   |
   +----+----+----+----+----+----+----+----+----+----+----+----+
   |                   .                   .                   |

   Partition into 3 subdomains:

   0    1    2    3    4    5
   |                   |              
   +----+----+----+----+====+                      ==== : indicates overlapping 
   |    
                  0    1    2    3    4    5    6 
                       |                   |
		  +====+----+----+----+----+====+
                       |
                                      0    1    2    3    4    5
                                           |                   |
                                      +====+----+----+----+----+
                                           |                   |

Original array should have 13 places, one for each node:

------------------------------------------------------------------------------------
T    = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] 
------------------------------------------------------------------------------------

After a partition of 3 subdomains we got:
------------------------------------------------------------------------------------
T(0) = [0, 1, 2, 3, 4, 5]                        <---             5 places + overlap_r
T(1) =          [0, 1, 2, 3, 4, 5, 6]            <--- overlap_l + 5 places + overlap_r
T(2) =                      [0, 1, 2,  3,  4, 5] <--- overlap_l + 5 places
------------------------------------------------------------------------------------

After info exchange, this is the result:
------------------------------------------------------------------------------------
T(0) = [0, 1, 2, 3, 4, 2]
                 |     ^     
                 v     |     
T(1) =          [3, 1, 2, 3, 4, 5, 2]
                             |     ^
                             v     |
T(2) =                      [4, 1, 2,  3,  4, 5]
------------------------------------------------------------------------------------
*/

#include <cmath>
#include <fstream>
#include <iostream>

#include "DDM/CartComm.hpp"
#include "DDM/SubDomain.hpp"
#include "Meshes/Uniform.hpp"
#include "Utils/GNUplot.hpp"
#include "Utils/inout.hpp"
#include "Utils/num_utils.hpp"
using namespace Tuna;
  
// Scalar field in one dimension (one double value per mesh node).
typedef TunaArray<double,1>::huge ScalarField1D;

// Parameters to make the partition; filled on every rank by
// read_data_and_Bcast() from the file "inputTest1D".
double length_x;          // length of the global domain along the x-axis
int num_nodes_x;          // number of nodes in the x-axis (per subdomain)
int nc_ovlp_l, nc_ovlp_r; // overlap cells on the left/right of each subdomain

// Parameters to identify the subdomain within the MPI run.
int Isub, rank, size;     // Cartesian subdomain index, MPI rank, process count

// Reads the input file on rank 0 and broadcasts the parameters above.
bool read_data_and_Bcast(CartComm<1>&);

// Driver: partitions a 1-D uniform mesh over the MPI processes, fills a
// scalar field with the local node index and performs one info exchange so
// the overlap update between neighbouring subdomains can be inspected.
int main( int argc, char **argv)
{
    // Initialize MPI through the C API (the MPI C++ bindings were
    // deprecated in MPI-2.2 and removed in MPI-3.0).
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

// ----- Construction of a Cartesian topology
    CartComm<1> cart(argc, argv, rank);
    Isub = cart.get_I();

// ----- Reading and broadcasting data from the input file
    read_data_and_Bcast(cart);

    StructuredMesh<Uniform<double, 1> > mesh(length_x, num_nodes_x);

// ----- Subdomain creation, mesh resizing and setup of info exchange
    SubDomain<double, 1> subdom(cart, mesh, nc_ovlp_l, nc_ovlp_r);

    // Array with values on nodes; seeded with the local node index so the
    // effect of the exchange is visible in the printed output.
    ScalarField1D Tn(mesh.getExtentNodes());

    for(int i = Tn.lbound(firstDim); i <= Tn.ubound(firstDim); i++)
      Tn(i) = i;

    std::cout << "\n Initial T (" << Isub << ") \n" << Tn <<std::endl;

    subdom.infoExchange(Tn, 0); // 0 indicates an info exchange on nodes

    std::cout << "\n After exchange T (" << Isub << ") \n" << Tn <<std::endl;

    MPI_Finalize();
    return 0;
}


// Reads the simulation parameters from the file "inputTest1D" on the master
// process (rank 0) and broadcasts them to every process of the Cartesian
// communicator.  On return the globals length_x, num_nodes_x, nc_ovlp_l and
// nc_ovlp_r are set on all ranks.
//
// @param cart  Cartesian communicator whose `comm` handle is used for the
//              broadcasts.
// @return true on success; false on rank 0 if the input file could not be
//         opened (zero-initialized defaults are broadcast in that case).
bool read_data_and_Bcast(CartComm<1>& cart)
{
  int    data_Int[3] = {0, 0, 0}; // nodes per subdomain, left/right overlap
  double data_Flt[1] = {0.0};     // global domain length in x
  int    num_subdom  = cart.getNumProc_I();
  bool   ok          = true;

    if (rank == 0) {
	std::cout << "\n\n"
		  << " +----------------------------------------+\n"
		  << " |    TUNA FOR PDE SOLVING IN PARALLEL    |\n"
		  << " +----------------------------------------+\n"
		  << " | MASTER PROCESS rank = " << rank << "\n"
		  << " | No subdomains in x-axis = " << num_subdom << "\n"
		  << " +----------------------------------------+";

// ----- Reading data from "input" file
	std::ifstream input_cin ("inputTest1D");
	if (input_cin) {
	    input_cin >> data_Flt[0]   // length in x-axis
		      >> data_Int[0]   // Num of nodes in x-axis for each subdomain
		      >> data_Int[1]   // Overlapping on the left
		      >> data_Int[2];  // Overlapping on the right
	    input_cin.close();

	    std::cout << "\n | Length of the global domain = " << data_Flt[0]
		      << "\n | Number of nodes on X-axis in each subdomain = " << data_Int[0]
		      << "\n | Overlapping on the left of each subdomain = " << data_Int[1]
		      << "\n | Overlapping on the right of each subdomain = " << data_Int[2]
		      << "\n +----------------------------------------+\n";
	} else {
	    std::cerr << "\n | ERROR: could not open input file 'inputTest1D'\n";
	    ok = false;
	}
    }
// ----- Broadcast the info to all processors.  Note: the C API MPI_Bcast
// ----- requires the C datatype handles MPI_INT / MPI_DOUBLE; the MPI::INT /
// ----- MPI::DOUBLE constants belong to the removed C++ bindings.
    MPI_Bcast(data_Int, 3, MPI_INT, 0, cart.comm);
    MPI_Bcast(data_Flt, 1, MPI_DOUBLE, 0, cart.comm);

// ----- Publish the values through the global variables.
    length_x      = data_Flt[0];
    num_nodes_x   = data_Int[0];
    nc_ovlp_l     = data_Int[1];
    nc_ovlp_r     = data_Int[2];

    return ok;
}

