
#include <iostream>
#include <cmath>
#include "Meshes/Uniform.hpp"
#include "Utils/inout.hpp" 
#include "Utils/num_utils.hpp"
#include "Utils/GNUplot.hpp"
#include "DDM/CartComm.hpp"
#include "DDM/SubDomain.hpp"
using namespace Tuna;


// Rank whose field is printed after the halo exchange (read from the
// input file; "TEMPORAL" = temporary debugging selector).
int FLAG;
  
// Scalar field in two dimensions (node- or volume-centered storage).
typedef TunaArray<double,2>::huge ScalarField2D;

// Parameters to make the partition: global domain lengths, nodes per
// subdomain along each axis, and overlap (ghost) cells on the
// left/right/down/up sides.  Filled in by read_data_and_Bcast().
double length_x, length_y;
int num_nodes_x, num_nodes_y;
int nc_ovlp_l, nc_ovlp_r, nc_ovlp_d, nc_ovlp_u;

// Parameters to identify the subdomain: (Isub, Jsub) Cartesian
// coordinates of this rank, processor-grid extents, MPI rank and size.
int Isub, Jsub, num_subdom_x, num_subdom_y, rank, size;

// Reads run parameters on rank 0 and broadcasts them to all processes.
bool read_data_and_Bcast(CartComm<2>&);

// Driver: builds a 2-D Cartesian decomposition, fills each subdomain's
// node field with its own rank, exchanges overlap/ghost values, and
// prints the field of the rank selected by FLAG.
int main( int argc, char **argv)
{

  //    MPI::Init(argc, argv);
  //    rank = MPI::COMM_WORLD.Get_rank();       
  //    size = MPI::COMM_WORLD.Get_size(); 
    // C MPI bindings (the commented lines above are the removed C++ ones).
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size); 

// ----- Construction of a Cartesian topology

    // (Isub, Jsub) are this rank's coordinates in the processor grid.
    CartComm<2> cart(argc, argv, rank);   
    Isub = cart.get_I();
    Jsub = cart.get_J();
    //    cart.print();    

// Reading and broadcasting data from the input file 
    // Fills the file-scope globals (lengths, node counts, overlaps, FLAG).
    // NOTE(review): the bool result is ignored — a failed read goes unnoticed.
    read_data_and_Bcast(cart);   

// Global Mesh
    // Uniform structured mesh of the whole domain.
    StructuredMesh<Uniform<double, 2> > mesh(length_x, num_nodes_x,
					     length_y, num_nodes_y);
    //    mesh.print();

// Subdomain
    // Local subdomain with the requested overlap cells per side (l, r, d, u).
    SubDomain<double, 2> subdom(cart, mesh, 
				nc_ovlp_l, nc_ovlp_r,
				nc_ovlp_d, nc_ovlp_u);
    //    subdom.print();
    //    mesh.print(Isub, Jsub);

    // Re-query the node extents — presumably the local (per-subdomain)
    // extents after the decomposition; TODO confirm against StructuredMesh.
    num_nodes_x = mesh.getExtentNodes(X);
    num_nodes_y = mesh.getExtentNodes(Y);

    // T (volume-centered) is allocated but unused below; Tn is node-centered.
    ScalarField2D T(mesh.getExtentVolumes());
    ScalarField2D Tn(mesh.getExtentNodes());
    Range all = Range::all();   // NOTE(review): unused
    
    // Tag every local node with this process's rank so the effect of the
    // halo exchange is visible in the printout.
    for(int i = Tn.lbound(firstDim); i <= Tn.ubound(firstDim) ; i++)
      for(int j = Tn.lbound(secondDim); j <= Tn.ubound(secondDim) ; j++)
	Tn(i, j) = rank;


    if (rank == FLAG) 
      std::cout << "\n Initial T (" << Isub << ", " << Jsub << ") \n" << Tn <<std::endl;

    // Exchange overlap/ghost values with neighboring subdomains.
    subdom.infoExchange(Tn, 0);

    // Print the selected rank's field after the exchange, top row
    // (largest j) first so the output matches the physical layout.
    if (rank == FLAG) {
      //std::cout << "\n After T (" << Isub << "," << Jsub << ") \n" << Tn <<std::endl;
      /* */
      for(int j = num_nodes_y-1; j >= 0; --j) {
	for(int i = 0; i < num_nodes_x; ++i)
	  std::cout << "\t" << Tn(i,j) << "\t";
	std::cout << std::endl;
      }
      /* */
    }

    //    MPI::Finalize();
    MPI_Finalize();

    return 0;
}


// Reads the run parameters from "inputTest2D" on rank 0 and broadcasts
// them to every process in the Cartesian communicator.
//
// Fills the file-scope globals: length_x/length_y (domain lengths),
// num_nodes_x/num_nodes_y (nodes per subdomain), nc_ovlp_{l,r,d,u}
// (overlap cells per side), FLAG (rank whose field is printed), and
// num_subdom_x/num_subdom_y (taken from the topology).
//
// Returns true on success; false on rank 0 when the input file could
// not be opened (zeros are broadcast in that case).
bool read_data_and_Bcast(CartComm<2>& cart) 
{
  // Zero-initialize so that a failed read never broadcasts garbage.
  int    data_Int[7] = {0};
  double data_Flt[2] = {0.0};
  bool   ok = true;

  // Assign the file-scope globals.  (The previous version shadowed them
  // with locals, leaving the globals uninitialized.)
  num_subdom_x = cart.getNumProc_I(); 
  num_subdom_y = cart.getNumProc_J();
  
  if (rank == 0) {
    std::cout << "\n\n"
	      << " +----------------------------------------+\n"
	      << " |    TUNA FOR PDE SOLVING IN PARALLEL    |\n"
	      << " +----------------------------------------+\n"
	      << " | MASTER PROCESS rank = " << rank << "\n"
	      << " | No subdomains in x-axis = " << num_subdom_x << "\n"
	      << " | No subdomains in y-axis = " << num_subdom_y << "\n"
	      << " +----------------------------------------+\n"
	      << "\n";
	
    // ----- Reading data from the "inputTest2D" file
    std::ifstream input_cin ("inputTest2D");
    if (!input_cin) {
      std::cerr << " ERROR: cannot open input file 'inputTest2D'\n";
      ok = false;
    }
    else {
      input_cin >> data_Flt[0]   // length in x-axis
		>> data_Flt[1]   // length in y-axis
		>> data_Int[0]   // Num of nodes in x-axis for each subdomain
		>> data_Int[1]   // Num of nodes in y-axis for each subdomain
		>> data_Int[2]   // Overlapping left
		>> data_Int[3]   // Overlapping right
		>> data_Int[4]   // Overlapping down
		>> data_Int[5]   // Overlapping up
		>> data_Int[6];  // FLAG
      input_cin.close();
    }
  }
  // ----- Broadcast the info to all processors.  Use the C datatype
  // handles (MPI_INT/MPI_DOUBLE): the C++ bindings (MPI::INT, ...) were
  // deprecated in MPI-2.2 and removed in MPI-3.0, and must not be mixed
  // with the C API anyway.
  MPI_Bcast(data_Int, 7, MPI_INT, 0, cart.comm);    
  MPI_Bcast(data_Flt, 2, MPI_DOUBLE, 0, cart.comm); 
  
  // ----- Unpack into the file-scope globals.
  length_x      = data_Flt[0];
  length_y      = data_Flt[1];
  num_nodes_x   = data_Int[0];
  num_nodes_y   = data_Int[1];
  nc_ovlp_l     = data_Int[2]; 
  nc_ovlp_r     = data_Int[3];
  nc_ovlp_d     = data_Int[4]; 
  nc_ovlp_u     = data_Int[5];
  FLAG          = data_Int[6];
  
  return ok;
}




