
#include <iostream>
#include <cmath>
#include "Meshes/Uniform.hpp"
#include "Utils/inout.hpp" 
#include "Utils/num_utils.hpp"
#include "Utils/GNUplot.hpp"
#include "DDM/CartComm.hpp"
#include "DDM/SubDomain.hpp"
#include "Equations/ScalarEquation.hpp"
#include "Schemes/CDS.hpp"
#include "Solvers/TDMA.hpp"
using namespace Tuna;


// TEMPORAL FLAG
// Rank selected (via the input file) to report convergence data to stdout/files.
int FLAG;
  
// Scalar field in one dimension
// (despite the comment above, this is a 2D array of doubles — see the <double,2>)
typedef TunaArray<double,2>::huge ScalarField2D;

// Parameters to make the partition
// Global domain lengths, per-subdomain node counts, number of subdomains
// per axis, and the overlap widths (in cells) on each side: left/right (x),
// down/up (y). All are filled from the input file by read_data_and_Bcast().
double length_x, length_y;
int num_nodes_x, num_subdom_x;
int num_nodes_y, num_subdom_y;
int nc_ovlp_l, nc_ovlp_r, nc_ovlp_d, nc_ovlp_u;
// (Isub, Jsub): this rank's subdomain indices in the Cartesian topology;
// rank/size: the usual MPI identifiers.
int Isub, Jsub, rank, size;

// Local variables for each subdomain
// dt: time step; tolerance: convergence threshold; tdma_iter: max TDMA sweeps;
// steps: max outer iterations; frequency: output period; ddm_iter: inner
// domain-decomposition (Schwarz) iterations per outer step.
double dt, tolerance;
int tdma_iter, steps, frequency, ddm_iter;

// Reads the "inputHeat2D" file on rank 0 and broadcasts its contents
// to every rank in the Cartesian communicator.
bool read_data_and_Bcast(CartComm<2>&);

int main( int argc, char **argv)
{

  // Solves the 2D heat (diffusion) equation in parallel using an overlapping
  // domain-decomposition (Schwarz-type) iteration: each MPI rank owns one
  // rectangular subdomain of the global mesh, solves it with a TDMA sweep,
  // and exchanges overlap data with its neighbors until global convergence.

  //    MPI::Init(argc, argv);
  //    rank = MPI::COMM_WORLD.Get_rank();       
  //    size = MPI::COMM_WORLD.Get_size(); 
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);    

// ----- Construction of a Cartesian topology
    CartComm<2> cart(argc, argv, rank);   
    Isub = cart.get_I();                 // this rank's subdomain index along x
    Jsub = cart.get_J();                 // this rank's subdomain index along y
    num_subdom_x = cart.getNumProc_I(); 
    num_subdom_y = cart.getNumProc_J();
    //    cart.print();    

// Reading and broadcasting data from the input file 
    read_data_and_Bcast(cart);   

// Global Mesh
// (built from the per-subdomain extents that were just broadcast)
    StructuredMesh<Uniform<double, 2> > mesh(length_x, num_nodes_x,
					     length_y, num_nodes_y);

// Physical offset of this subdomain's origin inside the global domain.
    double shiftx = Isub * length_x / num_subdom_x;
    double shifty = Jsub * length_y / num_subdom_y;

// Subdomain
    SubDomain<double, 2> subdom(cart, mesh, 
				nc_ovlp_l, nc_ovlp_r,
				nc_ovlp_d, nc_ovlp_u);
    double dx = mesh.getDelta(X);
    double dy = mesh.getDelta(Y);
    // NOTE(review): these locals shadow the global int num_nodes_x/num_nodes_y
    // and are declared double. Presumably they pick up the mesh extents after
    // the SubDomain constructor adjusted them for the overlap — confirm, and
    // consider declaring them int to match the globals.
    double num_nodes_x = mesh.getExtentNodes(X);
    double num_nodes_y = mesh.getExtentNodes(Y);

    //    subdom.print();    
    //    mesh.print(Isub, Jsub);
    
    // T lives on control volumes; Tn lives on mesh nodes (used for boundary
    // conditions and file output).
    ScalarField2D T(mesh.getExtentVolumes());
    ScalarField2D Tn(mesh.getExtentNodes());
    Range all = Range::all();
    Tn = 0;
    Range I ( Tn.lbound(firstDim), Tn.ubound(firstDim) );
    firstIndex i;
    // Interior subdomains start their local coordinates early by the overlap
    // width, so the overlap cells map to the correct global positions.
    if (Isub != 0) shiftx -= dx * nc_ovlp_l;
    if (Jsub != 0) shifty -= dy * nc_ovlp_d;
    // Dirichlet condition T = 10*sin(pi*x) on the top wall of the global
    // domain (only ranks owning the topmost subdomain row apply it).
    if ( Jsub == (num_subdom_y - 1) )
      Tn(I, Tn.ubound(secondDim)) = 10 * sin ( PI * (dx * i + shiftx) );

    // Per-rank output path "./DataHeat2D/NN/temp." with NN zero-padded.
    ostringstream dir_number;
    dir_number.width(2);
    dir_number.fill('0');
    dir_number << rank;
    string dnumber = dir_number.str();
    string filename = "./DataHeat2D/" + dnumber + "/temp.";

    InOut::writeToFileP(Tn, 0, filename, dx, shiftx, dy, shifty);

    // Local linear system A*phi = b assembled on this subdomain.
    SparseMatrix< Diagonal< double, 2> > A(num_nodes_x, num_nodes_y); 
    ScalarField2D                        b(num_nodes_x, num_nodes_y);

    NumUtils::interpolateToVolumes(T, Tn);

    // Heat equation discretized with a central-difference scheme (CDS);
    // Dirichlet conditions on all four walls.
    ScalarEquation<CDS<double, 2> > heat(T, A, b, mesh.getDeltas());
    heat.setDeltaTime(dt);
    heat.setGamma(1.0);   // constant diffusion coefficient
    heat.setDirichlet(TOP_WALL);
    heat.setDirichlet(LEFT_WALL);
    heat.setDirichlet(RIGHT_WALL);
    heat.setDirichlet(BOTTOM_WALL);

    int iteration = 1;
    double error = 1.0, error_global =  1.0, residual = 1.0, residual_global;
    // NOTE(review): every rank opens (and truncates) these same two files,
    // although only rank == FLAG ever writes to them — confirm this is intended.
    ofstream error_fp("./DataHeat2D/error"), residual_fp("./DataHeat2D/residual");

    // Outer loop: advance until the global error falls below the tolerance
    // or the maximum number of steps is reached.
    while ( (error_global > tolerance) && (iteration < steps) ) {

      // Inner Schwarz (DDM) iterations: solve locally, then refresh the
      // overlap regions from the neighboring subdomains.
      for (int iddm = 1; iddm <= ddm_iter; iddm++) {
	heat.calcCoefficients();
	Solver::TDMA2DX(heat, tolerance, tdma_iter);
	error = heat.calcErrorL1();
	residual = heat.calcResidual();
      
	subdom.infoExchange(heat.phi);  // T is updated with info from neighbors subdomains

	//	heat.update();           // T is updated with the new solution
	//	subdom.infoExchange(T);  // T is updated with info from neighbors subdomains
	//	heat.updatePhi(T);       // phi is updated with T	
      }

      heat.update();           // T is updated with the new solution

      // Periodic output of the node-interpolated field.
      if ( !(iteration % frequency) ) {
	NumUtils::interpolateToNodes(Tn, T);
	InOut::writeToFileP(Tn, iteration, filename, dx, shiftx, dy, shifty);
      }

      // Convergence is judged on the worst (max) error/residual across ranks.
      MPI_Allreduce(&error, &error_global, 1, MPI_DOUBLE, MPI_MAX, cart.comm);
      MPI_Allreduce(&residual, &residual_global, 1, MPI_DOUBLE, MPI_MAX, cart.comm);

      // Only the rank selected by FLAG (read from the input file) reports.
      if (rank == FLAG ) {
	error_fp << iteration << "\t" << error_global << std::endl;
	residual_fp << iteration << "\t" << residual_global << std::endl;      
	std::cout << "\n ----- Proc = " << rank << " | Iter = " << iteration 
		<< "\t Global Residual = " << residual_global 
		<< "\t Global Error = " << error_global;
      }

      iteration++;
    }

    //    MPI::Finalize();
    MPI_Finalize();

    return 0;
}


// Reads the simulation parameters from "inputHeat2D" on rank 0 and
// broadcasts them to every rank in the Cartesian communicator.
//
// @param cart  Cartesian communicator wrapper; cart.comm is used for the
//              broadcasts.
// @return true on success; false (on rank 0 only) if the input file could
//         not be opened — in that case zero-filled parameters are broadcast
//         so all ranks stay consistent instead of receiving garbage.
//
// Fills the file-scope globals: length_x/y, num_nodes_x/y, the four overlap
// widths, dt, tolerance, tdma_iter, steps, frequency, FLAG, ddm_iter.
bool read_data_and_Bcast(CartComm<2>& cart) 
{
  // Zero-initialize so that a failed read never broadcasts uninitialized
  // memory (which would be undefined behavior).
  int    data_Int[11] = {0};
  double data_Flt[4]  = {0.0};
  bool   ok = true;
  
  if (rank == 0) {
    std::cout << "\n\n"
	      << " +----------------------------------------+\n"
	      << " |    TUNA FOR PDE SOLVING IN PARALLEL    |\n"
	      << " +----------------------------------------+\n"
	      << " | MASTER PROCESS rank = " << rank << "\n"
	      << " | No subdomains in x-axis = " << num_subdom_x << "\n"
	      << " | No subdomains in y-axis = " << num_subdom_y << "\n"
	      << " +----------------------------------------+\n"
	      << "\n";
	
    // ----- Reading data from "input" file	
    std::ifstream input_cin ("inputHeat2D");
    if (!input_cin) {
      std::cerr << "\n Error: could not open input file 'inputHeat2D'."
		<< " Broadcasting zeroed parameters.\n";
      ok = false;
    } else {
      input_cin >> data_Flt[0]  // length in x-axis
		>> data_Flt[1]  // length in y-axis
		>> data_Int[0]  // Num of nodes in x-axis for each subdomain
		>> data_Int[1]  // Num of nodes in y-axis for each subdomain
		>> data_Int[2]  // Overlapping left
		>> data_Int[3]  // Overlapping right
		>> data_Int[4]  // Overlapping down
		>> data_Int[5]  // Overlapping up
		>> data_Flt[2]  // Time step
		>> data_Flt[3]  // Tolerance
		>> data_Int[6]  // Max iterations for TDMA algo
		>> data_Int[7]  // Max iterations (Steps)
		>> data_Int[8]  // Print frequency
		>> data_Int[9]  // FLAG
		>> data_Int[10]; // DDM iterations
      input_cin.close();
    }
  }
// ----- Broadcast the info to all processors
// Use the C API datatype constants (MPI_INT/MPI_DOUBLE): the C++ bindings
// (MPI::INT/MPI::DOUBLE) were deprecated in MPI-2.2 and removed in MPI-3,
// and the rest of this file already uses the C API.
  MPI_Bcast(data_Int, 11, MPI_INT, 0, cart.comm);    
  MPI_Bcast(data_Flt, 4, MPI_DOUBLE, 0, cart.comm); 

// ----- Using global variables.
    length_x    = data_Flt[0];
    length_y    = data_Flt[1];
    dt          = data_Flt[2];
    tolerance   = data_Flt[3];

    num_nodes_x = data_Int[0];
    num_nodes_y = data_Int[1];
    nc_ovlp_l     = data_Int[2]; 
    nc_ovlp_r     = data_Int[3];
    nc_ovlp_d     = data_Int[4]; 
    nc_ovlp_u     = data_Int[5];
    tdma_iter    = data_Int[6];
    steps        = data_Int[7];
    frequency   = data_Int[8];
    FLAG        = data_Int[9];
    ddm_iter    = data_Int[10];

    // Previously `return 0` — false despite success — for a bool function.
    return ok;
}



