
#include <iostream>
#include <cmath>
#include "Meshes/Uniform.hpp"
#include "DDM/CartComm.hpp"
#include "DDM/SubDomain.hpp"
#include "Equations/ScalarEquation.hpp"
#include "Schemes/CDS.hpp"
#include "Solvers/TDMA.hpp"
#include "Utils/inout.hpp" 
#include "Utils/num_utils.hpp"
#include "Utils/GNUplot.hpp"
using namespace Tuna;
  
// Definition of scalar field in one dimension
typedef TunaArray<double,1>::huge ScalarField1D;

// Rank that prints/logs the global convergence monitors (read from input file).
int FLAG;

// Parameters to make the partition
double length_x;              // global domain length along the x-axis
int num_nodes_x, num_subdom;  // global node count / number of subdomains along x
int nc_ovlp_l, nc_ovlp_r;     // overlap cells on the left / right of a subdomain
double subdom_length_x;       // NOTE(review): not used in the visible code — verify
_
// Parameters to identify the subdomain
int Isub, Jsub, Nsub_x, Nsub_y, rank, size;  // Cartesian indices, grid extents, MPI rank/size

// Local variables for each subdomain
double dt, tolerance, amplitude;  // time step, convergence tolerance, initial-condition amplitude
int frequency;                    // iterations between file dumps
int steps, ddm_iter;              // max outer iterations / inner Schwarz (DDM) iterations

// Reads the run parameters from "inputHeat1D" on rank 0 and broadcasts them.
bool read_data_and_Bcast(CartComm<1>&);

// Solves the 1-D heat (diffusion) equation in parallel with an overlapping
// Schwarz domain decomposition: each MPI rank owns one subdomain, solves it
// with TDMA, and exchanges overlap data with its neighbors each iteration.
int main( int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

// ----- Construction of a Cartesian topology
    CartComm<1> cart(argc, argv, rank);
    Isub = cart.get_I();               // this process' subdomain index along x
    num_subdom = cart.getNumProc_I();  // total number of subdomains along x

// ----- Reading and broadcasting data from the input file
    read_data_and_Bcast(cart);

// ----- Global mesh covering the whole domain [0, length_x]
    StructuredMesh<Uniform<double, 1> > mesh(length_x, num_nodes_x);

    // Physical x-offset of this subdomain inside the global domain.
    double shift = Isub * length_x / num_subdom;

// ----- Subdomain (extends the local mesh with the overlap cells)
    SubDomain<double, 1> subdom(cart, mesh, nc_ovlp_l, nc_ovlp_r);
    double dx = mesh.getDelta(X);
    // FIX: this was declared `double num_nodes_x`, shadowing the global
    // `int num_nodes_x` with a floating-point node count that was then
    // used as a container size for A and b below. Use a distinct int.
    int local_num_nodes_x = mesh.getExtentNodes(X);

    ScalarField1D T(mesh.getExtentVolumes());  // cell-centered unknowns
    ScalarField1D Tn(mesh.getExtentNodes());   // node values (init + output)

    // Interior subdomains start nc_ovlp_l cells to the left of their
    // nominal origin, so correct the physical shift accordingly.
    if (Isub != 0) shift -= dx * nc_ovlp_l;
    firstIndex i;
    Tn = amplitude * cos ( 3 * PI * (dx * i + shift) );  // initial condition

    // Per-rank output path "./DataHeat1D/NN/temp." (zero-padded rank).
    ostringstream dir_number;
    dir_number.width(2);
    dir_number.fill('0');
    dir_number << rank;
    string dnumber = dir_number.str();
    string filename = "./DataHeat1D/" + dnumber + "/temp.";
    InOut::writeToFileP(Tn, 0, filename, dx, shift);

    SparseMatrix< Diagonal < double, 1> > A(local_num_nodes_x);
    ScalarField1D                         b(local_num_nodes_x);

    NumUtils::interpolateToVolumes(T, Tn);

// ----- Heat equation discretized with a central-difference scheme
    ScalarEquation< CDS<double, 1> > heat(T, A, b, mesh.getDeltas());
    heat.setDeltaTime(dt);
    heat.setGamma(1.0);           // unit diffusion coefficient
    heat.setDirichlet(LEFT_WALL);
    heat.setDirichlet(RIGHT_WALL);
    heat.print(rank);

    int iteration = 1;
    double error = 1.0, global_error = 1.0;
    double residual = 1.0, global_residual = 1.0;

    // FIX: previously every rank opened (and truncated) the same two monitor
    // files although only rank FLAG ever writes them; open them only there.
    ofstream error_fp, residual_fp;
    if (rank == FLAG) {
      error_fp.open("./DataHeat1D/error");
      residual_fp.open("./DataHeat1D/residual");
    }

    while ( (global_error > tolerance) && (iteration < steps) ) {

      // Inner Schwarz (DDM) sweeps: solve locally, then refresh the
      // overlap cells with the neighbors' latest values.
      for (int iddm = 1; iddm <= ddm_iter; iddm++) {
        heat.calcCoefficients();
        Solver::TDMA1D(heat);
        error = heat.calcErrorL1();     // local error
        residual = heat.calcResidual(); // local residual

        subdom.infoExchange(heat.phi);  // overlap update from neighbor subdomains
      }

      heat.update();                    // T is updated with the new solution

      if ( !(iteration % frequency) ) {
        NumUtils::interpolateToNodes(Tn, T);
        InOut::writeToFileP(Tn, iteration, filename, dx, shift);
      }

      // Convergence is judged on the worst (maximum) subdomain values.
      MPI_Allreduce(&error, &global_error, 1, MPI_DOUBLE, MPI_MAX, cart.comm);
      MPI_Allreduce(&residual, &global_residual, 1, MPI_DOUBLE, MPI_MAX, cart.comm);

      if (rank == FLAG ) {
        error_fp << iteration << "\t" << global_error << std::endl;
        residual_fp << iteration << "\t" << global_residual << std::endl;
        std::cout << "\n ----- Proc = " << rank << " | Iter = " << iteration
                  << "\t Global Residual = " << global_residual
                  << "\t Global Error = " << global_error;
      }

      iteration++;
    }

    MPI_Finalize();

    return 0;
}


// Reads the run parameters from the "inputHeat1D" file on rank 0 and
// broadcasts them to every process in the Cartesian communicator.
// On return, the file-scope parameters (length_x, num_nodes_x, overlaps,
// amplitude, dt, tolerance, steps, frequency, ddm_iter, FLAG) are set on
// every rank. Returns true on success; false (on rank 0 only) if the
// input file could not be opened. Callers currently ignore the result.
bool read_data_and_Bcast(CartComm<1>& cart)
{
  // Zero-initialize so a failed read never broadcasts uninitialized memory.
  int    data_Int[7] = {0};
  double data_Flt[4] = {0.0};
  bool   ok = true;

    if (rank == 0) {
	std::cout << "\n\n"
		  << " +----------------------------------------+\n"
		  << " |    TUNA FOR PDE SOLVING IN PARALLEL    |\n"
		  << " +----------------------------------------+\n"
		  << " | MASTER PROCESS rank = " << rank << "\n"
		  << " | No subdomains in x-axis = " << num_subdom << "\n"
		  << " +----------------------------------------+\n"
		  << "\n";

// ----- Reading data from "input" file
	std::ifstream input_cin ("inputHeat1D");
	if (!input_cin) {
	    // FIX: previously a missing file was silently ignored and
	    // garbage values were broadcast to every rank.
	    std::cerr << "\n ERROR: cannot open input file 'inputHeat1D'\n";
	    ok = false;
	} else {
	    input_cin >> data_Flt[0]  // Global length in x-axis
		      >> data_Int[0]  // Global num of nodes in x-axis
		      >> data_Int[1]  // Overlapping on the right
		      >> data_Int[2]  // Overlapping on the left
		      >> data_Flt[1]  // Amplitude for the initial condition
		      >> data_Flt[2]  // Time step
		      >> data_Flt[3]  // Tolerance
		      >> data_Int[3]  // Steps
		      >> data_Int[4]  // Print frequency
		      >> data_Int[5]  // DDM iterations
		      >> data_Int[6]; // FLAG
	    input_cin.close();
	}
    }
// ----- Broadcast the info to all processors.
//       FIX: use the C-API datatype constants; the MPI::INT / MPI::DOUBLE
//       C++ bindings are deprecated (MPI-2.2) and removed (MPI-3.0), and
//       the rest of this file already uses the C API.
    MPI_Bcast(data_Int, 7, MPI_INT, 0, cart.comm);
    MPI_Bcast(data_Flt, 4, MPI_DOUBLE, 0, cart.comm);

// ----- Unpack into the file-scope parameters.
    length_x    = data_Flt[0];
    amplitude   = data_Flt[1];
    dt          = data_Flt[2];
    tolerance   = data_Flt[3];

    // NOTE(review): the read comments above label data_Int[1] as the *right*
    // overlap and data_Int[2] as the *left* one, but they are assigned the
    // opposite way here; verify against the inputHeat1D file format.
    num_nodes_x = data_Int[0];
    nc_ovlp_l   = data_Int[1];
    nc_ovlp_r   = data_Int[2];
    steps       = data_Int[3];
    frequency   = data_Int[4];
    ddm_iter    = data_Int[5];
    FLAG        = data_Int[6];

    // FIX: this bool function returned 0 (false) even on success;
    // report success honestly.
    return ok;
}


