/*
 ***************************************************************************
 * \file   convForcedPar2D.cpp
 * Forced Convection-Diffusion in 2D
 *
 * \par Description
 * This code solves the following equation in parallel:
 * \f[ 
 * \frac{\partial T}{\partial t} + 
 * u_j \frac{\partial T}{\partial x_j} = \nabla^2 T
 * \f]. 
 * \par
 * This equation is solved in a unit square \f$ x, y \in [0,1] \f$. 
 * The velocity is prescribed and has the form:
 * \f[
 * u = -A \cos(\pi y) \sin(\pi \lambda x / L_x );
 * v = \frac{A \lambda}{L_x} \sin(\pi y) \cos(\pi \lambda x / L_x);
 *\f]
 * The boundary conditions are as shown in the next figure:
 *
 * \image html convection.png "Unit square in 2D" width=10cm
 * \image latex convection.eps "Unit square in 2D" width=10cm
 *
 *  \par Compiling and running
 *  Modify the variables BASE and BLITZ in the file \c tuna-cfd-rules.in 
 *  according to your installation and then type the following commands:
 * \par
   \verbatim
   % make
   % ./paraConvDiff01 \endverbatim
 * 
 ***************************************************************************
 * \author    Luis M. de la Cruz  [ Sat May 23 12:06:36 CDT 2009 ]
 ***************************************************************************
 */

#include <iostream>
#include <cmath>
#include "Meshes/Uniform.hpp"
#include "Equations/ScalarEquation.hpp"
#include "Schemes/Upwind_CoDi.hpp"
#include "Solvers/TDMA.hpp"
#include "Utils/inout.hpp" 
#include "Utils/num_utils.hpp"
#include "Utils/GNUplot.hpp"
#include "DDM/CartComm.hpp"
#include "DDM/SubDomain.hpp"
using namespace Tuna;

// Rank whose progress is printed each iteration (read from the input file).
int FLAG;

// Scalar field in two dimensions (rank-2 Tuna/Blitz-style array).
typedef TunaArray<double,2>::huge ScalarField2D;

// Parameters of the global domain and its partition into subdomains.
double length_x, length_y;
int num_nodes_x, num_vols_x, num_subdom_x;
int num_nodes_y, num_vols_y, num_subdom_y;
// Overlap widths, in cells, on the left/right/down/up sides of a subdomain.
int nc_ovlp_l, nc_ovlp_r, nc_ovlp_d, nc_ovlp_u;
double ovlp_r, ovlp_l, ovlp_u, ovlp_d;

// Cartesian coordinates (Isub, Jsub) of this process' subdomain, plus
// MPI rank/size and the number of inner Schwarz (DDM) sweeps per step.
int Isub, Jsub, rank, size;
int ddm_iter;    

// Run parameters local to each subdomain (filled by read_data_and_Bcast).
double dt, tolerance, amplitude;
int tdma_iter, steps, frequency, lambda;
// Dirichlet temperatures on the physical x-walls.
double right_wall = -0.5, left_wall = 0.5;

bool read_data_and_Bcast(CartComm<2>&);

/**
 * Fills the nodal velocity fields with the prescribed roll pattern
 *   u = -A cos(pi*y) sin(pi*l*x/l_x),
 *   v = (A*l/l_x) sin(pi*y) cos(pi*l*x/l_x),
 * where x = dx*i + shiftx and y = dy*j + shifty are global coordinates of
 * this subdomain's nodes.  Nodes on the physical boundary of the global
 * domain are skipped, so their previously assigned values are preserved.
 */
template<class sfield>
void initialVelocity(sfield &u, sfield &v, double dx, double dy,
		     double A, double l_x, int l, double shiftx, double shifty)
{
  const double vAmp = A * l / l_x;   // amplitude of the v component

  int iBeg = u.lbound(firstDim);
  int iEnd = u.ubound(firstDim);
  int jBeg = u.lbound(secondDim);
  int jEnd = u.ubound(secondDim);

  // Subdomains that touch a physical wall leave that boundary node untouched.
  if (Isub == 0)                iBeg += 1;
  if (Isub == num_subdom_x - 1) iEnd -= 1;
  if (Jsub == 0)                jBeg += 1;
  if (Jsub == num_subdom_y - 1) jEnd -= 1;

  for (int i = iBeg; i <= iEnd; ++i) {
    const double xArg = PI * l * (dx * i + shiftx) / l_x;  // x-phase, constant per column
    for (int j = jBeg; j <= jEnd; ++j) {
      const double yArg = PI * (dy * j + shifty);
      u(i,j) = -A * cos (yArg) * sin (xArg);
      v(i,j) = vAmp * sin (yArg) * cos (xArg);
    }
  }
}


/**
 * Driver.  Workflow:
 *   1. Initialize MPI and build a 2D Cartesian topology of subdomains.
 *   2. Read/broadcast the run parameters and build the local uniform mesh
 *      and the overlapping SubDomain decomposition.
 *   3. Prescribe the analytic roll velocity, stagger it, and assemble an
 *      upwind convection-diffusion equation for T with Dirichlet walls in x,
 *      Neumann walls in y, and Dirichlet ghost (interface) conditions.
 *   4. Iterate: a few Schwarz (DDM) sweeps of the TDMA line solver with halo
 *      exchange, then reduce the local L1 error to a global maximum; write
 *      snapshots every `frequency` steps.
 */
int main( int argc, char **argv)
{

  //    MPI::Init(argc, argv);
  //    rank = MPI::COMM_WORLD.Get_rank();
  //    size = MPI::COMM_WORLD.Get_size();
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

// ----- Construction of a Cartesian topology

    CartComm<2> cart(argc, argv, rank);
    Isub = cart.get_I();                 // this process' subdomain column
    Jsub = cart.get_J();                 // this process' subdomain row
    num_subdom_x = cart.getNumProc_I();
    num_subdom_y = cart.getNumProc_J();
    //    cart.print();

// Reading and broadcasting data from the input file 
    read_data_and_Bcast(cart);

// Global Mesh 
    StructuredMesh<Uniform<double, 2> > mesh(length_x, num_nodes_x,
					     length_y, num_nodes_y);
    // Physical offset of this subdomain's origin inside the global domain.
    double shiftx = Isub * length_x / num_subdom_x;
    double shifty = Jsub * length_y / num_subdom_y;

// Subdomain (overlapping decomposition around the local mesh)
    SubDomain<double, 2> subdom(cart, mesh, 
				nc_ovlp_l, nc_ovlp_r,
				nc_ovlp_d, nc_ovlp_u);

    double dx = mesh.getDelta(X);
    double dy = mesh.getDelta(Y);
    // NOTE(review): these locals shadow the file-scope globals of the same
    // names that read_data_and_Bcast() filled from the input file.
    int num_nodes_x = mesh.getExtentNodes(X);
    int num_nodes_y = mesh.getExtentNodes(Y);
    // NOTE(review): both "volume" counts call getExtentNodes(); should these
    // be volume extents (cf. mesh.getExtentVolumes() below)?  Confirm against
    // the StructuredMesh API.
    int num_vols_x = mesh.getExtentNodes(X);
    int num_vols_y = mesh.getExtentNodes(Y);

    //    subdom.print();
    //    mesh.print(Isub, Jsub);

    // Cell-centered unknown and velocity; staggered components for the faces.
    ScalarField2D T(mesh.getExtentVolumes());
    ScalarField2D u(mesh.getExtentVolumes());
    ScalarField2D v(mesh.getExtentVolumes());
    ScalarField2D us(num_nodes_x, num_vols_y); // staggered u-velocity 
    ScalarField2D vs(num_vols_x, num_nodes_y); // staggered v-velocity

    // Node-based fields used for interpolation and file output.
    ScalarField2D Tn(mesh.getExtentNodes());
    ScalarField2D un(mesh.getExtentNodes());
    ScalarField2D vn(mesh.getExtentNodes());

    Range all = Range::all();
    T = 0;

    // Interior subdomains start nc_ovlp_* cells before their nominal origin
    // because of the overlap region.
    if (Isub != 0) shiftx -= dx * nc_ovlp_l;
    if (Jsub != 0) shifty -= dy * nc_ovlp_d;
    // Dirichlet values on the physical left/right walls.
    if (Isub == 0)
      T(T.lbound(firstDim), all) = left_wall;
    if (Isub == num_subdom_x - 1)
      T(T.ubound(firstDim), all) = right_wall;
    /*
    if (Jsub == 0)
      T(all, T.lbound(firstDim)) = left_wall;   
    if (Jsub == num_subdom_y - 1)
      T(all, T.ubound(secondDim), all) = right_wall;
    */
   
    initialVelocity(un, vn, dx, dy, amplitude, length_x, lambda, shiftx, shifty);
    NumUtils::staggerFromNodesX(us, un);
    NumUtils::staggerFromNodesY(vs, vn);

    // Per-rank output directory "./DataConvForced2D/NN/" (zero-padded rank).
    ostringstream dir_number;
    dir_number.width(2);
    dir_number.fill('0');
    dir_number << rank;
    string dnumber = dir_number.str();
    string filenameV = "./DataConvForced2D/" + dnumber + "/velc.";
    string filenameT = "./DataConvForced2D/" + dnumber + "/temp.";

    InOut::writeToFile_DXP(un, vn, 0, filenameV, dx, shiftx, dy, shifty);
    InOut::writeToFile_DXP(Tn, 0, filenameT, dx, shiftx, dy, shifty);
    //    InOut::writeToFileP(Tn, 0, filenameT, dx, shiftx, dy, shifty);

    // Linear system A*phi = b assembled by the upwind scheme.
    SparseMatrix< Diagonal<double, 2 > > A(num_nodes_x, num_nodes_y); 
    ScalarField2D                        b(num_nodes_x, num_nodes_y);

    ScalarEquation<Upwind_CoDi<double,2> > conv_diff(T,A,b,mesh.getDeltas());    
    conv_diff.setDeltaTime(dt);
    conv_diff.setGamma(1.0);   // unit diffusion coefficient (plain nabla^2 T)
    conv_diff.setUvelocity(us);
    conv_diff.setVvelocity(vs);
    // Physical BCs: Dirichlet in x, Neumann in y; subdomain interfaces get
    // Dirichlet ghost values from the neighbors.
    subdom.setRealDirichlet(conv_diff, LEFT_WALL);
    subdom.setRealDirichlet(conv_diff, RIGHT_WALL);
    //    subdom.setRealDirichlet(conv_diff, TOP_WALL);
    //    subdom.setRealDirichlet(conv_diff, BOTTOM_WALL);
    subdom.setRealNeumann(conv_diff, TOP_WALL);
    subdom.setRealNeumann(conv_diff, BOTTOM_WALL);
    subdom.setGhostDirichlet(conv_diff);
    conv_diff.print(rank);

    double error = 1.0, global_error = 1.0, residual; // NOTE(review): residual is never used
    int iteration = 0;

    // Outer loop: stop when the worst per-rank L1 change drops below
    // `tolerance` or after `steps` iterations.
    while ( (global_error > tolerance) && (iteration < steps) ) {

      // Inner Schwarz (DDM) sweeps: solve locally, then refresh the overlap.
      for (int iddm = 1; iddm <= ddm_iter; iddm++) {
	conv_diff.calcCoefficients();
	Solver::TDMA2DY(conv_diff, tolerance, tdma_iter);
	error = NumUtils::calcErrorL1(T, conv_diff.phi);
	
	subdom.infoExchange(conv_diff.phi); // T is updated with info from neighbors subdomains

	//	conv_diff.update();     // T is updated with the new solution
	//	subdom.infoExchange(T); // T is updated with info from neighbors subdomains
	//	conv_diff.updatePhi(T); // phi is updated with T
      }

      conv_diff.update();     // T is updated with the new solution

      // Snapshot every `frequency` iterations (including iteration 0).
      if ( !(iteration % frequency) ) {
	NumUtils::interpolateToNodes(Tn, T);
	InOut::writeToFile_DXP(Tn, iteration, filenameT, dx, shiftx, dy, shifty);
	//	InOut::writeToFileP(Tn, iteration, filenameT, dx, shiftx, dy, shifty);
      }

      //      subdom.infoExchange1(T);
      //      MPI::COMM_WORLD.Allreduce(&error, &global_error, 
      //      				1, MPI::DOUBLE, 
      //      				MPI::MAX);
      // Convergence is judged on the maximum local error across all ranks.
      MPI_Allreduce(&error, &global_error, 1, MPI_DOUBLE, MPI_MAX, cart.comm);

      // Progress report from the single rank selected by FLAG (input file).
      if (rank == FLAG ) {
	std::cout << "\n ----- ( " << Isub << ", " << Jsub << ") | Iter = " 
		  << iteration
		  << "\t Err = " << error;
      }

      iteration++;
    }

    //    MPI::Finalize();
    MPI_Finalize();

    return 0;
}


/**
 * Reads the simulation parameters from the file "inputConvForcedPar2D" on
 * rank 0 and broadcasts them to every process in the Cartesian communicator,
 * then unpacks the received values into the file-scope globals.
 *
 * \param cart Cartesian communicator wrapper; its \c comm is used for the
 *             broadcasts so every rank in the topology receives the data.
 * \return true on completion (the file read itself is not error-checked).
 */
bool read_data_and_Bcast(CartComm<2>& cart) 
{
  int    data_Int[12];
  double data_Flt[5];
  if (rank == 0) {
    std::cout << "\n\n"
	      << " +----------------------------------------+\n"
	      << " |    TUNA FOR PDE SOLVING IN PARALLEL    |\n"
	      << " +----------------------------------------+\n"
	      << " | MASTER PROCESS rank = " << rank << "\n"
	      << " | No subdomains in x-axis = " << num_subdom_x << "\n"
	      << " | No subdomains in y-axis = " << num_subdom_y << "\n"
	      << " +----------------------------------------+\n"
	      << "\n";
	
// ----- Reading data from "input" file	
	std::ifstream input_cin ("inputConvForcedPar2D");
	input_cin >> data_Flt[0]   // length in x-axis
		  >> data_Flt[1]   // length in y-axis
		  >> data_Int[0]   // Num of nodes in x-axis for each subdomain
		  >> data_Int[1]   // Num of nodes in y-axis for each subdomain
		  >> data_Int[2]  // Overlapping left
		  >> data_Int[3]  // Overlapping right
		  >> data_Int[4]  // Overlapping down
		  >> data_Int[5]  // Overlapping up
		  >> data_Flt[2]   // Time step
		  >> data_Int[6]   // Max number of iterations (Steps)
		  >> data_Flt[3]   // Tolerance
		  >> data_Int[7]   // Max iterations for TDMA algo
		  >> data_Int[8]   // Number of rolls for the initial condition
		  >> data_Flt[4]   // Amplitude for the initial condition
		  >> data_Int[9]  // Print frequency
		  >> data_Int[10] // FLAG
		  >> data_Int[11]; // DDM iterations
	input_cin.close();
    }
// ----- Broadcast the info to all processors
  // Use the C datatype handles (MPI_INT / MPI_DOUBLE) with the C API: the
  // MPI C++ bindings (MPI::INT / MPI::DOUBLE) are deprecated since MPI-2.2
  // and removed in MPI-3, and mixing the two APIs is non-portable.
  MPI_Bcast(data_Int, 12, MPI_INT, 0, cart.comm);    
  MPI_Bcast(data_Flt, 5, MPI_DOUBLE, 0, cart.comm);

// ----- Unpack into the global variables (every rank).
    length_x    = data_Flt[0];
    length_y    = data_Flt[1];
    dt          = data_Flt[2];
    tolerance   = data_Flt[3];
    amplitude   = data_Flt[4];

    num_nodes_x = data_Int[0];
    num_nodes_y = data_Int[1];
    nc_ovlp_l   = data_Int[2]; 
    nc_ovlp_r   = data_Int[3];
    nc_ovlp_d   = data_Int[4]; 
    nc_ovlp_u   = data_Int[5];    
    steps       = data_Int[6];
    tdma_iter   = data_Int[7];
    lambda      = data_Int[8];
    frequency   = data_Int[9];
    FLAG        = data_Int[10];
    ddm_iter    = data_Int[11];

    // Was `return 0` (i.e. false) even on success; report success instead.
    return true;
}




