#ifndef __MULTIBLOCK_GRID_H__

#define __MULTIBLOCK_GRID_H__

#include "config.h"
#include "mpi.h" 
//#include <blitz/array.h>
#include <fstream>
#include<sstream>

extern "C" {
  #include "bsparti.h"
  #include "addition.h"
  #include "birreg.h"
};

/// \brief Describes one cut (block-to-block interface) boundary segment.
///
/// Each cut joins a rectangular index range of one block to a rectangular
/// index range of another; the min/max index pairs delimit those ranges in
/// each of the ndim dimensions.  Consumed by the setCutBCSegInfo /
/// validateCutBC machinery in MBGrid.
template <INT ndim> struct cut_info{
  INT firstBlock;                 ///< global id of the first block of the cut
  INT secondBlock;                ///< global id of the second block of the cut
  INT firstBlockMaxIdx[ndim];     ///< per-dimension upper index of the range on the first block
  INT firstBlockMinIdx[ndim];     ///< per-dimension lower index of the range on the first block
  INT secondBlockMaxIdx[ndim];    ///< per-dimension upper index of the range on the second block
  INT secondBlockMinIdx[ndim];    ///< per-dimension lower index of the range on the second block
};

/// \brief Multiblock structured-grid manager distributed with Multiblock PARTI.
///
/// Lifecycle: construct (optionally on a user communicator), readPlot3DFile(),
/// set ghost-cell widths, decompose(), then build schedules / exchange data.
/// Each rank writes diagnostics to its own "log_proc_<rank>" file.
template <INT ndim> class MBGrid{
  public: 
   /// Class Constructors
   /// \brief Construct on a caller-supplied MPI communicator.
   MBGrid( MPI_Comm &mpi_comm ) 
   {
      _mpi_comm = mpi_comm;
      initialize();
   } 

   /// \brief Construct on MPI_COMM_WORLD.
   MBGrid( ) 
   { 
      _mpi_comm = MPI_COMM_WORLD;
      initialize();
   }

   /// \brief Release PARTI decompositions/arrays and all heap storage.
   ///
   /// The PARTI free loop only runs when decompose() actually allocated
   /// _decomp/_darray; every delete[] is safe because all pointer members
   /// are NULL-initialized in setDefaultClassValues().
   ~MBGrid( ) 
   {
      if( _decomp != NULL && _darray != NULL ){
        for( INT i = 0 ; i < _global_nblocks ; ++i ){
          parti_free_decomp( _decomp[i] );
          parti_free_darray( _darray[i] );
        }
      }
      delete [] _decomp;           // fix: these eight arrays were previously leaked
      delete [] _darray;
      delete [] _global_blk_size;
      delete [] _global_xadj;
      delete [] _local_blk_size;
      delete [] _non_zero_blocks;
      delete [] _lbound;
      delete [] _ubound;
      cleanup_after_PARTI();
   }
   /// Input Routines
   /// \brief Set the number of internal ghost cells (same pad in every dimension)
   void setInternalGhostCells( INT in_pad )
   {
      // Fix: _in_ghost is an INT[ndim]; the former scalar assignment
      // (_in_ghost = in_pad) is ill-formed.  Apply the pad per dimension,
      // matching how align() consumes the array in createVirtualProcessor().
      for( INT i = 0 ; i < ndim ; ++i ) _in_ghost[i] = in_pad;
   }
   /// \brief Set the number of external ghost cells (same pad in every dimension)
   void setExternalGhostCells( INT ext_pad )
   {
      // Fix: same array-vs-scalar defect as setInternalGhostCells.
      for( INT i = 0 ; i < ndim ; ++i ) _ext_ghost[i] = ext_pad;
   }
   /// \brief Read grid block count and per-block sizes from a Plot3d file.
   ///
   /// Fills _global_blk_size (ndim extents per block), the prefix-sum
   /// _global_xadj (node offset of each block) and _global_size (total
   /// node count).  Rank 0 echoes the stats to its log file.
   void readPlot3DFile( const char *file )
   {
      std::ifstream fin(file);
      if( !fin ){
        // Robustness fix: previously a missing/unreadable file silently
        // produced garbage sizes; now it is logged and the call aborts.
        _local_log_file << "readPlot3DFile: cannot open " << file << "\n";
        return;
      }
      fin >> _global_nblocks;
      delete [] _global_blk_size;   // allow repeated reads without leaking
      delete [] _global_xadj;
      _global_blk_size = new INT[ ndim * _global_nblocks ];
      _global_xadj     = new INT[ _global_nblocks + 1 ];
      _global_xadj[0] = 0;
      for( INT i = 0 ; i < ndim * _global_nblocks ; ++i )
        fin >> _global_blk_size[i];
      _global_size = 0;
      for( INT i = 0 ; i < _global_nblocks ; ++i ){  
        INT prod = 1;
        for( INT j = 0 ; j < ndim ; ++j ){
          prod *= _global_blk_size[ ndim * i + j ]; 
        }
        _global_size += prod;
        _global_xadj[i+1] = _global_xadj[i] + prod;
      }
      _file = file;
      if( _rank == 0 ) printGridStats(); /// Print stats 
   }

   /// Boundary Condition Related Routine
   /// \brief Set the total number of cut-boundary segments
   void setNumCutBCSeg( INT nseg );
   /// \brief Set the cut-boundary information one by one 
   void setCutBCSegInfo( INT cut , INT *info );
   /// \brief Check if the given cut-boundary information are valid
   void validateCutBC();

   /// MPI specific routines
   /// \brief Decompose the domain onto the processors of _mpi_comm and
   ///        log the resulting per-rank block layout.
   void decompose()
   {
     allocatePartiVars();
     createVirtualProcessor();
     printDecompStats(); 
   }  
   /// \brief Allocate the local block grid variable 
   void allocateLocalBlock(){
     //formLocalXadjncy();
     //allocLocalGridArray();
   }
   /// \brief Build the external and internal ghost cell movement schedule a/c procs
   void buildSchedule(){
    //buildInternalGhostSchedule();
    //buildExternalGhostSchedule();
    //combineAllSchedules();
   }

   /// \brief Exchange the ghost boundary data across the processors 
   void exchangeGridData();
 
  private:
   INT _global_size, _global_nblocks;          ///< total node count / total block count
   INT *_global_blk_size;                      ///< ndim extents per block (row-major)
   INT _in_ghost[ndim] , _ext_ghost[ndim] , *_global_xadj;  ///< ghost pads; xadj = node prefix sums
   MPI_Comm _mpi_comm;
   cut_info<ndim> *_global_cut_info;
   const char *_file;                          ///< name of the last Plot3d file read

   INT _local_size, _local_nblocks, _local_nseg;
   INT *_local_blk_size, *_non_zero_blocks;    ///< local extents; -1 marks blocks absent on this rank
   INT *_lbound , *_ubound;                    ///< global index bounds of the local piece of each block
   INT *_local_block_xadj;
   REAL *_local_blk_grid;
   REAL *_local_blk_grid_metrics;
   cut_info<ndim> *_local_cut_info;
   INT _rank;                                  ///< rank within _mpi_comm
   std::ofstream _local_log_file;              ///< per-rank log "log_proc_<rank>"
   /// Multiblock Parti Variables
   DECOMP **_decomp;
   VPROC *_vproc;
   DARRAY **_darray;
   SCHED *_sub_array_sched;
   SCHED *_blk_array_sched;
 
   void setNumBlock( INT nblock );
   void setBlockSize( INT block , INT *size );
   void setAllBlockSize( INT *all_size ); 

   /// \brief Shared constructor body: defaults, PARTI setup, rank query,
   ///        per-rank log file.  Factored out of the two constructors,
   ///        which previously duplicated this sequence verbatim.
   void initialize()
   {
      setDefaultClassValues();
      parti_setup( &_mpi_comm );
      MPI_Comm_rank( _mpi_comm , &_rank );
      std::stringstream ss;
      ss << "log_proc_" << _rank ;
      _local_log_file.open(ss.str().c_str());
   }

   /// \brief Zero the ghost pads and NULL every owned pointer so the
   ///        destructor is safe no matter which lifecycle stage was reached.
   void setDefaultClassValues()
   {
      for(INT i = 0 ; i < ndim ; ++i ) _in_ghost[i]  = 0;
      for(INT i = 0 ; i < ndim ; ++i ) _ext_ghost[i] = 0;
      _global_nblocks  = 0;
      _global_size     = 0;
      _global_blk_size = NULL;   // fix: these were previously left uninitialized,
      _global_xadj     = NULL;   // making the destructor UB when decompose()
      _local_blk_size  = NULL;   // (or readPlot3DFile) was never called
      _non_zero_blocks = NULL;
      _lbound          = NULL;
      _ubound          = NULL;
      _decomp          = NULL;
      _darray          = NULL;
   } 

   /// \brief Dump global block sizes and the xadj prefix sums to the log.
   void printGridStats()
   {
     _local_log_file << "Totally " << _global_nblocks << " blocks in file " << _file << "\n";
     _local_log_file << "Total Node Count = " << _global_size << "\n";
     for( INT i = 0 ; i < _global_nblocks ; ++i ) {
       _local_log_file << "Block " << i << " [ ";
       for( INT j = 0 ; j < ndim ; ++j ) {
         _local_log_file << _global_blk_size[ ndim * i + j ] << " " ; 
       }
       _local_log_file << "]\n";
     }
     _local_log_file << "XADJ : [ " ;
     for( INT i = 0 ; i < _global_nblocks + 1 ; ++i )
       _local_log_file << _global_xadj[i] << "  ";
     _local_log_file << "]\n"; 
   }

   /// \brief Allocate the per-block PARTI handle tables.
   void allocatePartiVars()
   {
      _darray = new  DARRAY*[ _global_nblocks ];
      _decomp  = new DECOMP* [ _global_nblocks ]; 
   }

   /// \brief Build the PARTI virtual processor, then embed, block-distribute
   ///        and align each grid block onto it (with the configured ghost pads).
   void createVirtualProcessor()
   {          
     char c_info[ndim];
     for( INT i = 0 ; i < ndim ; ++i ) c_info[i] = 'B';  // 'B' = block distribution per dim
     INT dim[ndim];
     for( INT i = 0 ; i < ndim ; ++i ) dim[i] = i;       // identity dimension mapping
     INT extra_flag = 0; 
     _vproc = vProc( 1 , &_global_size ); 
     for( INT i = 0 ; i < _global_nblocks ; ++i ){
       _decomp[i] = create_decomp( ndim , &_global_blk_size[ ndim * i ] );
       // each block owns the xadj-delimited slice of the 1-D virtual processor
       embed( _decomp[i] , _vproc , _global_xadj[i] , _global_xadj[i + 1] - 1 );
       distribute( _decomp[i] , c_info );
       _darray[i] = align( _decomp[i] , ndim , dim , 
                           &_global_blk_size[ ndim * i ] , 
                           _in_ghost , _in_ghost , _ext_ghost, 
                           _ext_ghost , &extra_flag , dim );
     }
   }
   
   /// \brief Query PARTI for the local extents/bounds of every block and
   ///        log which blocks (and what fraction of the grid) this rank owns.
   void printDecompStats()
   { 
      delete [] _local_blk_size;   // make repeated decompose() calls leak-free
      delete [] _non_zero_blocks;
      delete [] _lbound;
      delete [] _ubound;
      _local_blk_size  = new INT[ _global_nblocks * ndim ];
      _non_zero_blocks = new INT[ _global_nblocks ];
      _lbound          = new INT[ndim * _global_nblocks];
      _ubound          = new INT[ndim * _global_nblocks];
 
      for( INT i = 0 ; i < _global_nblocks ; ++i ) {
        laSizes( _darray[i] , _local_blk_size + i * ndim );
        for( INT j = 0 ; j < ndim ; ++ j ){ 
          _lbound[ i * ndim + j ]  = gLBnd( _darray[i] , j );
          _ubound[ i * ndim + j ]  = gUBnd( _darray[i] , j );
        }
        // A zero first extent means no part of block i lives on this rank.
        if( *( _local_blk_size + i * ndim )  == 0 )
          _non_zero_blocks[i] = -1;
        else
          _non_zero_blocks[i] = 0; 
      }
      _local_size = 0;
      _local_log_file << "Processor " << _rank << " has blocks [ ";
      for( INT i = 0 ; i < _global_nblocks ; ++i ) {
        if( _non_zero_blocks[i] == 0 ){
           _local_log_file << i << "( ";
           INT temp_prod = 1;
           for( INT j = 0 ; j < ndim ; ++j ){
             temp_prod *= _local_blk_size[ ndim * i + j ];
             _local_log_file << _local_blk_size[ ndim * i + j ] << " ";
           }
           _local_size += temp_prod;
           _local_log_file << ")  ";
        }
      }       
      _local_log_file << "] --> " << double(_local_size)/double(_global_size)*100 << "%\n";
      _local_log_file << "Block Bounds in Processor " << _rank << "\n";
      for( INT i = 0 ; i < _global_nblocks ; ++i ) {
        if( _non_zero_blocks[i] == 0 ){
          _local_log_file << "Block " << i << " Lower Bounds [ ";
          for( INT j = 0 ; j < ndim ; ++j )
             _local_log_file << _lbound[ i * ndim + j ] << " ";
          _local_log_file << "]\n";
           _local_log_file << "Block " << i << " Upper Bounds [ ";
          for( INT j = 0 ; j < ndim ; ++j )
             _local_log_file << _ubound[ i * ndim + j ] << " ";
          _local_log_file << "]\n"; 
        }
      }
   }
};


#endif
 
