!! Copyright (C) 2009,2010,2011,2012  Marco Restelli
!!
!! This file is part of:
!!   LDGH -- Local Hybridizable Discontinuous Galerkin toolkit
!!
!! LDGH is free software: you can redistribute it and/or modify it
!! under the terms of the GNU General Public License as published by
!! the Free Software Foundation, either version 3 of the License, or
!! (at your option) any later version.
!!
!! LDGH is distributed in the hope that it will be useful, but WITHOUT
!! ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
!! or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
!! License for more details.
!!
!! You should have received a copy of the GNU General Public License
!! along with LDGH. If not, see <http://www.gnu.org/licenses/>.
!!
!! author: Marco Restelli                   <marco.restelli@gmail.com>


!>\brief
!! This module collects all the information related to the boundary
!! conditions, including neighboring relationships for domain
!! decomposition.
!!
!! \n
!!
!! A boundary condition is specified by the type of the boundary
!! condition itself and an entity to which it applies. Currently, we
!! consider three types:
!! <ul>
!!  <li> Dirichlet: \c b_dir
!!  <li> Neumann: \c b_neu
!!  <li> domain decomposition: \c b_ddc
!! </ul>
!! and two entities to which such a condition can be attached:
!! <ul>
!!  <li> boundary vertices: \c t_b_v
!!  <li> boundary sides: \c t_b_s.
!! </ul>
!!
!! We also define boundary elements (of type <tt>t_b_e</tt>), but we
!! don't attach a boundary condition to them (see the details later).
!! A boundary element is defined as an element which has at least one
!! boundary side.
!!
!! Each boundary condition can be further specified by an integer
!! value, stored in the field <tt>btype</tt> of the boundary entities,
!! whose meaning is not specified in this module. In fact, in this
!! module the values of <tt>btype</tt> are simply copied in the
!! corresponding field.
!!
!! On input, we assume that each boundary side belongs to a boundary
!! region, and that a boundary condition is specified for each
!! boundary region. This defines uniquely the boundary condition on
!! all the boundary sides, while there is some arbitrariness in the
!! way these conditions are propagated to boundary vertices. In this
!! respect, we proceed as follows:
!! <ol>
!!  <li> assign each boundary vertex (i.e. each vertex which is
!!  connected to at least one boundary side) to a boundary side (thus
!!  assigning the vertex also to the boundary region of the side)
!!  <li> assign to each boundary vertex the boundary condition of the
!!  corresponding boundary side.
!! </ol>
!! The arbitrariness now lies in the first step, since each boundary
!! vertex is typically connected to many boundary sides (and hence
!! boundary regions), each of which can specify a different boundary
!! condition or boundary condition type. In view of the typical finite
!! element implementations, we consider the following algorithm:
!! consider all the boundary sides connected to the vertex,
!! <ul>
!!  <li> if there is at least one connected side of Dirichlet type,
!!  assign the vertex to the boundary region of the connected
!!  Dirichlet boundary side with the smallest boundary region index;
!!  <li> else, if there is at least one connected side of Neumann
!!  type, assign the vertex to the boundary region of the connected
!!  Neumann boundary side with the smallest boundary region index;
!!  <li> else, if there is at least one connected side of domain
!!  decomposition type, assign the vertex to the boundary region of
!!  the connected domain decomposition boundary side with the smallest
!!  boundary region index.
!! </ul>
!!
!! \section ddc_bcs Domain decomposition information
!!
!! Domain decomposition boundary conditions often require additional
!! work compared to Dirichlet or Neumann ones, since it is important
!! to know the index of the neighboring domain, and the index of the
!! corresponding entity. For this reason, the types \c t_b_v and \c
!! t_b_s include a dedicated field \c ddc which is true if the entity
!! is shared by at least two subdomains. For boundary sides, \c ddc is
!! essentially a synonym of <tt>bc.eq.b_ddc</tt>, while for
!! boundary vertices \c ddc can be true even if \c bc indicates \c
!! b_dir or \c b_neu, since a vertex can be on a Dirichlet or Neumann
!! boundary and still be shared by two or more processors. In both
!! cases, \c ddc is equivalent to <tt>ddc_grid%gv(iv)%ni.gt.0</tt> or
!! <tt>ddc_grid%gs(is)%ni.gt.0</tt>, for vertexes and sides, and the
!! additional information can then be recovered from a \c ddc_grid
!! object.
!!
!! \note In the case of domain decomposition, it is possible that the
!! boundary region and side which a vertex is assigned to (and thus
!! the type of boundary condition of the vertex) is defined by a
!! different subdomain. For this reason, some communication is
!! required to synchronize all the subdomains.
!!
!! \section ghost_entities Ghost entities
!!
!! Ghost entities are collected in private components of the \c t_bcs
!! type, so that they are accessible only through "pointer to
!! neighbour" type fields of the real entities.
!!
!! \note In order to ensure that some code can work seamlessly for
!! real and ghost entities, the neighbour entities must be accessed by
!! pointers and not by indexes.
!!
!! In the present implementation, the sole ghost entities are the
!! elements connected to ddc sides. Each boundary side contains a copy
!! of the side itself, except that the pointers to the neighbour
!! elements point to the ghost element. The ghost element in turns
!! inherits most of its properties from the real element of the
!! neighbouring subdomain (properties which do not make sense for a
!! ghost element, such as the vertex indexes, are left uninitialized).
!<----------------------------------------------------------------------
module mod_bcs

!-----------------------------------------------------------------------

 use mod_messages, only: &
   mod_messages_initialized, &
   error,   &
   warning, &
   info

 use mod_kinds, only: &
   mod_kinds_initialized, &
   wp

 use mod_mpi_utils, only: &
   mod_mpi_utils_initialized, &
   mpi_integer, wp_mpi,  &
   mpi_alltoallv

 use mod_octave_io, only: &
   mod_octave_io_initialized, &
   write_octave

 use mod_perms, only: &
   mod_perms_initialized, &
   operator(*),     &
   operator(**),    &
   idx

 use mod_grid, only: &
   mod_grid_initialized, &
   t_v, t_s, t_e,       &
   p_t_v, p_t_s, p_t_e, &
   t_grid, t_ddc_grid

!-----------------------------------------------------------------------
 
 implicit none

!-----------------------------------------------------------------------

! Module interface

 public :: &
   mod_bcs_constructor, &
   mod_bcs_destructor,  &
   mod_bcs_initialized, &
   t_bcs,                     &
   p_t_b_v, p_t_b_s, p_t_b_e, &
   b_dir,   b_neu,   b_ddc,   &
   new_bcs, clear,            &
   write_octave

 private

!-----------------------------------------------------------------------

! Module types and parameters

 ! public members

 ! module constants
 !
 ! Boundary condition identifiers: always compare against these named
 ! constants, never against their numerical values.
 integer, parameter :: &
   b_dir = 1, & !< Dirichlet boundary condition
   b_neu = 2, & !< Neumann boundary condition
   b_ddc = 3, & !< domain decomposition boundary condition
   ! The next value is defined for convenience and it is different
   ! from any defined b_* parameter.
   b_null = -1 !< "no boundary condition assigned yet" marker

 ! pointer arrays: single-component wrapper types, so that arrays of
 ! pointers to the boundary entities can be declared
 type p_t_b_v
   type(t_b_v), pointer :: p => null() !< pointed boundary vertex
 end type p_t_b_v
 type p_t_b_s
   type(t_b_s), pointer :: p => null() !< pointed boundary side
 end type p_t_b_s
 type p_t_b_e
   type(t_b_e), pointer :: p => null() !< pointed boundary element
 end type p_t_b_e

 !> boundary vertex
 !!
 !! \c btype is left uninitialized when the vertex is of pure \c b_ddc
 !! type (see \c update_bv in \c new_bcs).
 type t_b_v
   type(t_v), pointer :: v => null() !< corresponding vertex object
   integer :: bc !< boundary condition: \c b_dir, \c b_neu or \c b_ddc
   integer :: btype !< the user provided index is copied here
   integer :: breg  !< boundary region the vertex is assigned to
   logical :: ddc = .false. !< shared vertex
 end type t_b_v

 !> boundary side
 !!
 !! For sides \c ddc is equivalent to <tt>bc.eq.b_ddc</tt>; in that
 !! case \c btype is left uninitialized (see \c new_bcs).
 type t_b_s
   type(t_s), pointer :: s => null() !< corresponding side object
   integer :: bc !< boundary condition: \c b_dir, \c b_neu or \c b_ddc
   integer :: btype !< the user provided index is copied here
   logical :: ddc = .false. !< shared side
   type(t_s) :: s_copy !< side copy with pointer to ghost element
 end type t_b_s

 !> boundary element
 !!
 !! No boundary condition is attached to an element: this type only
 !! collects pointers to the boundary vertices and sides connected to
 !! the element.
 type t_b_e
   type(t_e), pointer :: e => null() !< corresponding element object
   integer :: nbv !< number of boundary vertices
   type(p_t_b_v), allocatable :: bv(:) !< boundary vertices
   integer :: nbs !< number of boundary sides
   type(p_t_b_s), allocatable :: bs(:) !< boundary sides
 end type t_b_e

 !> Boundary conditions
 !!
 !! The boundary conditions should be tested against the corresponding
 !! module parameters, and not according to their numerical value:
 !! <ul>
 !!  <li> <tt>if( b_vert(i)\%bc .eq. b_dir ) then</tt> \f$\mapsto\f$
 !!  correct
 !!  <li> <tt>if( b_vert(i)\%bc .eq. 1 ) then</tt> \f$\mapsto\f$
 !!  incorrect.
 !! </ul>
 !! 
 !! Two way indexing is provided as follows:
 !! <ul>
 !!  <li> boundary entity \f$\mapsto\f$ entity: \c b_vert and \c b_side
 !!  <li> entity \f$\mapsto\f$ boundary entity: \c b_v2bv and \c b_s2bs
 !! </ul>
 !! \warning \c b_s2bs is allocated only for the boundary sides, since
 !! these are known to be the last ones in the grid:
 !! <tt>grid\%s(grid\%ni+1:grid\%ns)</tt>. To know whether a side is
 !! on the boundary, test <tt>if( is .gt. grid\%ni )</tt>
 !! 
 !! Two arrays \c b_e2be and \c b_elem are also defined, however a
 !! boundary condition is not defined for the whole element and \c
 !! b_elem simply provides pointers to the relevant elements of \c
 !! b_vert and \c b_side. The reason for providing \c b_e2be is to
 !! make as simple as possible the test on whether an element requires
 !! some boundary contributions at all: \code
 !!   if( associated(b_e2be(ie)) ) then
 !!     ! add boundary contributions
 !!   endif
 !! \endcode
 !<
 type t_bcs
   integer :: nbreg   !< number of boundary regions
   integer :: nb_vert !< size of \c b_vert
   integer :: nb_side !< size of \c b_side
   integer :: nb_elem !< size of \c b_elem
   type(t_b_v), allocatable :: b_vert(:) !< boundary vertices
   type(t_b_s), allocatable :: b_side(:) !< boundary sides
   type(t_b_e), allocatable :: b_elem(:) !< boundary elements
   type(p_t_b_v), allocatable :: b_v2bv(:) !< \f$v \to v_b\f$
   type(p_t_b_s), allocatable :: b_s2bs(:) !< \f$s \to s_b\f$
   type(p_t_b_e), allocatable :: b_e2be(:) !< \f$e \to e_b\f$
   ! ghost entities: elements of the neighbouring subdomains attached
   ! to the ddc sides; being private, they are reachable only through
   ! the s_copy field of the boundary sides (see define_ghost_entities)
   type(t_e), allocatable, private :: ghost_elements(:)
 end type t_bcs

! Module variables

 ! public members
 logical, protected ::               &
   mod_bcs_initialized = .false.

 ! private members
 character(len=*), parameter :: &
   this_mod_name = 'mod_bcs'

 !> generic destructor, implemented by \c clear_bcs
 interface clear
   module procedure clear_bcs
 end interface

 !> Octave text output, implemented by \c write_bcs_struct
 interface write_octave
   module procedure write_bcs_struct
 end interface write_octave

!-----------------------------------------------------------------------

contains

!-----------------------------------------------------------------------

 !> Module constructor: verify that all the modules this module
 !! depends upon have been initialized, then mark this module as
 !! initialized. Calling it twice only produces a warning.
 subroutine mod_bcs_constructor()
  character(len=*), parameter :: &
    this_sub_name = 'constructor'

   !Consistency checks ---------------------------
   if( .not.( mod_messages_initialized  .and. &
              mod_kinds_initialized     .and. &
              mod_mpi_utils_initialized .and. &
              mod_octave_io_initialized .and. &
              mod_perms_initialized     .and. &
              mod_grid_initialized ) ) then
     call error(this_sub_name,this_mod_name, &
                'Not all the required modules are initialized.')
   endif
   if(mod_bcs_initialized) then
     call warning(this_sub_name,this_mod_name, &
                  'Module is already initialized.')
   endif
   !----------------------------------------------

   mod_bcs_initialized = .true.
 end subroutine mod_bcs_constructor

!-----------------------------------------------------------------------
 
 !> Module destructor: mark this module as not initialized; it is an
 !! error to call it when the module has not been initialized.
 subroutine mod_bcs_destructor()
  character(len=*), parameter :: &
    this_sub_name = 'destructor'

   !Consistency checks ---------------------------
   if(.not.mod_bcs_initialized) then
     call error(this_sub_name,this_mod_name, &
                'This module is not initialized.')
   endif
   !----------------------------------------------

   mod_bcs_initialized = .false.
 end subroutine mod_bcs_destructor

!-----------------------------------------------------------------------

 !> Set all the bcs information.
 !!
 !! Concerning the input: this module assumes that the boundary
 !! condition is specified for each boundary region in the input
 !! argument \c bc_type, except for ddc type sides which don't need to
 !! be included in \c bc_type at all.
 !<
 subroutine new_bcs(bcs,grid,bc_type,ddc_grid,comm)
  type(t_bcs), target, intent(out) :: bcs  !< boundary condition data
  type(t_grid), target, intent(in) :: grid !< grid
  !> <tt>bc_type(:,ibr)</tt> has two components specifying the
  !! boundary condition for the boundary region \c ibr.
  !! \li <tt>bc_type(1,ibr)</tt> is \c b_dir for Dirichlet bcs and \c
  !! b_neu for Neumann bcs (domain decomposition boundary conditions
  !! have a separate treatment and should not appear at all in \c
  !! bc_type)
  !! \li <tt>bc_type(2,ibr)</tt> can be any integer value and is
  !! copied in <tt>b_side(:)\%btype</tt>.
  !<
  integer, intent(in) :: bc_type(:,:)
  !> domain decomposition grid
  type(t_ddc_grid), intent(in), optional :: ddc_grid
  !> communicator
  !! NOTE(review): \c comm is referenced whenever \c ddc_grid is
  !! present, so the two optional arguments must be passed together.
  integer, intent(in), optional :: comm

  integer :: iv, ivb, ivl, is, isb, isl, ie, ieb, breg, bc
  integer :: ddc_marker
  character(len=1000) :: message
  ! bv_dummy and be_dummy serve only as temporary pointer targets to
  ! mark boundary vertices/elements during the counting phase; the
  ! marks are replaced by the real associations afterwards
  type(t_b_v), target :: bv_dummy
  type(t_b_e), target :: be_dummy
  ! scratch space for the element connectivity (an element has at most
  ! grid%d+1 vertices and grid%d+1 sides)
  type(p_t_b_v) :: bv_temp(grid%d+1)
  type(p_t_b_s) :: bs_temp(grid%d+1)
  character(len=*), parameter :: &
    this_sub_name = 'new_bcs'

  ! ddc specific variables
  logical :: ddc_present
  ! number of integer values exchanged for each shared vertex (see the
  ! sendbuf layout below)
  integer, parameter :: dim1 = 4
  integer :: ierr
  integer, allocatable :: sendbuf(:,:), recvbuf(:,:), ioff(:)

  ! To avoid cluttering this function, and since bcs must be a target
  ! anyway, it's better to define here some pointers
  integer, pointer :: nb_vert
  integer, pointer :: nb_side
  integer, pointer :: nb_elem
  type(t_b_v), pointer :: b_vert(:)
  type(t_b_s), pointer :: b_side(:)
  type(t_b_e), pointer :: b_elem(:)
  type(p_t_b_v), pointer :: b_v2bv(:)
  type(p_t_b_s), pointer :: b_s2bs(:)
  type(p_t_b_e), pointer :: b_e2be(:)
   nb_vert => bcs%nb_vert
   nb_side => bcs%nb_side
   nb_elem => bcs%nb_elem

   ! Number of boundary regions
   bcs%nbreg = size(bc_type,2)

   ! Boundary sides: immediately derived by the grid (last ones)
   allocate(bcs%b_s2bs(grid%ni+1:grid%ns)); b_s2bs => bcs%b_s2bs
   nb_side = grid%nb
   allocate(bcs%b_side(nb_side)); b_side => bcs%b_side

   ! Boundary vertex
   allocate(bcs%b_v2bv(grid%nv)); b_v2bv  => bcs%b_v2bv

   ! Boundary elements
   allocate(bcs%b_e2be(grid%ne)); b_e2be  => bcs%b_e2be

   ! Whether to check or not ddc stuff
   ddc_present = present(ddc_grid)
   if(ddc_present) then
     ddc_marker = ddc_grid%ddc_marker
   else
     ddc_marker = -1 ! ddc_marker is significant only if ddc_present
   endif

   ! fill the side structure and count nb_vert and nb_elem
   nb_vert = 0
   nb_elem = 0
   do is=grid%ni+1,grid%ns
     isb = is-grid%ni
     b_s2bs(is)%p => b_side(isb)
     b_side(isb)%s => grid%s(is)
     breg = -grid%s(is)%ie(2) ! boundary region (notice: breg>0 )
     if(ddc_present.and.(breg.eq.ddc_marker)) then ! found a ddc side
       b_side(isb)%bc = b_ddc
       ! b_side(isb)%btype left uninitialized
       b_side(isb)%ddc = .true.
     else
       bc = bc_type(1,breg)
       btype: select case(bc)
        case(b_dir)
         b_side(isb)%bc = b_dir
        case(b_neu)
         b_side(isb)%bc = b_neu
        case default
         write(message,'(A,I4,A,I7,A,I4,A)') &
           'Unknown boundary condition type "',bc,'" on side ',is, &
           ' on boundary region ',breg,'.'
         call error(this_sub_name,this_mod_name,message)
       end select btype
       b_side(isb)%btype = bc_type(2,breg)
       ! ddc left to .false.
     endif
     ! mark the vertexes (bv_dummy is only a placeholder: the real
     ! association is made in the vertex loop below)
     do ivl=1,grid%d
       if(.not.associated(b_v2bv(grid%s(is)%iv(ivl))%p)) then
         b_v2bv(grid%s(is)%iv(ivl))%p => bv_dummy
         nb_vert = nb_vert+1
       endif
     enddo
     ! mark the element (same placeholder technique)
     if(.not.associated(b_e2be(grid%s(is)%ie(1))%p)) then
       b_e2be(grid%s(is)%ie(1))%p => be_dummy
       nb_elem = nb_elem+1
     endif
   enddo

   ! now fill the vertexes
   allocate(bcs%b_vert(nb_vert)); b_vert => bcs%b_vert
   ivb = 0
   do iv=1,grid%nv
     boundary_vert_if: if(associated(b_v2bv(iv)%p)) then ! boundary vertex

       ivb = ivb+1
       b_v2bv(iv)%p => b_vert(ivb)
       b_vert(ivb)%v => grid%v(iv)
       b_vert(ivb)%bc = b_null ! i.e. no condition assigned yet
       ! now check the boundary data by looping on the connected
       ! boundary sides
       do isl=1,grid%v(iv)%ns
         is = grid%v(iv)%is(isl)
         if(is.gt.grid%ni) then ! found a connected boundary side
           call update_bv( b_vert(ivb) , b_s2bs(is)%p%bc , &
                    b_s2bs(is)%p%btype , -grid%s(is)%ie(2) )
         endif
       enddo

       ! A vertex can have ddc information even if it is not of b_ddc
       ! type (this happens because b_dir and b_neu take precedence
       ! over b_ddc)
       if(ddc_present) then ! otherwise no need to check
         b_vert(ivb)%ddc = ddc_grid%gv(iv)%ni.gt.0
       endif

     endif boundary_vert_if
   enddo

   ! Now synchronize the subdomains concerning boundary vertexes
   ! (the condition assigned to a shared vertex can be determined by a
   ! side owned by another subdomain, see the module documentation)
   if(ddc_present) then
     ! It's safer to avoid call to mpi_alltoallv with empty arrays
     if(sum(ddc_grid%nnv_id).gt.0) then
       allocate( sendbuf(dim1,sum(ddc_grid%nnv_id)) , &
                 recvbuf(dim1,sum(ddc_grid%nnv_id)) )
       allocate(ioff(0:ddc_grid%nd-1));
       ioff(0) = 0
       do ie=1,ddc_grid%nd-1
         ioff(ie) = ioff(ie-1) + ddc_grid%nnv_id(ie-1)
       enddo
       ! fill sendbuff: for each neighbouring subdomain, each column
       ! contains ( vertex index on the neighbour , bc , btype , breg )
       do ivb=1,ddc_grid%nnv ! loop on the ddc vertexes
         do is=1,ddc_grid%nv(ivb)%nd
           ie = ddc_grid%nv(ivb)%id(is) ! as temporary: neigh. subd.
           ioff(ie) = ioff(ie) + 1
           sendbuf(1,ioff(ie)) = ddc_grid%nv(ivb)%in(is) ! vertex index
           sendbuf(2,ioff(ie)) = b_v2bv( ddc_grid%nv(ivb)%i )%p%bc
           sendbuf(3,ioff(ie)) = b_v2bv( ddc_grid%nv(ivb)%i )%p%btype
           sendbuf(4,ioff(ie)) = b_v2bv( ddc_grid%nv(ivb)%i )%p%breg
         enddo
       enddo
       ! reset the offsets (the previous loop has incremented them)
       ioff(0) = 0
       do ie=1,ddc_grid%nd-1
         ioff(ie) = ioff(ie-1) + ddc_grid%nnv_id(ie-1)
       enddo
       call mpi_alltoallv(                                           &
              sendbuf, dim1*ddc_grid%nnv_id, dim1*ioff, mpi_integer, &
              recvbuf, dim1*ddc_grid%nnv_id, dim1*ioff, mpi_integer, &
                          comm,ierr)
       ! now make the necessary corrections
       do ivb=1,size(recvbuf,2)
         call update_bv( b_v2bv(recvbuf(1,ivb))%p , recvbuf(2,ivb) , &
                                recvbuf(3,ivb)    , recvbuf(4,ivb) )
       enddo
       deallocate( sendbuf , recvbuf , ioff )
     endif
   endif

   ! now fill the elements
   allocate(bcs%b_elem(nb_elem)); b_elem => bcs%b_elem
   ieb = 0
   do ie=1,grid%ne
     if(associated(b_e2be(ie)%p)) then ! boundary element
       ieb = ieb+1
       b_e2be(ie)%p => b_elem(ieb)
       b_elem(ieb)%e => grid%e(ie)
       ! boundary connectivity
       b_elem(ieb)%nbv = 0
       do ivl=1,grid%d+1 ! loop on the vertexes
         iv = grid%e(ie)%iv(ivl)
         if(associated(b_v2bv(iv)%p)) then
           b_elem(ieb)%nbv = b_elem(ieb)%nbv+1
           bv_temp(b_elem(ieb)%nbv)%p => b_v2bv(iv)%p
         endif
       enddo
       allocate(b_elem(ieb)%bv(b_elem(ieb)%nbv))
       b_elem(ieb)%bv = bv_temp(1:b_elem(ieb)%nbv)

       b_elem(ieb)%nbs = 0
       do isl=1,grid%d+1 ! loop on the sides
         is = grid%e(ie)%is(isl)
         if(is.gt.grid%ni) then
           b_elem(ieb)%nbs = b_elem(ieb)%nbs+1
           bs_temp(b_elem(ieb)%nbs)%p => b_s2bs(is)%p
         endif
       enddo
       allocate(b_elem(ieb)%bs(b_elem(ieb)%nbs))
       b_elem(ieb)%bs = bs_temp(1:b_elem(ieb)%nbs)
     endif
   enddo

   ! Finally the ghost cells
   if(ddc_present) &
     call define_ghost_entities(bcs,grid,ddc_grid,comm)

 contains

  !> Combine the present state of \c bv with the boundary condition of
  !! one more connected side, following the precedence rule described
  !! in the module documentation: Dirichlet wins over Neumann, which
  !! wins over ddc; among sides of the same type, the smallest
  !! boundary region index wins.
  pure subroutine update_bv(bv,bc,btype,breg)
  ! Make sure bv%bc is set on input (b_null works as "not set yet")
   type(t_b_v), intent(inout) :: bv
   integer, intent(in) :: bc, btype, breg

    select case(bc)

     case(b_dir)
      if(bv%bc.ne.b_dir) then ! first Dirichlet condition
        bv%bc    = b_dir
        bv%btype = btype
        bv%breg  = breg
        ! nothing to do for the ddc fields
      else ! already of dir. type
        if(bv%breg.gt.breg) then
          bv%btype = btype
          bv%breg  = breg
        endif
      endif

     case(b_neu)
      if(bv%bc.ne.b_dir) then ! Dir. sides take precedence
        if(bv%bc.ne.b_neu) then ! first Neumann side
          bv%bc    = b_neu
          bv%btype = btype
          bv%breg  = breg
          ! nothing to do for the ddc fields
        else ! already of neu. type
          if(bv%breg.gt.breg) then
            bv%btype = btype
            bv%breg  = breg
          endif
        endif
      endif

     case(b_ddc)
      if((bv%bc.ne.b_dir).and.(bv%bc.ne.b_neu)) then
        if(bv%bc.ne.b_ddc) then ! first ddc side
          bv%bc    = b_ddc
          ! bv%btype left uninitialized
          bv%breg  = breg ! = ddc_marker
          bv%ddc   = .true.
        else ! already of ddc type
          ! Notice that in practice the following will never be
          ! executed, since breg is always ddc_marker
          if(bv%breg.gt.breg) then
            ! bv%btype left uninitialized
            bv%breg  = breg ! = ddc_marker
          endif
        endif
      endif

    end select

  end subroutine update_bv

 end subroutine new_bcs

!-----------------------------------------------------------------------
 
 !> Define ghost elements for ddc sides.
 !!
 !! The side copy contains the following corrections compared to the
 !! original:
 !! <ul>
 !!  <li> <tt>e(2)</tt> points to the ghost element
 !!  <li> <tt>isl(2)</tt> gives the local position on the ghost element
 !! </ul>
 !! In the ghost element, the following fields are set (assuming \c
 !! isl is the local position of the side on the ghost element):
 !! <ul>
 !!  <li> <tt>d</tt>
 !!  <li> <tt>m</tt>
 !!  <li> <tt>pi(isl)</tt>
 !!  <li> <tt>ip(isl)</tt>
 !!  <li> <tt>e(isl)</tt>
 !!  <li> <tt>iel(isl)</tt>
 !!  <li> <tt>xb</tt>
 !!  <li> <tt>vol</tt>
 !!  <li> <tt>b</tt>
 !!  <li> <tt>det_b</tt>
 !!  <li> <tt>bi</tt>
 !!  <li> <tt>x0</tt>
 !!  <li> <tt>n</tt>
 !! </ul>
 !<
 subroutine define_ghost_entities(bcs,grid,ddc_grid,comm)
  type(t_grid), target, intent(in) :: grid     !< local grid
  type(t_ddc_grid),     intent(in) :: ddc_grid !< ddc information
  integer,              intent(in) :: comm     !< MPI communicator
  type(t_bcs),  target, intent(inout) :: bcs   !< bcs being built
 
  integer :: iim1, rim1, i, id, is, ie, ierr, isl, ii
  integer, allocatable :: ioff(:), iendbuf(:,:), iecvbuf(:,:)
  real(wp), allocatable :: rendbuf(:,:), recvbuf(:,:)
  type(t_s), pointer :: s
  type(t_e), pointer :: e
  character(len=*), parameter :: &
    this_sub_name = 'define_ghost_entities'

   if(ddc_grid%nns.eq.0) then ! nothing to do
     allocate( bcs%ghost_elements(0) )
     return
   endif

   ! 1) Communication
   allocate(ioff(0:ddc_grid%nd-1))
   ioff(0) = 0
   do i=1,ddc_grid%nd-1
     ioff(i) = ioff(i-1) + ddc_grid%nns_id(i-1)
   enddo
   ! iim1/rim1: number of integer/real values exchanged per ddc side
   ! NOTE(review): rim1 accounts for xb and x0 with grid%d entries
   ! each, while below they are packed with e%m entries each; this is
   ! consistent only if grid%m equals grid%d -- TODO confirm
   iim1 = 6
   rim1 = 2*grid%d + 2 + 2*grid%m*grid%d + grid%m*(grid%d+1)
   allocate( iendbuf(iim1,ddc_grid%nns) , iecvbuf(iim1,ddc_grid%nns) , &
             rendbuf(rim1,ddc_grid%nns) , recvbuf(rim1,ddc_grid%nns) )
   do i=1,ddc_grid%nns
     id = ddc_grid%ns(i)%id
     s => grid%s(ddc_grid%ns(i)%i)
     e => s%e(1)%p
     ioff(id) = ioff(id) + 1

     ! integer buffer (is/ie are used here as cursors in the buffer)
     is = 1; ie = is ! local position on neighbour
     iendbuf(is,ioff(id)) = ddc_grid%ns(i)%in
     is = ie + 1; ie = is ! side isl
     iendbuf(is,ioff(id)) = s%isl(1)
     is = ie + 1; ie = is ! element fields
     iendbuf(is,ioff(id)) = e%d
     is = ie + 1; ie = is
     iendbuf(is,ioff(id)) = e%m
     is = ie + 1; ie = is
     iendbuf(is,ioff(id)) = e%pi(s%isl(1))
     is = ie + 1; ie = is
     iendbuf(is,ioff(id)) = e%ip(s%isl(1))

     ! real buffer
     is = 1; ie = is + e%m - 1
     rendbuf(is:ie,ioff(id)) = e%xb
     is = ie + 1; ie = is
     rendbuf(is   ,ioff(id)) = e%vol
     is = ie + 1; ie = is + e%m*e%d - 1
     rendbuf(is:ie,ioff(id)) = reshape( e%b , (/e%m*e%d/) )
     is = ie + 1; ie = is
     rendbuf(is   ,ioff(id)) = e%det_b
     is = ie + 1; ie = is + e%d*e%m - 1
     rendbuf(is:ie,ioff(id)) = reshape( e%bi , (/e%d*e%m/) )
     is = ie + 1; ie = is + e%m - 1
     rendbuf(is:ie,ioff(id)) = e%x0
     is = ie + 1; ie = is + e%m*(e%d+1) - 1
     rendbuf(is:ie,ioff(id)) = reshape( e%n , (/e%m*(e%d+1)/) )
   enddo
   ! reset the offsets (the packing loop has incremented them)
   ioff(0) = 0
   do i=1,ddc_grid%nd-1
     ioff(i) = ioff(i-1) + ddc_grid%nns_id(i-1)
   enddo
   call mpi_alltoallv(                                      &
     iendbuf, iim1*ddc_grid%nns_id, iim1*ioff, mpi_integer, &
     iecvbuf, iim1*ddc_grid%nns_id, iim1*ioff, mpi_integer, &
                       comm,ierr )
   call mpi_alltoallv(                                      &
     rendbuf, rim1*ddc_grid%nns_id, rim1*ioff, wp_mpi, &
     recvbuf, rim1*ddc_grid%nns_id, rim1*ioff, wp_mpi, &
                       comm,ierr )

   ! 2) Fill the ghost elements
   allocate(bcs%ghost_elements(ddc_grid%nns))
   do i=1,ddc_grid%nns
     s => grid%s(iecvbuf(1,i)) ! local side
     e => bcs%ghost_elements(i) ! ghost element

     ! build the side copy
     bcs%b_s2bs(s%i)%p%s_copy        = s
     bcs%b_s2bs(s%i)%p%s_copy%e(2)%p => e
     is = 2; ie = is
     bcs%b_s2bs(s%i)%p%s_copy%isl(2) = iecvbuf(is,i)
     isl = bcs%b_s2bs(s%i)%p%s_copy%isl(2) ! useful later
     
     ! fill the ghost element
     is = is + 1; ie = is
     e%d = iecvbuf(is,i)
     is = is + 1; ie = is
     e%m = iecvbuf(is,i)
     is = is + 1; ie = is ! pi: element to side
     allocate(e%pi(isl:isl))
     ii = ddc_grid%ns(ddc_grid%gs(s%i)%ni)%p_s2s 
     e%pi = idx(  grid%me%pi_tab(      ii     )**(-1) &
                * grid%me%pi_tab(iecvbuf(is,i))       )
     is = is + 1; ie = is
     allocate(e%ip(isl:isl))
     e%ip = idx(  grid%me%pi_tab(iecvbuf(is,i)) &
                * grid%me%pi_tab(      ii     ) )
     ! if everything is correct, pi and ip should be inverse
     allocate(e%e  (isl:isl)); e%e  (isl)%p => grid%e(s%ie(1))
     allocate(e%iel(isl:isl)); e%iel(isl)   =  s%isl(1)

     ! real data: unpack in the same order used for rendbuf above
     is = 1; ie = is + e%m - 1
     allocate(e%xb(e%m)); e%xb = recvbuf(is:ie,i)
     is = ie + 1; ie = is
                          e%vol = recvbuf(is,i)
     is = ie + 1; ie = is + e%m*e%d - 1
     allocate(e%b(e%m,e%d)); e%b = reshape(recvbuf(is:ie,i),(/e%m,e%d/))
     is = ie + 1; ie = is
                          e%det_b = recvbuf(is,i)
     is = ie + 1; ie = is + e%d*e%m - 1
     allocate(e%bi(e%d,e%m));e%bi= reshape(recvbuf(is:ie,i),(/e%d,e%m/))
     is = ie + 1; ie = is + e%m - 1
     allocate(e%x0(e%m)); e%x0 = recvbuf(is:ie,i)
     is = ie + 1; ie = is + e%m*(e%d+1) - 1
    allocate(e%n(e%m,e%d+1));e%n=reshape(recvbuf(is:ie,i),(/e%m,e%d+1/))

   enddo
 
   deallocate( iendbuf , iecvbuf , rendbuf , recvbuf , ioff )
 end subroutine define_ghost_entities
 
!-----------------------------------------------------------------------

 !> Destructor for \c t_bcs objects.
 !!
 !! The body is intentionally empty: thanks to <tt>intent(out)</tt>,
 !! \c bcs is reset to its default initialization on entry and all its
 !! allocatable components are automatically deallocated.
 pure subroutine clear_bcs( bcs )
  type(t_bcs), intent(out) :: bcs
 end subroutine clear_bcs
 
!-----------------------------------------------------------------------

 !> Write a \c t_bcs object as an Octave struct in text format.
 !!
 !! The struct has six fields: \c nbreg, \c nb_vert, \c nb_side,
 !! \c nb_elem, \c b_vert and \c b_side; the latter two are integer
 !! matrices with one column per boundary entity and the logical \c
 !! ddc flags encoded as 0/1.
 subroutine write_bcs_struct(bcs,var_name,fu)
 ! octave output for the boundary conditions
  integer, intent(in) :: fu                !< output unit
  type(t_bcs), intent(in) :: bcs           !< data to be written
  character(len=*), intent(in) :: var_name !< Octave variable name
 
  integer :: i
  integer, allocatable :: tmp(:,:)
  character(len=*), parameter :: &
    ! fixed: was 'write_ddc_grid_struct', a copy-paste from another
    ! module's writer
    this_sub_name = 'write_bcs_struct'
 
    write(fu,'(a,a)')    '# name: ',var_name
    write(fu,'(a)')      '# type: struct'
    write(fu,'(a)')      '# length: 6' ! number of fields
 
    ! field 01 : nbreg
    write(fu,'(a)')      '# name: nbreg'
    write(fu,'(a)')      '# type: cell'
    write(fu,'(a)')      '# rows: 1'
    write(fu,'(a)')      '# columns: 1'
    call write_octave(bcs%nbreg,'<cell-element>',fu)
 
    ! field 02 : nb_vert
    write(fu,'(a)')      '# name: nb_vert'
    write(fu,'(a)')      '# type: cell'
    write(fu,'(a)')      '# rows: 1'
    write(fu,'(a)')      '# columns: 1'
    call write_octave(bcs%nb_vert,'<cell-element>',fu)
 
    ! field 03 : nb_side
    write(fu,'(a)')      '# name: nb_side'
    write(fu,'(a)')      '# type: cell'
    write(fu,'(a)')      '# rows: 1'
    write(fu,'(a)')      '# columns: 1'
    call write_octave(bcs%nb_side,'<cell-element>',fu)
 
    ! field 04 : nb_elem
    write(fu,'(a)')      '# name: nb_elem'
    write(fu,'(a)')      '# type: cell'
    write(fu,'(a)')      '# rows: 1'
    write(fu,'(a)')      '# columns: 1'
    call write_octave(bcs%nb_elem,'<cell-element>',fu)
 
    ! field 05 : b_vert
    ! columns: ( vertex index , bc , btype , breg , ddc flag )
    write(fu,'(a)')      '# name: b_vert'
    write(fu,'(a)')      '# type: cell'
    write(fu,'(a)')      '# rows: 1'
    write(fu,'(a)')      '# columns: 1'
    allocate(tmp(5,bcs%nb_vert))
    do i=1,bcs%nb_vert
      tmp(1,i) = bcs%b_vert(i)%v%i
      tmp(2,i) = bcs%b_vert(i)%bc
      tmp(3,i) = bcs%b_vert(i)%btype
      tmp(4,i) = bcs%b_vert(i)%breg
      tmp(5,i) = merge(1,0,bcs%b_vert(i)%ddc)
    enddo
    call write_octave(tmp,'<cell-element>',fu)
    deallocate(tmp)
 
    ! field 06 : b_side
    ! columns: ( side index , bc , btype , ddc flag )
    write(fu,'(a)')      '# name: b_side'
    write(fu,'(a)')      '# type: cell'
    write(fu,'(a)')      '# rows: 1'
    write(fu,'(a)')      '# columns: 1'
    allocate(tmp(4,bcs%nb_side))
    do i=1,bcs%nb_side
      tmp(1,i) = bcs%b_side(i)%s%i
      tmp(2,i) = bcs%b_side(i)%bc
      tmp(3,i) = bcs%b_side(i)%btype
      tmp(4,i) = merge(1,0,bcs%b_side(i)%ddc)
    enddo
    call write_octave(tmp,'<cell-element>',fu)
    deallocate(tmp)
 
  end subroutine write_bcs_struct

!-----------------------------------------------------------------------

end module mod_bcs

