!! Copyright (C) 2009,2010,2011,2012  Marco Restelli
!!
!! This file is part of:
!!   LDGH -- Local Hybridizable Discontinuous Galerkin toolkit
!!
!! LDGH is free software: you can redistribute it and/or modify it
!! under the terms of the GNU General Public License as published by
!! the Free Software Foundation, either version 3 of the License, or
!! (at your option) any later version.
!!
!! LDGH is distributed in the hope that it will be useful, but WITHOUT
!! ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
!! or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
!! License for more details.
!!
!! You should have received a copy of the GNU General Public License
!! along with LDGH. If not, see <http://www.gnu.org/licenses/>.
!!
!! author: Marco Restelli                   <marco.restelli@gmail.com>


!>\brief
!!
!! RHS for the compressible Navier-Stokes equations, DG elements
!!
!! \n
!!
!! The RHS is computed with two loops: the first one goes through the
!! elements, the second one through the sides. The boundary conditions
!! are enforced in the side loop. The basis is assumed to be
!! orthogonal, so that the mass matrix is diagonal.
!!
!! \section dissfluxes Dissipative fluxes
!! 
!! Complex turbulent fluxes can depend on the gradient of various
!! thermodynamic or dynamic variables, such as velocity
!! \f$\underline{u}\f$, temperatures \f$T\f$ and \f$\theta\f$, density
!! \f$\rho\f$ and so on. As a consequence, in this module, we
!! typically deal with three kinds of arrays:
!! <ul>
!!  <li> arrays referring to the prognostic variables: dynamic and
!!  thermodynamic variables and tracers
!!  <li> arrays referring to the diffusive fluxes of the prognostic
!!  variables: all the prognostic variables except density
!!  <li> arrays referring to the gradients of some prognostic or
!!  diagnostic variables (see also the module variable \c grad for
!!  additional information).
!! </ul>
!! In general, the number of gradients that must be computed is
!! different from the number of diffusive fluxes, because a given
!! diffusive flux can depend on more than one gradient (a typical
!! example are turbulent fluxes depending on the Richardson number).
!!
!! \section communications MPI communications
!!
!! The MPI communication buffers and data structures are defined if
!! there is at least one ddc side. This is controlled by the module
!! variable <tt>ddc\%ddc</tt>. The following points are considered in
!! the MPI communications:
!! <ul>
!!  <li> communications are performed only with those subdomains which
!!  share at least one ddc side with the present one (i.e. no
!!  communications with zero data length)
!!  <li> the sent and the received information have the same size and
!!  structure, so that send and receive buffers have the same
!!  dimension
!!  <li> the side quadrature points in the send and receive buffers
!!  are always ordered according to the receiver ordering
!! </ul>
!!
!! \note All the MPI communications rely on the module variable \c
!! ddc. Each subroutine accessing this variable should be documented,
!! explaining whether it makes a self contained use of it or whether
!! it uses \c ddc to pass or receive information to other subroutines.
!!
!! Concerning the communication associated with the dissipative
!! fluxes, we notice that only the prognostic variables and the
!! dissipative fluxes must be communicated, while the discrete
!! gradients do <em>not</em> require any communication. This happens
!! because the discrete gradients can be evaluated separately on each
!! processor given the values of the prognostic variables.
!! Summarizing, since the communication of the prognostic variables is
!! required in any case by the inviscid model, we see that the viscous
!! model results in the additional communication of the dissipative
!! fluxes.
!<----------------------------------------------------------------------
module mod_dgcomp_rhs

!-----------------------------------------------------------------------

 use mod_kinds, only: &
   mod_kinds_initialized, &
   wp

 use mod_messages, only: &
   mod_messages_initialized, &
   error,   &
   warning, &
   info

 !$ use omp_lib

 !$ use mod_omp_utils, only: &
 !$   mod_omp_utils_initialized, &
 !$   detailed_timing_omp, &
 !$   omput_push_key,    &
 !$   omput_pop_key,     &
 !$   omput_start_timer, &
 !$   omput_close_timer, &
 !$   omput_write_time

 use mod_mpi_utils, only: &
   mod_mpi_utils_initialized, &
   mpi_integer, wp_mpi,  &
   mpi_status_size,      &
   mpi_isend, mpi_irecv, &
   mpi_waitall, mpi_alltoallv

 use mod_perms, only: &
   mod_perms_initialized, &
   t_perm, idx, operator(*)

 use mod_linal, only: &
   mod_linal_initialized, &
   invmat

 use mod_sympoly, only: &
   mod_sympoly_initialized, &
   t_sympol,     &
   operator(*),  &
   me_int

 use mod_base, only: &
   mod_base_initialized, &
   t_base

 use mod_grid, only: &
   mod_grid_initialized, &
   t_grid, t_ddc_grid,  &
   affmap,              &
   el_linear_size

 use mod_bcs, only: &
   mod_bcs_initialized, &
   t_bcs,                     &
   b_dir,   b_neu,   b_ddc

 use mod_dgcomp_testcases, only: &
   mod_dgcomp_testcases_initialized, &
   ntrcs, t_phc, phc,      &
   coeff_dir, coeff_norm,  &
   coeff_visc, coeff_neu

 use mod_atm_refstate, only: &
   mod_atm_refstate_initialized, &
   t_atm_refstate_e, atm_ref_e, &
   t_atm_refstate_s, atm_ref_s

 use mod_turb_flux, only: &
   mod_turb_flux_initialized, &
   i_turb_mod, t_turb_input, t_turb_diags, &
   viscous_flux, smagrich_flux

!-----------------------------------------------------------------------
 
 implicit none

!-----------------------------------------------------------------------

! Module interface

 public :: &
   mod_dgcomp_rhs_constructor, &
   mod_dgcomp_rhs_destructor,  &
   mod_dgcomp_rhs_initialized, &
   t_bcs_error, &
   dgcomp_tens, & ! tendency computation
   compute_courant, additional_diagnostics

 private

!-----------------------------------------------------------------------

! Module types and parameters

 ! public members

 !> This type is used to indicate that an error has occurred in the
 !! computation of the boundary condition
 type t_bcs_error
   logical :: lerr = .false.      !< <tt>.true.</tt> if an error occurred
   character(len=1000) :: message !< human readable error description
 end type t_bcs_error

 ! private members

 !> communication interface with each neighbouring subdomain
 type t_nd
   integer :: id  !< subdomain id
   integer :: nns !< number of shared sides
   integer, allocatable :: ie(:)      !< side elements
   integer, allocatable :: isl(:)     !< local side index
   integer, allocatable :: p_s2s(:)   !< side to side permutation
   real(wp), allocatable :: pb(:,:,:) !< basis traces
   !> @name MPI buffers and requests
   !! Counters, buffers and requests for the prognostic variables
   !! (\c recbuf, \c senbuf, counter \c cnt) and for the dissipative
   !! fluxes (\c fecbuf, \c fenbuf, counter \c fnt); the \c f* prefix
   !! marks the flux counterpart of each object.
   !! @{
   integer :: cnt, fnt
   real(wp), allocatable :: recbuf(:,:,:), senbuf(:,:,:)
   real(wp), allocatable :: fecbuf(:,:,:), fenbuf(:,:,:)
   integer :: recreq, senreq
   !! @}
 end type t_nd

 !> collect all the module domain decomposition information
 !!
 !! \note Buffers should be module variables when using nonblocking
 !! communications, to minimize problems with temporaries.
 type t_ddc
   logical :: ddc  !< <tt>.true.</tt> if there are ddc sides
   integer :: comm !< MPI communicator
   integer :: nnd  !< number of neighbouring subdomains
   type(t_nd), allocatable :: nd(:) !< neighbouring subdomains (nnd)
   !> This field is used to read the receive buffers, associating to
   !! each local boundary side a position in the receive buffers; the
   !! first row gives the subdomain index, while the second the local
   !! side position. The array is allocated for all the boundary
   !! sides (shape <tt>(2,ni+1:ns)</tt>, see the constructor), but it
   !! is defined only for the ddc sides.
   integer, allocatable :: s2nd(:,:)
 end type t_ddc

! Module variables

 ! public members
 logical, protected ::               &
   mod_dgcomp_rhs_initialized = .false.
 ! private members
 logical :: ddc_allocated, ldg_allocated
 integer :: &
   uuu_size, & !< size of the prognostic variable arrays
   dfu_size, & !< size of the dissipative flux arrays
   grd_size    !< size of the gradient arrays
 !> boundary values and normal fluxes (the latter taken with respect
 !! to the side normal)
 !! @{
 real(wp), allocatable :: uuu_bcs(:,:,:), nflux_bcs(:,:,:)
 real(wp), allocatable :: gamma0(:,:,:), gammad(:,:,:)
 !! @}
 
 !> Gradient (diagnostic)
 !!
 !! At each time step, if <tt>viscous_flow.eqv..true.</tt>, the
 !! gradient must be diagnosed. The variable \c grad stores the
 !! gradients of the following quantities:
 !! <ul>
 !!  <li> temperature (second index equal to 1)
 !!  <li> velocity (second index from 2 to <tt>1+grid\%d</tt>)
 !!  <li> potential temperature (second index equal to
 !!    <tt>2+grid\%d</tt>)
 !!  <li> tracers (second index \f$\geq\f$<tt>3+grid\%d</tt>)
 !! </ul>
 !!
 !! \note Velocity, temperature and the remaining variables are not
 !! polynomial functions, since they are not the primal unknown of the
 !! formulation. In practice, in this module, we compute the gradient
 !! of the \f$L^2\f$ projections of velocity and temperature.
 real(wp), allocatable :: grad(:,:,:,:)
 type(t_ddc) :: ddc

 !> turbulence
 procedure(i_turb_mod), pointer :: turb_mod => null()

 character(len=*), parameter :: &
   this_mod_name = 'mod_dgcomp_rhs'

 interface reflect_vect
   module procedure reflect_vect_1n, reflect_vect_var
 end interface reflect_vect

!-----------------------------------------------------------------------

contains

!-----------------------------------------------------------------------

 !> Module setup
 !!
 !! The MPI functions are used if the optional argument \c comm is
 !! present. Notice that this is necessary if there are \c b_ddc type
 !! boundary conditions.
 !<
 subroutine mod_dgcomp_rhs_constructor(grid,ddc_grid,viscous_flow, &
                                       chosen_turb_model,comm,base)
  type(t_grid),     intent(in) :: grid
  type(t_ddc_grid), intent(in) :: ddc_grid
  logical,          intent(in) :: viscous_flow
  character(len=*), intent(in) :: chosen_turb_model
  integer,          intent(in), optional :: comm
  type(t_base),     intent(in), optional :: base
  character(len=*), parameter :: &
    this_sub_name = 'constructor'

  integer :: nnd, id, ins, i, ierr
  integer, allocatable :: nd2nd(:), offset(:), offse2(:), &
    senbuf(:,:), recbuf(:,:)

   !Consistency checks ---------------------------
   if( (mod_kinds_initialized.eqv..false.) .or. &
    (mod_messages_initialized.eqv..false.) .or. &
!$     ( (detailed_timing_omp.eqv..true.).and. &
!$       (mod_omp_utils_initialized.eqv..false.) ) .or. &
   (mod_mpi_utils_initialized.eqv..false.) .or. &
       (mod_perms_initialized.eqv..false.) .or. &
       (mod_linal_initialized.eqv..false.) .or. &
     (mod_sympoly_initialized.eqv..false.) .or. &
        (mod_base_initialized.eqv..false.) .or. &
        (mod_grid_initialized.eqv..false.) .or. &
         (mod_bcs_initialized.eqv..false.) .or. &
(mod_dgcomp_testcases_initialized.eqv..false.) .or. & 
(mod_atm_refstate_initialized.eqv..false.) .or. &
   (mod_turb_flux_initialized.eqv..false.) ) then
     call error(this_sub_name,this_mod_name, &
                'Not all the required modules are initialized.')
   endif
   if(mod_dgcomp_rhs_initialized.eqv..true.) then
     call warning(this_sub_name,this_mod_name, &
                  'Module is already initialized.')
   endif
   !----------------------------------------------

   ! size of primal and dual variables
   uuu_size = 2+grid%d+ntrcs
   if(viscous_flow) then
     dfu_size = 1+grid%d+ntrcs
     grd_size = 1+grid%d+1+ntrcs ! see comments for the grad variable
   else
     dfu_size = 0
     grd_size = 0
   endif

   ! boundary condition
   allocate( uuu_bcs(uuu_size,base%ms,grid%ni+1:grid%ns) , &
           nflux_bcs(dfu_size,base%ms,grid%ni+1:grid%ns) )

   ! domain decomposition boundary conditions
   ddc%ddc = .false.
   ddc_if: if(present(comm)) then
     ddc%ddc  = .true.

     ddc%comm = comm
     ddc%nnd  = count(ddc_grid%nns_id.gt.0)
     allocate( ddc%nd(ddc%nnd) , ddc%s2nd(2,grid%ni+1:grid%ns) )
     allocate( nd2nd(0:ddc_grid%nd-1) ) ! work array
     nnd = 0 ! counter
     do id=0,ddc_grid%nd-1
       if(ddc_grid%nns_id(id).gt.0) then
         nnd = nnd+1
         nd2nd(id) = nnd
         ddc%nd(nnd)%id  = id
         ddc%nd(nnd)%nns = ddc_grid%nns_id(id)
         allocate( ddc%nd(nnd)%ie   (ddc%nd(nnd)%nns) , &
                   ddc%nd(nnd)%isl  (ddc%nd(nnd)%nns) , &
                   ddc%nd(nnd)%p_s2s(ddc%nd(nnd)%nns) , &
      ddc%nd(nnd)%pb(base%pk,base%ms,ddc%nd(nnd)%nns) , &
 ddc%nd(nnd)%recbuf(uuu_size,base%ms,ddc%nd(nnd)%nns) , &
 ddc%nd(nnd)%senbuf(uuu_size,base%ms,ddc%nd(nnd)%nns) , &
 ddc%nd(nnd)%fecbuf(dfu_size,base%ms,ddc%nd(nnd)%nns) , &
 ddc%nd(nnd)%fenbuf(dfu_size,base%ms,ddc%nd(nnd)%nns) )
         ddc%nd(nnd)%cnt = uuu_size*base%ms*ddc%nd(nnd)%nns
         ddc%nd(nnd)%fnt = dfu_size*base%ms*ddc%nd(nnd)%nns
       endif
     enddo

     ! now we only have to fill ie, isl, p_s2s, pb and s2nd
     allocate(offset(ddc%nnd)); offset = 0 ! work array
     allocate(offse2(0:ddc_grid%nd-1)); offse2 = 0
     do i=1,ddc_grid%nd-1
       offse2(i) = offse2(i-1) + ddc_grid%nns_id(i-1)
     enddo
     allocate(senbuf(2,ddc_grid%nns),recbuf(2,ddc_grid%nns))
     do ins=1,ddc_grid%nns
       id = nd2nd( ddc_grid%ns(ins)%id ) ! local position in ddc%nd
       offset(id) = offset(id) + 1
       ddc%nd(id)%ie (offset(id)) = grid%s(ddc_grid%ns(ins)%i)%ie(1)
       ddc%nd(id)%isl(offset(id)) = grid%s(ddc_grid%ns(ins)%i)%isl(1)
     ddc%nd(id)%p_s2s(offset(id)) = ddc_grid%ns(ins)%p_s2s
       ! we need two permutations: from element to local side and from
       ! local side to neighbouring side
      ddc%nd(id)%pb(:,:,offset(id)) = base%pb(:,                     &
          base%stab(                                                 &
    grid%e(ddc%nd(id)%ie(offset(id)))%pi(ddc%nd(id)%isl(offset(id))) &
                    , : ),                                           &
                      ddc%nd(id)%isl(offset(id))     )
      ddc%nd(id)%pb(:,:,offset(id)) = ddc%nd(id)%pb(:,          &
          base%stab(ddc%nd(id)%p_s2s(offset(id)),:) , offset(id))

       senbuf(:,offse2(ddc_grid%ns(ins)%id)+offset(id)) =          &
                             (/ ddc_grid%id , ddc_grid%ns(ins)%in /)
     enddo
     ! to fill s2nd, each subdomain informs the neighbours about the
     ! order which it will use to send the data
     call mpi_alltoallv(                                      &
            senbuf, 2*ddc_grid%nns_id, 2*offse2, mpi_integer, &
            recbuf, 2*ddc_grid%nns_id, 2*offse2, mpi_integer, &
                        ddc%comm,ierr)
     offset = 0
     do i=1,ddc_grid%nns
       ddc%s2nd(1,recbuf(2,i)) = nd2nd(recbuf(1,i))
       offset(nd2nd(recbuf(1,i))) = offset(nd2nd(recbuf(1,i))) + 1
       ddc%s2nd(2,recbuf(2,i)) = offset(nd2nd(recbuf(1,i))) 
     enddo
     
     ! deallocate work arrays
     deallocate( nd2nd , offset , offse2 , senbuf , recbuf )
   endif ddc_if

   ldg_allocated = .false.
   ldg_if: if(viscous_flow) then
     call ldg_coefficients(base)
     allocate( grad( grid%d , grd_size , base%pk , grid%ne ) )
     ldg_allocated = .true.

     ! Set the turbulence model
     turb_mod_case: select case(trim(chosen_turb_model))
      case('viscous')
       turb_mod => viscous_flux
      case('smagrich')
       turb_mod => smagrich_flux
      case default
       call error(this_sub_name,this_mod_name, &
            'Unknown turbulence model "'//trim(chosen_turb_model)//'".')
     end select turb_mod_case
   endif ldg_if

   mod_dgcomp_rhs_initialized = .true.
 end subroutine mod_dgcomp_rhs_constructor

!-----------------------------------------------------------------------
 
 subroutine mod_dgcomp_rhs_destructor()
  character(len=*), parameter :: &
    this_sub_name = 'destructor'
   
   !Consistency checks ---------------------------
   if(mod_dgcomp_rhs_initialized.eqv..false.) then
     call error(this_sub_name,this_mod_name, &
                'This module is not initialized.')
   endif
   !----------------------------------------------

   deallocate( uuu_bcs , nflux_bcs )

   if(ddc%ddc) deallocate( ddc%nd , ddc%s2nd )

   if(ldg_allocated) deallocate( gamma0 , gammad , grad )

   mod_dgcomp_rhs_initialized = .false.
 end subroutine mod_dgcomp_rhs_destructor

!-----------------------------------------------------------------------
 
 !> Setup the coefficients for the gradient computation.
 !!
 !! For viscous flows, the LDG method computes at each time step the
 !! gradient \f$\mathcal{G}q\f$ of a variable \f$q\f$ solving
 !! \f{equation}{
 !!   \int_K\mathcal{G}q\cdot\underline{\varphi}\,dx = 
 !!   -\int_Kq\nabla\cdot\underline{\varphi}\,dx + 
 !!   \int_{\partial K}
 !!   \hat{q}\underline{n}\cdot\underline{\varphi}\,d\sigma.
 !! \f}
 !! Denoting by \f$\left(\mathcal{G}q\right)_i \f$ the vector
 !! coefficients of \f$\mathcal{G}q\f$, so that
 !! \f{displaymath}{
 !!  \mathcal{G}q = \sum_i \left(\mathcal{G}q\right)_i \varphi_i,
 !! \f}
 !! we obtain
 !! \f{equation}{
 !!  \left(\mathcal{G}q\right)_i = \sum_k \left(M^{K,-1}\right)_{ik}
 !!  \left[
 !!   -\int_Kq\nabla\varphi_k\,dx + 
 !!   \int_{\partial K}
 !!   \hat{q}\varphi_k\underline{n}\,d\sigma \right],
 !! \f}
 !! where
 !! \f{equation}{
 !!  M^K_{ij} = \int_K\varphi_i\varphi_j\,dx = det(B)\hat{\Xi}_{ij}.
 !! \f}
 !! The two contributions are now handled separately, since they are
 !! computed with an element and a side loop, respectively. For the
 !! first contribution we have
 !! \f{equation}{
 !!  - \sum_k \left(M^{K,-1}\right)_{ik} \int_Kq\nabla\varphi_k\,dx =
 !!  B^{-T}\sum_l\hat{\underline{\Gamma}}^0_{il}\, q(x_l)
 !! \f}
 !! with
 !! \f{equation}{
 !!   \hat{\underline{\Gamma}}^0_{il} =
 !!   -\hat{w}_l\sum_k\hat{\Xi}^{-1}_{ik}
 !!   \nabla\hat{\varphi}_k(\hat{x}_l).
 !! \f}
 !! Concerning the second contribution, we have
 !! \f{equation}{
 !!  \sum_k \left(M^{K,-1}\right)_{ik} \int_{\partial K}
 !!  \hat{q}\varphi_k\underline{n}\,d\sigma = \sum_{e\in\partial K}
 !!  \frac{|e|}{det(B)}\underline{n}\sum_l
 !!  \hat{\Gamma}^{\partial}_{il}\,\hat{q}(x_l)
 !! \f}
 !! with
 !! \f{equation}{
 !!   \hat{\Gamma}^{\partial}_{il} = \frac{\hat{w}^s_l}{|\hat{e}|}
 !!   \sum_k\hat{\Xi}^{-1}_{ik} \hat{\varphi}_k(\hat{x}_l).
 !! \f}
 !! \note In this module we assume that the basis is orthonormal, so
 !! that \f$\hat{\Xi}\f$ is the identity matrix; nevertheless we write
 !! here the complete expression, since it doesn't imply any overhead.
 !<
 subroutine ldg_coefficients(base)
  type(t_base), intent(in) :: base
 
  integer :: ib, jb, kb, iq, is
  real(wp), allocatable :: mass(:,:), mass_i(:,:)
  character(len=*), parameter :: &
    this_sub_name = 'ldg_coefficients'

   ! module arrays: gamma0 holds the volume coefficients
   ! \f$\hat{\underline{\Gamma}}^0\f$, gammad the side coefficients
   ! \f$\hat{\Gamma}^{\partial}\f$ (one slice per reference side)
   allocate( gamma0(base%me%d,base%pk,base%m             ) , &
             gammad(          base%pk,base%ms,base%me%d+1) )

   ! reference mass matrix \f$\hat{\Xi}\f$ and its inverse
   allocate( mass(base%pk,base%pk) , mass_i(base%pk,base%pk) )
   do ib=1,base%pk
     ! exploit symmetry: integrate only the lower triangle and mirror
     do jb=1,ib
       mass(ib,jb) = me_int( base%me%d , base%p_s(ib)*base%p_s(jb) )
       if(jb.ne.ib) mass(jb,ib) = mass(ib,jb)
     enddo
   enddoo_fix: continue
   call invmat(mass,mass_i)

   ! volume coefficients: for each basis function ib and quadrature
   ! node iq, accumulate xi^-1 * grad(phi) and scale by -w(iq)
   do ib=1,base%pk
     do iq=1,base%m
       gamma0(:,ib,iq) = 0.0_wp
       do kb=1,base%pk
         gamma0(:,ib,iq) = gamma0(:,ib,iq) + mass_i(ib,kb)*base%gradp(:,kb,iq)
       enddo
       gamma0(:,ib,iq) = -base%wg(iq)*gamma0(:,ib,iq)
     enddo
   enddo

   ! side coefficients: one block per reference side is
   do is=1,base%me%d+1
     do iq=1,base%ms
       gammad(:,iq,is) = base%wgs(iq)/base%me%voldm1      &
                       * matmul( mass_i , base%pb(:,iq,is) )
     enddo
   enddo

   ! cleanup of the work arrays
   deallocate(mass,mass_i)

 end subroutine ldg_coefficients
 
!-----------------------------------------------------------------------
 
 !> Tendencies computations
 !!
 !! The NS equations are solved in the following form:
 !! \f{equation}{
 !!  \begin{array}{rcl}
 !!   \displaystyle
 !!   \frac{d}{dt}\int_K\rho\varphi\,dx & = &
 !!   \displaystyle
 !!    \int_K \underline{U}\cdot\nabla\varphi\,dx 
 !!   - \int_{\partial K}\hat{\underline{U}}\cdot \underline{n}
 !!   \varphi\,d\sigma \\[3mm]
 !!   \displaystyle
 !!   \frac{d}{dt}\int_K E \varphi\,dx & = &
 !!   \displaystyle
 !!    \int_K \frac{H}{\rho}\underline{U}\cdot\nabla\varphi\,dx 
 !!   - \int_{\partial K}\widehat{\frac{H}{\rho}\underline{U}}\cdot
 !!   \underline{n} \varphi\,d\sigma-
 !!   \mathcal{D}_K\underline{F}^d_E  \\[3mm]
 !!   \displaystyle
 !!   \frac{d}{dt}\int_K U_{i_d} \varphi\,dx & = &
 !!   \displaystyle
 !!    \int_K \left[ \frac{U_{i_d}}{\rho}\underline{U} +
 !!   p'\underline{e}_{i_d}\right]\cdot\nabla\varphi\,dx
 !!   - \int_{\partial K}\widehat{\left[
 !!   \frac{U_{i_d}}{\rho}\underline{U} +
 !!   p'\underline{e}_{i_d}\right]} \cdot \underline{n}
 !!   \varphi\,d\sigma- \mathcal{D}_K\underline{F}^d_{U_{i_d}}
 !!   + \int_K \rho'g_{i_d}\varphi\,dx
 !!  \end{array}
 !! \f}
 !! where the divergence of the diffusive (or turbulent) flux for a
 !! generic quantity \f$q\f$ is defined as
 !! \f{equation}{
 !!   \mathcal{D}_K\underline{F}^d_q = 
 !!    -\int_K \underline{F}^d_q\cdot \nabla\varphi\,dx 
 !!   + \int_{\partial K}\widehat{\underline{F}^d_q}\cdot
 !!   \underline{n} \varphi\,d\sigma.
 !! \f}
 !! The energy and momentum diffusive/turbulent fluxes are computed in
 !! \c mod_turb_flux, and the (numerical) gradients required for such
 !! computation are obtained as described in \c ldg_coefficients.
 !!
 !! Tracers are treated in conservative form, so that for a tracer
 !! \f$C\f$ we have
 !! \f{equation}{
 !!  \begin{array}{rcl}
 !!   \displaystyle
 !!   \frac{d}{dt}\int_K C \varphi\,dx & = &
 !!   \displaystyle
 !!    \int_K \frac{C}{\rho}\underline{U}\cdot\nabla\varphi\,dx 
 !!   - \int_{\partial K}\widehat{\frac{C}{\rho}\underline{U}}\cdot
 !!   \underline{n} \varphi\,d\sigma-
 !!   \mathcal{D}_K\underline{F}^d_C.
 !!  \end{array}
 !! \f}
 !! The concentration is then \f$c=C/\rho\f$; use of equal order basis
 !! functions ensures that constant solutions of the form \f$c\equiv
 !! 1\f$ are always preserved.
 !!
 !! \note The presence of the diffusive terms has two overheads: two
 !! additional loops are required for the precomputation of the
 !! gradients, an element loop and a side loop; additional terms are
 !! present in the evolution equations.
 !!
 !! The integrals are computed evaluating the (nonlinear) terms in the
 !! quadrature nodes and then using the quadrature formula. Due to the
 !! strong nonlinearities, this solution is simpler compared with
 !! expanding the integrals in terms of the basis functions, as is
 !! done, for instance, in \c mod_cg_ns_linsistem for the
 !! incompressible NS system. For the computation of the numerical
 !! fluxes, the simplest option is the <em>Rusanov</em> flux, for
 !! which we have
 !! \f{equation}{
 !!  \begin{array}{rcl}
 !!   \displaystyle
 !!   \hat{\underline{U}} & = &
 !!   \displaystyle
 !!   \frac{1}{2}\left( \underline{U}^l +
 !!   \underline{U}^r \right) + \frac{|\lambda|}{2}\left(
 !!   \rho^l\underline{n}^l + \rho^r\underline{n}^r \right)
 !!  \end{array}
 !! \f}
 !! where the superscripts \f$l\f$ and \f$r\f$ denote the two values
 !! at the element interface and
 !! \f{equation}{
 !!  \lambda = \max_{l,r}\left(|\underline{u}\cdot\underline{n}| +
 !!  a\right).
 !! \f}
 !! Denoting by \f$\hat{\underline{F}}\f$ a generic numerical flux, the
 !! computation of the boundary terms is done as follows:
 !! <ul>
 !!  <li> compute the (single valued) quantity
 !!  \f$\hat{F}_{n^e} = \hat{\underline{F}}\cdot\underline{n}^e\f$,
 !!  where \f$\underline{n}^e\f$ is the intrinsic side normal (i.e.
 !!  the normal of the first connected element)
 !!  <li> compute
 !!  \f{equation}{
 !!   (\underline{n}^{K^l}\cdot\underline{n}^e) \int_{\partial K^l\cap
 !!   e}\hat{F}_{n^e}\varphi^l\,d\sigma
 !!  \f}
 !!  and the similar quantity for \f$K^r\f$.
 !! </ul>
 !!
 !! \section ddc_comm MPI communications
 !!
 !! Communications are required with the following pattern
 !! <ul>
 !!  <li> send/receive the boundary values of the prognostic
 !!  variables
 !!  <li> send/receive the boundary values of the dissipative
 !!  fluxes.
 !! </ul>
 !!
 !! \note Subroutines called in this subroutine make use of the module
 !! variable \c ddc; such use, however, is self-contained.
 !<
  subroutine dgcomp_tens(tens,grid,base,bcs,uuu,err,viscous_flow)
   type(t_grid),      intent(in) :: grid !< computational grid
   type(t_base),      intent(in) :: base !< finite element basis
   type(t_bcs),       intent(in) :: bcs  !< boundary conditions
   real(wp),          intent(in) :: uuu(:,:,:) !< prognostic variables
   logical,           intent(in) :: viscous_flow !< include diffusive terms
   real(wp),          intent(out) :: tens(:,:,:) !< computed tendencies
   type(t_bcs_error), intent(out) :: err !< error flag from the bcs evaluation
  
   integer :: ie
   character(len=*), parameter :: &
     this_sub_name = 'dgcomp_tens'

    !--------------------------------------------------------------------
    !1) Gradient computations (if required)
    if(viscous_flow) then

      !------------------------------------------------------------------
      !1.1) Update the module variable grad (this also starts and
      !     completes the communication of uuu, see dgcomp_grad)
      call dgcomp_grad(grid,base,bcs,uuu,err)
      !------------------------------------------------------------------

      !------------------------------------------------------------------
      !1.2) Second communication (uuu communicated in dgcomp_grad)
      if(ddc%ddc) call bside_send_flux(uuu,grad,grid,base)
      !------------------------------------------------------------------

    else

      !------------------------------------------------------------------
      !1.1) First communication
      if(ddc%ddc) call bside_send(uuu)
      !------------------------------------------------------------------

    endif
    !--------------------------------------------------------------------

    !--------------------------------------------------------------------
    !2) Initialization
    tens = 0.0_wp
    !--------------------------------------------------------------------
    
    !--------------------------------------------------------------------
    !3) Element loop (volume contributions)
    call tens_elem_loop(tens,grid,base,uuu,viscous_flow)
    !--------------------------------------------------------------------

    !--------------------------------------------------------------------
    !4) Side loop
    ! Note: the receive receives the variable if the flow is inviscid
    ! and the flux if the flow is viscous.
    if(ddc%ddc) call bside_recv()
    if(viscous_flow) then
      call update_fluxbcs(nflux_bcs,grid,base,bcs,uuu,grad)
    else
      call update_bcs(uuu_bcs,grid,base,bcs,uuu,err)
    endif
    call tens_side_loop(tens,grid,base,uuu,viscous_flow)
    !--------------------------------------------------------------------

    !--------------------------------------------------------------------
    !5) Include diagonal mass matrix (the basis is orthogonal, so the
    !   element mass matrix reduces to the scaling by det_b)
    !$omp parallel do schedule(static) &
    !$omp   private(ie) shared(tens,grid) default(none)
    do ie=1,grid%ne
      tens(:,:,ie) = tens(:,:,ie)/grid%e(ie)%det_b
    enddo
    !$omp end parallel do
    !--------------------------------------------------------------------

  end subroutine dgcomp_tens
 
!-----------------------------------------------------------------------

 !> Gradient computation
 !!
 !! This function computes the (DG) gradient of the state \c uuu and
 !! stores it in the module variable \c grad.
 !!
 !! For the definition of the gradient, we follow <a
 !! href="http://dx.doi.org/10.1137/S0036142900371003">[Castillo, Cockburn,
 !! Perugia, Sch&ouml;tzau, 2000]</a> and set
 !! \f{displaymath}{
 !!  \qquad c_{12} = c_{22} = 0.
 !! \f}
 !! In practice, as far as the gradient is concerned, the LDG method
 !! considered here coincides with the Bassi-Rebay method.
 !!
 !! Note that this function updates various module variables:
 !! <ul>
 !!  <li> by calling \c update_bcs, it updates the module variable \c
 !!  uuu_bcs
 !!  <li> it directly updates the module variable \c grad.
 !! </ul>
 !!
 !! \note This subroutine (indirectly) makes a self-contained use of
 !! the module variable \c ddc.
 !<
  subroutine dgcomp_grad(grid,base,bcs,uuu,err)
   type(t_grid),      intent(in) :: grid !< computational grid
   type(t_base),      intent(in) :: base !< finite element basis
   type(t_bcs),       intent(in) :: bcs  !< boundary conditions
   real(wp),          intent(in) :: uuu(:,:,:) !< prognostic variables
   type(t_bcs_error), intent(out) :: err !< error flag from the bcs evaluation

    !--------------------------------------------------------------------
    !0) Start communication (nonblocking exchange of uuu on the ddc
    !   sides; completed in step 3 by bside_recv)
    if(ddc%ddc) call bside_send(uuu)
    !--------------------------------------------------------------------

    !--------------------------------------------------------------------
    !1) Initialization (grad is a module variable, see its comments)
    grad = 0.0_wp
    !--------------------------------------------------------------------

    !--------------------------------------------------------------------
    !2) Gradient element loop (volume contributions, overlapped with
    !   the communication started in step 0)
    call grad_elem_loop(grad,grid,base,uuu)
    !--------------------------------------------------------------------

    !--------------------------------------------------------------------
    !3) Complete communication and update bcs (updates the module
    !   variable uuu_bcs)
    if(ddc%ddc) call bside_recv()
    call update_bcs(uuu_bcs,grid,base,bcs,uuu,err)
    !--------------------------------------------------------------------

    !--------------------------------------------------------------------
    !4) Gradient side loop (side contributions)
    call grad_side_loop(grad,grid,base,uuu)
    !--------------------------------------------------------------------

  end subroutine dgcomp_grad

!-----------------------------------------------------------------------

 !> Start the nonblocking communication of \c uuu
 !!
 !! \note This subroutine uses the send and receive buffers of the
 !! module variable \c ddc.
 !<
 subroutine bside_send(uuu)
  real(wp), intent(in) :: uuu(:,:,:)

  integer :: id, i, ie, ierr

   ! loop on the neighbours
   neigh_do: do id=1,ddc%nnd

     !1) receive the data
     call mpi_irecv(      &
       ddc%nd(id)%recbuf, & ! buffer
       ddc%nd(id)%cnt,    & ! count
       wp_mpi,            & ! data type
       ddc%nd(id)%id,     & ! source
       1,                 & ! tag
       ddc%comm,          & ! communicator
       ddc%nd(id)%recreq, & ! request handle
       ierr )

     !2) prepare and send the data
     do i=1,ddc%nd(id)%nns
       ie = ddc%nd(id)%ie(i)
       ddc%nd(id)%senbuf(:,:,i) =                                   &
                         matmul( uuu(:,:,ie) , ddc%nd(id)%pb(:,:,i) )
     enddo
     call mpi_isend(      &
       ddc%nd(id)%senbuf, & ! buffer
       ddc%nd(id)%cnt,    & ! count
       wp_mpi,            & ! data type
       ddc%nd(id)%id,     & ! destination
       1,                 & ! tag
       ddc%comm,          & ! communicator
       ddc%nd(id)%senreq, & ! request handle
       ierr )

   enddo neigh_do

 end subroutine bside_send

!-----------------------------------------------------------------------

 !> Start the nonblocking communication of the complete dissipative
 !! fluxes
 !!
 !! This subroutine is similar to \c bside_send, except that it
 !! considers the dissipative fluxes. Such fluxes are not stored in a
 !! global variable analogous to \c uuu, but they are computed on the
 !! fly for each ddc boundary side.
 !!
 !! \note This subroutine uses the send and receive buffers of the
 !! module variable \c ddc.
 !<
  subroutine bside_send_flux(uuu,grad,grid,base)
   real(wp), intent(in) :: uuu(:,:,:), grad(:,:,:,:) !< state and its gradient
   type(t_grid), intent(in) :: grid !< computational grid
   type(t_base), intent(in) :: base !< finite element basis

   integer :: id, i, ie, isl, l, ll, ierr
   ! fb: viscous flux at the side quadrature nodes; n: side normal
   ! NOTE(review): n is declared with extent grid%m while the first
   ! extent of fb is grid%d; the matmul below requires the two to
   ! agree -- confirm grid%m.eq.grid%d in this context.
   real(wp) :: fb(grid%d,dfu_size,base%ms), n(grid%m)

    ! loop on the neighbours
    neigh_do: do id=1,ddc%nnd

      !1) receive the data (flux buffers and counters, tag 2 to keep
      !   these messages distinct from the uuu exchange of bside_send)
      call mpi_irecv(      &
        ddc%nd(id)%fecbuf, & ! buffer
        ddc%nd(id)%fnt,    & ! count
        wp_mpi,            & ! data type
        ddc%nd(id)%id,     & ! source
        2,                 & ! tag
        ddc%comm,          & ! communicator
        ddc%nd(id)%recreq, & ! request handle
        ierr )

      !2) prepare and send the data: the fluxes are not stored
      !   globally, so they are recomputed here for each ddc side
      do i=1,ddc%nd(id)%nns
        ie  = ddc%nd(id)%ie(i)
        isl = ddc%nd(id)%isl(i)
        call elemside_viscflux( fb , ie , isl ,        &
            uuu(:,:,ie) , grad(:,:,:,ie) , grid , base )
        ! side normal: from the viewpoint of the receiving side
        n = -grid%e(ie)%n(:,isl)

        ! We do here two things: compute the normal fluxes, and permute
        ! the side nodes according to the destination ordering
        do l=1,base%ms
          ll = base%stab(ddc%nd(id)%p_s2s(i),l)
          ddc%nd(id)%fenbuf(:,l,i) = matmul( n , fb(:,:,ll) )
        enddo
      enddo
      call mpi_isend(      &
        ddc%nd(id)%fenbuf, & ! buffer
        ddc%nd(id)%fnt,    & ! count
        wp_mpi,            & ! data type
        ddc%nd(id)%id,     & ! destination
        2,                 & ! tag
        ddc%comm,          & ! communicator
        ddc%nd(id)%senreq, & ! request handle
        ierr )

    enddo neigh_do

  end subroutine bside_send_flux

!-----------------------------------------------------------------------

 !> Wait all the pending communications in \c ddc
 !!
 !! This subroutine waits for all the send requests of
 !! <tt>ddc\%nd\%senreq</tt> and all the receive requests of
 !! <tt>ddc\%nd\%recreq</tt>. Notice that it doesn't matter here
 !! whether such requests concern the primal buffers \c recbuf and \c
 !! senbuf or the flux buffers \c fecbuf and \c fenbuf.
 !<
 subroutine bside_recv()
  integer :: mpi_err
  integer :: statuses(mpi_status_size,ddc%nnd)

   ! Block until every pending exchange with the neighbours has
   ! completed; first the receive requests, then the send requests.
   ! The status array is scratch space, reused for the second call.
   call mpi_waitall( ddc%nnd , ddc%nd%recreq , statuses , mpi_err )
   call mpi_waitall( ddc%nnd , ddc%nd%senreq , statuses , mpi_err )

 end subroutine bside_recv

!-----------------------------------------------------------------------
 
 pure &
 !$ subroutine omp_dummy_2(); end subroutine omp_dummy_2
 subroutine tens_elem_loop(tens,grid,base,uuu,viscous_flow)
  type(t_grid), intent(in) :: grid
  type(t_base), intent(in) :: base
  real(wp),     intent(in) :: uuu(:,:,:)
  logical,      intent(in) :: viscous_flow
  real(wp),     intent(inout) :: tens(:,:,:)

  integer :: ie, l, id, it
  real(wp) :: &
    bit(grid%m,grid%d), wg(base%m), gradp(grid%d,base%pk,base%m),       &
    ugradp(base%pk), uuug(size(uuu,1),base%m), rho(base%m), e(base%m),  &
    u(grid%d,base%m), ekin(base%m), p_p(base%m), p(base%m), hh(base%m), &
    uu(grid%d,base%m), tnde(size(uuu,1),base%pk), cc(ntrcs,base%m),     &
    rgcv, mg
  real(wp) :: fem(grid%d,dfu_size,base%m)
  type(t_turb_input) :: turb_in

  character(len=*), parameter :: &
    this_sub_name = 'tens_elem_loop'

   ! Element (volume) contributions to the tendencies: inviscid
   ! fluxes, gravity forcing and, for viscous flows, the dissipative
   ! fluxes provided by the turbulence model.
   !
   ! note: "jd" has been removed from the private list: it is not
   ! declared among the locals of this subroutine and it is never
   ! referenced inside the parallel region
   !$omp parallel &
   !$omp   private( ie , bit , wg , l , gradp , uuug , rho , e , u , &
   !$omp           ekin , p_p , p , hh , id , it , uu , cc ,    &
   !$omp           tnde , ugradp , rgcv , mg , turb_in , fem ) &
   !$omp   shared( grid , base , uuu , atm_ref_e , tens , phc , &
   !$omp           grad , viscous_flow ) &
   !$omp   default(none)
   rgcv = phc%rgas / phc%cv
   mg   = -phc%gravity
   ! per-thread workspace for the turbulence model input
   allocate( turb_in%gsuu(grid%d,grid%d,base%m) , &
             turb_in%gtt (       grid%d,base%m) , &
             turb_in%gth (       grid%d,base%m) , &
             turb_in%gcc (grid%d, ntrcs,base%m) )
   !$omp do schedule(static)
   elem_do: do ie=1,grid%ne

     !------------------------------------------------------------------
     !1) Preliminary computations

     ! gradient of the basis functions (includes the quad. weighs)
     bit = transpose( grid%e(ie)%bi )
     wg = grid%e(ie)%det_b * base%wg
     do l=1,base%m
       ! notice: this should be changed when d.ne.m
       gradp(:,:,l) = wg(l) * matmul( bit , base%gradp(:,:,l) )
     enddo

     ! evaluate the solution at the quadrature nodes
     ! (perturbations plus the reference atmospheric state)
     uuug = matmul( uuu(:,:,ie) , base%p )
     rho = uuug(1,:) + atm_ref_e(:,ie)%rho
     e   = uuug(2,:) + atm_ref_e(:,ie)%e
     u   = uuug(2+1:2+grid%d,:)

     ! kinetic energy
     ekin = 0.5_wp * sum( u**2 , 1 ) / rho
     ! pressure perturbation
     p_p  = rgcv*( uuug(2,:) - ekin - atm_ref_e(:,ie)%phi*uuug(1,:) )
     ! total pressure
     p    = p_p + atm_ref_e(:,ie)%p
     ! specific enthalpy
     hh   = (e+p)/rho
     ! velocity
     do id=1,grid%d
       uu(id,:) = u(id,:)/rho
     enddo
     ! tracers
     do it=1,ntrcs
       cc(it,:) = uuug(2+grid%d+it,:)/rho
     enddo
     !------------------------------------------------------------------

     !------------------------------------------------------------------
     !2) Inviscid fluxes
     tnde = 0.0_wp ! element tendencies
     do l=1,base%m

       ! precompute U \cdot \nabla\phi (gradp includes quad. weighs!)
       ugradp = matmul( u(:,l) , gradp(:,:,l) )

       ! density: U \cdot \nabla\phi
       tnde(1,:) = tnde(1,:) + ugradp
       ! energy: hU \cdot \nabla\phi
       tnde(2,:) = tnde(2,:) + hh(l)*ugradp
       ! momentum: uU \cdot \nabla\phi + p'\partial_{x_i}\phi
       do id=1,grid%d
         tnde(2+id,:) = tnde(2+id,:) &
           + uu(id,l)*ugradp + p_p(l)*gradp(id,:,l)
       enddo
       ! gravity forcing in the momentum equation (last, i.e.
       ! vertical, momentum component)
       tnde(2+grid%d,:) = tnde(2+grid%d,:) &
         + wg(l) * mg*uuug(1,l)*base%p(:,l)
       ! tracers
       do it=1,ntrcs
         tnde(2+grid%d+it,:) = tnde(2+grid%d+it,:) + cc(it,l)*ugradp
       enddo

     enddo
     !------------------------------------------------------------------

     !------------------------------------------------------------------
     !3) Viscous fluxes
     if(viscous_flow) then

       ! compute the viscous fluxes
       call set_turb_input(turb_in, grid%d,base,grad(:,:,:,ie),base%p)
       call turb_mod(fem , grid%d, el_linear_size(grid%e(ie)),       &
                     affmap(grid%e(ie),base%xig), rho, p, uu, turb_in)

       ! compute the tendencies
       do l=1,base%m
         ! all the variables except density
         tnde(2:,:) = tnde(2:,:)                      &
           + matmul(transpose(fem(:,:,l)),gradp(:,:,l))
       enddo

     endif
     !------------------------------------------------------------------

     tens(:,:,ie) = tens(:,:,ie) + tnde
   enddo elem_do
   !$omp end do
   deallocate( turb_in%gsuu , turb_in%gtt , turb_in%gth , turb_in%gcc )
   !$omp end parallel

 end subroutine tens_elem_loop
 
!-----------------------------------------------------------------------

 pure &
 !$ subroutine omp_dummy_3(); end subroutine omp_dummy_3
 subroutine grad_elem_loop(grad,grid,base,uuu)
  type(t_grid), intent(in) :: grid
  type(t_base), intent(in) :: base
  real(wp),     intent(in) :: uuu(:,:,:)
  real(wp),     intent(inout) :: grad(:,:,:,:)

  integer :: iel, ivar, iq
  real(wp) :: qvals(size(grad,2),base%m), bi_t(grid%d,grid%d), &
    gsum(grid%d,base%pk)

   ! Element (volume) contributions to the gradients: for each
   ! element, diagnose the gradient variables at the quadrature nodes
   ! and accumulate the corresponding volume terms into grad.
   !$omp parallel do schedule(static) &
   !$omp   private( iel , qvals , bi_t , ivar , gsum , iq ) &
   !$omp   shared( grid , base , uuu , atm_ref_e , gamma0 , grad ) &
   !$omp   default(none)
   element_loop: do iel=1,grid%ne

     ! diagnosed variables at the element quadrature nodes
     call grad_diagnostics( qvals, grid, matmul(uuu(:,:,iel),base%p), &
       atm_ref_e(:,iel)%rho, atm_ref_e(:,iel)%e, atm_ref_e(:,iel)%phi )

     ! transposed inverse Jacobian of the element mapping
     bi_t = transpose( grid%e(iel)%bi )

     variable_loop: do ivar=1,size(grad,2)
       ! quadrature of the volume term for variable ivar
       gsum = 0.0_wp
       do iq=1,base%m
         gsum = gsum + gamma0(:,:,iq) * qvals(ivar,iq)
       enddo
       grad(:,ivar,:,iel) = grad(:,ivar,:,iel) + matmul( bi_t , gsum )
     enddo variable_loop

   enddo element_loop
   !$omp end parallel do

 end subroutine grad_elem_loop

!-----------------------------------------------------------------------
 
 pure &
 !$ subroutine omp_dummy_4(); end subroutine omp_dummy_4
 subroutine tens_side_loop(tens,grid,base,uuu,viscous_flow)
  type(t_grid), intent(in) :: grid
  type(t_base), intent(in) :: base
  real(wp),     intent(in) :: uuu(:,:,:)
  logical,      intent(in) :: viscous_flow
  real(wp),     intent(inout) :: tens(:,:,:)
 
  ! all the computations are done using the normal of the first
  ! element, and then the proper sign is introduced with the following
  ! parameter
  real(wp), parameter :: sigma(2) = (/ 1.0_wp , -1.0_wp /)
  integer :: is, ie_s(2),is_loc(2), ie, i_perm, it, l, iv
  real(wp) :: pb(base%pk,base%ms,2), n(grid%m), wgsa(base%ms), &
    uuug(size(uuu,1),base%ms,2), rho(base%ms,2), e(base%ms,2), &
    u(grid%d,base%ms,2), ekin(base%ms,2), p_p(base%ms,2),      &
    p(base%ms,2), hh(base%ms,2), uu(grid%d,base%ms,2),         &
    cc(ntrcs,base%ms,2), aun(base%ms,2),                       &
    fne(size(uuu,1),base%ms), lambda(base%ms), &
    tnd(size(uuu,1),base%pk,2), rgcv, gamma
  real(wp) :: fem(grid%d,dfu_size,base%ms,2)
  type(t_turb_input) :: turb_in
  character(len=*), parameter :: &
    this_sub_name = 'tens_side_loop'
 
   ! Side contributions to the tendencies: first the internal sides,
   ! then the boundary sides, where the ghost state is taken from
   ! uuu_bcs and the viscous normal flux from nflux_bcs.
   !
   ! note: cc and it are referenced inside this parallel region
   ! (side_diags call and the dfu_size loop) and default(none) is in
   ! effect, so both must appear in the private list
   !$omp parallel &
   !$omp   private( is , ie_s , is_loc , ie , i_perm , pb , n ,       &
   !$omp           wgsa , uuug , rho , e , u , ekin , p_p , p , hh ,  &
   !$omp           uu , cc , aun , l , it , lambda , fne , iv , tnd , &
   !$omp           rgcv , gamma , turb_in , fem ) &
   !$omp   shared( grid , base , uuu , atm_ref_s , tens , phc , &
   !$omp           viscous_flow , grad ) &
   !$omp   default(none)
   rgcv = phc%rgas / phc%cv
   gamma = phc%gamma
   ! per-thread workspace for the turbulence model input
   allocate( turb_in%gsuu(grid%d,grid%d,base%ms) , &
             turb_in%gtt (       grid%d,base%ms) , &
             turb_in%gth (       grid%d,base%ms) , &
             turb_in%gcc (grid%d, ntrcs,base%ms) )
   !$omp do schedule(static)
   internal_side_do: do is=1,grid%ni

     !------------------------------------------------------------------
     !1) Preliminary computations

     ! the two elements of the side
     ie_s = grid%s(is)%ie

     ! side local indexes on the two elements
     is_loc = grid%s(is)%isl

     ! base function evaluations on the side quad. nodes
     do ie=1,2 ! loop on the two elements of the side
       ! index of the element -> side permutation (perm. pi)
       i_perm = grid%e(ie_s(ie))%pi(is_loc(ie))
       pb(:,:,ie) = & ! reorder on second index (side quad. node)
         base%pb(:, & ! first index unchanged
          base%stab(i_perm,:), & ! second index: reordered with pi
          is_loc(ie)) ! third index: local side
     enddo
     ! the normal of the first element is used as the side normal
     n = grid%e(ie_s(1))%n(:,is_loc(1))

     ! rescale the quadrature weights
     wgsa = (grid%s(is)%a/base%me%voldm1) * base%wgs

     !------------------------------------------------------------------

     !------------------------------------------------------------------
     !2) Compute the numerical fluxes (include quad. weights)

     !2.1) evaluate the solution in the quadrature nodes
     do ie=1,2
       uuug(:,:,ie) = matmul( uuu(:,:,ie_s(ie)) , pb(:,:,ie) )
     enddo
     call side_diags(rho, e, u, ekin, p_p, p, hh, uu, cc, aun, lambda, &
                     is, uuug, grid%d, rgcv, gamma, n, base%ms )

     !2.2) evaluate the normal numerical flux fne
     call side_numflux(fne,grid%d,base%ms,n,u,hh,uu,p_p,cc,lambda,uuug)

     ! add the viscous terms
     viscous_flow_if: if(viscous_flow) then

       do ie=1,2
         ! compute the viscous fluxes
         call set_turb_input( turb_in, grid%d,base, &
                   grad(:,:,:,ie_s(ie)), pb(:,:,ie) )
         call turb_mod( fem(:,:,:,ie) , grid%d,                 &
                        el_linear_size(grid%e(ie_s(ie))),       &
                        affmap(grid%s(is),base%xigs),           &
                        rho(:,ie), p(:,ie), uu(:,:,ie), turb_in )
       enddo

       do it=1,dfu_size ! energy, momentum and tracers
         ! Formally we use here a Bassi-Rebay numerical flux, however
         ! a jump penalization term is implicitly added in the
         ! inviscid numerical flux: see the comments in side_numflux.
         fne(1+it,:) = fne(1+it,:) + &
           matmul( n , 0.5_wp*(fem(:,it,:,1)+fem(:,it,:,2)) )
       enddo

     endif viscous_flow_if

     ! include the quad. weights
     do iv=1,size(uuu,1) ! loop on the variables
       fne(iv,:) = wgsa*fne(iv,:)
     enddo

     !------------------------------------------------------------------

     !------------------------------------------------------------------
     !3) Compute the boundary integrals
     tnd = 0.0_wp ! side/element tendencies
     do ie=1,2 ! loop on the two elements of the side
       do l=1,base%ms ! loop on the quad. nodes
         do iv=1,size(uuu,1) ! loop on the variables
           tnd(iv,:,ie) = tnd(iv,:,ie) + fne(iv,l)*pb(:,l,ie)
         enddo
       enddo
     enddo
     !------------------------------------------------------------------

     !------------------------------------------------------------------
     !4) Add the new tendencies
     ! (critical: two threads may update the same element through
     ! different sides)
     do ie=1,2
       !$omp critical
       tens(:,:,ie_s(ie)) = tens(:,:,ie_s(ie)) - sigma(ie)*tnd(:,:,ie)
       !$omp end critical
     enddo
     !------------------------------------------------------------------

   enddo internal_side_do
   !$omp end do
   deallocate( turb_in%gsuu , turb_in%gtt , turb_in%gth , turb_in%gcc )
   !$omp end parallel

   ! note: cc is written by side_diags also in this region, so it
   ! must appear in the private list (default(none) is in effect)
   !$omp parallel &
   !$omp   private( is , ie_s , is_loc , i_perm , pb , n , wgsa , &
   !$omp            uuug , rho , e , u , ekin , p_p , p , hh ,    &
   !$omp            uu , cc , l , fne , iv , tnd , aun , lambda , &
   !$omp            rgcv , gamma ) &
   !$omp   shared( grid , base , uuu , atm_ref_s , tens , phc , &
   !$omp           uuu_bcs , viscous_flow , nflux_bcs ) &
   !$omp   default(none)
   rgcv = phc%rgas / phc%cv
   gamma = phc%gamma
   !$omp do schedule(static)
   boundary_side_do: do is=grid%ni+1,grid%ns

     !------------------------------------------------------------------
     !1) Preliminary computations

     ! element of the side
     ie_s(1) = grid%s(is)%ie(1)

     ! side local indexes on the element
     is_loc(1) = grid%s(is)%isl(1)

     ! base function evaluations on the side quad. nodes
       ! index of the element -> side permutation (perm. pi)
       i_perm = grid%e(ie_s(1))%pi(is_loc(1))
       pb(:,:,1) = & ! reorder on second index (side quad. node)
         base%pb(:, & ! first index unchanged
          base%stab(i_perm,:), & ! second index: reordered with pi
          is_loc(1)) ! third index: local side

     n = grid%e(ie_s(1))%n(:,is_loc(1))

     ! rescale the quadrature weights
     wgsa = (grid%s(is)%a/base%me%voldm1) * base%wgs

     !------------------------------------------------------------------

     !------------------------------------------------------------------
     !2) Compute the numerical fluxes (include quad. weights)

     !2.1) evaluate the solution in the quadrature nodes: the values
     !in the ghost element are defined according to the boundary
     !condition
     uuug(:,:,1) = matmul( uuu(:,:,ie_s(1)) , pb(:,:,1) )
     uuug(:,:,2) = uuu_bcs(:,:,is)
     call side_diags(rho, e, u, ekin, p_p, p, hh, uu, cc, aun, lambda, &
                     is, uuug, grid%d, rgcv, gamma, n, base%ms )

     !2.2) evaluate the normal numerical flux fne
     call side_numflux(fne,grid%d,base%ms,n,u,hh,uu,p_p,cc,lambda,uuug)

     ! add the viscous terms (precomputed in update_fluxbcs)
     b_viscous_flow_if: if(viscous_flow) then
       fne(2:,:) = fne(2:,:) + nflux_bcs(:,:,is) ! all except density
     endif b_viscous_flow_if

     ! include the quad. weights
     do iv=1,size(uuu,1) ! loop on the variables
       fne(iv,:) = wgsa*fne(iv,:)
     enddo

     !------------------------------------------------------------------

     !------------------------------------------------------------------
     !3) Compute the boundary integrals
     tnd(:,:,1) = 0.0_wp ! side/element tendencies
     do l=1,base%ms ! loop on the quad. nodes
       do iv=1,size(uuu,1) ! loop on the variables
         tnd(iv,:,1) = tnd(iv,:,1) + fne(iv,l)*pb(:,l,1)
       enddo
     enddo
     !------------------------------------------------------------------

     !------------------------------------------------------------------
     !4) Add the new tendencies
     !$omp critical
     tens(:,:,ie_s(1)) = tens(:,:,ie_s(1)) - sigma(1)*tnd(:,:,1)
     !$omp end critical
     !------------------------------------------------------------------

   enddo boundary_side_do
   !$omp end do
   !$omp end parallel

 contains

  !> Diagnose the thermodynamic/dynamic side quantities on both sides
  !! of the interface, and the Rusanov wave speed lambda.
  pure subroutine side_diags(rho, e, u, ekin, p_p, p, hh, uu, cc, aun, &
    lambda, is, uuug, d, rgcv, gamma, n, ms)
   integer, intent(in) :: is, d, ms
   real(wp), intent(in) :: uuug(:,:,:), rgcv, gamma, n(:)
   real(wp), intent(out) :: rho(:,:), e(:,:), u(:,:,:), ekin(:,:), &
     p_p(:,:), p(:,:), hh(:,:), uu(:,:,:), cc(:,:,:), aun(:,:), lambda(:)

   integer :: ie, id, it, l

    do ie=1,2
      rho(:,ie) = uuug(1,:,ie) + atm_ref_s(:,is)%rho
      e(:,ie)   = uuug(2,:,ie) + atm_ref_s(:,is)%e
      u(:,:,ie) = uuug(2+1:2+d,:,ie)

      ! kinetic energy
      ekin(:,ie) = 0.5_wp * sum( u(:,:,ie)**2 , 1 ) / rho(:,ie)
      ! pressure perturbation
      p_p(:,ie)  = rgcv*( uuug(2,:,ie) &
        - ekin(:,ie) - atm_ref_s(:,is)%phi*uuug(1,:,ie) )
      ! total pressure
      p(:,ie)    = p_p(:,ie) + atm_ref_s(:,is)%p
      ! specific enthalpy
      hh(:,ie)   = (e(:,ie)+p(:,ie))/rho(:,ie)
      ! velocity
      do id=1,d
        uu(id,:,ie) = u(id,:,ie)/rho(:,ie)
      enddo
      ! tracers
      do it=1,ntrcs
        cc(it,:,ie) = uuug(2+grid%d+it,:,ie)/rho(:,ie)
      enddo

      ! Compute a + |u \cdot n|
      aun(:,ie) = sqrt(gamma*p(:,ie)/rho(:,ie)) &
                 + abs( matmul( n , uu(:,:,ie) ) )

    enddo

    do l=1,ms
      lambda(l) = max( aun(l,1) , aun(l,2) )
    enddo

  end subroutine side_diags

  !> Inviscid single valued numerical fluxes
  !!
  !! The numerical fluxes are computed with the Rusanov flux. Notice
  !! that the jump terms are proportional to \f$\lambda\f$, which is
  !! nonzero also when the velocity is zero, because of the positive
  !! sound speed. Thus, this contributes a stabilization terms also in
  !! absence of a mean flow. The net effect is a stabilized flux even
  !! if no \f$c_{11}\f$ term is explicitly introduced in the viscous
  !! numerical fluxes.
  pure subroutine side_numflux(fne,d,ms,n,u,hh,uu,p_p,cc,lambda,uuug)
   integer, intent(in) :: d, ms
   real(wp), intent(in) :: n(:), u(:,:,:), hh(:,:), uu(:,:,:), p_p(:,:), &
     cc(:,:,:), lambda(:), uuug(:,:,:)
   real(wp), intent(out) :: fne(:,:)

   integer :: id, jd, it, iv
   real(wp) :: ff(d,ms,2)

    ! first the centered terms
    ff = u    ! mass
    fne(1,:) = matmul( n , 0.5_wp*(ff(:,:,1)+ff(:,:,2)) )
    do id=1,d ! energy
      ff(id,:,:) = hh*u(id,:,:)
    enddo
    fne(2,:) = matmul( n , 0.5_wp*(ff(:,:,1)+ff(:,:,2)) )
    do jd=1,grid%d ! j-th momentum
      do id=1,grid%d
        ff(id,:,:) = uu(jd,:,:)*u(id,:,:)
      enddo
      ff(jd,:,:) = ff(jd,:,:) + p_p
      fne(2+jd,:) = matmul(n,0.5_wp*(ff(:,:,1)+ff(:,:,2)))
    enddo
    do it=1,ntrcs ! tracers
      do id=1,grid%d
        ff(id,:,:) = cc(it,:,:)*u(id,:,:)
      enddo
      fne(2+grid%d+it,:) = matmul(n,0.5_wp*(ff(:,:,1)+ff(:,:,2)))
    enddo

    ! then the jump terms
    do iv=1,size(uuug,1) ! loop on the variables
      fne(iv,:) = fne(iv,:) + &
        0.5_wp*lambda*(sigma(1)*uuug(iv,:,1)+sigma(2)*uuug(iv,:,2))
    enddo

  end subroutine side_numflux

 end subroutine tens_side_loop
 
!-----------------------------------------------------------------------

 !> Side contributions to the gradients of the diagnostic variables
 !!
 !! Internal sides first, then boundary sides; on boundary sides the
 !! external state is taken from \c uuu_bcs. The numerical flux is a
 !! simple average (LDG / Bassi-Rebay).
 !<
 pure &
 !$ subroutine omp_dummy_5(); end subroutine omp_dummy_5
 subroutine grad_side_loop(grad,grid,base,uuu)
  type(t_grid), intent(in) :: grid
  type(t_base), intent(in) :: base
  real(wp),     intent(in) :: uuu(:,:,:)
  real(wp),     intent(inout) :: grad(:,:,:,:)
 
  integer :: is, ie_s(2), is_loc(2), ie, i_perm, iv, i
  ! uue: diagnosed variables on the two sides; uus: numerical flux
  ! pb : basis traces; gmd: side gamma coefficients; gq: quadrature
  real(wp) :: uue(size(grad,2),base%ms,2), uus(size(grad,2),base%ms), &
    pb(base%pk,base%ms), gmd(base%pk,base%ms,2),       &
    a_ns_db(grid%m), gq(base%pk)
 
   !$omp parallel do schedule(static) &
   !$omp   private( is , ie , ie_s , is_loc , i_perm , pb , uue , uus , &
   !$omp            gmd , a_ns_db , iv , gq , i ) &
   !$omp   shared( grid , base , uuu , atm_ref_s , gammad , grad ) &
   !$omp   default(none)
   internal_side_do: do is=1,grid%ni

     !------------------------------------------------------------------
     !1) Preliminary computations

     ! the two elements of the side
     ie_s = grid%s(is)%ie

     ! side local indexes on the two elements
     is_loc = grid%s(is)%isl

     do ie=1,2 ! loop on the two elements of the side

       ! index of the element -> side permutation (perm. pi)
       i_perm = grid%e(ie_s(ie))%pi(is_loc(ie))

       ! base function evaluations on the side quad. nodes
       pb(:,:) = & ! reorder on second index (side quad. node)
         base%pb(:, & ! first index unchanged
          base%stab(i_perm,:), & ! second index: reordered with pi
          is_loc(ie)) ! third index: local side

       ! evaluate the solution in the quadrature nodes
       call grad_diagnostics(uue(:,:,ie), grid,                      &
         matmul(uuu(:,:,ie_s(ie)),pb),                               &
         atm_ref_s(:,is)%rho, atm_ref_s(:,is)%e, atm_ref_s(:,is)%phi )

       ! side coefficients gamma
       gmd(:,:,ie) = gammad(:, & ! same permutation as for pb
          base%stab(i_perm,:), is_loc(ie))

     enddo
     ! LDG (or Bassi-Rebay) numerical flux
     uus = 0.5_wp*(uue(:,:,1)+uue(:,:,2))
     !------------------------------------------------------------------

     !------------------------------------------------------------------
     !2) Compute the numerical fluxes
     do ie=1,2
       ! scaled normal: side area over element Jacobian determinant
       a_ns_db = (grid%s(is)%a/grid%e(ie_s(ie))%det_b) &
                      * grid%e(ie_s(ie))%n(:,is_loc(ie))
       do iv=1,size(grad,2) ! variable loop
         gq = matmul( gmd(:,:,ie) , uus(iv,:) )
         do i=1,base%pk
           ! critical: two threads may update the same element
           ! through different sides
           !$omp critical
           grad(:,iv,i,ie_s(ie)) = grad(:,iv,i,ie_s(ie)) + &
                                   a_ns_db * gq(i)
           !$omp end critical
         enddo
       enddo
     enddo
     !------------------------------------------------------------------

   enddo internal_side_do
   !$omp end parallel do

   !$omp parallel do schedule(static) &
   !$omp   private( is , ie_s , is_loc , i_perm , pb , uue , uus , &
   !$omp            gmd , a_ns_db , iv , gq , i ) &
   !$omp   shared( grid , base , uuu , atm_ref_s , gammad , &
   !$omp           grad , uuu_bcs ) &
   !$omp   default(none)
   boundary_side_do: do is=grid%ni+1,grid%ns

     !------------------------------------------------------------------
     !1) Preliminary computations

     ! the element of the side
     ie_s(1) = grid%s(is)%ie(1)

     ! side local index on the element
     is_loc(1) = grid%s(is)%isl(1)

       ! index of the element -> side permutation (perm. pi)
       i_perm = grid%e(ie_s(1))%pi(is_loc(1))

       ! base function evaluations on the side quad. nodes
       pb(:,:) = & ! reorder on second index (side quad. node)
         base%pb(:, & ! first index unchanged
          base%stab(i_perm,:), & ! second index: reordered with pi
          is_loc(1)) ! third index: local side

       ! evaluate the solution in the quadrature nodes
       call grad_diagnostics(uue(:,:,1), grid,                       &
         matmul(uuu(:,:,ie_s(1)),pb),                                &
         atm_ref_s(:,is)%rho, atm_ref_s(:,is)%e, atm_ref_s(:,is)%phi )

       ! side coefficients gamma
       gmd(:,:,1) = gammad(:, & ! same permutation as for pb
          base%stab(i_perm,:), is_loc(1))

       ! boundary datum (plays the role of the second element state)
       call grad_diagnostics(uue(:,:,2), grid, uuu_bcs(:,:,is),      &
         atm_ref_s(:,is)%rho, atm_ref_s(:,is)%e, atm_ref_s(:,is)%phi )

     ! LDG (or Bassi-Rebay) numerical flux
     uus = 0.5_wp*(uue(:,:,1)+uue(:,:,2))
     !------------------------------------------------------------------

     !------------------------------------------------------------------
     !2) Compute the numerical fluxes
       a_ns_db = (grid%s(is)%a/grid%e(ie_s(1))%det_b) &
                      * grid%e(ie_s(1))%n(:,is_loc(1))
       do iv=1,size(grad,2) ! variable loop
         gq = matmul( gmd(:,:,1) , uus(iv,:) )
         do i=1,base%pk
           !$omp critical
           grad(:,iv,i,ie_s(1)) = grad(:,iv,i,ie_s(1)) + &
                                   a_ns_db * gq(i)
           !$omp end critical
         enddo
       enddo
     !------------------------------------------------------------------

   enddo boundary_side_do
   !$omp end parallel do

 end subroutine grad_side_loop
 
!-----------------------------------------------------------------------
 
 !> Prefetch the boundary data
 !!
 !! Fill the array \c uuu_bcs collecting the appropriate values either
 !! from the prescribed boundary conditions or, for domain
 !! decomposition sides, from the <tt>ddc\%nd\%recbuf</tt> module
 !! buffers.
 !!
 !! \note This subroutine reads data from the module variable \c ddc,
 !! which thus <em>must be suitably set before calling</em>.
 !!
 !! \warning The lower bound of the input argument uuu_bcs is set to
 !! <tt>grid%ni+1</tt>, in analogy with the module variable with the
 !! same name.
 !<
 pure &
 !$ subroutine omp_dummy_6(); end subroutine omp_dummy_6
 subroutine update_bcs(uuu_bcs,grid,base,bcs,uuu,err)
  type(t_grid),      intent(in) :: grid
  type(t_base),      intent(in) :: base
  type(t_bcs),       intent(in) :: bcs
  real(wp),          intent(in) :: uuu(:,:,:)
  real(wp),          intent(out) :: uuu_bcs(:,:,grid%ni+1:)
  type(t_bcs_error), intent(out) :: err
 
  integer :: is, ie_s, is_loc, i_perm
  real(wp) :: u_r(2,base%ms), pb(base%pk,base%ms), &
    uuui(size(uuu,1),base%ms), n_bnd(grid%m,base%ms)
  character(len=*), parameter :: &
    this_sub_name = 'update_bcs'

   ! err is intent(out) and is assigned below only in the error
   ! branches: initialize it explicitly so that the success path
   ! returns a well defined flag
   err%lerr = .false.
   err%message = ' '

   !$omp parallel do schedule(static) &
   !$omp   private( is , ie_s , is_loc , i_perm , pb , u_r , n_bnd , &
   !$omp            uuui ) &
   !$omp   shared( grid , base , bcs , uuu , atm_ref_s , uuu_bcs , &
   !$omp           ddc , err , coeff_dir , coeff_norm ) &
   !$omp   default(none)
   boundary_side_do: do is=grid%ni+1,grid%ns

     bcs_type: select case(bcs%b_s2bs(is)%p%bc)

      case(b_dir)
       ! Dirichlet: the ghost state is prescribed by the user supplied
       ! coefficient function, given the reference state on the side
       u_r(1,:) = atm_ref_s(:,is)%rho
       u_r(2,:) = atm_ref_s(:,is)%e
       uuu_bcs(:,:,is) = coeff_dir( affmap(grid%s(is),base%xigs) , &
                          u_r = u_r , breg = -grid%s(is)%ie(2) )

      case(b_neu)
       ! we need to reconstruct the internal value (see the details in
       ! tens_side_loop)
       ie_s   = grid%s(is)%ie(1)
       is_loc = grid%s(is)%isl(1)
       i_perm = grid%e(ie_s)%pi(is_loc)
       pb = base%pb( : , base%stab(i_perm,:) , is_loc )
       uuui = matmul( uuu(:,:,ie_s) , pb )
       uuu_bcs(1,:,is) = uuui(1,:) ! symmetric
       uuu_bcs(2,:,is) = uuui(2,:) ! symmetric
       uuu_bcs(3+grid%d:,:,is) = uuui(3+grid%d:,:) ! symmetric
       neumann_type: select case(bcs%b_s2bs(is)%p%btype)
        case(1) ! no-flux, the mesh normal is used
         uuu_bcs(2+1:2+grid%d,:,is) = reflect_vect(          &
             grid%e(ie_s)%n(:,is_loc) , uuui(2+1:2+grid%d,:) )
        case(2) ! no-flux, user defined normal (immersed boundary)
         n_bnd = coeff_norm( affmap(grid%s(is),base%xigs) , &
                             -grid%s(is)%ie(2) )
         ! simple consistency check on the side normal computation
         if(any(abs(n_bnd).gt.1.1_wp)) then
           !$omp critical
           err%lerr = .true.
           write(err%message,'(a,i10,a,i10,a)') &
             "Problems computing the boundary normal on side ",is, &
             ", region ",-grid%s(is)%ie(2),"."
           !$omp end critical
           ! invalidate the whole boundary datum; the error flag has
           ! been set anyway
           uuu_bcs = 0.0_wp
         else
           uuu_bcs(2+1:2+grid%d,:,is) = reflect_vect( n_bnd , &
                                          uuui(2+1:2+grid%d,:) )
         endif
        case(3) ! free flux (zero normal derivative)
         uuu_bcs(2+1:2+grid%d,:,is) = uuui(2+1:2+grid%d,:)
        case default
         !$omp critical
         err%lerr = .true.
         write(err%message,'(a,i10,a,i10,a)') &
      "Unknown boundary type ",bcs%b_s2bs(is)%p%btype," on side ",is,"."
         !$omp end critical
       end select neumann_type

      case(b_ddc)
       ! domain decomposition side: the ghost state has already been
       ! received into the ddc buffers (see bside_send/bside_recv)
       is_loc = ddc%s2nd(1,is) ! used as temporary
       i_perm = ddc%s2nd(2,is) ! used as temporary
       uuu_bcs(:,:,is) = ddc%nd( is_loc )%recbuf(:,:, i_perm )

     end select bcs_type

   enddo boundary_side_do
   !$omp end parallel do
 
 end subroutine update_bcs
 
!-----------------------------------------------------------------------

 !> Prefetch the <em>gradient</em> boundary data
 !!
 !! Fill the array \c nflux_bcs collecting the appropriate values either
 !! from the prescribed boundary conditions or, for domain
 !! decomposition sides, from the <tt>ddc\%nd\%fecbuf</tt> module
 !! buffers.
 !!
 !! \note This subroutine reads data from the module variable \c ddc,
 !! which thus <em>must be suitably set before calling</em>.
 !!
 !! \warning The lower bound of the input argument nflux_bcs is
 !! set to <tt>grid%ni+1</tt>, in analogy with the module variable
 !! with the same name.
 !<
 pure &
 !$ subroutine omp_dummy_7(); end subroutine omp_dummy_7
 subroutine update_fluxbcs(nflux_bcs,grid,base,bcs,uuu,grad)
  type(t_grid), intent(in) :: grid
  type(t_base), intent(in) :: base
  type(t_bcs),  intent(in) :: bcs
  real(wp),     intent(in) :: uuu(:,:,:), grad(:,:,:,:)
  real(wp),     intent(out) :: nflux_bcs(:,:,grid%ni+1:)
 
  integer :: is, ie_s, is_loc, l, id, i
  real(wp) :: u_r(2,base%ms), fb(grid%d,dfu_size,base%ms)
  character(len=*), parameter :: &
    this_sub_name = 'update_fluxbcs'

   ! note: id and i are referenced in the b_ddc branch and
   ! default(none) is in effect, so they must be listed as private
   !$omp parallel do schedule(static) &
   !$omp   private( is , ie_s , is_loc , l , id , i , u_r , fb ) &
   !$omp   shared( grid , base , bcs , atm_ref_s , nflux_bcs , &
   !$omp           coeff_neu , uuu , grad , phc , ddc ) &
   !$omp   default(none)
   boundary_side_do: do is=grid%ni+1,grid%ns

     ie_s = grid%s(is)%ie(1)
     is_loc = grid%s(is)%isl(1)
     bcs_type: select case(bcs%b_s2bs(is)%p%bc)

      ! For Dirichlet bcs we read the numerical flux from the boundary
      ! element (see tens_side_loop for details)
      case(b_dir)
       call elemside_viscflux( fb , ie_s , is_loc ,        &
            uuu(:,:,ie_s) , grad(:,:,:,ie_s) , grid , base )
       do l=1,base%ms
         ! energy, momentum and tracers
         nflux_bcs(:,l,is) = matmul(grid%e(ie_s)%n(:,is_loc),fb(:,:,l))
       enddo

      case(b_neu)
       u_r(1,:) = atm_ref_s(:,is)%rho
       u_r(2,:) = atm_ref_s(:,is)%e
       ! boundary fluxes prescribed by the user supplied coefficient
       fb = coeff_neu( affmap(grid%s(is),base%xigs) , &
                 u_r = u_r , breg = -grid%s(is)%ie(2) )
       do l=1,base%ms
         nflux_bcs(:,l,is) = matmul(grid%e(ie_s)%n(:,is_loc),fb(:,:,l))
       enddo

      case(b_ddc)
       ! we have to compute the complete numerical flux: Bassi and
       ! Rebay flux (this must be consistent with tens_side_loop)
       call elemside_viscflux( fb , ie_s , is_loc ,        &
            uuu(:,:,ie_s) , grad(:,:,:,ie_s) , grid , base )
       ! the neighbour indexes are loop invariant: hoisted out of the
       ! quadrature node loop
       id = ddc%s2nd(1,is)
       i  = ddc%s2nd(2,is)
       do l=1,base%ms
         ! energy, momentum and tracers
         nflux_bcs(:,l,is) = 0.5_wp * ( ddc%nd(id)%fecbuf(:,l,i ) + &
                     matmul( grid%e(ie_s)%n(:,is_loc) , fb(:,:,l) ) )
       enddo

     end select bcs_type

   enddo boundary_side_do
   !$omp end parallel do
 
 end subroutine update_fluxbcs
 
!-----------------------------------------------------------------------
 
 !> Viscous fluxes on the side of one element (at the quad nodes)
 !!
 !! \note These are the <em>internal</em> fluxes, prior to the
 !! computation of any numerical flux.
 !<
 pure subroutine elemside_viscflux(vf,ie,isl,uue,grae,grid,base)
  integer,      intent(in) :: ie, isl
  real(wp),     intent(in) :: uue(:,:), grae(:,:,:) ! local solution
  type(t_grid), intent(in) :: grid
  type(t_base), intent(in) :: base
  real(wp),     intent(out) :: vf(:,:,:)
 
  integer :: iside, perm, idir
  real(wp) :: phi_s(base%pk,base%ms), qs(size(uue,1),base%ms), &
    dens(base%ms), mom(grid%d,base%ms), vel(grid%d,base%ms),   &
    kin(base%ms), pres_p(base%ms), pres(base%ms)
  type(t_turb_input) :: turb_in

   ! workspace for the turbulence model input
   allocate( turb_in%gsuu(grid%d,grid%d,base%ms) , &
             turb_in%gtt (       grid%d,base%ms) , &
             turb_in%gth (       grid%d,base%ms) , &
             turb_in%gcc (grid%d, ntrcs,base%ms) )

   ! global index of the requested side
   iside = grid%e(ie)%is(isl)

   ! trace of the local solution at the side quad. nodes, with the
   ! side quad. node ordering given by the element -> side permutation
   perm = grid%e(ie)%pi(isl)
   phi_s = base%pb( : , base%stab(perm,:) , isl )
   qs = matmul( uue , phi_s )

   ! dynamic/thermodynamic diagnostics: perturbations plus the
   ! reference atmospheric state on the side
   dens = qs(1,:) + atm_ref_s(:,iside)%rho
   mom  = qs(2+1:2+grid%d,:)
   do idir=1,grid%d
     vel(idir,:) = mom(idir,:)/dens
   enddo
   ! kinetic energy
   kin    = 0.5_wp * sum( mom**2 , 1 ) / dens
   ! pressure perturbation
   pres_p = (phc%rgas/phc%cv) &
            * ( qs(2,:) - kin - atm_ref_s(:,iside)%phi*qs(1,:) )
   ! total pressure
   pres   = pres_p + atm_ref_s(:,iside)%p

   ! nodal viscous fluxes from the turbulence model
   call set_turb_input(turb_in, grid%d,base,grae,phi_s)
   call turb_mod(vf , grid%d, el_linear_size(grid%e(ie)), &
     affmap(grid%s(iside),base%xigs), dens, pres, vel, turb_in )

   deallocate( turb_in%gsuu , turb_in%gtt , turb_in%gth , turb_in%gcc )

 end subroutine elemside_viscflux
 
!-----------------------------------------------------------------------
 
 !> Evaluate all the variables that are required to compute the
 !! turbulent fluxes.
 !!
 !! This subroutine fills a \c t_turb_input object diagnosing all the
 !! information that is required by a turbulence model. Such
 !! information can be defined pointwise or can imply element
 !! averages.
 !!
 !! \note The spatial position at which the diagnostics are computed
 !! is determined implicitly by the input argument base_p; in
 !! particular, this determines whether element or boundary values are
 !! computed.
 !!
 !! \note The element averages recompute the linear combinations of
 !! coefficients and basis function; this is necessary since the
 !! points specified in \c base_p are generic points, while the
 !! averages must be computed on the element quadrature points.
 !<
 pure subroutine set_turb_input(ti, d,base,grae,base_p)
  integer, intent(in) :: d !< space dimension
  type(t_base), intent(in) :: base !< FE basis
  real(wp), intent(in) :: grae(:,:,:) !< element gradients
  real(wp), intent(in) :: base_p(:,:) !< basis functions
  type(t_turb_input), intent(inout) :: ti
 
  integer :: id, jd
  real(wp) :: s2(base%m)
  character(len=*), parameter :: &
    this_sub_name = 'set_turb_input'

   ! evaluate the gradients at the quad. nodes
   !
   ! ti%gsuu is the symmetrized velocity gradient and is symmetric in
   ! its first two indexes: only the upper triangle (jd>=id) is
   ! computed explicitly, while the lower triangle is copied from
   ! entries filled during earlier iterations of the outer loop (for
   ! jd<id, gsuu(jd,id,:) was set when the outer index was jd), so
   ! the iteration order of the outer loop must not be changed.
   ! The slots of grae follow the layout of the module variable grad:
   ! 1 -> temperature, 1+1:1+d -> velocity, 2+d -> theta, 3+d: -> tracers.
   do id=1,d
     do jd=1,id-1; ti%gsuu(id,jd,:) = ti%gsuu(jd,id,:); enddo
     do jd=id,d
       ti%gsuu(id,jd,:) = matmul(grae(jd,1+id,:)+grae(id,1+jd,:),base_p)
     enddo
     ti%gtt(id,:) = matmul( grae(id, 1 ,:) , base_p )
     ti%gth(id,:) = matmul( grae(id,2+d,:) , base_p )
     ti%gcc(id,:,:) = matmul( grae(id,3+d:,:) , base_p )
   enddo
 
   ! element average of the deformation rate
   ! note that base%p (element quadrature points) is used here
   ! instead of base_p, since the average is an element integral
   s2 = 0.0_wp
   do id=1,d
     do jd=1,d
       s2 = s2 + matmul(grae(jd,1+id,:)+grae(id,1+jd,:),base%p)**2
     enddo
   enddo
   ti%s2_e = sum(base%wg*s2)/base%me%vol

   ! element average of the vertical derivative of theta
   ! (the vertical direction is assumed to be the last one, index d)
   ti%gzth_e = sum(base%wg*matmul(grae(d,2+d,:),base%p))/base%me%vol
   ! notice that exploiting the orthogonality of the basis we could
   ! use the simpler form
   !   ti%gzth_e = grae(d,2+d,1)*base_p(1,1)

 end subroutine set_turb_input
 
!-----------------------------------------------------------------------
 
 !> Compute the Courant number.
 !!
 !! This is a small utility which computes the maximum Courant number
 !! \f{displaymath}{
 !!  C = \frac{u\Delta t}{h}.
 !! \f}
 !! Two numbers are computed: in one case the velocity is the sum of
 !!  the sound speed and the advective velocity \f$|\underline{u}|\f$,
 !! while in the second case only the advective velocity is
 !! considered; in both cases, the element size is computed with the
 !! function <tt>el_linear_size</tt> of <tt>mod_grid</tt>.
 !<
 pure &
 !$ subroutine omp_dummy_8(); end subroutine omp_dummy_8
 ! NOTE: the two lines above implement an OpenMP conditional
 ! compilation trick. Without OpenMP the "!$" line is a comment, so
 ! the "pure" prefix continues onto the statement below and
 ! compute_courant is pure. With OpenMP the "!$" line becomes active
 ! code, "pure" attaches to the dummy subroutine omp_dummy_8, and
 ! compute_courant is non-pure (as required, since it then contains
 ! OpenMP constructs). Do not separate these lines.
 subroutine compute_courant(c_tot,c_adv,grid,base,dt,uuu)
  type(t_grid), intent(in) :: grid !< computational grid
  type(t_base), intent(in) :: base !< FE basis
  real(wp), intent(in) :: dt, uuu(:,:,:) !< time step; modal solution
  real(wp), intent(out) :: c_tot, c_adv !< acoustic+advective / advective Courant numbers
 
  integer :: ie, id
  real(wp) :: uuug(2+grid%d,base%m), rho(base%m), u(grid%d,base%m),  &
    ekin(base%m), p_p(base%m), p(base%m), uu_norm(base%m), a(base%m),&
    he, c_tot_e, c_adv_e, rgcv, gamma
  character(len=*), parameter :: &
    this_sub_name = 'compute_courant'

   ! initialize below any possible Courant number, so that the max
   ! reduction over the elements yields the true maximum
   c_tot = -1.0_wp
   c_adv = -1.0_wp
   !$omp parallel &
   !$omp   private( ie , uuug , rho , u , ekin , p_p , p , uu_norm , &
   !$omp           id , a , he , c_tot_e , c_adv_e , rgcv , gamma ) &
   !$omp   shared( grid , base , uuu , atm_ref_e , dt , phc ) &
   !$omp   reduction( max : c_tot , c_adv) &
   !$omp   default(none)
   rgcv = phc%rgas / phc%cv
   gamma = phc%gamma
   !$omp do schedule(static)
   elem_do: do ie=1,grid%ne

     !------------------------------------------------------------------
     !1) Solution in physical space (ignoring tracers)
     uuug = matmul( uuu(:2+grid%d,:,ie) , base%p )
     ! total density: deviation plus reference state
     rho = uuug(1,:) + atm_ref_e(:,ie)%rho
     ! momentum components
     u   = uuug(3:,:)

     ! kinetic energy
     ekin = 0.5_wp * sum( u**2 , 1 ) / rho
     ! pressure perturbation
     p_p  = rgcv*( uuug(2,:) - ekin - atm_ref_e(:,ie)%phi*uuug(1,:) )
     ! total pressure
     p    = p_p + atm_ref_e(:,ie)%p
     ! velocity
     uu_norm = 0.0_wp
     do id=1,grid%d
       uu_norm = uu_norm + (u(id,:)/rho)**2
     enddo
     uu_norm = sqrt(uu_norm)
     ! sound speed
     a = sqrt(gamma*p/rho)
     !------------------------------------------------------------------

     !------------------------------------------------------------------
     !2) Element maximum
     he = el_linear_size(grid%e(ie))
     c_tot_e = dt/he*maxval(a+uu_norm)
     c_adv_e = dt/he*maxval(  uu_norm)
     !------------------------------------------------------------------

     !------------------------------------------------------------------
     !3) Update global maximum
     c_tot = max(c_tot,c_tot_e)
     c_adv = max(c_adv,c_adv_e)
     !------------------------------------------------------------------

   enddo elem_do
   !$omp end do
   !$omp end parallel
 
 end subroutine compute_courant
 
!-----------------------------------------------------------------------
 
 !> Additional diagnostics
 !!
 !! This subroutine can be used to compute additional diagnostics
 !! which require the data structures defined in this module
 !! (especially the gradients) and whose computation in other parts of
 !! the code would otherwise be awkward. The diagnostics are expressed
 !! as coefficients of the finite element basis.
 !!
 !! \note The diagnostics are not, in general, polynomial functions,
 !! so that the representation in terms of the finite element basis is
 !! not exact. In particular, it is possible that the finite element
 !! representation displays negative values also for diagnostics that
 !! should be nonnegative by definition, and such negative values
 !! could appear also at the quadrature points. However, this is not a
 !! problem for the code, which never uses the finite element
 !! representation of the coefficients, but only the coefficient
 !! values at the quadrature nodes.
 !!
 !! \note This subroutine includes a call to \c dgcomp_grad to make
 !! sure that the gradient is consistent with the solution state \c
 !! uuu provided in input. Sometimes this is not necessary, because
 !! the module variable \c grad is left already in the correct state
 !! by a previous call to \c dgcomp_tens. However, this is not always
 !! the case; for instance, this is not the case when using a
 !! Runge-Kutta time integrator.
 !<
 subroutine additional_diagnostics(diags,grid,base,bcs,uuu,err, &
                                   viscous_flow)
  type(t_grid), intent(in) :: grid
  type(t_base), intent(in) :: base
  type(t_bcs),  intent(in) :: bcs
  real(wp),     intent(in) :: uuu(:,:,:)
  logical,      intent(in) :: viscous_flow
  ! diags layout (first index): 1 -> s_abs, 2 -> ri, 3 -> nu,
  ! 4 -> diss, 5 -> s2_e (only the fields allocated by turb_mod are
  ! filled; the others remain zero); allocated with zero size for an
  ! inviscid flow
  real(wp), allocatable, intent(out) :: diags(:,:,:)
  ! NOTE(review): err is assigned only through dgcomp_grad in the
  ! viscous branch; for an inviscid flow it keeps the intent(out)
  ! default initialization of t_bcs_error -- confirm that this type
  ! has sensible default values.
  type(t_bcs_error), intent(out) :: err
 
  integer :: ie, id
  real(wp) :: &
    uuug(2+grid%d,base%m), rho(base%m), u(grid%d,base%m), &
    ekin(base%m), p_p(base%m), p(base%m), uu(grid%d,base%m)
  real(wp) :: fem(grid%d,dfu_size,base%m)
  type(t_turb_input) :: turb_in
  type(t_turb_diags) :: turb_diags
  character(len=*), parameter :: &
    this_sub_name = 'additional_diagnostics'

   if(viscous_flow) then

     ! update the module variable grad (required e.g. with
     ! Runge-Kutta time integrators, see the header of this
     ! subroutine)
     call dgcomp_grad(grid,base,bcs,uuu,err)

     allocate(diags(5,base%pk,grid%ne)); diags = 0.0_wp
     ! work space for the gradients required by the turbulence model
     allocate( turb_in%gsuu(grid%d,grid%d,base%m) , &
               turb_in%gtt (       grid%d,base%m) , &
               turb_in%gth (       grid%d,base%m) , &
               turb_in%gcc (grid%d, ntrcs,base%m) )
     elem_do: do ie=1,grid%ne

       ! evaluate the solution in the quadrature nodes
       uuug = matmul( uuu(:2+grid%d,:,ie) , base%p )
       ! total density: deviation plus reference state
       rho = uuug(1,:) + atm_ref_e(:,ie)%rho
       u   = uuug(2+1:2+grid%d,:)

       ! kinetic energy
       ekin = 0.5_wp * sum( u**2 , 1 ) / rho
       ! pressure perturbation
       p_p  = (phc%rgas/phc%cv)                                    &
              * ( uuug(2,:) - ekin - atm_ref_e(:,ie)%phi*uuug(1,:) )
       ! total pressure
       p    = p_p + atm_ref_e(:,ie)%p
       ! velocity
       do id=1,grid%d
         uu(id,:) = u(id,:)/rho
       enddo

       ! evaluate the turbulence model, collecting its diagnostics
       call set_turb_input(turb_in, grid%d,base,grad(:,:,:,ie),base%p)
       call turb_mod(fem , grid%d, el_linear_size(grid%e(ie)), &
                     affmap(grid%e(ie),base%xig),              &
                     rho, p, uu, turb_in, turb_diags)

       ! the projections are simple because the basis is orthonormal
       if(allocated(turb_diags%s_abs)) &
         diags(1,:,ie) = matmul( base%p , base%wg*turb_diags%s_abs )
       if(allocated(turb_diags%ri   )) &
         diags(2,:,ie) = matmul( base%p , base%wg*turb_diags%ri    )
       if(allocated(turb_diags%nu   )) &
         diags(3,:,ie) = matmul( base%p , base%wg*turb_diags%nu    )
       if(allocated(turb_diags%diss )) &
         diags(4,:,ie) = matmul( base%p , base%wg*turb_diags%diss  )
       if(allocated(turb_diags%s2_e )) &
         diags(5,:,ie) = matmul( base%p , base%wg*turb_diags%s2_e  )

     enddo elem_do
     deallocate( turb_in%gsuu , turb_in%gtt , turb_in%gth , turb_in%gcc)

   else

     ! inviscid flow: no diagnostics are produced (zero-size array)
     allocate(diags(0,base%pk,grid%ne))

   endif

 end subroutine additional_diagnostics

!-----------------------------------------------------------------------
 
 !> Reflect a vector \f$\underline{v}\f$ with respect to a surface
 !! with normal \f$\underline{n}\f$.
 !!
 !! To construct the reflected vector, we first represent the given
 !! vector as
 !! \f{equation}{
 !! \underline{v} = \underline{n}\otimes\underline{n}\,\underline{v}
 !!   + \left[ \underline{v} -
 !!   \underline{n}\otimes\underline{n}\,\underline{v} \right]
 !! \f}
 !! and then change the sign of the first component (the normal
 !! component), yielding
 !! \f{equation}{
 !! \underline{v}^{\mathcal{R}} = \left[ \mathcal{I} -
 !! 2\underline{n}\otimes\underline{n} \right] \underline{v}.
 !! \f}
 pure function reflect_vect_1n(n,v) result(vr)
  real(wp), intent(in) :: n(:), v(:,:)
  real(wp) :: vr(size(v,1),size(v,2))
 
  real(wp) :: nn(size(n),size(n))

   ! outer product n (x) n, built with spread instead of explicit
   ! loops: spread(n,2,..) varies along rows, spread(n,1,..) along
   ! columns, so nn(i,j) = n(i)*n(j)
   nn = spread(n, dim=2, ncopies=size(n)) &
      * spread(n, dim=1, ncopies=size(n))

   ! reflection operator: [ I - 2 n (x) n ] v
   vr = v - 2.0_wp*matmul(nn,v)

 end function reflect_vect_1n
 
!-----------------------------------------------------------------------

 !> As \c reflect_vect but with a space dependent normal.
 pure function reflect_vect_var(n,v) result(vr)
  real(wp), intent(in) :: n(:,:), v(:,:)
  real(wp) :: vr(size(v,1),size(v,2))

  integer :: l, d
  real(wp) :: nn(size(n,1),size(n,1))

   d = size(n,1)
   ! one reflection per point, each with its own normal n(:,l)
   do l=1,size(n,2)
     ! outer product n(:,l) (x) n(:,l): nn(i,j) = n(i,l)*n(j,l)
     nn = spread(n(:,l), dim=2, ncopies=d) &
        * spread(n(:,l), dim=1, ncopies=d)
     ! reflection operator: [ I - 2 n (x) n ] v
     vr(:,l) = v(:,l) - 2.0_wp*matmul(nn,v(:,l))
   enddo

 end function reflect_vect_var

!-----------------------------------------------------------------------
 
 !> Simple function to diagnose the variables which we compute the
 !! gradient of
 !!
 !! The details concerning the discrete gradients are given in the
 !! documentation of the module variable \c grad.
 !!
 !! The potential temperature is computed as
 !! \f{displaymath}{
 !!  \theta = \left(\frac{p_s}{R}\right)^\kappa \frac{T}{(\rho
 !!  T)^\kappa}.
 !! \f}
 !!
 !! \note All the input/output is intended at physical points (i.e.
 !! <em>not</em> in modal representation).
 !<
 pure subroutine grad_diagnostics( gd, grid, consv, &
                atm_ref_rho, atm_ref_e, atm_ref_phi )
  type(t_grid), intent(in) :: grid
  real(wp), intent(in) :: &
    consv(:,:), & !< conservative variables (deviations)
    !> reference atmosphere (at physical points)
    atm_ref_rho(:), atm_ref_e(:), atm_ref_phi(:)
  real(wp), intent(out) :: gd(:,:) !< diagnostics
 
  integer :: k, itr
  real(wp), dimension(size(consv,2)) :: dens, en, temp, pottemp
  real(wp), dimension(grid%d,size(consv,2)) :: mom, vel

   ! total fields: add the reference state to the deviations
   dens = consv(1,:) + atm_ref_rho
   en   = consv(2,:) + atm_ref_e
   mom  = consv(2+1:2+grid%d,:)

   ! velocity u = m/rho
   do k=1,grid%d
     vel(k,:) = mom(k,:)/dens
   enddo
   ! temperature, from the total energy minus kinetic and potential
   ! contributions
   temp = (en/dens - 0.5_wp*sum(vel**2,1) - atm_ref_phi) / phc%cv
   ! potential temperature (see the formula in the header comment)
   pottemp = (phc%p_s/phc%rgas)**phc%kappa * temp * (dens*temp)**(-phc%kappa)

   ! collect values; layout: [ T , u(1:d) , theta , tracers ]
   gd(   1        ,:) = temp
   gd(1+1:1+grid%d,:) = vel
   gd( grid%d+2   ,:) = pottemp
   do itr=1,ntrcs ! tracer concentrations c = (rho c)/rho
     gd(2+grid%d+itr,:) = consv(2+grid%d+itr,:)/dens
   enddo
 
 end subroutine grad_diagnostics
 
!-----------------------------------------------------------------------

end module mod_dgcomp_rhs

