!! Copyright (C) 2009,2010,2011,2012  Marco Restelli
!!
!! This file is part of:
!!   LDGH -- Local Hybridizable Discontinuous Galerkin toolkit
!!
!! LDGH is free software: you can redistribute it and/or modify it
!! under the terms of the GNU General Public License as published by
!! the Free Software Foundation, either version 3 of the License, or
!! (at your option) any later version.
!!
!! LDGH is distributed in the hope that it will be useful, but WITHOUT
!! ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
!! or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
!! License for more details.
!!
!! You should have received a copy of the GNU General Public License
!! along with LDGH. If not, see <http://www.gnu.org/licenses/>.
!!
!! author: Marco Restelli                   <marco.restelli@gmail.com>


!> \brief 
!!
!! Newton method for nonlinear systems. 
!!
!! \n
!!
!! Given the nonlinear problem
!! \f{displaymath}{
!!  f(x) = 0,
!! \f}
!! the Newton method is
!! \f{displaymath}{
!!  \begin{array}{rcl}
!!  z^{(k)} & = & \displaystyle -J_f^{-1} f(x^{(k)}) \\[2mm]
!!  x^{(k+1)} & = & x^{(k)} + \omega_k z^{(k)}
!!  \end{array}
!! \f}
!! for suitably chosen relaxation parameters \f$\omega_k\f$.
!! In practice, the implementation relies on three subroutines:
!! <ul>
!!  <li> <tt>pres</tt>: compute the <em>preconditioned residual</em>
!!  \f$z\f$;
!!  <li> <tt>nres</tt>: compute the residual norm \f$\|f(x)\|\f$.
!!  <li> <tt>norm</tt>: compute the increment norm \f$\|x\|\f$; this
!!  can be used both for the solution and the increment.
!! </ul>
!! \note In fact, by proper definition of <tt>pres</tt>, any fixed
!! point method can be recovered.
!! \note The increment \f$\omega_kz\f$, or equivalently the
!! preconditioned residual \f$z\f$, is assumed to be "dimensionally"
!! homogeneous to \f$x\f$, while the residual \f$f(x)\f$ is not. In
!! fact, the residual \f$f(x)\f$ does not appear in this module
!! directly, but only as preconditioned residual or residual norm.
!!
!! \section globalization Globalization and Newton-Krylov techniques
!!
!! <em>Globalization</em> refers to the introduction of algorithms
!! which provide additional stability to the Newton method when the
!! approximate solution is far from the correct one and the function
!! \f$f\f$ is not well approximated by its differential.
!!
!! <em>Newton-Krylov</em> methods are obtained when the Newton method
!! is combined with a Krylov method for the solution of the linear
!! problem required at each iteration. Indeed, more generally, the
!! idea of the method can be applied whenever an iterative solver is
!! selected for the solution of the linear problem, and can be
!! summarized as the ability to pick an optimal tolerance for the
!! linear solver, given the accuracy of each Newton iterate. For the
!! first iterates, the solution computed by the Newton method would
!! not be very accurate anyway, so that it makes sense to choose a
!! rough tolerance for the iterative solver. On the contrary, once the
!! Newton method gets close to the solution, each Newton iterate
!! provides a very accurate approximate solution (ideally, the method
!! converges quadratically), and thus it makes sense to solve the
!! linear system with a stringent tolerance.
!!
!! In the present implementation we refer to <a
!! href="http://dx.doi.org/10.1137/0917003">[Eisenstat, Walker,
!! 1996]</a> for both globalization and Newton-Krylov aspects
!! (additional references are also <a
!! href="http://dx.doi.org/10.1137/0911026">[Brown, Saad, 1990]</a>
!! and <a href="http://dx.doi.org/10.1137/0804022">[Eisenstat, Walker,
!! 1994]</a>), although our globalization procedure is rather
!! simplified.
!!
!! The globalization is implemented by a simple relaxation parameter
!! \f$\omega_k\f$, which is halved whenever the computed increment would
!! result in an increase of the residual (up to a correction
!! coefficient) and doubled once the iteration is concluded, in view
!! of the subsequent iteration.
!!
!! Concerning the Newton-Krylov technique, all that it requires from
!! the viewpoint of the Newton solver is the computation of the
!! corresponding forcing terms \f$\eta_k\f$, which are then passed to
!! \c pres so that, when a particular implementation uses an iterative
!! solver and adapts the tolerance depending on the forcing term, a
!! Newton-Krylov method results. In more detail, assuming that
!! \f$\eta_0\in[0,1)\f$ has been specified, and introducing the linear
!! residual
!! \f{displaymath}{
!!  r_{lin}^{(k)} = f(x^{(k)}) + J_fz^{(k)},
!! \f}
!! we proceed as follows.
!! <ul>
!!   <li> Compute \f$z^{(k)}\f$ solving the linear system with the
!!   stopping criterion
!!   \f{displaymath}{
!!    \|r_{lin}^{(k)}\| = \| f(x^{(k)}) + J_fz^{(k)} \| \leq \tilde{\eta}_k\|
!!    f(x^{(k)}) \|.
!!   \f}
!!   <li> Following (2.1) and (2.2) of <a
!!   href="http://dx.doi.org/10.1137/0917003">[Eisenstat, Walker,
!!   1996]</a>, set
!!   \f{displaymath}{
!!    \eta_{k+1} = \frac{ \| f(x^{(k)}+z^{(k)}) - r_{lin}^{(k)}\| }
!!    {\| f(x^{(k)}) \|}
!!   \f}
!!   or alternatively
!!   \f{displaymath}{
!!    \eta_{k+1} = \frac{ \left| \| f(x^{(k)}+z^{(k)}) \| -
!!    \|r_{lin}^{(k)}\| \right| }{\| f(x^{(k)}) \|}.
!!   \f}
!!   The rationale behind the first choice can be seen by noting that
!!   \f{displaymath}{
!!     f(x^{(k)}+z^{(k)}) - r_{lin}^{(k)} = f(x^{(k)}+z^{(k)})
!!     -f(x^{(k)}) - J_fz^{(k)} = \Delta f - J_f\Delta x
!!   \f}
!!   so that \f$\eta_{k+1}\f$ will be small when \f$J_f\f$ provides a
!!   good approximation of \f$f\f$ and large otherwise; the rationale
!!   behind the second choice can be understood by noting that
!!   \f{displaymath}{
!!    \left| \| f(x^{(k)}+z^{(k)}) \| - \|r_{lin}^{(k)}\| \right| \leq
!!    \| f(x^{(k)}+z^{(k)}) - r_{lin}^{(k)}\| 
!!   \f}
!!   so that the second choice results in solutions that are at least
!!   as accurate as the first one.
!!   <li> Some safeguards should now be applied to \f$\eta_{k+1}\f$ to
!!   avoid unnecessarily small values as well as unreasonably large
!!   ones; "reasonable" bounds:
!!   \f{displaymath}{
!!    \tilde{\eta}_{k+1} = \max( 10^4\epsilon_M , \min(
!!    \eta_{k+1} , 0.9) ).
!!   \f}
!!   <li> Determine \f$\omega_k\f$ according to the globalization
!!   procedure.
!!   <li> Set
!!   \f{displaymath}{
!!    x^{(k+1)} = x^{(k)} +\omega_k z^{(k)}.
!!   \f}
!!   <li> We are now in position to restart the loop solving the next
!!   linear problem for \f$z^{(k+1)}\f$ with the tolerance
!!   \f{displaymath}{
!!    \|r_{lin}^{(k+1)}\| = \| f(x^{(k+1)}) + J_fz^{(k+1)} \| \leq
!!    \tilde{\eta}_{k+1}\| f(x^{(k+1)}) \|.
!!   \f}
!! </ul>
!!
!! \note The Newton solver passes the complete forcing term to the
!! linear one \f$\tilde{\eta}_k\|f(x^{(k)})\|\f$, so that to obtain a
!! Newton-Krylov method the linear solver must use an
!! <em>absolute</em> tolerance.
!!
!! \note The computations of \f$\omega_k\f$ and \f$\eta_k\f$ above are
!! independent from one another; it could be expected, however, that
!! when the linearization gives a good approximation one will find
!! \f$\omega_k\approx1\f$ and \f$\eta_k\approx0\f$, and vice versa. In
!! a more sophisticated formulation, the two coefficients can be
!! connected, as for instance in <a
!! href="http://dx.doi.org/10.1137/0917003">[Eisenstat, Walker,
!! 1996]</a>.
!!
!! \note Care must be taken in the specification of matching norms for
!! the linear and the nonlinear solvers. Typically, the linear solver
!! will refer to \f$\|\cdot\|_2\f$, since this quantity can be
!! obtained as a by-product of the computations; on the other hand,
!! \f$\|f(x)\|\f$ is any norm provided by the user in <tt>nres()</tt>.
!! The user-provided subroutine <tt>pres()</tt> must handle such
!! quantities consistently.
!!
!! \subsection preconditioning Effect of preconditioning
!!
!! When the linear problem is solved with a <em>preconditioned</em>
!! iterative solver, one effectively solves
!! \f{displaymath}{
!!  P^{-1}_kJ_f z^{(k)} = - P^{-1} f(x^{(k)}),
!! \f}
!! where it is assumed that the preconditioner can be different at
!! each Newton iteration. Noting that for a matrix \f$A\f$ which does
!! not depend on \f$x\f$ we have
!! \f{displaymath}{
!!  J_{Af} = A\,J_f,
!! \f}
!! we see that each Newton iteration can be regarded as approximating
!! the problem
!! \f{displaymath}{
!!  P^{-1}_k f(x) = 0,
!! \f}
!! which is equivalent to the original one. Thus, we can regard the
!! resulting algorithm as a sequence of single iteration Newton
!! solvers applied to problems which all share the same solution and
!! where each Newton solver provides the initial guess of the
!! following one. The Newton-Krylov procedure can still be applied,
!! provided that the following points are taken into account.
!! <ul>
!!   <li> The underlying hypothesis is that the "quality" of the
!!   preconditioning is comparable between subsequent iterations, i.e.
!!   \f{displaymath}{
!!    \| P^{-1}_k\Delta f^{(k)} - P^{-1}_kJ_f\Delta x^{(k)} \| \approx
!!    \| P^{-1}_{k+1}\Delta f^{(k+1)} - P^{-1}_{k+1}J_f\Delta x^{(k+1)} \|.
!!   \f}
!!   <li> The norm computed by <tt>nres</tt> includes the
!!   preconditioning used by the linear solver, i.e. \f$\|
!!   P^{-1}_kf(x) \|\f$.
!!   <li> The linear residual norm includes the preconditioning, i.e.
!!   \f$\| P^{-1}_kr_{lin}^{(k)} \|\f$ (notice that this is what is
!!   commonly returned by a preconditioned Krylov solver).
!! </ul>
!!
!! \section stop Stopping criterion
!!
!! The present implementation considers a stopping criterion based on
!! the increment norm and the tolerance is always considered as an
!! absolute value.
!<----------------------------------------------------------------------
module mod_newton

!-----------------------------------------------------------------------

 use mod_messages, only: &
   mod_messages_initialized, &
   error,   &
   warning, &
   info

 use mod_kinds, only: &
   mod_kinds_initialized, &
   wp

 use mod_mpi_utils, only: &
   mod_mpi_utils_initialized, &
   mpi_logical, mpi_integer, wp_mpi, &
   mpi_bcast

 use mod_state_vars, only: &
   mod_state_vars_initialized, &
   c_stv

!-----------------------------------------------------------------------
 
 implicit none

!-----------------------------------------------------------------------

! Module interface

 public :: &
   mod_newton_constructor, &
   mod_newton_destructor,  &
   mod_newton_initialized, &
   t_nwt_params, c_nwt, &
   newton

 private

!-----------------------------------------------------------------------

 ! Module types and parameters

 !> Newton problem
 !!
 !! Notice that \c c_nwt objects work as wrapper of \c c_stv ones, so
 !! that they can be used to store additional information, mostly
 !! concerning the residuals. See also the comments in \c newton.
 !!
 !! \note The solver parameters are not included in this type since
 !! there is no reason to duplicate them. Moreover, they should have
 !! intent in.
 type, abstract :: c_nwt
  !> wrapped state vector: the Newton unknown \f$x\f$; allocated
  !! inside \c newton using the caller's \c c_stv variable as source
  class(c_stv), allocatable :: x
 contains
  !> Preconditioned residual \f$z=-J_f^{-1} f(x)\f$
  procedure(i_pres), deferred, pass(z) :: pres
  !> Residual norm \f$\|f(x)\|\f$
  procedure(i_nres), deferred, pass(x) :: nres
  !> Increment norm \f$\|x\|\f$
  procedure(i_norm), deferred, pass(x) :: norm
  !> Copy a \c c_stv object into a \c c_nwt one
  procedure(i_defy), deferred, pass(y) :: defy
  !> Print some data (for debugging)
  procedure, pass(x) :: show => nwt_show
  !> Copy constructor (compiler bug: see \c mod_state_vars)
  procedure(i_source), deferred, pass(x) :: source
 end type c_nwt

 !> Preconditioned residual
 !!
 !! \note Ideally, \c x should be <tt>intent(in)</tt> and \c z should
 !! be <tt>intent(out)</tt>. However, this subroutine could set an
 !! internal field in \c x (typically to avoid recomputing the
 !! residual) and the allocation status of the fields of \c z might be
 !! preserved, so both arguments are given <tt>intent(inout)</tt>.
 abstract interface
  subroutine i_pres(z,x,eta,linres,liniter)
   import :: wp, c_nwt
   implicit none
   class(c_nwt), intent(inout) :: x
   class(c_nwt), intent(inout) :: z
   real(wp), intent(in)  :: eta    !< forcing term
   real(wp), intent(out) :: linres !< linear solver residual
   integer, intent(out) :: liniter !< linear solver iterations
  end subroutine i_pres
 end interface

 !> Residual norm \f$\|f(x)\|\f$
 !!
 !! \note \c x is <tt>intent(inout)</tt> rather than
 !! <tt>intent(in)</tt>, presumably so that implementations may cache
 !! intermediate results (e.g. the residual itself) inside \c x for
 !! reuse by subsequent calls — TODO confirm against concrete
 !! implementations (compare the analogous note on \c i_pres).
 abstract interface
  function i_nres(x) result(n)
   import :: wp, c_nwt
   implicit none
   class(c_nwt), intent(inout) :: x
   real(wp) :: n
  end function i_nres
 end interface

 !> Norm
 abstract interface
  function i_norm(x) result(n)
   import :: wp, c_nwt
   implicit none
   class(c_nwt), intent(in) :: x
   real(wp) :: n
  end function i_norm
 end interface

 !> Definition
 abstract interface
  subroutine i_defy(y,x)
   import :: wp, c_stv, c_nwt
   implicit none
   class(c_stv), intent(in)    :: x
   class(c_nwt), intent(inout) :: y
  end subroutine i_defy
 end interface

 !> Copy constructor: allocate \c y as a copy of \c x
 !! (workaround for a compiler bug: see \c mod_state_vars)
 abstract interface
  pure subroutine i_source(y,x)
   import :: c_nwt
   implicit none
   class(c_nwt), intent(in)               :: x
   class(c_nwt), allocatable, intent(out) :: y
  end subroutine i_source
 end interface

 !> Solver parameters
 type t_nwt_params
   integer  :: nmax      !< maximum number of iterations
   real(wp) :: tolerance !< tolerance
 end type t_nwt_params

 ! Module variables

 ! public members
 logical, protected ::               &
   mod_newton_initialized = .false.
 ! private members
 character(len=*), parameter :: &
   this_mod_name = 'mod_newton'

!-----------------------------------------------------------------------

contains

!-----------------------------------------------------------------------

 !> Module constructor: verify that all prerequisite modules are
 !! initialized and mark this module as initialized.
 subroutine mod_newton_constructor()

  character(len=*), parameter :: &
    this_sub_name = 'constructor'

   !Consistency checks ---------------------------
   ! All the modules this one depends on must be initialized first;
   ! use plain logical operators instead of comparing with .eqv.
   if( .not.( mod_messages_initialized   .and. &
              mod_kinds_initialized      .and. &
              mod_mpi_utils_initialized  .and. &
              mod_state_vars_initialized ) ) then
     call error(this_sub_name,this_mod_name, &
                'Not all the required modules are initialized.')
   endif
   ! Double initialization is not fatal: warn and continue.
   if(mod_newton_initialized) then
     call warning(this_sub_name,this_mod_name, &
                  'Module is already initialized.')
   endif
   !----------------------------------------------

   mod_newton_initialized = .true.
 end subroutine mod_newton_constructor

!-----------------------------------------------------------------------
 
 !> Module destructor: check the module is initialized and mark it as
 !! uninitialized.
 subroutine mod_newton_destructor()
  character(len=*), parameter :: &
    this_sub_name = 'destructor'

   !Consistency checks ---------------------------
   ! Destroying an uninitialized module is a programming error;
   ! test the logical directly instead of comparing with .eqv..false.
   if(.not.mod_newton_initialized) then
     call error(this_sub_name,this_mod_name, &
                'This module is not initialized.')
   endif
   !----------------------------------------------

   mod_newton_initialized = .false.
 end subroutine mod_newton_destructor

!-----------------------------------------------------------------------
 
 !> Newton method to solve \f$f(x)=0\f$
 !!
 !! This subroutine requires two work variables \c z and \c xn which,
 !! in principle, are homogeneous to the unknown \c x. However, the
 !! user will typically want to append to these variables
 !! information concerning the residuals, which can be reused for
 !! subsequent calls to \c pres and \c nres. For this reason, we
 !! proceed as follows:
 !! <ul>
 !!  <li> the type \c c_nwt includes an allocatable \c c_stv field
 !!  <li> the input/output argument \c x, of type \c c_stv, is copied in
 !!  a \c c_nwt object (the additional input argument \c xx), which is
 !!  used for all the computations, and then copied back into \c x
 !!  (this implies using some more memory than would be strictly
 !!  necessary using \c x as working array)
 !!  <li> the two local variables \c zz and \c xn are of type \c c_nwt
 !!  <li> the two local variables \c zz and \c xn are allocated using
 !!  \c xx as source, so that they conform to the layout decided by
 !!  the user.
 !! </ul>
 !! All the \c c_stv fields are allocated using \c x as source, so
 !! that any allocatable component is allocated correctly. Copying a
 !! \c c_stv object into a \c c_nwt one is then left to the user, so
 !! that he/she can ensure the consistency of any additional field.
 !!
 !! \note For parallel execution, the output variables \c lmaxiter, \c
 !! niter and \c res are meaningful only on the master process.
 subroutine newton(x,xx, params, lmaxiter, niter, res, mpi_id, mpi_comm)
  !> initial guess, overwritten with the solution
  class(c_stv), intent(inout) :: x
  !> wrapper for \c x: provides the source for the allocation of the
  !! local \c c_nwt working variables. Notice that <tt>xx\%x</tt> does
  !! not need to be allocated, and if it is then it is immediately
  !! deallocated.
  class(c_nwt), intent(inout) :: xx
  !> input parameters
  type(t_nwt_params), intent(in) :: params
  logical, intent(out) :: lmaxiter !< true if reached params%nmax
  !> number of nonlinear iterations (i.e. number of calls to
  !! <tt>lsolver</tt>)
  integer, intent(out) :: niter
  real(wp), allocatable, intent(out) :: res(:,:) !< residuals
  !> MPI rank and communicator
  integer, intent(in), optional :: mpi_id, mpi_comm

  logical, parameter :: verbose = .false.
  logical :: master
  logical :: reached_convergence, exit_main_loop, exit_omega_loop
  ! tres collects the per-iteration diagnostics, one column per
  ! iteration: row 1: residual norm, row 2: increment norm,
  ! row 3: omega, row 4: forcing term eta, row 5: linear iterations
  real(wp) :: tres(5,params%nmax+1), omega, nz, nrn, res_factor
  ! forcing term, linear solver residual and iterations
  integer :: liniter
  real(wp) :: eta, linres
  class(c_nwt), allocatable :: zz, xn
  real(wp), parameter :: omega_min = 0.001_wp
  character(len=10000) :: message(7)
  character(len=*), parameter :: &
    this_sub_name = 'newton'

   !--------------------------------------------------------------------
   ! 0) Initialization
   ! Without MPI information, every process considers itself master.
   master = .true.
   if(present(mpi_id)) master = mpi_id.eq.0

   if(allocated(xx%x)) deallocate(xx%x)
   !allocate( xx%x , source=x ); call xx%defy(x)
   !allocate( zz , source=xx )
   !allocate( xn , source=xx )
   ! Wrap the initial guess in xx, then allocate the two working
   ! variables zz and xn with the user-defined layout (see i_source).
   call x%source(xx%x); call xx%defy(x)
   call xx%source(zz)
   call xx%source(xn)

   nrn = xx%nres() ! residual norm of the initial guess
   if(master) then
     lmaxiter = .false.
     reached_convergence = .false.
     niter = 0 ! number of pres calls
     exit_main_loop = reached_convergence.or.lmaxiter
     eta = params%tolerance ! conservative choice for the first iter.
   endif
   tres = -1.0_wp ! sentinel: columns beyond the last iteration stay -1
   !--------------------------------------------------------------------

   !--------------------------------------------------------------------
   ! 1) Newton loop
   mainloop : do
    ! synchronize broadcasts the master's exit flags (and omega, eta)
    ! so that all processes take the same branches
    call synchronize(); if(exit_main_loop) exit mainloop

     !------------------------------------------------------------------
     ! 1.0) iteration setup
     if(master) then
       niter = niter+1
       ! Store some diagnostics which will change soon
       tres(1,niter) = nrn
       tres(4,niter) = eta
       ! variables set here in preparation for the omega_do loop
       omega = 1.0_wp; res_factor = 1.0_wp
     endif
     !------------------------------------------------------------------

     !------------------------------------------------------------------
     ! 1.1) preconditioned residual
     ! The forcing term is passed as the absolute linear tolerance
     ! eta*||f(x)||, see the note in the module documentation.
     call zz%pres( xx , eta*nrn , linres , liniter )
     nz = zz%norm() ! increment norm (stopping criteria)
     !------------------------------------------------------------------

     !------------------------------------------------------------------
     ! 1.2) compute the new forcing term
     ! xn = x + z   (x is used as temporary array)
     call x%add( xx%x , zz%x ); call xn%defy( x )
     nrn = xn%nres() ! residual norm
     ! update the forcing term for the next iteration
     if(master) then
       ! use (2.2) which is easier to compute
       eta = abs(nrn-linres)/tres(1,niter)
       ! safeguard
       eta = max(1.0e4_wp*epsilon(eta),min(eta,0.9_wp))
     endif
     !------------------------------------------------------------------

     !------------------------------------------------------------------
     ! 1.3) relaxation loop: halve omega until the residual stops
     ! increasing (up to the res_factor correction) or omega is tiny
     omega_do: do
       ! During the computation of the new forcing term, a first guess
       ! for xn has been computed using  omega = 1. This is the
       ! starting point for the relaxation procedure.
       if(master) then
         exit_omega_loop = nrn.le.(res_factor*tres(1,niter))
         if(.not.exit_omega_loop) then
           ! if we are here, we have to reduce omega
           omega      = 0.5_wp*omega
           res_factor = 1.1_wp*res_factor
           ! check whether omega is too small -> no point insisting
           exit_omega_loop = omega.lt.omega_min
           ! The previous check could be adapted including also the
           ! norms of zz and xx, i.e. check if  |omega*zz| << |xx|
           ! The problem is that this would require |xx|, which can be
           ! expensive to compute. Letting nx = |xx|, this would be
           ! exit_omega_loop = omega.lt.omega_min*nx/nz
         endif
       endif
      call synchronize(); if(exit_omega_loop) exit omega_do

       ! xn = x + omega*z   (x is used as temporary array)
       call x%alt( xx%x , omega , zz%x ); call xn%defy( x )
       nrn = xn%nres() ! residual norm
     enddo omega_do

     ! x = xn
     call xx%defy( xn%x )
     !------------------------------------------------------------------

     !------------------------------------------------------------------
     ! 1.4) convergence checks (diagnostics and flags: master only)
     if(master) then
       tres(2,niter) = nz
       tres(3,niter) = omega
       tres(5,niter) = real(liniter,wp)
       !---------------------------------------------------------------
       ! Stopping criterion: absolute test on the increment norm
       if(nz.le.params%tolerance) then
         ! The value of omega is not considered: this is a
         ! conservative approach to avoid exiting the method because
         ! of small omegas.
         reached_convergence = .true.
       else
         if(niter.ge.params%nmax) lmaxiter = .true.
       endif
       !---------------------------------------------------------------
       exit_main_loop = reached_convergence.or.lmaxiter

       if(verbose) then
         write(message(1),'(a,i5)') 'Newton iteration ',niter
         write(message(2),'(a,e12.3)') '  residual  ',tres(1,niter)
         write(message(3),'(a,e12.3)') '  new res.  ',nrn
         write(message(4),'(a,e12.3)') '  increment ',tres(2,niter)
         write(message(5),'(a,e12.3)') '  omega     ',tres(3,niter)
         write(message(6),'(a,e12.3)') '  forcing   ',tres(4,niter)
         write(message(7),'(a,e12.3)') '  iters     ',tres(5,niter)
         call info(this_sub_name,this_mod_name,message(1:7))
       endif
     endif
     !------------------------------------------------------------------

   enddo mainloop
   call x%copy( xx%x ) ! copy back the solution value
   !--------------------------------------------------------------------

   !--------------------------------------------------------------------
   ! 2) Clean-up and diagnostics
   deallocate(xx%x,zz,xn)

   ! The diagnostics are meaningful (and niter defined) only on the
   ! master process, see the note in the subroutine documentation.
   if(master) then
     tres(1,niter+1) = nrn ! residual of the final solution
     allocate(res(size(tres,1),niter+1))
     res = tres(:,1:niter+1)
   endif
   !--------------------------------------------------------------------

 contains

  !> Broadcast the master's control variables (exit flags, omega and
  !! eta) to all processes; a no-op when mpi_comm is not present.
  subroutine synchronize()

    integer, parameter :: &
     lbufd = 2, &
     rbufd = 2
    integer :: ierr
    logical  :: lbuf(lbufd)
    real(wp) :: rbuf(rbufd)

    if(present(mpi_comm)) then
      if(master) then
        lbuf(1) = exit_main_loop
        lbuf(2) = exit_omega_loop
        rbuf(1) = omega
        rbuf(2) = eta
      endif
      call mpi_bcast(lbuf,lbufd,mpi_logical,0,mpi_comm,ierr)
      call mpi_bcast(rbuf,rbufd,   wp_mpi  ,0,mpi_comm,ierr)
      if(.not.master) then
        exit_main_loop  = lbuf(1)
        exit_omega_loop = lbuf(2)
        omega           = rbuf(1)
        eta             = rbuf(2)
      endif
    endif

  end subroutine synchronize
 
 end subroutine newton
 
!-----------------------------------------------------------------------

 !> Default (no-op) implementation of the debug printer bound as
 !! <tt>show</tt> in \c c_nwt; override it in a concrete type to
 !! print useful diagnostics.
 subroutine nwt_show(x)
  class(c_nwt), intent(in) :: x
  ! implement your own version if you need it
 end subroutine nwt_show

!-----------------------------------------------------------------------

end module mod_newton

