      SUBROUTINE MPI_INITIALIZE

!     Initializes the MPI environment and stores the basic process
!     information (communicator size, process rank, processor name)
!     in the shared variables of module MPI_VARIABLES.

      use MPI_VARIABLES

      implicit none
      
!     Start the MPI runtime; must precede any other MPI call
      call MPI_INIT(ierr_MPI) 
!      write(*,*) 'MPI_INIT passed'
!     Total number of launched processes -> size_MPI
      call MPI_COMM_SIZE(MPI_COMM_WORLD,size_MPI,ierr_MPI) 
!      write(*,*) 'MPI_COMM_SIZE passed'
!     Rank of this process in MPI_COMM_WORLD -> rank_MPI
      call MPI_COMM_RANK(MPI_COMM_WORLD,rank_MPI,ierr_MPI) 
!      write(*,*) 'MPI_COMM_RANK passed'
!     Host name of the node running this process -> proc_name
!     (len_MPI receives the name length)
      call MPI_GET_PROCESSOR_NAME(proc_name,len_MPI,ierr_MPI) 
!      write(*,*) 'MPI_GET_PROCESSOR_NAME passed'
!      write(*,*) 'My processor name is', rank_MPI, proc_name
      
      END SUBROUTINE MPI_INITIALIZE
      
      
      SUBROUTINE CARTESIAN_GRID_CREATE(nx,ny,ns,iophis)

!     Builds the 3D Cartesian process topology (nprocx x nprocy x nprocs,
!     taken from module MPI_VARIABLES), computes this process's subdomain
!     index ranges, sets the boundary-location flags, and creates the 2D
!     sub-communicators as well as the groups/communicators that gather
!     the processes lying on the horizontal (lateral) domain boundaries.
!
!     Arguments:
!       nx, ny, ns : global grid sizes in x, y and sigma
!       iophis     : option flag forwarded to MPI_CHECK

      use MPI_VARIABLES
      implicit none

!     Input variables
      integer, intent(in) :: nx,ny,ns
      integer, intent(in) :: iophis

!     Local variables
      integer i,j,k
      integer numhoris,counter,irank
      integer, allocatable:: rankhoris(:)
      logical dimlog(3)          ! per-dimension mask for MPI_CART_SUB
      
!     Validate the process layout against the grid before building topology
      call MPI_CHECK(nx,ny,ns,iophis)
      dims(1) = nprocx
      dims(2) = nprocy
      dims(3) = nprocs
!     Create the 3D Cartesian communicator COMM3D
      call MPI_CART_CREATE (MPI_COMM_WORLD,ndim,dims,isperiodic, &
     & reorder,COMM3D,ierr_MPI)

!      if (COMM3D==MPI_COMM_NULL) then
!       write(*,*) 'MPI_COMM_NULL ', 'rank_MPI = ', rank_MPI
!       if (rankout /= rank_MPI) then
!        write(0,*) 'Error: rankout /= rank_MPI of the process with
!     &  MPI_COMM_NULL in Cartesian communicator: STOP'
!        STOP
!       endif
!      endif

!      Retrieve this process's Cartesian coordinates and ranks of the
!      two reference corner processes (0,0,0) and (dims-1,dims-1,0)
       call MPI_CART_GET(COMM3D,ndim,dims,isperiodic,coords, &
     &  ierr_MPI)
       call MPI_CART_RANK(COMM3D,coords,rank_comm3d,ierr_MPI)
       coordn(1) = 0; coordn(2) = 0; coordn(3) = 0
       call MPI_CART_RANK(COMM3D,coordn,rank_comm3d_000,ierr_MPI)
       coordn(1) = dims(1)-1; coordn(2) = dims(2)-1; coordn(3) = 0
       call MPI_CART_RANK(COMM3D,coordn,rank_comm3d_110,ierr_MPI)

!      Compute this process's local index ranges (interior, halo and
!      shifted variants) from the global sizes and Cartesian coordinates;
!      all outputs are module variables of MPI_VARIABLES
       call MPA_MYCRDS &
     & ( &                   
     & nx,    ny,    ns, &   
     & nxsh,  nysh,  nssh, & 
     & nxsh2, nysh2, nssh2, &

     & dims,  coords, &      

     & nxi, nxe, &           
     & nyi, nye, &           
     & nsi, nse, &           
     & nx0i, nx1e, &         
     & ny0i, ny1e, &         
     & ns0i, ns1e, &
     & nx2e, ny2e, & 
     & nxsi, nxse, &         
     & nysi, nyse, &         
     & nssi, nsse, &         
     & nxs2i, nxs2e, &       
     & nys2i, nys2e, &       
     & nss2i, nss2e, &       
     & nx0si, nx1se, &       
     & ny0si, ny1se, &       
     & ns0si, ns1se, &       
     & ns0i_xproc, ns1e_xproc, &
     & ns0i_yproc, ns1e_yproc & 
     & )

!     Per-rank bounds tables (one entry per process, indexed 0..size_MPI-1)
      allocate (nxpi(0:size_MPI-1), nxpe(0:size_MPI-1))
      allocate (nypi(0:size_MPI-1), nype(0:size_MPI-1))
      allocate (nspi(0:size_MPI-1), nspe(0:size_MPI-1))
      allocate (nxpi_ys(0:size_MPI-1), nxpe_ys(0:size_MPI-1))
      allocate (nypi_ys(0:size_MPI-1), nype_ys(0:size_MPI-1))
      allocate (nspi_ys(0:size_MPI-1), nspe_ys(0:size_MPI-1))
      allocate (nxpi_xs(0:size_MPI-1), nxpe_xs(0:size_MPI-1))
      allocate (nypi_xs(0:size_MPI-1), nype_xs(0:size_MPI-1))
      allocate (nspi_xs(0:size_MPI-1), nspe_xs(0:size_MPI-1))
      allocate (ksize(0:size_MPI-1))
      allocate (isize(0:size_MPI-1))
      allocate (jsize(0:size_MPI-1))
      allocate (nxpi_x(0:size_MPI-1),nxpe_x(0:size_MPI-1))
      allocate (nypi_y(0:size_MPI-1),nype_y(0:size_MPI-1))

!      Fill the per-rank bounds tables (used by the Poisson solver) and
!      pick out this process's own x/y decomposition bounds
       call POIS_BOUNDS(nx,ny,ns,dims,size_MPI,comm3d, &
     &  nxpi,nxpe,nypi,nype,nspi,nspe, &
     &  nxpi_ys,nxpe_ys,nypi_ys,nype_ys,nspi_ys,nspe_ys, &
     &  nxpi_xs,nxpe_xs,nypi_xs,nype_xs,nspi_xs,nspe_xs, &
     &  ksize,isize,jsize,nxpi_x,nxpe_x,nypi_y,nype_y)
        nx0i_x = nxpi_x(rank_comm3d)
        nx1e_x = nxpe_x(rank_comm3d)
        ny0i_y = nypi_y(rank_comm3d)
        ny1e_y = nype_y(rank_comm3d)

!       print*, 'MYCRDS',coords(1:3)
!       print*, 'nxi = ', nxi, 'nxe = ', nxe
!       print*, 'nyi = ', nyi, 'nye = ', nye
!       print*, 'nsi = ', nsi, 'nse = ', nse

!      Flags marking whether this process touches a lateral boundary
!      (left/right in x, left/right in y) of the Cartesian grid
       horisbound = .false.
       xlbound    = .false.
       xrbound    = .false.
       ylbound    = .false.
       yrbound    = .false.
       xbound     = .false.
       ybound     = .false.
       if (coords(1) == 0)         xlbound = .true.
       if (coords(1) == dims(1)-1) xrbound = .true.
       if (coords(2) == 0)         ylbound = .true.
       if (coords(2) == dims(2)-1) yrbound = .true.
       xbound = (xlbound .or. xrbound)
       ybound = (ylbound .or. yrbound)
       horisbound = (xbound .or. ybound)
       
!      Flags for the vertical (sigma) extremes: coords(3)=dims(3)-1 is
!      treated as the lower boundary, coords(3)=0 as the upper one
       lowbound = .false.
       if (coords(3)==dims(3)-1) lowbound = .true.

       upperbound = .false.
       if (coords(3)==0) upperbound = .true.

!      Creating three 2d cartesian topologies
!      (dimlog selects which dimensions each sub-communicator retains:
!       COMM2DS keeps x-y, COMM2DY keeps x-sigma, COMM2DX keeps y-sigma)
       dimlog(1)=.true.
       dimlog(2)=.true.
       dimlog(3)=.false.
       call MPI_CART_SUB(COMM3D,dimlog,COMM2DS,ierr_MPI)
       dimlog(1)=.true.
       dimlog(2)=.false.
       dimlog(3)=.true.
       call MPI_CART_SUB(COMM3D,dimlog,COMM2DY,ierr_MPI)
       dimlog(1)=.false.
       dimlog(2)=.true.
       dimlog(3)=.true.
       call MPI_CART_SUB(COMM3D,dimlog,COMM2DX,ierr_MPI)
       
!      CREATING THE COMMUNICATOR FOR PROCESSES AT THE HORISONTAL BOUNDARIES
       call MPI_COMM_GROUP(COMM3D,group_comm3d,ierr_MPI)

!      Number of processes on the lateral shell of the dims(1) x dims(2)
!      x dims(3) process grid (perimeter columns, corners counted once)
       numhoris = 2*(nprocx+nprocy)*nprocs - 4*nprocs
       allocate (rankhoris(numhoris))
       counter = 0
      
!      First pass: both x-extreme planes (i=0 and i=dims(1)-1).
!      NOTE(review): the stride dims(1)-1 is zero when dims(1)==1, which
!      is illegal in a do loop — presumably nprocx >= 2 is required here;
!      confirm against the configuration checks.
!      rankhor000/rankhor111 record the positions of the two corner
!      processes INSIDE rankhoris, i.e. their ranks in commhoris below.
       do i = 0,dims(1)-1,dims(1)-1
         do j = 0,dims(2)-1
           do k = 0,dims(3)-1
             coordn(1)=i;coordn(2)=j;coordn(3)=k
             call MPI_CART_RANK(COMM3D,coordn,irank,ierr_MPI)
             counter = counter + 1
             rankhoris(counter) = irank
             if (coordn(1)==0.and.coordn(2)==0.and.coordn(3)==0) &
     &         rankhor000 = counter-1
             if (coordn(1)==dims(1)-1.and.coordn(2)==dims(2)-1.and. &
     &           coordn(3)==dims(3)-1) &
     &         rankhor111 = counter-1             
           enddo
         enddo
       enddo

!      Second pass: the y-extreme planes, excluding the columns already
!      collected above (i runs over the interior 1..dims(1)-2 only)
       if (.not.dims(1)<=2) then
       do j = 0,dims(2)-1,dims(2)-1
         do i = 1,dims(1)-2
           do k = 0,dims(3)-1
             coordn(1)=i;coordn(2)=j;coordn(3)=k
             call MPI_CART_RANK(COMM3D,coordn,irank,ierr_MPI)
             counter = counter + 1
             rankhoris(counter) = irank
           enddo
         enddo
       enddo
       endif
       
!      Build the boundary group/communicator; commhoris is
!      MPI_COMM_NULL on processes not listed in rankhoris
       call MPI_GROUP_INCL(group_comm3d,numhoris,rankhoris,group_horis, &
     &  ierr_MPI)
       call MPI_COMM_CREATE(comm3d,group_horis,commhoris,ierr_MPI)
       if (horisbound) &
     &  call MPI_COMM_RANK(commhoris,irank,ierr_MPI)
       deallocate(rankhoris)
   
!      CREATING THE TWO COMMUNICATORS FOR PROCESSES AT X-BOUNDARIES (LEFT AND RIGHT)
!      (k=1 selects the left x-boundary, k=2 the right one)
       numhoris = dims(2)
       allocate (rankhoris(numhoris))
       do i = 0, dims(1)-1, dims(1)-1
         counter = 0
         do j = 0, dims(2)-1
           coordn(1)=i; coordn(2)=j; coordn(3)=0 ! Here the XY-decomposition of the domain is assumed
           call MPI_CART_RANK(COMM3D,coordn,irank,ierr_MPI)
           counter = counter + 1
           rankhoris(counter) = irank
         enddo
         if (i == 0) k = 1
         if (i == dims(1)-1) k = 2
         call MPI_GROUP_INCL(group_comm3d,numhoris,rankhoris, &
     &    group_xbound(k), ierr_MPI)
         call MPI_COMM_CREATE(comm3d,group_xbound(k),comm_xbound(k), &
     &    ierr_MPI)
!         if (xlbound) call MPI_COMM_RANK(comm_xbound(1),rank_xbound(1),ierr_MPI)
!         if (xrbound) call MPI_COMM_RANK(comm_xbound(2),rank_xbound(2),ierr_MPI)
       enddo
       deallocate (rankhoris)

!      CREATING THE TWO COMMUNICATORS FOR PROCESSES AT Y-BOUNDARIES (LEFT AND RIGHT)
!      (k=1 selects the left y-boundary, k=2 the right one;
!       rankhoris is auto-deallocated at subroutine exit)
       numhoris = dims(1)
       allocate (rankhoris(numhoris))
       do j = 0, dims(2)-1, dims(2)-1
         counter = 0
         do i = 0, dims(1)-1
           coordn(1)=i; coordn(2)=j; coordn(3)=0 ! Here the XY-decomposition of the domain is assumed
           call MPI_CART_RANK(COMM3D,coordn,irank,ierr_MPI)
           counter = counter + 1
           rankhoris(counter) = irank
         enddo
         if (j == 0) k = 1
         if (j == dims(2)-1) k = 2
         call MPI_GROUP_INCL(group_comm3d,numhoris,rankhoris, &
     &    group_ybound(k), ierr_MPI)
         call MPI_COMM_CREATE(comm3d,group_ybound(k),comm_ybound(k), &
     &    ierr_MPI)
!         if (xlbound) call MPI_COMM_RANK(comm_xbound(1),rank_xbound(1),ierr_MPI)
!         if (xrbound) call MPI_COMM_RANK(comm_xbound(2),rank_xbound(2),ierr_MPI)
       enddo

!       if (irank==rankhor000) print*, '000 process!', coords
!       if (irank==rankhor111) print*, '111 process!', coords
!       print*, coords,commhoris,MPI_COMM_NULL
       
!       print*, 'numhoris', numhoris, counter
!       call MPI_CART_GET(COMM2DX,2,dims2d,isperiodic2d,coords2d,
!     &  ierr_MPI)
!       print*, 'COMM2DX',coords,coords2d

!       call MPI_CART_GET(COMM2DY,2,dims2d,isperiodic2d,coords2d,
!     &  ierr_MPI)
!       print*, 'COMM2DY',coords,coords2d

!       call MPI_CART_GET(COMM2DS,2,dims2d,isperiodic2d,coords2d,
!     &  ierr_MPI)
!       print*, 'COMM2DS',coords,coords2d
       
!       print*, 'IN 3D', coords(1:3), COMM2D

!      STOP 'STOP in Cartesian_grid_create'
            
      END SUBROUTINE CARTESIAN_GRID_CREATE



      SUBROUTINE MPI_CHECK(nx,ny,ns,iophis)

!     Validates the requested MPI process layout (nprocx x nprocy x
!     nprocs, from module MPI_VARIABLES) against the global grid sizes
!     and the number of launched processes; stops the run with a
!     diagnostic message when the layout is unusable.
!
!     Arguments:
!       nx, ny, ns : global grid sizes in x, y and sigma
!       iophis     : option flag; when == 3 an additional limit on the
!                    total horizontal process count is enforced

      use MPI_VARIABLES
      implicit none

!     Input variables
      integer, intent(in) :: nx, ny, ns
      integer, intent(in) :: iophis

!     Local variables      
      integer widthx, widthy, widths   ! minimum points per process per direction
     
      if (rank_MPI == rankout) print*, 'size_MPI=',size_MPI
      
!     The Cartesian decomposition needs exactly nprocx*nprocy*nprocs ranks
      if (nprocx*nprocy*nprocs /= size_MPI) then
       print*, 'Error: number of launched processors must be equal &
     &  to nprocx*nprocy*nprocs'
       print*, 'nprocx = ', nprocx
       print*, 'nprocy = ', nprocy
       print*, 'nprocs = ', nprocs
       print*, 'The number of launched processes is ', size_MPI
       STOP 'STOP'
      endif
     
     
!      if (mod(nx,nprocx)/=0) then
!       print*, 'nx must be divided by nprocx without residual: STOP!'   
!       STOP
!      endif
      
!      if (mod(ny,nprocy)/=0) then
!       print*, 'ny must be divided by nprocy without residual: STOP!'   
!       STOP
!      endif
      
!     Integer division is exact here; the previous single-precision
!     float()/float() form could lose precision for large grids.
!     Each subdomain must contain more than 4 points in every direction.
      widthx = nx/nprocx
      widthy = ny/nprocy
      widths = ns/nprocs
      if (widthx<=4) then                                               
       print*, 'Too much processes in x-direction: STOP'
       STOP
      endif
      if (widthy<=4) then
       print*, 'Too much processes in y-direction: STOP'
       STOP
      endif
      if (widths<=4) then
       print*, 'Too much processes in sigma-direction: STOP'
       STOP
      endif

!     Extra constraint for iophis == 3: the total horizontal process
!     count must not exceed either horizontal grid dimension
      if ((nprocx*nprocy > nx .or. nprocx*nprocy > ny) .and. iophis == 3) then
        write(*,*) 'The number of processes must not exceed the &
        & length of the grid in x or y direction', nx, ny, nprocx*nprocy
        STOP
      endif
      
      END SUBROUTINE MPI_CHECK

      
      SUBROUTINE ARR2DEXCH &
      & (y,xsi,xse,ysi,yse, &
      & nxi,nxe,nyi,nye, &
      & shiftx,shifty, &
      & comm3d,nproc,ndim,prplcxy, &
      & request_send,request_recv)

!     Subroutine ARR2DEXCH performs MPI-exchanges at the edges
!     of subdomains (arrays) of two-dimensional field
!
!     The field y (declared on xsi:xse, ysi:yse) is copied into a working
!     buffer sized to the interior nxi:nxe, nyi:nye plus a halo of width
!     shiftx/shifty, exchanged with the neighbours via MPA_EXCHXY, and
!     copied back. Each call uses a fresh message-tag base (identife),
!     cycled within [identife_min, identife_max) to keep tags unique
!     across successive calls.
!
!     Arguments:
!       y                          : 2D field, halo regions updated in place
!       xsi,xse,ysi,yse            : declared bounds of y
!       nxi,nxe,nyi,nye            : interior (owned) index range
!       shiftx,shifty              : halo width in x and y
!       comm3d,nproc,ndim          : Cartesian communicator, process count,
!                                    topology dimensionality
!       prplcxy                    : per-direction exchange bookkeeping;
!                                    reset to 0 before return
!       request_send,request_recv  : nonblocking request handles used by
!                                    MPA_EXCHXY

!      use MPI
      use MPI_VARIABLES, only : &
      & arr2dexch_indic
      use TIMING_MOD, only : &
      & proc_time

      implicit none

      include 'mpif.h'

!     Input variables
!     Integers

      integer(4), intent(in) :: xsi, xse
      integer(4), intent(in) :: ysi, yse
      integer(4), intent(in) :: nxi, nxe
      integer(4), intent(in) :: nyi, nye
      integer(4), intent(in) :: shiftx, shifty

      integer(4), intent(in) :: comm3d
      integer(4), intent(in) :: nproc
      integer(4), intent(in) :: ndim
      integer(4), intent(inout) :: prplcxy(1:8)
      integer(4), intent(inout) :: request_send(1:8), request_recv(1:8)

!     Input/output variables
!     Reals
      real(8), intent(inout) :: y(xsi:xse,ysi:yse)

!     Local variables
!     Integers
      integer(4), parameter :: identife_min = 2001
      integer(4), parameter :: identife_max = 4000

      integer(4) :: req2(1:1000) 
      integer(4), save :: identife = identife_min   ! tag base, persists across calls
      integer(4) :: ierr
      integer(4) :: i0, i1, j0, j1

!     Reals
      real(8), allocatable :: x(:,:)
      real(8) :: pr_time, pro_time

!     Halo-extended range, clamped to the declared bounds of y
      i0 = max(nxi-shiftx,xsi)
      i1 = min(nxe+shiftx,xse)
      j0 = max(nyi-shifty,ysi)
      j1 = min(nye+shifty,yse)

!     Working buffer covering interior plus full halo; only the part that
!     overlaps y's declared bounds is initialized from y
      allocate (x(nxi-shiftx:nxe+shiftx,nyi-shifty:nye+shifty))
      x(i0:i1,j0:j1) = y(i0:i1,j0:j1)

!      pr_time = MPI_WTIME()
      call cpu_time(pr_time)

!     Actual neighbour exchange (external routine)
      call MPA_EXCHXY(x,nxi,nxe,nyi,nye,1,1, &
      &               shiftx,shifty,0,1,identife, &
      &               comm3d,nproc,ndim,req2,prplcxy, &
      &               request_send,request_recv)

!      proc_time(arr2dexch_indic,2) = proc_time(arr2dexch_indic,2) + &
!      & MPI_WTIME() - pr_time
!     Accumulate the exchange time into the timing table
      call cpu_time(pro_time)
      proc_time(arr2dexch_indic,2) = proc_time(arr2dexch_indic,2) + &
      & pro_time - pr_time

!     Advance the tag base; wrap around before reaching identife_max
      identife = identife + 1
      if (identife == identife_max) identife = identife_min
      
      y(i0:i1,j0:j1) = x(i0:i1,j0:j1)

      deallocate(x)

      prplcxy(:) = 0

!     Synchronize all processes before returning
      call MPI_BARRIER(comm3d,ierr)

      END SUBROUTINE ARR2DEXCH


      SUBROUTINE ARR3DEXCH(y,xsi,xse,ysi,yse,ssi,sse, &
     & nxi,nxe,nyi,nye,nsi,nse, &
     & shiftx,shifty,shifts, &
     & nxmin,nxmax,nymin,nymax,nsmin,nsmax, & 
     & nproc,comm3d,req,ndim,prplc, &
     & request_send,request_recv)

      ! Subroutine ARR3DEXCH performs MPI-exchanges at the edges
      ! of subdomains (arrays) of three-dimensional field
      !
      ! The field y (declared on xsi:xse, ysi:yse, ssi:sse) is copied into
      ! a working buffer covering the interior nxi:nxe, nyi:nye, nsi:nse
      ! plus halos of width shiftx/shifty/shifts (clamped to the declared
      ! bounds), exchanged with the neighbours via MPA_EXCH_NEW, and
      ! copied back. Each call uses a fresh message-tag base (identif),
      ! cycled within [identif_min, identif_max).
      !
      ! Arguments:
      !   y                         : 3D field, halo regions updated in place
      !   xsi..sse                  : declared bounds of y
      !   nxi..nse                  : interior (owned) index range
      !   shiftx,shifty,shifts      : halo widths in x, y, sigma
      !   nxmin..nsmax              : global index limits passed to MPA_EXCH_NEW
      !   nproc,comm3d,ndim         : process count, Cartesian communicator,
      !                               topology dimensionality
      !   req, prplc                : request/bookkeeping arrays for the
      !                               exchange; prplc is reset to 0 on return
      !   request_send,request_recv : nonblocking request handles, one set
      !                               per halo width (1:maxshift) and
      !                               neighbour direction (1:26)

!      use MPI
      use MPI_VARIABLES, only : &
      & arr3dexch_indic, &
      & maxshift
      use TIMING_MOD, only : &
      & proc_time

      implicit none

      include 'mpif.h'

      integer(4), intent(in) :: xsi, xse
      integer(4), intent(in) :: ysi, yse
      integer(4), intent(in) :: ssi, sse
      integer(4), intent(in) :: nxi, nxe
      integer(4), intent(in) :: nyi, nye
      integer(4), intent(in) :: nsi, nse
      integer(4), intent(in) :: shiftx, shifty, shifts
      integer(4), intent(in) :: nxmin, nxmax
      integer(4), intent(in) :: nymin, nymax
      integer(4), intent(in) :: nsmin, nsmax
      integer(4), intent(inout) :: request_send(1:maxshift,1:26)
      integer(4), intent(inout) :: request_recv(1:maxshift,1:26)

      real(8), intent(inout) :: y(xsi:xse,ysi:yse,ssi:sse)

      integer(4), intent(in) :: nproc,comm3d,ndim
      integer(4), intent(inout) :: req(1:52,1:2000)
      integer(4), intent(inout) :: prplc(1:26)

!     Local variables

      real(8), allocatable :: work(:,:,:)
      real(8) :: pr_time, pro_time

      integer(4), parameter :: identif_min = 1
      integer(4), parameter :: identif_max = 2000

      integer(4) :: i0, i1, j0, j1, k0, k1
      integer(4), save :: identif = identif_min   ! tag base, persists across calls
      integer(4) :: ierr    ! only used by the (currently commented-out) barrier


!     Halo-extended range, clamped to the declared bounds of y
      i0 = max(nxi-shiftx,xsi)
      i1 = min(nxe+shiftx,xse)
      j0 = max(nyi-shifty,ysi)
      j1 = min(nye+shifty,yse)
      k0 = max(nsi-shifts,ssi)
      k1 = min(nse+shifts,sse)

      allocate (work(i0:i1,j0:j1,k0:k1))

      work = y(i0:i1,j0:j1,k0:k1)

!     Advance the tag base; wrap around before reaching identif_max
      identif = identif + 1
      if (identif == identif_max) identif = identif_min

!      pr_time = MPI_WTIME()
      call cpu_time(pr_time)

!      call MPA_EXCH(work,nxi,nxe,nyi,nye,nsi,nse, &
!      &             i0,i1, &
!      &             j0,j1, &
!      &             k0,k1, &
!      &             shiftx,shifty,shifts,identif, &
!      &             comm3d,req,nproc,ndim, &
!      &             prplc)

!     Actual neighbour exchange (external routine)
      call MPA_EXCH_NEW(work,nxi,nxe,nyi,nye,nsi,nse, &
      &                 i0,i1, &
      &                 j0,j1, &
      &                 k0,k1, &
      &                 nxmin,nxmax,nymin,nymax,nsmin,nsmax, &
      &                 shiftx,shifty,shifts,identif, &
      &                 comm3d,req,nproc,ndim, &
      &                 prplc,request_send,request_recv, &
      &                 maxshift)

!      proc_time(arr3dexch_indic,2) = proc_time(arr3dexch_indic,2) + &
!      & MPI_WTIME() - pr_time
!     Accumulate the exchange time into the timing table
      call cpu_time(pro_time)

      proc_time(arr3dexch_indic,2) = proc_time(arr3dexch_indic,2) + &
      & pro_time - pr_time

      y(i0:i1,j0:j1,k0:k1) = work

      deallocate(work)

      prplc(:) = 0

!      call MPI_BARRIER(comm3d,ierr)

      END SUBROUTINE ARR3DEXCH


      SUBROUTINE HORIS_SUM(summa,nbound)

!     Sums the scalar summa over all processes lying on one of the four
!     lateral boundary planes of the Cartesian topology and distributes
!     the result to every process of the horizontal-boundary
!     communicator commhoris.
!
!       nbound = 1 : left  x-boundary (reduction over COMM2DX,
!                    broadcast root rankhor000)
!       nbound = 2 : right x-boundary (reduction over COMM2DX,
!                    broadcast root rankhor111)
!       nbound = 3 : left  y-boundary (reduction over COMM2DY,
!                    broadcast root rankhor000)
!       nbound = 4 : right y-boundary (reduction over COMM2DY,
!                    broadcast root rankhor111)
!
!     Any other nbound value leaves summa unchanged.

      use mpi_variables
      implicit none

      real(8) summa
      integer nbound

!     Local variables
      real(8) reduced   ! receive buffer for the allreduce result

      select case (nbound)

      case (1)
!      Processes on the selected boundary sum their contributions first,
!      then the corner-root value is spread over all boundary processes
       if (xlbound) then
        call MPI_ALLREDUCE(summa,reduced,1,MPI_DOUBLE_PRECISION, &
     &   MPI_SUM,COMM2DX,ierr_MPI)
        summa = reduced
       endif
       call MPI_BCAST(summa,1,MPI_DOUBLE_PRECISION,rankhor000, &
     &  commhoris,ierr_MPI)

      case (2)
       if (xrbound) then
        call MPI_ALLREDUCE(summa,reduced,1,MPI_DOUBLE_PRECISION, &
     &   MPI_SUM,COMM2DX,ierr_MPI)
        summa = reduced
       endif
       call MPI_BCAST(summa,1,MPI_DOUBLE_PRECISION,rankhor111, &
     &  commhoris,ierr_MPI)

      case (3)
       if (ylbound) then
        call MPI_ALLREDUCE(summa,reduced,1,MPI_DOUBLE_PRECISION, &
     &   MPI_SUM,COMM2DY,ierr_MPI)
        summa = reduced
       endif
       call MPI_BCAST(summa,1,MPI_DOUBLE_PRECISION,rankhor000, &
     &  commhoris,ierr_MPI)

      case (4)
       if (yrbound) then
        call MPI_ALLREDUCE(summa,reduced,1,MPI_DOUBLE_PRECISION, &
     &   MPI_SUM,COMM2DY,ierr_MPI)
        summa = reduced
       endif
       call MPI_BCAST(summa,1,MPI_DOUBLE_PRECISION,rankhor111, &
     &  commhoris,ierr_MPI)

      end select

      END SUBROUTINE HORIS_SUM


      SUBROUTINE TICK_CHECK(coords,time_interval,time_tick)

!     Warns when a measured time interval is too short to be resolved
!     reliably by the available timer: the interval should span at
!     least n_ticks timer ticks.
!
!     Arguments:
!       coords        : Cartesian coordinates of the calling process
!                       (currently unused; kept for the established
!                       call interface)
!       time_interval : measured time interval (seconds)
!       time_tick     : timer resolution (seconds)

      implicit none

!     Input variables
      integer(4), intent(in) :: coords(1:3) ! The coordinates of the calling process

      real(8), intent(in) :: time_interval
      real(8), intent(in) :: time_tick

!     Local variables
      real(8), parameter :: n_ticks = 10.d0 ! minimum acceptable ticks per interval

!     Guard: a zero or negative tick would make the ratio below invalid
      if (time_tick <= 0.d0) then
        print*, 'WARNING: nonpositive timer tick; resolution check skipped'
        return
      endif

      if (time_interval/time_tick < n_ticks) then
        print*, 'WARNING: the time interval is too small &
     &   to be measured, given the time tick'
      endif

      END SUBROUTINE TICK_CHECK
