SUBROUTINE ARRSECTGET &
& (array,array_suppl,ix0,ix1,iy0,iy1,is0,is1, &
&  ixx0,ixx1,iyy0,iyy1,iss0,iss1, &
&  i0,i1,j0,j1,k0,k1, &
&  nxsh, nysh, nzsh, &
&  comm3d, size_MPI, coords, dims, &
&  neighborhood, isperiodic)

! Gets the required section of "array" from neighbouring processes
! and sends to the neighbouring processes the array sections that
! they need.  A two-dimensional (x-y) Cartesian decomposition is
! assumed: all communication stays within the plane coords(3) = const.

! Note that it is assumed the required array section either lies
! entirely in the (extended) local array or does not overlap with it
! at all.  In the latter case the supplementary array is used.
! A PARTIAL overlap of the needed section with the local array is
! not treated correctly by this subroutine.

! The exchange proceeds in three non-blocking phases, matched by tag:
!   1. (tag 1) every pair of partners exchanges the bounds of the
!      section each one needs;
!   2. (tag 2) each process replies with the part of the request that
!      its inner domain owns (or with "nodata" sentinels), and posts
!      the corresponding data send (tag 3);
!   3. (tag 3) each process receives the data and unpacks it into
!      array (when the request overlaps the local extended domain)
!      or into array_suppl (when it does not).

!use MPI

implicit none

include 'mpif.h'

!Input variables

integer(4), intent(in) :: ix0,ix1,iy0,iy1,is0,is1 ! Extended (haloed) local array bounds
integer(4), intent(in) :: ixx0,ixx1,iyy0,iyy1,iss0,iss1 ! Array bounds of "inner domain" (owned cells)
integer(4), intent(in) :: i0,i1,j0,j1,k0,k1 ! Array section bounds,
                                            ! needed to get from neighbouring process
integer(4), intent(in) :: nxsh, nysh, nzsh ! Halo widths (unused here; kept for a uniform call interface)

integer(4), intent(in) :: comm3d, size_MPI ! Cartesian communicator and its size
integer(4), intent(in) :: coords(1:3)      ! My coordinates in the process grid
integer(4), intent(in) :: dims(1:3)        ! Process-grid dimensions
integer(4), intent(in) :: neighborhood     ! 1 - immediate neighbours only; 2 - whole x-y plane

logical, intent(in) :: isperiodic(1:3)     ! Grid periodicity (unused here; kept for a uniform call interface)

! Input/output variables

real(8), intent(inout) :: array(ix0:ix1,iy0:iy1,is0:is1)

! Output variables

real(8), intent(out) :: array_suppl(i0:i1,j0:j1,k0:k1)

! Local variables

! Pack/unpack buffers: one column per communication partner.
real(8), allocatable :: worksend(:,:), workrecv(:,:)

integer(4) :: numneighbors
integer(4), parameter :: nodata = -999 ! Sentinel bound value: "no overlap / nothing to send"
integer(4), allocatable :: ranks(:)
integer(4) :: irank, irank0
integer(4) :: coordn(1:3)
integer(4) :: arrbounds(1:6), arrbounds_inmydomain(1:6), coords_mydom(1:6), &
& coords_mydom_ext(1:6)
integer(4), allocatable :: arrbounds_initsdomain(:,:), arrboundsneig(:,:)
integer(4) :: tagsend, tagsend2, tagsend3, tagrecv, tagrecv2, tagrecv3
integer(4), allocatable :: requestsend(:), requestsend2(:), &
& requestsend3(:), requestrecv(:), &
& requestrecv2(:), requestrecv3(:)
integer(4) :: istatus(1:MPI_STATUS_SIZE)
integer(4) :: ierr
integer(4) :: i, j, k, n ! Loop indices
integer(4) :: iwork
integer(4) :: icoordxmin, icoordxmax
integer(4) :: icoordymin, icoordymax

logical, allocatable :: flagrecv(:), flagssend(:), &
& flagsrecv(:), flagsrecv2(:), hasdata(:)
logical :: inmydomain, inmyextdomain_arrbounds


! Build the list of communication partners in my x-y plane.
allocate (ranks(1:size_MPI))
k = 0
if (neighborhood == 1) then
  ! Immediate neighbours only, clipped at the (non-periodic) grid edges
  icoordxmin = max(coords(1)-1,0)
  icoordxmax = min(coords(1)+1,dims(1)-1)
  icoordymin = max(coords(2)-1,0)
  icoordymax = min(coords(2)+1,dims(2)-1)
elseif (neighborhood == 2) then
  ! All processes of the plane
  icoordxmin = 0
  icoordxmax = dims(1)-1
  icoordymin = 0
  icoordymax = dims(2)-1
endif
do j = icoordymin, icoordymax
  do i = icoordxmin, icoordxmax
    if ( .not. (i == coords(1) .and. j == coords(2)) ) then
      coordn(1) = i; coordn(2) = j; coordn(3) = coords(3)
      k = k + 1
      call MPI_CART_RANK(comm3d,coordn,ranks(k),ierr)
    endif
  enddo
enddo
numneighbors = k

allocate (arrbounds_initsdomain(1:6,1:numneighbors))
allocate (arrboundsneig(1:6,1:numneighbors))
allocate (requestsend(1:numneighbors), requestsend2(1:numneighbors))
allocate (requestsend3(1:numneighbors), requestrecv(1:numneighbors))
allocate (requestrecv2(1:numneighbors), requestrecv3(1:numneighbors))
allocate (flagrecv(1:numneighbors), flagssend(1:numneighbors))
allocate (flagsrecv(1:numneighbors), flagsrecv2(1:numneighbors), &
& hasdata(1:numneighbors))

! Requests that never get posted must be NULL so that the final
! MPI_WAIT sweep returns on them immediately.
requestsend = MPI_REQUEST_NULL
requestsend2 = MPI_REQUEST_NULL
requestsend3 = MPI_REQUEST_NULL
requestrecv = MPI_REQUEST_NULL
requestrecv2 = MPI_REQUEST_NULL
requestrecv3 = MPI_REQUEST_NULL

! Buffer length: the global maximum inner-domain size bounds any
! section a process can send or receive.
iwork = (ixx1 - ixx0 + 1)*(iyy1 - iyy0 + 1)*(iss1 - iss0 + 1)
call MPI_ALLREDUCE(iwork,n,1,MPI_INTEGER,MPI_MAX,comm3d,ierr)
allocate (worksend(1:n,1:numneighbors), workrecv(1:n,1:numneighbors))

! Bounds of the section I need to receive
arrbounds(1) = i0
arrbounds(2) = i1
arrbounds(3) = j0
arrbounds(4) = j1
arrbounds(5) = k0
arrbounds(6) = k1

! Bounds of my inner domain (the cells I own and may serve to others)
coords_mydom(1) = ixx0
coords_mydom(2) = ixx1
coords_mydom(3) = iyy0
coords_mydom(4) = iyy1
coords_mydom(5) = iss0
coords_mydom(6) = iss1

! Bounds of my extended (haloed) domain
coords_mydom_ext(1) = ix0
coords_mydom_ext(2) = ix1
coords_mydom_ext(3) = iy0
coords_mydom_ext(4) = iy1
coords_mydom_ext(5) = is0
coords_mydom_ext(6) = is1

! Decides (once) whether received data go into array or array_suppl
inmyextdomain_arrbounds = DOMOVERLAP(coords_mydom_ext,arrbounds)

! Tags for array section coordinates, needed by processes
tagsend = 1
tagrecv = tagsend

! Tags for array section coordinates, overlapping with domain of the process
tagsend2 = 2
tagrecv2 = tagsend2

! Tags for array section data
tagsend3 = 3
tagrecv3 = tagsend3

! Phase 1: exchange requested bounds; also pre-post the phase-2 receives.
do irank = 1, numneighbors
  call MPI_ISEND(arrbounds,6,MPI_INTEGER,ranks(irank), &
  & tagsend,comm3d,requestsend(irank),ierr)
  call MPI_IRECV(arrbounds_initsdomain(1,irank),6,MPI_INTEGER,ranks(irank), &
  & tagrecv2,comm3d,requestrecv2(irank),ierr)
  call MPI_IRECV(arrboundsneig(1,irank),6,MPI_INTEGER,ranks(irank), &
  & tagrecv,comm3d,requestrecv(irank),ierr)
enddo

! Phase 2: as each neighbour's request arrives, answer it exactly once.
! irank0 tracks the first still-unanswered partner to avoid rescanning.
flagssend(:) = .false.
irank0 = 1
c1: do while (.true.)

  do irank  = irank0, numneighbors
    call MPI_TEST(requestrecv(irank),flagrecv(irank),istatus,ierr)
    if (flagrecv(irank) .and. .not. flagssend(irank)) then
      ! Checking if my "inner" domain overlaps with section needed by neighbour
      inmydomain = DOMOVERLAP(arrboundsneig(1,irank), coords_mydom)
!      write(*,*) inmydomain, numneighbors
      if ( (.not.inmydomain) .or. arrboundsneig(1,irank) == nodata) then
        ! Nothing to serve: reply with the sentinel bounds only
        arrbounds_inmydomain(1:6) = nodata
        call MPI_ISEND(arrbounds_inmydomain,6,MPI_INTEGER,ranks(irank), &
        & tagsend2,comm3d,requestsend2(irank),ierr)
      else
        ! Clip the neighbour's request to my inner domain
        arrbounds_inmydomain(1) = max(min(arrboundsneig(1,irank),ixx1),ixx0)
        arrbounds_inmydomain(2) = max(min(arrboundsneig(2,irank),ixx1),ixx0)
        arrbounds_inmydomain(3) = max(min(arrboundsneig(3,irank),iyy1),iyy0)
        arrbounds_inmydomain(4) = max(min(arrboundsneig(4,irank),iyy1),iyy0)
        arrbounds_inmydomain(5) = max(min(arrboundsneig(5,irank),iss1),iss0)
        arrbounds_inmydomain(6) = max(min(arrboundsneig(6,irank),iss1),iss0)
        call MPI_ISEND(arrbounds_inmydomain,6,MPI_INTEGER,ranks(irank), &
        & tagsend2,comm3d,requestsend2(irank),ierr)
        iwork = (arrbounds_inmydomain(2) - arrbounds_inmydomain(1) + 1) * &
        &       (arrbounds_inmydomain(4) - arrbounds_inmydomain(3) + 1) * &
        &       (arrbounds_inmydomain(6) - arrbounds_inmydomain(5) + 1)
        ! Pack the clipped section (k-j-i order; must match the unpack below)
        n = 0
        do k = arrbounds_inmydomain(5), arrbounds_inmydomain(6)
          do j = arrbounds_inmydomain(3), arrbounds_inmydomain(4)
            do i = arrbounds_inmydomain(1), arrbounds_inmydomain(2)
              n = n + 1
              worksend(n,irank) = array(i,j,k)
            enddo
          enddo
        enddo
        call MPI_ISEND(worksend(1,irank),iwork,MPI_DOUBLE_PRECISION,ranks(irank), &
        & tagsend3,comm3d,requestsend3(irank),ierr)
      endif
      flagssend(irank) = .true.
    endif

  enddo
  
  ! Keep polling until every partner has been answered
  do irank  = irank0, numneighbors
    if (.not.flagssend(irank)) then
      irank0 = irank
      cycle c1
    endif
  enddo

  exit c1

enddo c1


! Phase 2 (receive side): once a neighbour's reply with the served
! bounds arrives, post the matching data receive (unless it replied
! with the nodata sentinel).
flagsrecv(:) = .false.
irank0 = 1
c2: do while (.true.)

  do irank = irank0, numneighbors
    call MPI_TEST(requestrecv2(irank),flagrecv(irank),istatus,ierr)
    if (flagrecv(irank) .and. .not. flagsrecv(irank)) then
      if (arrbounds_initsdomain(1,irank) /= nodata) then
!       This process has some data for me
        hasdata(irank) = .true.
        iwork = (arrbounds_initsdomain(2,irank) - arrbounds_initsdomain(1,irank) + 1) * &
        &       (arrbounds_initsdomain(4,irank) - arrbounds_initsdomain(3,irank) + 1) * &
        &       (arrbounds_initsdomain(6,irank) - arrbounds_initsdomain(5,irank) + 1)
        call MPI_IRECV(workrecv(1,irank),iwork,MPI_DOUBLE_PRECISION,ranks(irank), &
        & tagrecv3,comm3d,requestrecv3(irank),ierr)
      else
        hasdata(irank) = .false.
      endif
      flagsrecv(irank) = .true.
    endif

  enddo

  do irank  = irank0, numneighbors
    if (.not.flagsrecv(irank)) then
      irank0 = irank
      cycle c2
    endif
  enddo

  exit c2

enddo c2


! Phase 3: receive array sections from processes and unpack them
flagsrecv2(:) = .false.
irank0 = 1
c3: do while (.true.)

  do irank = irank0, numneighbors
    if (hasdata(irank)) then
      call MPI_TEST(requestrecv3(irank),flagrecv(irank),istatus,ierr)
      if (flagrecv(irank) .and. .not. flagsrecv2(irank)) then
        n = 0
        if (inmyextdomain_arrbounds) then
!         Needed coordinates lie in my domain, copying data in the array
          do k = arrbounds_initsdomain(5,irank), arrbounds_initsdomain(6,irank)
            do j = arrbounds_initsdomain(3,irank), arrbounds_initsdomain(4,irank)
              do i = arrbounds_initsdomain(1,irank), arrbounds_initsdomain(2,irank)
                n = n + 1
                array(i,j,k) = workrecv(n,irank)
              enddo
            enddo
          enddo
        else
!         Needed coordinates do not lie in my domain: using supplementary
!         array
          do k = arrbounds_initsdomain(5,irank), arrbounds_initsdomain(6,irank)
            do j = arrbounds_initsdomain(3,irank), arrbounds_initsdomain(4,irank)
              do i = arrbounds_initsdomain(1,irank), arrbounds_initsdomain(2,irank)
                n = n + 1
                array_suppl(i,j,k) = workrecv(n,irank)
              enddo
            enddo
          enddo
        endif
        flagsrecv2(irank) = .true.
      endif
    else
      ! No data expected from this partner: nothing to do
      flagsrecv2(irank) = .true.
    endif
  enddo

  do irank  = irank0, numneighbors
    if (.not.flagsrecv2(irank)) then
      irank0 = irank
      cycle c3
    endif
  enddo

  exit c3

enddo c3

! Complete all outstanding requests before the buffers are freed.
! Never-posted requests are MPI_REQUEST_NULL and return immediately.
do irank = 1, numneighbors
  call MPI_WAIT (requestsend(irank), istatus, ierr)
  call MPI_WAIT (requestsend2(irank), istatus, ierr)
  call MPI_WAIT (requestsend3(irank), istatus, ierr) 
  call MPI_WAIT (requestrecv(irank), istatus, ierr)
  call MPI_WAIT (requestrecv2(irank), istatus, ierr)
  call MPI_WAIT (requestrecv3(irank), istatus, ierr)
enddo

deallocate (worksend, workrecv)
deallocate (ranks)

deallocate (arrbounds_initsdomain)
deallocate (arrboundsneig)
deallocate (requestsend, requestsend2)
deallocate (requestsend3, requestrecv)
deallocate (requestrecv2, requestrecv3)
deallocate (flagrecv, flagssend)
deallocate (flagsrecv, flagsrecv2, hasdata)


return
contains


FUNCTION DOMOVERLAP(coords1,coords2)

! Returns .true. when the two 3-D index boxes overlap.
! Each argument packs the bounds as (x0,x1,y0,y1,z0,z1);
! the boxes are assumed well-ordered (lower bound <= upper bound).
! Per dimension: an endpoint of box 1 falls inside box 2, or
! box 1 contains box 2 entirely.

implicit none

integer(4), intent(in) :: coords1(1:6), coords2(1:6)

logical :: DOMOVERLAP

DOMOVERLAP = &
& ( (coords1(1) >= coords2(1) .and. coords1(1) <= coords2(2)) .or. &
&   (coords1(2) >= coords2(1) .and. coords1(2) <= coords2(2)) .or. &
&   (coords1(1) <= coords2(1) .and. coords1(2) >= coords2(2)) ) .and. &
& ( (coords1(3) >= coords2(3) .and. coords1(3) <= coords2(4)) .or. &
&   (coords1(4) >= coords2(3) .and. coords1(4) <= coords2(4)) .or. &
&   (coords1(3) <= coords2(3) .and. coords1(4) >= coords2(4)) ) .and. &
& ( (coords1(5) >= coords2(5) .and. coords1(5) <= coords2(6)) .or. &
&   (coords1(6) >= coords2(5) .and. coords1(6) <= coords2(6)) .or. &
&   (coords1(5) <= coords2(5) .and. coords1(6) >= coords2(6)) )

END FUNCTION DOMOVERLAP


END SUBROUTINE ARRSECTGET
