 SUBROUTINE TRANSPOSE_XY_Y(F,nxpsi,nxpse,nypsi,nypse, &
     &                     Q,imin,imax,jmin,jmax,nx,ny, &
     &                     comm3d,nproc,ndim,dims,coords, &
     &                     nxpi,nxpe,nypi,nype, &
     &                     nypi_y,nype_y)

!This subroutine transposes the XY two-dimensional array
!decomposition into Y decomposition.
!
!In the XY decomposition each rank owns the patch
!F(nxpi(rank):nxpe(rank), nypi(rank):nype(rank)); in the target Y
!decomposition each rank owns the full x-range and the y-slab
!nypi_y(rank):nype_y(rank) of Q.  The redistribution is done with
!nonblocking point-to-point messages (MPI_ISEND/MPI_IRECV):
!  1. pack overlapping y-ranges into per-destination send buffers,
!  2. post all sends and receives,
!  3. wait on receives and unpack into Q,
!  4. wait on sends, then barrier.
!
!NOTE(review): the send/receive masks, overlap bounds, buffer sizes
!and request array are computed on the first call only and kept in
!SAVEd variables — this assumes nproc and the decomposition arrays
!are identical on every subsequent call; confirm against callers.

 !use MPI
 use MPI_VARIABLES, only : &
 & transpose_xy_y_indic
 use TIMING_MOD, only : &
 & proc_time

 implicit none

 include 'mpif.h'

!Input variables
 real(8), intent(in) :: F(nxpsi:nxpse,nypsi:nypse) ! Local patch in the XY decomposition
 integer, intent(in) :: nxpsi,nxpse,nypsi,nypse    ! Index bounds of the local patch F
 integer, intent(in) :: imin, imax                 ! x-bounds of the local output slab Q
 integer, intent(in) :: jmin, jmax                 ! y-bounds of the local output slab Q
 integer, intent(in) :: nx, ny                     ! Global grid sizes (not referenced below)
 integer, intent(in) :: ndim, nproc                ! nproc = number of ranks in comm3d; ndim not referenced below
 integer, intent(in) :: comm3d                     ! MPI communicator for the transpose
 integer, intent(in) :: coords(1:3)                ! Cartesian coords of this rank (not referenced below)
 integer, intent(in) :: dims(1:3)                  ! Cartesian dims (not referenced below)
 integer, intent(in) :: nxpi(0:nproc-1), nxpe(0:nproc-1)     ! Per-rank x-bounds in the XY decomposition
 integer, intent(in) :: nypi(0:nproc-1), nype(0:nproc-1)     ! Per-rank y-bounds in the XY decomposition
 integer, intent(in) :: nypi_y(0:nproc-1), nype_y(0:nproc-1) ! Per-rank y-bounds in the target Y decomposition

!Output variables
 real(8), intent(out) :: Q(imin:imax,jmin:jmax) ! Local slab in the Y decomposition

!Local variables
 integer :: i, j, k
 integer :: ibuf,kbuf,tag,rrank,rankrecv,ierr
 ! Requests/statuses are indexed 1..nproc for receives and
 ! nproc+1..2*nproc for sends (offset by the peer rank).
 integer :: status(MPI_STATUS_SIZE,2*nproc)
 integer, allocatable, save :: req2(:)
 integer :: myrank
 integer, save :: max_maxbuf ! Global max message size (in real(8) elements) over all pairs

 integer, allocatable, save :: nprocs_to_send(:) ! The mask of processes to which the current
                                           ! process will send data
 integer, allocatable, save :: nprocs_to_recv(:)
 integer, allocatable, save :: nypi_to_send(:), nype_to_send(:) ! The bounds of data
                                                          ! that the current
                                                          ! process will send to
                                                          ! the processes
                                                          ! indicated by
                                                          ! nprocs_to_send mask
 integer, allocatable, save :: nypi_to_recv(:), nype_to_recv(:) ! The bounds of data
                                                          ! that the current
                                                          ! process will receive from
                                                          ! the processes
                                                          ! indicated by
                                                          ! nprocs_to_recv mask

 real(8), allocatable, save :: sarr(:,:) ! Send array (one column per destination rank)
 real(8), allocatable, save :: rarr(:,:) ! Receive array (one column per source rank)
 real(8) :: time00 ! Work variable
 real(8) :: pr_time, pro_time ! Timing of this call, accumulated into proc_time

 logical, save :: firstcall = .true. ! Triggers one-time setup of masks/buffers below

! pr_time = MPI_WTIME()
 call cpu_time(pr_time)

 call MPI_COMM_RANK(comm3d, myrank, ierr)

 if (firstcall) then
!  Filling the mask of processes to which the current process
!  will send the data. Determining the bounds of data to be send
   allocate (nprocs_to_send(0:nproc-1))
   nprocs_to_send(:) = 0
   allocate (nypi_to_send(0:nproc-1), nype_to_send(0:nproc-1))
   do i = 0, nproc-1
     ! Rank i is a destination iff my XY y-range overlaps rank i's
     ! target Y-range; the three disjuncts cover containment and the
     ! two partial-overlap cases.
     if ( (nypi(myrank) <= nypi_y(i) .and. nype(myrank) >= nype_y(i)) .or. &
     &    (nypi(myrank) >= nypi_y(i) .and. nypi(myrank) <= nype_y(i)) .or. &
     &    (nype(myrank) >= nypi_y(i) .and. nype(myrank) <= nype_y(i)) ) then
       nprocs_to_send(i) = 1
       ! Overlap = intersection of my owned y-range with i's target y-range
       nypi_to_send(i) = max(nypi(myrank),nypi_y(i))
       nype_to_send(i) = min(nype(myrank),nype_y(i))
     endif
   enddo

!  Filling the mask of processes from which the current process
!  will receive the data. Determining the bounds of data to be received
   allocate (nprocs_to_recv(0:nproc-1))
   nprocs_to_recv(:) = 0
   allocate (nypi_to_recv(0:nproc-1), nype_to_recv(0:nproc-1))
   do i = 0, nproc-1
     ! Mirror of the send test: rank i is a source iff its XY y-range
     ! overlaps my target Y-range.
     if ( (nypi(i) <= nypi_y(myrank) .and. nype(i) >= nype_y(myrank)) .or. &
     &    (nypi(i) >= nypi_y(myrank) .and. nypi(i) <= nype_y(myrank)) .or. &
     &    (nype(i) >= nypi_y(myrank) .and. nype(i) <= nype_y(myrank)) ) then
       nprocs_to_recv(i) = 1
       nypi_to_recv(i) = max(nypi(i),nypi_y(myrank))
       nype_to_recv(i) = min(nype(i),nype_y(myrank))
     endif
   enddo

!  Calculating the maximal send and receive buffer size
   max_maxbuf = 1
   do i = 0, nproc-1
     if (nprocs_to_send(i) == 1) then
       ! Message size = (y-overlap length) * (my x-extent)
       max_maxbuf = max(max_maxbuf, &
       & (nype_to_send(i) - nypi_to_send(i) + 1) * &
       & (nxpe(myrank) - nxpi(myrank) + 1))
     endif
   enddo
   do i = 0, nproc-1
     if (nprocs_to_recv(i) == 1) then
       ! Incoming message size uses the SENDER's x-extent
       max_maxbuf = max(max_maxbuf, &
       & (nype_to_recv(i) - nypi_to_recv(i) + 1) * &
       & (nxpe(i) - nxpi(i) + 1))
     endif
   enddo

   ! Take the global maximum so every rank sends/receives a uniform
   ! count (max_maxbuf) and buffer columns are the same size everywhere.
   call MPI_ALLREDUCE(max_maxbuf,k,1,MPI_INTEGER,MPI_MAX,comm3d,ierr)
   max_maxbuf = k

   allocate (sarr(1:max_maxbuf, 0:nproc-1) )
   allocate (rarr(1:max_maxbuf, 0:nproc-1) )

   allocate (req2(1:2*nproc))
 endif


 tag = 1

! time00 = MPI_WTIME()

 do rrank = 0, nproc-1

   if (nprocs_to_send(rrank) == 1) then
   ! Sending the data to the processes specified above
     ! Pack the overlap region column-by-column (x fastest) into the
     ! send buffer for destination rrank.
     ibuf = 1
     do j = nypi_to_send(rrank), nype_to_send(rrank)
       do i = nxpi(myrank), nxpe(myrank)
         sarr(ibuf,rrank) = F(i,j)
         ibuf = ibuf + 1
       enddo
     enddo


!    if (firstcall) then
!    call MPI_send_init(sarr(1,rrank),maxbuf(myrank),MPI_DOUBLE_PRECISION, &
!    &                rrank,tag,comm3d,req2(icoord+nproc+1),ierr)

!    call MPI_recv_init(rarr(1,rankrecv),maxbuf(rankrecv),MPI_DOUBLE_PRECISION, &
!    &                rankrecv,tag,comm3d,req2(coordrecv(1)+1),ierr)

!    endif

!    call MPI_START(req2(icoord+nproc+1), ierr)
!    call MPI_START(req2(coordrecv(1)+1), ierr)


     ! NOTE(review): the count is always max_maxbuf, not the packed
     ! size (ibuf-1), so the unpacked tail of sarr is transmitted too.
     ! Harmless for correctness (the receiver only unpacks the valid
     ! prefix) but it sends uninitialized memory and extra bytes.
     call MPI_ISEND(sarr(1,rrank),max_maxbuf,MPI_DOUBLE_PRECISION, &
     &                rrank,tag,comm3d,req2(rrank+nproc+1),ierr)

   endif

   if (nprocs_to_recv(rrank) == 1) then
     ! Receiving the data from the processes specified above
     call MPI_IRECV(rarr(1,rrank),max_maxbuf,MPI_DOUBLE_PRECISION, &
     &                rrank,tag,comm3d,req2(rrank+1),ierr)
   endif

 enddo

! time_(1) = time_(1) + MPI_WTIME() - time00

! time00 = MPI_WTIME()

do rrank = 0, nproc-1
! Waiting for all receive operations to be accomplished
  if (nprocs_to_recv(rrank) == 1) then
    call MPI_WAIT(req2(rrank+1),status(1,rrank+1),ierr)
  endif
enddo

! time_(2) = time_(2) + MPI_WTIME() - time00

! time00 = MPI_WTIME()

! Unpack each received buffer into Q; x runs over the SENDER's
! x-extent, matching the pack order on the sending side.
do rrank = 0, nproc-1
  if (nprocs_to_recv(rrank) == 1) then
    ibuf = 1
    do j = nypi_to_recv(rrank), nype_to_recv(rrank)
      do i = nxpi(rrank), nxpe(rrank)
        Q(i,j) = rarr(ibuf,rrank)
        ibuf = ibuf + 1
      enddo
    enddo
  endif
enddo

! time_(3) = time_(3) + MPI_WTIME() - time00

! time00 = MPI_WTIME()

! Completing the sends only after unpacking keeps sarr columns alive
! until the data has certainly left the buffers.
do rrank = 0, nproc-1
! Waiting for all send operations to be accomplished
  if (nprocs_to_send(rrank) == 1) then
    call MPI_WAIT(req2(rrank+nproc+1),status(1,rrank+nproc+1),ierr)
  endif
enddo

! time_(4) = time_(4) + MPI_WTIME() - time00

 call MPI_BARRIER(comm3d, ierr)

! proc_time(transpose_xy_yz_indic,2) = proc_time(transpose_xy_yz_indic,2) + &
! & MPI_WTIME() - pr_time

 ! Accumulate elapsed CPU time for this routine into the shared
 ! timing table (slot transpose_xy_y_indic, column 2).
 call cpu_time(pro_time)
 proc_time(transpose_xy_y_indic,2) = proc_time(transpose_xy_y_indic,2) + &
 & pro_time - pr_time

 if (firstcall) firstcall = .false.
 END SUBROUTINE TRANSPOSE_XY_Y
