module latlon_parallel_mod

  use mpi
  use const_mod
  use latlon_mesh_mod
  use latlon_process_mod

  implicit none

  private

  public fill_halo
  public zonal_sum
  public global_sum
  public global_min
  public gather
  public barrier

  interface fill_halo
    module procedure fill_halo_2d_r8
    module procedure fill_halo_3d_r8
  end interface fill_halo

  interface zonal_sum
    module procedure zonal_sum_0d_r8
    module procedure zonal_sum_1d_r8
  end interface zonal_sum

  interface global_sum
    module procedure global_sum_0d_r8
    module procedure global_sum_1d_r8
    module procedure global_sum_2d_r8
    module procedure global_sum_3d_r8
  end interface global_sum

  interface global_min
    module procedure global_min_1d_r8
    module procedure global_min_1d_i4
  end interface global_min

  interface gather
    module procedure gather_2d_r8
  end interface gather

contains

  subroutine fill_halo_2d_r8(array, west_halo, east_halo, south_halo, north_halo)

    !> Exchange the lateral halo regions of a 2-d array with the four
    !> neighbouring processes. Each direction is optional and defaults to
    !> .true. (exchange all four sides when no flags are given).
    real(8), intent(inout) :: array(:,:)
    logical, intent(in), optional :: west_halo
    logical, intent(in), optional :: east_halo
    logical, intent(in), optional :: south_halo
    logical, intent(in), optional :: north_halo

    logical do_west, do_east, do_south, do_north
    integer status(MPI_STATUS_SIZE), ierr

    ! NOTE: the previous merge(west_halo, .true., present(west_halo)) idiom is
    ! nonconforming - merge() evaluates all of its arguments, so it references
    ! the optional dummy even when it is absent. Resolve defaults explicitly.
    do_west  = .true.; if (present(west_halo )) do_west  = west_halo
    do_east  = .true.; if (present(east_halo )) do_east  = east_halo
    do_south = .true.; if (present(south_halo)) do_south = south_halo
    do_north = .true.; if (present(north_halo)) do_north = north_halo

    ! Fill my west halo: send my east edge to the east neighbour while
    ! receiving my west halo from the west neighbour (tag 3).
    if (do_west) then
      call MPI_SENDRECV(array, 1, proc%halo(east)%send_type_2d, proc%halo(east)%proc_id, 3, &
                        array, 1, proc%halo(west)%recv_type_2d, proc%halo(west)%proc_id, 3, &
                        proc%comm, status, ierr)
    end if

    ! Fill my east halo (tag 7).
    if (do_east) then
      call MPI_SENDRECV(array, 1, proc%halo(west)%send_type_2d, proc%halo(west)%proc_id, 7, &
                        array, 1, proc%halo(east)%recv_type_2d, proc%halo(east)%proc_id, 7, &
                        proc%comm, status, ierr)
    end if

    ! Fill my south halo (tag 11).
    if (do_south) then
      call MPI_SENDRECV(array, 1, proc%halo(north)%send_type_2d, proc%halo(north)%proc_id, 11, &
                        array, 1, proc%halo(south)%recv_type_2d, proc%halo(south)%proc_id, 11, &
                        proc%comm, status, ierr)
    end if

    ! Fill my north halo (tag 15).
    if (do_north) then
      call MPI_SENDRECV(array, 1, proc%halo(south)%send_type_2d, proc%halo(south)%proc_id, 15, &
                        array, 1, proc%halo(north)%recv_type_2d, proc%halo(north)%proc_id, 15, &
                        proc%comm, status, ierr)
    end if

  end subroutine fill_halo_2d_r8

  subroutine fill_halo_3d_r8(array, west_halo, east_halo, south_halo, north_halo)

    !> Exchange the lateral halo regions of a 3-d array with the four
    !> neighbouring processes. Each direction is optional and defaults to
    !> .true. (exchange all four sides when no flags are given).
    real(8), intent(inout) :: array(:,:,:)
    logical, intent(in), optional :: west_halo
    logical, intent(in), optional :: east_halo
    logical, intent(in), optional :: south_halo
    logical, intent(in), optional :: north_halo

    logical do_west, do_east, do_south, do_north
    integer status(MPI_STATUS_SIZE), ierr

    ! NOTE: the previous merge(west_halo, .true., present(west_halo)) idiom is
    ! nonconforming - merge() evaluates all of its arguments, so it references
    ! the optional dummy even when it is absent. Resolve defaults explicitly.
    do_west  = .true.; if (present(west_halo )) do_west  = west_halo
    do_east  = .true.; if (present(east_halo )) do_east  = east_halo
    do_south = .true.; if (present(south_halo)) do_south = south_halo
    do_north = .true.; if (present(north_halo)) do_north = north_halo

    ! Fill my west halo: send my east edge to the east neighbour while
    ! receiving my west halo from the west neighbour (tag 3).
    if (do_west) then
      call MPI_SENDRECV(array, 1, proc%halo(east)%send_type_3d, proc%halo(east)%proc_id, 3, &
                        array, 1, proc%halo(west)%recv_type_3d, proc%halo(west)%proc_id, 3, &
                        proc%comm, status, ierr)
    end if

    ! Fill my east halo (tag 7).
    if (do_east) then
      call MPI_SENDRECV(array, 1, proc%halo(west)%send_type_3d, proc%halo(west)%proc_id, 7, &
                        array, 1, proc%halo(east)%recv_type_3d, proc%halo(east)%proc_id, 7, &
                        proc%comm, status, ierr)
    end if

    ! Fill my south halo (tag 11).
    if (do_south) then
      call MPI_SENDRECV(array, 1, proc%halo(north)%send_type_3d, proc%halo(north)%proc_id, 11, &
                        array, 1, proc%halo(south)%recv_type_3d, proc%halo(south)%proc_id, 11, &
                        proc%comm, status, ierr)
    end if

    ! Fill my north halo (tag 15).
    if (do_north) then
      call MPI_SENDRECV(array, 1, proc%halo(south)%send_type_3d, proc%halo(south)%proc_id, 15, &
                        array, 1, proc%halo(north)%recv_type_3d, proc%halo(north)%proc_id, 15, &
                        proc%comm, status, ierr)
    end if

  end subroutine fill_halo_3d_r8

  subroutine zonal_sum_0d_r8(zonal_circle, work, value)

    !> Sum the local contributions in work, then reduce the partial sums
    !> across all processes on the zonal circle into value (on every rank).
    type(latlon_zonal_circle_type), intent(in) :: zonal_circle
    real(8), intent(in) :: work(:)
    real(8), intent(out) :: value

    integer ierr
    real(8) local_sum

    local_sum = sum(work)
    ! MPI_DOUBLE is the C datatype; the Fortran datatype for real(8) is
    ! MPI_DOUBLE_PRECISION.
    call MPI_ALLREDUCE(local_sum, value, 1, MPI_DOUBLE_PRECISION, MPI_SUM, zonal_circle%comm, ierr)

  end subroutine zonal_sum_0d_r8

  subroutine zonal_sum_1d_r8(zonal_circle, work, value)

    !> Sum work over its first dimension locally, then reduce the partial
    !> sums across all processes on the zonal circle into value (on every
    !> rank). size(value) must equal size(work, 2).
    type(latlon_zonal_circle_type), intent(in) :: zonal_circle
    real(8), intent(in) :: work(:,:)
    real(8), intent(out) :: value(:)

    integer ierr
    real(8) local_sum(size(value))

    ! Use a named buffer rather than passing the array expression straight to
    ! the implicit-interface MPI buffer argument.
    local_sum = sum(work, dim=1)
    ! MPI_DOUBLE is the C datatype; the Fortran datatype for real(8) is
    ! MPI_DOUBLE_PRECISION.
    call MPI_ALLREDUCE(local_sum, value, size(value), MPI_DOUBLE_PRECISION, MPI_SUM, zonal_circle%comm, ierr)

  end subroutine zonal_sum_1d_r8

  subroutine global_sum_0d_r8(comm, value)

    !> Replace value with its sum over all processes in comm.
    integer, intent(in) :: comm
    real(8), intent(inout) :: value

    integer ierr

    ! MPI_IN_PLACE avoids the separate result temporary (consistent with
    ! global_min_1d_i4). MPI_DOUBLE_PRECISION is the Fortran datatype for
    ! real(8); MPI_DOUBLE is the C one.
    call MPI_ALLREDUCE(MPI_IN_PLACE, value, 1, MPI_DOUBLE_PRECISION, MPI_SUM, comm, ierr)

  end subroutine global_sum_0d_r8

  subroutine global_sum_1d_r8(comm, array)

    !> Replace array elementwise with its sum over all processes in comm.
    integer, intent(in) :: comm
    real(8), intent(inout) :: array(:)

    integer ierr

    ! MPI_IN_PLACE avoids the allocate/copy/deallocate of a result temporary
    ! (consistent with global_min_1d_i4). MPI_DOUBLE_PRECISION is the Fortran
    ! datatype for real(8); MPI_DOUBLE is the C one.
    call MPI_ALLREDUCE(MPI_IN_PLACE, array, size(array), MPI_DOUBLE_PRECISION, MPI_SUM, comm, ierr)

  end subroutine global_sum_1d_r8

  subroutine global_sum_2d_r8(comm, array)

    !> Replace array elementwise with its sum over all processes in comm.
    integer, intent(in) :: comm
    real(8), intent(inout) :: array(:,:)

    integer ierr

    ! MPI_IN_PLACE avoids the allocate/copy/deallocate of a result temporary
    ! (consistent with global_min_1d_i4). MPI_DOUBLE_PRECISION is the Fortran
    ! datatype for real(8); MPI_DOUBLE is the C one.
    call MPI_ALLREDUCE(MPI_IN_PLACE, array, size(array), MPI_DOUBLE_PRECISION, MPI_SUM, comm, ierr)

  end subroutine global_sum_2d_r8

  subroutine global_sum_3d_r8(comm, array)

    !> Replace array elementwise with its sum over all processes in comm.
    integer, intent(in) :: comm
    real(8), intent(inout) :: array(:,:,:)

    integer ierr

    ! MPI_IN_PLACE avoids the allocate/copy/deallocate of a result temporary
    ! (consistent with global_min_1d_i4). MPI_DOUBLE_PRECISION is the Fortran
    ! datatype for real(8); MPI_DOUBLE is the C one.
    call MPI_ALLREDUCE(MPI_IN_PLACE, array, size(array), MPI_DOUBLE_PRECISION, MPI_SUM, comm, ierr)

  end subroutine global_sum_3d_r8

  subroutine global_min_1d_r8(comm, array)

    !> Replace array elementwise with its minimum over all processes in comm.
    integer, intent(in) :: comm
    real(8), intent(inout) :: array(:)

    integer ierr

    ! MPI_IN_PLACE avoids the allocate/copy/deallocate of a result temporary
    ! (consistent with global_min_1d_i4). MPI_DOUBLE_PRECISION is the Fortran
    ! datatype for real(8); MPI_DOUBLE is the C one.
    call MPI_ALLREDUCE(MPI_IN_PLACE, array, size(array), MPI_DOUBLE_PRECISION, MPI_MIN, comm, ierr)

  end subroutine global_min_1d_r8

  subroutine global_min_1d_i4(comm, array)

    !> Replace array elementwise with its minimum over all processes in comm.
    integer, intent(in) :: comm
    integer, intent(inout) :: array(:)

    integer ierr

    ! MPI_INT is the C datatype; the Fortran datatype for default integer is
    ! MPI_INTEGER.
    call MPI_ALLREDUCE(MPI_IN_PLACE, array, size(array), MPI_INTEGER, MPI_MIN, comm, ierr)

  end subroutine global_min_1d_i4

  subroutine gather_2d_r8(comm, jbeg, jend, local_array, global_array)

    !> Gather latitude-decomposed 2-d slabs from all processes onto the root
    !> process (rank 0). Each rank owns rows jbeg:jend; the root assembles
    !> them, in rank order, into global_array. global_array is only
    !> significant on the root.
    !>
    !> NOTE(review): the comm dummy is currently unused - every MPI call goes
    !> through proc%comm. Confirm whether comm should replace proc%comm here.
    integer, intent(in) :: comm
    integer, intent(in) :: jbeg
    integer, intent(in) :: jend
    real(8), intent(in) :: local_array(:,:)
    real(8), intent(out) :: global_array(:,:)

    integer jbegs(proc%np)
    integer jends(proc%np)
    integer recvcounts(proc%np)
    integer displs(proc%np)
    integer i, ierr

    ! Collect each rank's row range on the root so it can size the receive
    ! segments. MPI_INTEGER (not the C datatype MPI_INT) matches Fortran
    ! default integer.
    call MPI_GATHER(jbeg, 1, MPI_INTEGER, jbegs, 1, MPI_INTEGER, 0, proc%comm, ierr)
    call MPI_GATHER(jend, 1, MPI_INTEGER, jends, 1, MPI_INTEGER, 0, proc%comm, ierr)

    ! recvcounts/displs are only significant at the root (per MPI_GATHERV),
    ! so they may stay undefined on the other ranks.
    if (proc%is_root_proc()) then
      do i = 1, proc%np
        recvcounts(i) = (jends(i) - jbegs(i) + 1) * size(local_array, 1)
      end do
      displs(1) = 0
      do i = 2, proc%np
        displs(i) = displs(i-1) + recvcounts(i-1)
      end do
    end if

    call MPI_GATHERV(local_array, size(local_array), MPI_DOUBLE_PRECISION, global_array, &
                     recvcounts, displs, MPI_DOUBLE_PRECISION, 0, proc%comm, ierr)

  end subroutine gather_2d_r8

  subroutine barrier()

    !> Block until every process in proc%comm has reached this point.
    integer mpi_err

    call MPI_BARRIER(proc%comm, mpi_err)

  end subroutine barrier

end module latlon_parallel_mod
