!==============================================================================
! Copyright (C) 2010, University Corporation for Atmospheric Research,
!                     Colorado State University,
!                     Los Alamos National Security, LLC,
!                     United States Department of Energy
!
! All rights reserved.  See ../COPYING for copyright details
!==============================================================================

module communicate

    ! !MODULE: communicate
    ! !DESCRIPTION:
    !  This module contains the necessary routines and variables for
    !  communicating between processors.  This instance of the module
    !  is for serial execution so not much is done.

    use kinds_mod, only: i4

    implicit none
    private
    save

    ! !PUBLIC MEMBER FUNCTIONS:

    public  :: init_communicate,          &
        exit_message_environment,  &
        abort_message_environment, &
        get_num_procs,             &
        create_communicator

    ! !PUBLIC DATA MEMBERS:

    !  Communicator and task bookkeeping shared with the rest of the
    !  model.  When compiled with -Dcoupled these hold real MPI handles
    !  and ranks; in serial mode they are plain integers assigned
    !  placeholder values by init_communicate/create_ocn_communicator.
    integer (i4), public :: &
        MPI_COMM_OCN,             &! MPI communicator for ocn comms
        mpi_dbl,                  &! MPI type for dbl_kind
        my_task,                  &! MPI task number for this task
        master_task,              &! task number of master task
        cpl_task                   ! task number of coupler master task

    contains

    subroutine init_communicate
        ! !DESCRIPTION:
        !  This routine sets up the message environment and defines the
        !  ocean communicator.  Coupled mode: starts MPI, builds
        !  MPI_COMM_OCN (via create_ocn_communicator) and queries this
        !  process's rank into my_task.  Serial mode: just assigns rank 0.
        !  In both modes master_task is set to 0.
        !-----------------------------------------------------------------------
        !
        !  local variables
        !
        !-----------------------------------------------------------------------

#ifdef coupled
            include 'mpif.h'   ! MPI Fortran include file
            integer (i4) :: ierr  ! MPI error flag
#endif

        !-----------------------------------------------------------------------
        !
        !  initiate mpi environment and create communicator for internal
        !  ocean communications
        !
        !-----------------------------------------------------------------------
#ifdef coupled
            call MPI_INIT(ierr)
            call create_ocn_communicator   ! fills module var MPI_COMM_OCN
            call MPI_COMM_RANK  (MPI_COMM_OCN, my_task, ierr)
#else
            my_task = 0   ! serial execution: single task with rank 0
#endif

        master_task = 0

#ifdef coupled
            !-------------------------------------------------------------------
            !
            !  define the MPI type corresponding to dbl_kind; here it is
            !  unconditionally MPI_DOUBLE_PRECISION.
            !  NOTE(review): an earlier comment described a fallback to
            !  MPI_REAL on 64-bit machines where real_kind == dbl_kind,
            !  but no such branch exists in this serial instance.
            !
            !-------------------------------------------------------------------
            MPI_DBL = MPI_DOUBLE_PRECISION
#endif
    end subroutine init_communicate


    function get_num_procs() result(nprocs)
        ! !DESCRIPTION:
        !  Reports the number of processors assigned to the ocean model.
        !  This serial instance of the module always runs on exactly one
        !  processor, so the count is a constant.

        ! !OUTPUT PARAMETERS:
        integer (i4) :: nprocs   ! processor count for the ocean (always 1)

        nprocs = 1
    end function get_num_procs

    subroutine exit_message_environment(ierr)
        ! !DESCRIPTION:
        !  This routine exits the message environment properly when model
        !  stops.  Coupled mode: calls MPI_FINALIZE; serial mode: simply
        !  reports success through ierr.

#ifdef coupled
            ! !INCLUDES:
            include 'mpif.h'   ! MPI Fortran include file
#endif

        ! !OUTPUT PARAMETERS:
        integer (i4), intent(out) :: ierr   ! MPI error flag (0 in serial mode)

#ifdef coupled
            call MPI_FINALIZE(ierr)
#else
            ierr = 0   ! nothing to shut down in serial execution
#endif
    end subroutine exit_message_environment


    subroutine abort_message_environment(ierr)
        ! !DESCRIPTION:
        !  This routine aborts the message environment when model stops.
        !  In coupled mode, it synchronizes the ocean tasks and then
        !  attempts to abort the entire coupled system via MPI_ABORT on
        !  MPI_COMM_WORLD.  In serial mode it only reports success.
        !
        ! !REVISION HISTORY:
        !  same as module

#ifdef coupled
            ! !INCLUDES:
            include 'mpif.h'   ! MPI Fortran include file
#endif

        ! !OUTPUT PARAMETERS:
        integer (i4), intent(out) :: ierr   ! MPI error flag

#ifdef coupled
            call MPI_BARRIER(MPI_COMM_OCN, ierr)
            !*** MPI_ABORT takes (comm, errorcode, ierror).  The previous
            !*** call passed only two arguments, so the undefined ierr was
            !*** used as the error code and the ierror argument was
            !*** omitted.  Pass an explicit nonzero exit code instead.
            call MPI_ABORT(MPI_COMM_WORLD, 1, ierr)
            call MPI_FINALIZE(ierr)
#else
            ierr = 0   ! nothing to abort in serial execution
#endif
    end subroutine abort_message_environment


    subroutine create_ocn_communicator
        ! !DESCRIPTION:
        !  This routine queries all the tasks in MPI_COMM_WORLD to see
        !  which belong to the ocean.  In standalone mode, this should
        !  be all tasks, but in coupled mode POP needs to determine
        !  which tasks are assigned to the ocean component.
        !
        !  this routine should be called after mpi_init, but before
        !  setting up any internal mpi setups (since these will require
        !  the internal communicators returned by this routine)
        !
        !  Side effects: sets the module variables MPI_COMM_OCN and
        !  cpl_task.  In serial mode both are simply set to 0.

#ifdef coupled
            ! !INCLUDES:
            include 'mpif.h'
#endif

#ifdef coupled
            !-------------------------------------------------------------------
            !
            !  local variables
            !
            !-------------------------------------------------------------------

            character (3) :: cmodel   ! model name temporary

            integer (i4) :: &
                MPI_GROUP_WORLD,       &! group id for MPI_COMM_WORLD
                MPI_GROUP_ATM,         &! group of processors assigned to atm
                MPI_GROUP_OCN,         &! group of processors assigned to ocn
                MPI_GROUP_ICE,         &! group of processors assigned to ice
                MPI_GROUP_LND,         &! group of processors assigned to lnd
                MPI_GROUP_CPL,         &! group of processors assigned to cpl
                MPI_COMM_ATM,          &! communicator for atm-internal comms
                MPI_COMM_ICE,          &! communicator for ice-internal comms
                MPI_COMM_LND,          &! communicator for lnd-internal comms
                MPI_COMM_CPL            ! communicator for cpl-internal comms

            integer (i4) :: &
                n,                     &! dummy loop counter
                ierr,                  &! error flag for MPI comms
                nprocs_all,            &! total processor count
                my_task_all,           &! rank of process in coupled domain
                ntasks_atm,            &! num tasks assigned to atm
                ntasks_ocn,            &! num tasks assigned to ocn
                ntasks_ice,            &! num tasks assigned to ice
                ntasks_lnd,            &! num tasks assigned to lnd
                ntasks_cpl              ! num tasks assigned to cpl

            !  each range is a (first, last, stride) triplet in the form
            !  expected by MPI_GROUP_RANGE_INCL
            integer (i4), dimension(3) :: &
                range_ocn,             &! range of tasks assigned to ocean
                range_atm,             &! range of tasks assigned to atmos
                range_ice,             &! range of tasks assigned to ice
                range_lnd,             &! range of tasks assigned to land
                range_cpl               ! range of tasks assigned to coupler

            !---------------------------------------------------------------
            !
            !  determine processor rank in full (coupled) domain
            !
            !---------------------------------------------------------------
            call MPI_COMM_RANK (MPI_COMM_WORLD, my_task_all, ierr)

            !---------------------------------------------------------------
            !
            !  determine which group of processes assigned to each model
            !  assume the first processor assigned to a model is the task that
            !  will communicate coupled model messages
            !
            !---------------------------------------------------------------
            call MPI_COMM_SIZE (MPI_COMM_WORLD, nprocs_all, ierr)

            !  initialize each range so min/max accumulation below works:
            !  first = nprocs_all (larger than any rank), last = 0, stride = 1
            ntasks_atm = 0
            ntasks_ocn = 0
            ntasks_ice = 0
            ntasks_lnd = 0
            ntasks_cpl = 0
            range_ocn(1) = nprocs_all
            range_atm(1) = nprocs_all
            range_ice(1) = nprocs_all
            range_lnd(1) = nprocs_all
            range_cpl(1) = nprocs_all
            range_ocn(2) = 0
            range_atm(2) = 0
            range_ice(2) = 0
            range_lnd(2) = 0
            range_cpl(2) = 0
            range_ocn(3) = 1
            range_atm(3) = 1
            range_ice(3) = 1
            range_lnd(3) = 1
            range_cpl(3) = 1

            !***
            !*** each processor broadcasts its model to all the processors
            !*** in the coupled domain
            !***
            do n=0,nprocs_all-1
                if (n == my_task_all) then
                    cmodel = 'ocn'   ! this instance always identifies as ocean
                else
                    cmodel = 'unk'   ! overwritten by the broadcast from rank n
                endif

                call MPI_BCAST( &
                    cmodel, 3, MPI_CHARACTER, n, MPI_COMM_WORLD, ierr)

                !  NOTE(review): the (min,max,1) triplet assumes each
                !  component's ranks form one contiguous block in
                !  MPI_COMM_WORLD -- confirm against the coupled launch setup
                select case(cmodel)
                    case ('ocn')
                        ntasks_ocn = ntasks_ocn + 1
                        range_ocn(1) = min(n,range_ocn(1))
                        range_ocn(2) = max(n,range_ocn(2))
                    case ('atm')
                        ntasks_atm = ntasks_atm + 1
                        range_atm(1) = min(n,range_atm(1))
                        range_atm(2) = max(n,range_atm(2))
                    case ('ice')
                        ntasks_ice = ntasks_ice + 1
                        range_ice(1) = min(n,range_ice(1))
                        range_ice(2) = max(n,range_ice(2))
                    case ('lnd')
                        ntasks_lnd = ntasks_lnd + 1
                        range_lnd(1) = min(n,range_lnd(1))
                        range_lnd(2) = max(n,range_lnd(2))
                    case ('cpl')
                        ntasks_cpl = ntasks_cpl + 1
                        range_cpl(1) = min(n,range_cpl(1))
                        range_cpl(2) = max(n,range_cpl(2))
                    case default
                        stop 'Unknown model name in comm setup'
                end select
            end do

            !  first coupler rank is the coupler master task
            cpl_task = range_cpl(1)

            !---------------------------------------------------------------
            !
            !  create subroup and communicator for each models internal
            !  communciations, note that MPI_COMM_CREATE must be called by
            !  all processes in MPI_COMM_WORLD so this must be done by all
            !  models consistently and in the same order.
            !
            !---------------------------------------------------------------
            call MPI_COMM_GROUP(MPI_COMM_WORLD, MPI_GROUP_WORLD, ierr)

            if (ntasks_atm > 0) &
                call MPI_GROUP_RANGE_INCL(MPI_GROUP_WORLD, 1, range_atm, &
                    MPI_GROUP_ATM, ierr)

            if (ntasks_ocn > 0) &
                call MPI_GROUP_RANGE_INCL(MPI_GROUP_WORLD, 1, range_ocn,  &
                    MPI_GROUP_OCN, ierr)

            if (ntasks_ice > 0) &
                call MPI_GROUP_RANGE_INCL(MPI_GROUP_WORLD, 1, range_ice,  &
                    MPI_GROUP_ICE, ierr)

            if (ntasks_lnd > 0) &
                call MPI_GROUP_RANGE_INCL(MPI_GROUP_WORLD, 1, range_lnd,  &
                    MPI_GROUP_LND, ierr)

            if (ntasks_cpl > 0) &
                call MPI_GROUP_RANGE_INCL(MPI_GROUP_WORLD, 1, range_cpl,  &
                    MPI_GROUP_CPL, ierr)

            if (ntasks_atm > 0) &
                call MPI_COMM_CREATE (MPI_COMM_WORLD, MPI_GROUP_ATM,  &
                    MPI_COMM_ATM, ierr)

            if (ntasks_ocn > 0) &
                call MPI_COMM_CREATE (MPI_COMM_WORLD, MPI_GROUP_OCN,  &
                    MPI_COMM_OCN, ierr)

            if (ntasks_ice > 0) &
                call MPI_COMM_CREATE (MPI_COMM_WORLD, MPI_GROUP_ICE,  &
                    MPI_COMM_ICE, ierr)

            if (ntasks_lnd > 0) &
                call MPI_COMM_CREATE (MPI_COMM_WORLD, MPI_GROUP_LND,  &
                    MPI_COMM_LND, ierr)

            if (ntasks_cpl > 0) &
                call MPI_COMM_CREATE (MPI_COMM_WORLD, MPI_GROUP_CPL,  &
                    MPI_COMM_CPL, ierr)
#else
            !  serial execution: dummy values for the module variables
            MPI_COMM_OCN = 0
            cpl_task = 0
#endif
    end subroutine create_ocn_communicator

    subroutine create_communicator(new_comm, num_procs)
        ! !DESCRIPTION:
        !  This routine creates a separate communicator for a subset of
        !  processors under default ocean communicator.
        !
        !  this routine should be called from init_domain1 when the
        !  domain configuration (e.g. nprocs_btrop) has been determined
        !
        !  In serial mode num_procs is ignored and the existing
        !  MPI_COMM_OCN value is returned unchanged.

#ifdef coupled
            ! !INCLUDES:
            include 'mpif.h'
#endif

        ! !INPUT PARAMETERS:
        integer (i4), intent(in) :: &
        num_procs         ! num of procs in new distribution

        ! !OUTPUT PARAMETERS:
        integer (i4), intent(out) :: &
        new_comm          ! new communicator for this distribution

#ifdef coupled
            !---------------------------------------------------------------
            !
            !  local variables
            !
            !---------------------------------------------------------------

            integer (i4) :: &
                MPI_GROUP_OCN,  &! group of processors assigned to ocn
                MPI_GROUP_NEW    ! group of processors assigned to new dist
            integer (i4) :: &
                ierr                    ! error flag for MPI comms
            integer (i4), dimension(3) :: &
                range               ! range of tasks assigned to new dist
                                    !  (assumed 0,num_procs-1)

            !---------------------------------------------------------------
            !
            !  determine group of processes assigned to distribution
            !
            !---------------------------------------------------------------
            call MPI_COMM_GROUP (MPI_COMM_OCN, MPI_GROUP_OCN, ierr)

            !  (first, last, stride) triplet selecting the first num_procs
            !  ranks of MPI_COMM_OCN
            range(1) = 0
            range(2) = num_procs-1
            range(3) = 1

            !---------------------------------------------------------------
            !
            !  create subroup and communicator for new distribution
            !  note: MPI_COMM_CREATE must be called by all procs in
            !   MPI_COMM_OCN
            !
            !---------------------------------------------------------------
            call MPI_GROUP_RANGE_INCL(MPI_GROUP_OCN, 1, range, &
                MPI_GROUP_NEW, ierr)

            call MPI_COMM_CREATE (MPI_COMM_OCN, MPI_GROUP_NEW,  &
                new_comm, ierr)

#else
            new_comm = MPI_COMM_OCN   ! serial: reuse the (dummy) ocean comm
#endif
    end subroutine create_communicator

end module communicate
!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
