module fiona_dataset_mod

  use fiona_common_mod
  use fiona_dim_mod
  use fiona_var_mod

  implicit none

  !> Descriptor of one NetCDF dataset, including optional MPI group-IO state.
  type dataset_type
    integer :: id                     = -1      ! NetCDF file id from NF90_OPEN (-1 = not open).
    logical :: async                  = .false. ! If true, the group root does IO only and holds no data.
    character(30) name
    character(256) :: desc            = 'N/A'
    character(256) :: author          = 'N/A'
    character(256) :: file_path       = 'N/A'   ! Path passed to NF90_OPEN.
    character(256) :: file_prefix     = 'N/A'
    character(256) :: last_file_path  = ''
    type(dim_type), pointer :: time_dim => null()
    type(var_type), pointer :: time_var => null()
    type(hash_table_type) atts                  ! Attributes keyed by name.
    type(hash_table_type) dims                  ! Dimensions keyed by name (dim_type values).
    type(hash_table_type) vars                  ! Variables keyed by name (var_type values).
    integer :: time_step              = 0
    real(8) :: time_in_seconds        = -1
    real(8) time_units_in_seconds
    character(30) :: time_units_str   = 'N/A'
    character(30) :: start_time_str   = 'N/A'
    ! Parallel IO
#ifdef HAS_MPI
    character(30) :: hostname         = 'N/A'
    integer :: global_mpi_comm        = MPI_COMM_NULL ! Communicator given to init.
    integer :: mpi_comm               = MPI_COMM_NULL ! Communicator containing only group roots.
    integer :: nproc                  = 0             ! Size of global_mpi_comm.
    integer :: proc_id                = MPI_PROC_NULL ! Rank in global_mpi_comm.
#endif
    logical :: is_parallel            = .false.
    ! Group IO
    integer :: ngroups                = 1
#ifdef HAS_MPI
    ! MPI communicator group_mpi_comm is used to gather or scatter data to group processes.
    integer :: group_mpi_comm         = MPI_COMM_NULL
#endif
    integer :: group_id               = -1
    integer :: group_nproc            = 1      ! Number of processes in this group.
    integer :: group_proc_id          = 0      ! Rank within the group.
    logical :: is_group_root          = .true. ! True when group_proc_id == 0 (or no MPI).
    integer, allocatable :: group_start(:,:)   ! (max_ndim, group_nproc) per-member start indices.
    integer, allocatable :: group_count(:,:)   ! (max_ndim, group_nproc) per-member counts.
    integer, allocatable :: group_send_req(:)  ! Nonblocking send request handles, one per member.
    integer, allocatable :: group_recv_req(:)  ! Nonblocking recv request handles, one per member.
  contains
    procedure :: init      => dataset_init
    procedure :: open      => dataset_open
    procedure :: is_open   => dataset_is_open
    procedure :: wait_send => dataset_wait_send
    procedure :: wait_recv => dataset_wait_recv
    procedure :: close     => dataset_close
    procedure :: get_dim   => dataset_get_dim
    procedure :: get_var   => dataset_get_var
    procedure :: clear     => dataset_clear
    final :: dataset_final
  end type dataset_type

contains

  !> Initialize the dataset: record metadata, create the attribute/dimension/
  !> variable tables, and (when built with MPI and given mpi_comm) split the
  !> processes into equally sized IO groups plus a communicator of group roots.
  subroutine dataset_init(this, name, desc, mpi_comm, ngroups, async)

    class(dataset_type), intent(inout) :: this
    character(*), intent(in) :: name
    character(*), intent(in), optional :: desc                  ! Human-readable description.
    integer, intent(in), optional :: mpi_comm                   ! Global communicator to split.
    integer, intent(in), optional :: ngroups                    ! Requested number of IO groups.
    logical, intent(in), optional :: async                      ! Dedicate group roots to IO only.

    integer n, ierr

    this%name = name
    if (present(desc)) this%desc = desc
    if (present(async)) this%async = async ! The group root process will do only IO operations.
    call this%atts%init()
    call this%dims%init()
    call this%vars%init()

#ifdef HAS_MPI
    if (present(mpi_comm)) then
      ! Get process size and rank.
      if (mpi_comm /= MPI_COMM_NULL) then
        call MPI_COMM_SIZE(mpi_comm, this%nproc, ierr)
        call MPI_COMM_RANK(mpi_comm, this%proc_id, ierr)
        call MPI_GET_PROCESSOR_NAME(this%hostname, n, ierr)
      end if
      this%global_mpi_comm = mpi_comm
      ! Set group size.
      if (present(ngroups)) then
        if (ngroups >= 1 .and. ngroups <= this%nproc) then
          this%ngroups = ngroups
        else
          ! In default, we use groups as many as processes, that is each
          ! process has its own group.
          this%ngroups = this%nproc
        end if
      end if
      ! Ensure each group has same number of processes: when ngroups does not
      ! divide nproc, reset it to nproc/p for the smallest prime p in
      ! {2,3,5,7} dividing nproc, or abort if none does.
      if (mod(this%nproc, this%ngroups) /= 0) then
        if (mod(this%nproc, 2) == 0) then
          this%ngroups = this%nproc / 2
        else if (mod(this%nproc, 3) == 0) then
          this%ngroups = this%nproc / 3
        else if (mod(this%nproc, 5) == 0) then
          this%ngroups = this%nproc / 5
        else if (mod(this%nproc, 7) == 0) then
          this%ngroups = this%nproc / 7
        else
          call log_error('Please revise dataset ngroups ' // to_str(this%ngroups) // '!', __FILE__, __LINE__)
        end if
        if (this%proc_id == 0) call log_notice('Reset dataset ngroups to ' // to_str(this%ngroups) // '.')
      end if
#ifdef NO_PARALLEL
      this%ngroups = 1
#endif
      if (mpi_comm /= MPI_COMM_NULL) then
        ! Split processes into small groups.
        this%group_id = round_robin(this%nproc, this%ngroups, this%proc_id)
        call MPI_COMM_SPLIT(mpi_comm, this%group_id, this%proc_id, this%group_mpi_comm, ierr)
        if (ierr /= 0) then
          call log_error('Failed to create MPI groups for reading!', __FILE__, __LINE__)
        end if
        call MPI_COMM_RANK(this%group_mpi_comm, this%group_proc_id, ierr)
        call MPI_COMM_SIZE(this%group_mpi_comm, this%group_nproc, ierr)
        this%is_group_root = this%group_proc_id == 0
        ! Create a communicator for group roots. Roots use color 0 (their
        ! group rank); all other processes pass MPI_UNDEFINED and are excluded.
        call MPI_COMM_SPLIT(mpi_comm, merge(this%group_proc_id, MPI_UNDEFINED, this%is_group_root), this%proc_id, this%mpi_comm, ierr)
        if (ierr /= 0) then
          call log_error('Failed to create MPI group for group roots!', __FILE__, __LINE__)
        end if
#ifndef NO_PARALLEL
        if (this%ngroups > 1) this%is_parallel = .true.
#endif
      end if
    end if
#endif
    ! Allocate these two arrays no matter we are using MPI or not.
    allocate(this%group_start(max_ndim,this%group_nproc)); this%group_start = 0
    allocate(this%group_count(max_ndim,this%group_nproc)); this%group_count = 0
#ifdef HAS_MPI
    allocate(this%group_send_req(this%group_nproc)); this%group_send_req = MPI_REQUEST_NULL
    allocate(this%group_recv_req(this%group_nproc)); this%group_recv_req = MPI_REQUEST_NULL
#endif

  end subroutine dataset_init

  !> Open file_path read-only on the group root process (non-roots return
  !> immediately). Tries parallel NetCDF access when is_parallel is set and
  !> falls back to a serial open when the file does not support it.
  subroutine dataset_open(this)

    class(dataset_type), intent(inout) :: this

    integer ierr

    ! Only group roots perform file IO.
    if (.not. this%is_group_root) return

    if (this%is_open()) call this%close()
#ifdef HAS_MPI
    if (.not. this%is_parallel) then
      ierr = NF90_OPEN(this%file_path, ior(NF90_NOWRITE, NF90_NETCDF4), this%id)
    else
      ierr = NF90_OPEN(this%file_path, ior(NF90_NOWRITE, ior(NF90_NETCDF4, NF90_MPIIO)), this%id, &
                       comm=this%mpi_comm, info=MPI_INFO_NULL)
    end if
#else
    ierr = NF90_OPEN(this%file_path, ior(NF90_NOWRITE, NF90_NETCDF4), this%id)
#endif
    if (ierr == -51 .or. & ! NC_ENOTNC: unknown file format
        ierr == -114) then ! NC_ENOPAR: parallel operation on file opened for non-parallel access
      ! Retry with a serial open and remember this dataset is not parallel-capable.
      ierr = NF90_OPEN(this%file_path, ior(NF90_NOWRITE, NF90_NETCDF4), this%id)
      this%is_parallel = .false.
    end if
    call handle_error(ierr, 'Failed to open NetCDF file "' // trim(this%file_path) // '"!', __FILE__, __LINE__)

  end subroutine dataset_open

  logical function dataset_is_open(this) result(res)

    !> True when this process is a group root and the stored NetCDF id refers
    !> to an open file (NF90_INQUIRE succeeds); always false on non-root ranks.
    class(dataset_type), intent(in) :: this

    res = .false.
    if (.not. this%is_group_root) return
    res = NF90_INQUIRE(this%id) == NF90_NOERR

  end function dataset_is_open

  !> Block until every outstanding nonblocking send to the group members has
  !> completed. No-op when built without MPI.
  subroutine dataset_wait_send(this)

    class(dataset_type), intent(inout) :: this

    integer ierr

#ifdef HAS_MPI
    ! MPI_WAITALL skips entries equal to MPI_REQUEST_NULL and resets each
    ! completed request to MPI_REQUEST_NULL, so the former per-request wait
    ! loop is not needed.
    call MPI_WAITALL(this%group_nproc, this%group_send_req, MPI_STATUSES_IGNORE, ierr)
#endif

  end subroutine dataset_wait_send

  subroutine dataset_wait_recv(this, i)

    !> Wait for the outstanding nonblocking receive from group member i, if
    !> any, then reset its request handle. No-op when built without MPI.
    class(dataset_type), intent(inout) :: this
    integer, intent(in) :: i

    integer ierr

#ifdef HAS_MPI
    if (this%group_recv_req(i) == MPI_REQUEST_NULL) return
    call MPI_WAIT(this%group_recv_req(i), MPI_STATUS_IGNORE, ierr)
    this%group_recv_req(i) = MPI_REQUEST_NULL
#endif

  end subroutine dataset_wait_recv

  subroutine dataset_close(this)

    !> Close the NetCDF file on the group root and invalidate the id; no-op
    !> on non-root ranks or when the file is not open.
    class(dataset_type), intent(inout) :: this

    integer ierr

    if (.not. this%is_group_root) return
    if (.not. this%is_open()) return

    ierr = NF90_CLOSE(this%id)
    call handle_error(ierr, 'Failed to close file "' // trim(this%file_path) // '"!', __FILE__, __LINE__)
    this%id = -1

  end subroutine dataset_close

  function dataset_get_dim(this, dim_name) result(res)

    !> Look up a dimension by name in the dims table. Returns a null pointer
    !> when the name is absent (unlike get_var, which aborts).
    class(dataset_type), intent(in) :: this
    character(*), intent(in) :: dim_name
    type(dim_type), pointer :: res

    res => null()
    select type (item => this%dims%value(dim_name))
    type is (dim_type)
      res => item
    end select

  end function dataset_get_dim

  function dataset_get_var(this, var_name) result(res)

    !> Look up a variable by name in the vars table. Aborts with an error
    !> when the name is not present in the dataset.
    class(dataset_type), intent(in) :: this
    character(*), intent(in) :: var_name
    type(var_type), pointer :: res

    res => null()
    select type (item => this%vars%value(var_name))
    type is (var_type)
      res => item
    class default
      call log_error('Variable ' // trim(var_name) // ' is not in dataset!', __FILE__, __LINE__)
    end select

  end function dataset_get_var

  !> Release everything the dataset holds: detach the time pointers, empty
  !> the attribute/dimension/variable tables, and free the group IO arrays.
  !> (The unused local `ierr` of the original has been removed.)
  subroutine dataset_clear(this)

    class(dataset_type), intent(inout) :: this

    ! Pointers are only disassociated here; their targets (if any) appear to
    ! be owned by the dims/vars tables cleared below — confirm ownership.
    nullify(this%time_dim)
    nullify(this%time_var)
    call this%atts%clear()
    call this%dims%clear()
    call this%vars%clear()
    if (allocated(this%group_start)) deallocate(this%group_start)
    if (allocated(this%group_count)) deallocate(this%group_count)
    if (allocated(this%group_send_req)) deallocate(this%group_send_req)
    if (allocated(this%group_recv_req)) deallocate(this%group_recv_req)

  end subroutine dataset_clear

  !> Finalizer for dataset_type: releases all owned resources via clear.
  subroutine dataset_final(this)

    type(dataset_type), intent(inout) :: this

    call this%clear()

  end subroutine dataset_final

  !> Collect every group member's start/count vectors into the group root's
  !> group_start/group_count tables, hand each member its values back, and
  !> record this process' own column. Defaults to the whole local array when
  !> start and count are not both supplied.
  subroutine gather_start_count(dataset, ndim, array_shape, start, count)

    type(dataset_type), intent(inout) :: dataset
    integer, intent(in) :: ndim                    ! Number of significant dimensions (<= max_ndim).
    integer, intent(in) :: array_shape(ndim)       ! Shape of the local data array.
    integer, intent(in), optional :: start(ndim)   ! Start indices of this process' slab.
    integer, intent(in), optional :: count(ndim)   ! Extents of this process' slab.

    integer i, ierr
    integer start_(ndim), count_(ndim)
    integer, allocatable :: recv_count(:), recv_offset(:)

    if (present(start) .and. present(count)) then
      start_ = start
      count_ = count
    else
      ! Default: the entire local array.
      start_ = 1
      count_ = array_shape
    end if
#ifdef HAS_MPI
    if (dataset%group_mpi_comm /= MPI_COMM_NULL) then
      allocate(recv_count(dataset%group_nproc))
      allocate(recv_offset(dataset%group_nproc))
      ! Each member contributes ndim integers; columns of the
      ! (max_ndim, group_nproc) tables are max_ndim elements apart, hence the
      ! max_ndim stride in the displacements.
      recv_count = ndim
      do i = 1, dataset%group_nproc
        recv_offset(i) = (i - 1) * max_ndim
      end do
      ! NOTE(review): MPI_INT is the C datatype handle; these buffers are
      ! default Fortran integers, which would conventionally use MPI_INTEGER.
      ! This works when C int matches default integer — confirm.
      call MPI_GATHERV(start_, ndim, MPI_INT, dataset%group_start, recv_count, recv_offset, MPI_INT, 0, dataset%group_mpi_comm, ierr)
      call MPI_GATHERV(count_, ndim, MPI_INT, dataset%group_count, recv_count, recv_offset, MPI_INT, 0, dataset%group_mpi_comm, ierr)
      ! NOTE(review): the root does not modify the tables between the gathers
      ! and these scatters, so each rank receives back exactly what it sent —
      ! presumably kept for symmetry or future redistribution; confirm before
      ! removing.
      call MPI_SCATTERV(dataset%group_start, recv_count, recv_offset, MPI_INT, start_, ndim, MPI_INT, 0, dataset%group_mpi_comm, ierr)
      call MPI_SCATTERV(dataset%group_count, recv_count, recv_offset, MPI_INT, count_, ndim, MPI_INT, 0, dataset%group_mpi_comm, ierr)
      deallocate(recv_count)
      deallocate(recv_offset)
      ! When do asynchronous IO, the group root process will not occupy any data.
      if (dataset%async .and. dataset%is_group_root) then
        start_ = 0
        count_ = 0
      end if
    end if
#endif
    ! Record this process' own slab in its column (overwrites the gathered
    ! values with zeros on an async root, per the branch above).
    dataset%group_start(1:ndim,dataset%group_proc_id+1) = start_(1:ndim)
    dataset%group_count(1:ndim,dataset%group_proc_id+1) = count_(1:ndim)

  end subroutine gather_start_count

  pure integer function buffer_size(dataset, n) result(res)

    !> Total number of elements across the whole group: the sum over all
    !> members of the product of the first n entries of their count vectors.
    type(dataset_type), intent(in) :: dataset
    integer, intent(in) :: n

    integer member

    res = sum([(product(dataset%group_count(1:n,member)), member = 1, dataset%group_nproc)])

  end function buffer_size

end module fiona_dataset_mod
