subroutine start_mpi
! Initialize the MPI environment, read the MPI/GPU layout parameters from
! 'mpi_gpu.dat', and validate the requested decomposition.  On any
! inconsistency the MPI environment is finalized cleanly before stopping.
! All configuration variables (n_rank, n_proc, node_dir, NGPUXNODE,
! NCOREXNODE, ratio_GPU, openmp_activate, n_decomp, ierr) live in
! module mpi_basic_var.

use mpi
use mpi_basic_var

implicit none

integer :: iunit, ios  ! local file unit (newunit avoids unit-number clashes) and I/O status

call mpi_init(ierr)
call mpi_comm_rank(mpi_comm_world,n_rank,ierr)
call mpi_comm_size(mpi_comm_world,n_proc,ierr)
print*,'MPI size,rank: ',n_proc,n_rank

! Determina numero di nodi da porre uguale al numero di listelli
! (read node/GPU layout; abort cleanly if the input file is missing/unreadable)
open(newunit=iunit,file='mpi_gpu.dat',status='old',action='read',iostat=ios)
if(ios .ne. 0) then
  print*,'Error! Cannot open mpi_gpu.dat'
  call MPI_FINALIZE(ierr)
  STOP
endif
read(iunit,*,iostat=ios) node_dir
if(ios .eq. 0) read(iunit,*,iostat=ios) NGPUXNODE
if(ios .eq. 0) read(iunit,*,iostat=ios) NCOREXNODE
if(ios .eq. 0) read(iunit,*,iostat=ios) ratio_GPU
if(ios .eq. 0) read(iunit,*,iostat=ios) openmp_activate
close(iunit)
if(ios .ne. 0) then
  print*,'Error! Failed reading mpi_gpu.dat'
  call MPI_FINALIZE(ierr)
  STOP
endif

! With OpenMP one extra "orchestrator" rank accompanies each group of GPUs,
! hence the +1 in the divisibility requirement.
if(openmp_activate == 1) then
  if(mod(n_proc,NGPUXNODE+1) .ne. 0) then
    print*,'Error! Number of processors must be multiple of NGPUXNODE+1 if openmp is active'
    call MPI_FINALIZE(ierr)
    STOP
  endif
else
  if(mod(n_proc,NCOREXNODE) .ne. 0) then
    print*,'Error! Number of processors must be multiple of NCOREXNODE if openmp is not active'
    call MPI_FINALIZE(ierr)
    STOP
  endif
endif
if(NCOREXNODE .lt. NGPUXNODE) then
  print*,'Error! Number of cores x node must be greater equal than gpus x node'
  call MPI_FINALIZE(ierr)
  STOP
endif
if(node_dir .gt. n_decomp) then
  print*,'Error! node_dir must be less or equal n_decomp'
  call MPI_FINALIZE(ierr)
  STOP
endif

end subroutine start_mpi

subroutine MapDataToProc(n_tot,n_proc_dir,st,en,sz)
! Distribute n_tot items as evenly as possible over n_proc_dir processes.
! The first n_proc_dir - mod(n_tot,n_proc_dir) ranks receive the base
! chunk n_tot/n_proc_dir; the remaining ranks receive one item more, so
! the sizes sum exactly to n_tot.
!
!   n_tot       (in)  total number of items to distribute
!   n_proc_dir  (in)  number of processes along this direction (>= 1)
!   st, en      (out) 1-based inclusive [start,end] index per rank (0-based rank)
!   sz          (out) chunk size per rank, sz(i) = en(i)-st(i)+1
implicit none
integer, intent(in)  :: n_tot, n_proc_dir
integer, intent(out) :: st(0:n_proc_dir-1), en(0:n_proc_dir-1), sz(0:n_proc_dir-1)
integer :: i, n_base, n_extra

n_base  = n_tot / n_proc_dir        ! base chunk size
n_extra = mod(n_tot, n_proc_dir)    ! number of ranks that get one extra item

! Sizes: base for the first n_proc_dir-n_extra ranks, base+1 for the rest.
do i = 0, n_proc_dir-1
  if (i .lt. n_proc_dir - n_extra) then
    sz(i) = n_base
  else
    sz(i) = n_base + 1
  endif
enddo

! Prefix-sum the sizes into contiguous [st,en] ranges starting at 1.
st(0) = 1
en(0) = sz(0)
do i = 1, n_proc_dir-1
  st(i) = en(i-1) + 1
  en(i) = st(i) + sz(i) - 1
enddo

end subroutine MapDataToProc

subroutine MapDataToProc_fast(n_tot,n_proc_dir,n_proc_fast,ratio_fast,st,en,sz)
! Distribute n_tot items over n_proc_dir processes where the first
! n_proc_fast ranks are "fast" (e.g. GPU-accelerated) and should each
! receive ratio_fast times the load of a "slow" rank.  The fast group is
! mapped first (indices 1..n_fast), the slow group after it.
!
!   n_tot       (in)  total number of items
!   n_proc_dir  (in)  total number of processes
!   n_proc_fast (in)  number of fast processes (0 <= n_proc_fast <= n_proc_dir)
!   ratio_fast  (in)  load ratio fast:slow (real)
!   st, en, sz  (out) per-rank [start,end] ranges and sizes (0-based rank)
!
! Bug fixes vs. previous version:
!  - when n_proc_fast == 0, the slow-branch no longer reads the
!    never-allocated fast_en array at index -1;
!  - the remainder alternation can no longer assign items to a group that
!    has zero processes (which left part of n_tot unmapped).
use mpi
implicit none
integer n_tot,n_proc_dir,st(0:n_proc_dir-1),en(0:n_proc_dir-1),sz(0:n_proc_dir-1)
integer, dimension(:), allocatable :: fast_st,fast_sz,fast_en,slow_st,slow_sz,slow_en
integer i_nu,n_slow,n_fast,ierr,offset
integer :: n_proc_fast, n_proc_slow, n_proc_virtual
real :: ratio_fast

! First split the items between the slow and fast groups, weighting each
! fast process as ratio_fast virtual slow processes (truncated to integer).
n_proc_slow = n_proc_dir - n_proc_fast
n_proc_virtual = n_proc_slow + ratio_fast*n_proc_fast

if(n_proc_fast .eq. 0) then
  ! Degenerate case: everything goes to the slow group.
  n_slow = n_tot
  n_fast = 0
elseif(n_proc_slow .eq. 0) then
  ! Degenerate case: everything goes to the fast group.
  n_fast = n_tot
  n_slow = 0
else
  n_slow = (n_tot*n_proc_slow)/n_proc_virtual
  n_fast = (n_tot*ratio_fast*n_proc_fast)/n_proc_virtual
  ! Alternate the leftover (or deficit) between the two groups one item
  ! at a time: sign(1,...) is +1 for a surplus, -1 for a deficit.
  do i_nu = 1, abs(n_tot - n_slow - n_fast)
    if(mod(i_nu,2) .eq. 0) then
      n_slow = n_slow + sign(1, n_tot - n_slow - n_fast)
    else
      n_fast = n_fast + sign(1, n_tot - n_slow - n_fast)
    endif
  enddo
endif

if(n_slow+n_fast .ne. n_tot) then
  print*,'MPI decomposition failed - B'
  call MPI_FINALIZE(ierr)
  STOP
endif

! Map the fast group onto ranks 0 .. n_proc_fast-1 (items 1..n_fast).
offset = 0
if(n_proc_fast .gt. 0) then
   allocate(fast_st(0:n_proc_fast-1),fast_sz(0:n_proc_fast-1),fast_en(0:n_proc_fast-1))
   call MapDataToProc(n_fast,n_proc_fast,fast_st,fast_en,fast_sz)
   st(0:n_proc_fast-1) = fast_st(0:n_proc_fast-1)
   en(0:n_proc_fast-1) = fast_en(0:n_proc_fast-1)
   sz(0:n_proc_fast-1) = fast_sz(0:n_proc_fast-1)
   offset = fast_en(n_proc_fast-1)   ! = n_fast: slow ranges start after the fast block
endif
! Map the slow group onto the remaining ranks, shifted past the fast block.
if(n_proc_slow .gt. 0) then
  allocate(slow_st(0:n_proc_slow-1),slow_sz(0:n_proc_slow-1),slow_en(0:n_proc_slow-1))
  call MapDataToProc(n_slow,n_proc_slow,slow_st,slow_en,slow_sz)
  st(n_proc_fast:n_proc_dir-1) = offset + slow_st(0:n_proc_slow-1)
  sz(n_proc_fast:n_proc_dir-1) = slow_sz(0:n_proc_slow-1)
  en(n_proc_fast:n_proc_dir-1) = offset + slow_en(0:n_proc_slow-1)
endif

if(allocated(fast_st)) deallocate(fast_st,fast_sz,fast_en)
if(allocated(slow_st)) deallocate(slow_st,slow_sz,slow_en)

end subroutine MapDataToProc_fast

subroutine end_mpi
! Shut down the MPI environment (terminates MPI communication for this rank).

use mpi

implicit none

integer :: ierr

call mpi_finalize(ierr)

end subroutine end_mpi
