from mpi4py import MPI
import numpy as np
import sys
import time

start_time = time.perf_counter()

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

# length of the vector
n = 4000000

# root builds the full example vector; other ranks receive slices via Scatterv
if rank == 0:
    x = np.linspace(0, 100, n)
else:
    x = None

# Block partition: each rank gets n // size elements, and the first
# (n % size) ranks take one extra element, so every element of x is
# assigned to exactly one rank and the counts sum to exactly n.
base = n // size
remainder = n % size
local_n = base + 1 if rank < remainder else base

# receive buffer sized exactly to this rank's share — no padding zeros
# that would corrupt the max for all-negative data
local_x = np.zeros(local_n)

# Per-rank send counts and displacements. Only significant at root for
# Scatterv, but cheap to compute everywhere. min(i, remainder) accounts
# for the one extra element held by each of the first `remainder` ranks.
count = [base + 1 if i < remainder else base for i in range(size)]
displace = [i * base + min(i, remainder) for i in range(size)]

comm.Scatterv([x, count, displace, MPI.DOUBLE], local_x, root=0)

# local maximum of this rank's slice
local_max = np.array(np.max(local_x))

# combine local maxima with a MAX reduction; only root receives the result
global_max = np.array([0.])
comm.Reduce(local_max, global_max, op=MPI.MAX, root=0)

stop_time = time.perf_counter()
cost_time = stop_time - start_time

if rank == 0:
    print(
        f"The supremum norm is {global_max[0]} computed in parallel and {cost_time:.8f} costed\nMax element is {np.max(x)} by numpy.")
