import random
import numpy as np

import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph import ParallelEnv
from paddle.fluid.incubate.fleet.collective import fleet
from paddle.fluid.incubate.fleet.collective import DistributedStrategy
from helm.static.models.layers import DEFAULTS

def compile_program(program, loss_name):
    """Wrap a fluid Program into a data-parallel CompiledProgram.

    Args:
        program: the ``fluid.Program`` to compile.
        loss_name (str): name of the loss variable that drives
            data-parallel gradient aggregation.

    Returns:
        fluid.CompiledProgram: the compiled data-parallel program.
    """
    # Enable in-place buffer reuse and fused optimizer ops to reduce
    # memory footprint and kernel-launch overhead.
    build_cfg = fluid.compiler.BuildStrategy()
    build_cfg.enable_inplace = True
    build_cfg.fuse_all_optimizer_ops = True

    run_cfg = fluid.ExecutionStrategy()
    run_cfg.num_threads = 4
    # Drop the local execution scopes only every 100 iterations.
    run_cfg.num_iteration_per_drop_scope = 100

    return fluid.CompiledProgram(program).with_data_parallel(
        loss_name=loss_name,
        build_strategy=build_cfg,
        exec_strategy=run_cfg)


def dist_optimizer(optimizer):
    """Wrap an optimizer for distributed (fleet collective) training.

    Args:
        optimizer: the base fluid optimizer to distribute.

    Returns:
        The fleet-wrapped distributed optimizer.
    """
    run_cfg = fluid.ExecutionStrategy()
    run_cfg.num_threads = 3
    run_cfg.num_iteration_per_drop_scope = 10

    # One NCCL communicator with fused all-reduce to cut comm overhead.
    strategy = DistributedStrategy()
    strategy.nccl_comm_num = 1
    strategy.fuse_all_reduce_ops = True
    strategy.exec_strategy = run_cfg

    return fleet.distributed_optimizer(optimizer, strategy=strategy)

def set_device(device):
    """
    Args:
        device (str): specify device type, 'cpu' or 'gpu'.

    Returns:
        fluid.CUDAPlace or fluid.CPUPlace: Created GPU or CPU place.
        Falls back to a CPUPlace when 'gpu' is requested but this
        Paddle build has no CUDA support.

    Raises:
        ValueError: if ``device`` is not the string 'cpu' or 'gpu'.
    """
    # Validate with an explicit exception: ``assert`` is stripped under
    # ``python -O``, which would let invalid input fall through silently.
    if not isinstance(device, str) or device.lower() not in ('cpu', 'gpu'):
        raise ValueError(
            "Expected device in ['cpu', 'gpu'], but got {}".format(device))

    if device.lower() == 'gpu' and fluid.is_compiled_with_cuda():
        # One device per process; dev_id comes from the parallel env.
        return fluid.CUDAPlace(ParallelEnv().dev_id)
    return fluid.CPUPlace()


def set_seed(seed=0):
    """Seed every RNG the static-graph pipeline relies on.

    Seeds Python's ``random``, NumPy, and both fluid default programs,
    and records the seed in the shared layer DEFAULTS.

    Args:
        seed (int): seed value to apply everywhere. Defaults to 0.
    """
    DEFAULTS['seed'] = seed
    random.seed(seed)
    np.random.seed(seed)
    # Both the startup and main programs carry their own random_seed.
    for prog in (fluid.default_main_program(),
                 fluid.default_startup_program()):
        prog.random_seed = seed