import numpy as np
import os
import json
from mindspore.common import set_seed
from mindspore.communication.management import init, get_group_size, get_rank
from mindspore.parallel import set_algo_parameters
from mindspore import context

seed=100
def get_device_id():
    """Return the local Ascend device id from the DEVICE_ID env var (default 0)."""
    raw = os.environ.get('DEVICE_ID', '0')
    return int(raw)


def init_context(is_distribute):
    """Seed RNGs and configure the MindSpore execution context.

    Args:
        is_distribute (bool): if True, initialize HCCL communication and
            data-parallel auto-parallel context across all ranks; otherwise
            configure a single-device graph-mode context.

    Returns:
        tuple[int, int]: (rank_id, device_num) — (0, 1) in the
        single-device case.
    """
    # Fix both numpy's and mindspore's RNG so runs are reproducible
    # (and identical across ranks before data sharding).
    np.random.seed(seed)
    set_seed(seed)

    rank_id = 0
    device_num = 1

    # Common setup for both paths: graph mode on the Ascend device
    # selected by the DEVICE_ID env var (0 ~ 7).
    device_id = get_device_id()
    context.set_context(mode=context.GRAPH_MODE, device_id=device_id,
                        device_target="Ascend", save_graphs=False)

    if is_distribute:
        # op_timeout=0 disables the per-operator execution timeout;
        # collective ops can legitimately block while peers catch up.
        context.set_context(op_timeout=0)

        init()                          # initialize HCCL communication
        rank_id = get_rank()            # local_rank
        device_num = get_group_size()   # world_size

        # Redirect each rank's print() output to its own log file to
        # avoid interleaved stdout across devices.
        context.set_context(print_file_path=os.path.join("./device", f'print_rank_{rank_id}.log'))

        context.reset_auto_parallel_context()
        context.set_auto_parallel_context(
            parallel_mode=context.ParallelMode.DATA_PARALLEL,
            device_num=device_num,
            gradients_mean=True)
        print(f'Init context for rank {rank_id}')

    return rank_id, device_num
        