

import os
import torch
# from megatron import get_args
from ..megatron import mpu
from .set_args import set_args,get_args

'''
Functionality: initialize the args, build the per-rank device list, and initialize the distributed process groups.
Author: Chuhongjie
Date: 2024-06-13
'''

def initialize_megatron(allow_no_cuda=False):
    """Set global variables, initialize distributed, and
    set autoresume and random seeds.
    `allow_no_cuda` should not be set unless using megatron for cpu only
    data processing. In general this arg should not be set unless you know
    what you are doing.
    Returns a function to finalize distributed env initialization
    (optionally, only when args.lazy_mpu_init == True)
    """
    # CUDA must be present unless the caller explicitly opted out of it.
    assert allow_no_cuda or torch.cuda.is_available(), 'MP requires CUDA.'
    # Populate the global args before anything reads them.
    set_args()
    # Bring up torch.distributed / mpu and hand back this rank's GPU list.
    return _initialize_distributed()


def _initialize_distributed():
    """Initialize torch.distributed and mpu.

    Reads the global args (rank, world_size, local_rank, parallel sizes),
    computes this rank's GPU assignment, initializes the default process
    group over TCP (MASTER_ADDR/MASTER_PORT), pins the current CUDA device,
    and sets up Megatron's tensor/pipeline/data-parallel groups.

    Returns:
        The list of GPU ids assigned to this rank, or None when no CUDA
        device is visible or the assigned list is empty.
    """
    args = get_args()  # global args previously populated by set_args()
    print(args)

    # args.virtual_pipeline_model_parallel_size = 2       # NOTE: manual vpp_size override kept for experiments

    device_count = torch.cuda.device_count()

    if args.rank == 0:
        print('> initializing torch distributed ...', flush=True)
    # Manually set the device ids.
    assigned_gpus = None
    if device_count > 0:
        # assign_gpus(device_count, local_rank, pp_size, gpus_per_stage):
        # device_count = GPUs on this node; local_rank = this process's local rank;
        # nstages_per_node = pipeline stages hosted on this node;
        # gpus_per_stage = GPUs used by each pipeline stage.
        assigned_gpus = assign_gpus(device_count, args.local_rank, args.nstages_per_node, args.gpus_per_stage)
    # Build the TCP rendezvous endpoint for the default process group.
    init_method = 'tcp://'
    master_ip = os.getenv('MASTER_ADDR', 'localhost')
    master_port = os.getenv('MASTER_PORT', '6000')
    init_method += master_ip + ':' + master_port
    """
        这步与 GPU 分配无关，而是设置整个分布式训练环境。
        world_size: 总进程数。
        rank: 当前进程的全局排名。
        这确保所有进程可以相互通信，无论它们使用哪些 GPU。
    """

    # Initialize the default distributed process group (all ranks must reach
    # this call so they can communicate regardless of which GPUs they use).
    torch.distributed.init_process_group(
        backend=args.distributed_backend,
        world_size=args.world_size, rank=args.rank,
        init_method=init_method)

    # Without an explicit set_device() PyTorch defaults to cuda:0, which breaks
    # multi-GPU training; pin this process's device so that afterwards
    # torch.cuda.current_device() == args.local_rank (modulo device_count).
    # NOTE(review): divides by device_count — assumes at least one visible CUDA
    # device; confirm the allow_no_cuda path never reaches here.
    torch.cuda.set_device(args.local_rank % device_count)
    # Set the tensor model-parallel, pipeline model-parallel, and
    # data-parallel communicators.
    if device_count > 0:
        if mpu.model_parallel_is_initialized():
            print('model parallel is already initialized')
        else:
            mpu.initialize_model_parallel(args.tensor_model_parallel_size,
                                          args.pipeline_model_parallel_size,args.virtual_pipeline_model_parallel_size)  # TODO: clarify mapping between these groups and the launcher's processes (one process per GPU?)
        # if args.lazy_mpu_init:  # removed for now
        #     return assigned_gpus
    if assigned_gpus:
        # torch.cuda.set_device(assigned_gpus[0])
        return assigned_gpus
    return None


def initialize_megatron_autoparallel(allow_no_cuda=False):
    """Set global variables, initialize distributed, and
    set autoresume and random seeds.
    `allow_no_cuda` should not be set unless using megatron for cpu only
    data processing. In general this arg should not be set unless you know
    what you are doing.
    Returns a function to finalize distributed env initialization
    (optionally, only when args.lazy_mpu_init == True)
    """
    # Refuse to run without CUDA unless explicitly allowed by the caller.
    assert allow_no_cuda or torch.cuda.is_available(), 'MP requires CUDA.'
    # set_args()  # args are assumed to be populated by the auto-parallel caller
    # Delegate to the auto-parallel variant; returns this rank's GPU list.
    return _initialize_distributed_autoParallel()

def _initialize_distributed_autoParallel():
    """Initialize mpu for the auto-parallel path (torch.distributed is
    assumed to be initialized elsewhere — the init_process_group call is
    deliberately commented out below).

    Reads the global args, computes this rank's GPU assignment, pins the
    current CUDA device, and sets up Megatron's tensor/pipeline/data-parallel
    groups.

    Returns:
        The list of GPU ids assigned to this rank, or None when no CUDA
        device is visible or the assigned list is empty.
    """
    args = get_args()  # global args previously populated elsewhere
    print(args)

    # args.virtual_pipeline_model_parallel_size = 2       # NOTE: manual vpp_size override kept for experiments

    device_count = torch.cuda.device_count()

    if args.rank == 0:
        print('> initializing torch distributed ...', flush=True)
    # Manually set the device ids.
    assigned_gpus = None
    if device_count > 0:
        # assign_gpus(device_count, local_rank, pp_size, gpus_per_stage):
        # device_count = GPUs on this node; local_rank = this process's local rank;
        # nstages_per_node = pipeline stages hosted on this node;
        # gpus_per_stage = GPUs used by each pipeline stage.
        assigned_gpus = assign_gpus(device_count, args.local_rank, args.nstages_per_node, args.gpus_per_stage)
    # Process-group initialization is intentionally disabled here; the
    # auto-parallel caller is expected to have set up torch.distributed.
    # init_method = 'tcp://'
    # master_ip = os.getenv('MASTER_ADDR', 'localhost')
    # master_port = os.getenv('MASTER_PORT', '6000')
    # init_method += master_ip + ':' + master_port
    # (This step is unrelated to GPU assignment; it configures the whole
    #  distributed environment: world_size = total process count,
    #  rank = this process's global rank, so all processes can communicate
    #  regardless of which GPUs they use.)
    #
    # torch.distributed.init_process_group(
    #     backend=args.distributed_backend,
    #     world_size=args.world_size, rank=args.rank,
    #     init_method=init_method)

    # Without an explicit set_device() PyTorch defaults to cuda:0, which breaks
    # multi-GPU training; pin this process's device so that afterwards
    # torch.cuda.current_device() == args.local_rank (modulo device_count).
    # NOTE(review): divides by device_count — assumes at least one visible CUDA
    # device; confirm the allow_no_cuda path never reaches here.
    torch.cuda.set_device(args.local_rank % device_count)
    # Set the tensor model-parallel, pipeline model-parallel, and
    # data-parallel communicators.
    if device_count > 0:
        if mpu.model_parallel_is_initialized():
            print('model parallel is already initialized')
        else:
            mpu.initialize_model_parallel(args.tensor_model_parallel_size,
                                          args.pipeline_model_parallel_size,args.virtual_pipeline_model_parallel_size)  # TODO: clarify mapping between these groups and the launcher's processes (one process per GPU?)
        # if args.lazy_mpu_init:  # removed for now
        #     return assigned_gpus
    if assigned_gpus:
        # torch.cuda.set_device(assigned_gpus[0])
        return assigned_gpus
    return None

# assign gpus
#gees:这里的rank是local rank!
# assign gpus
# NOTE: the `rank` argument here is the LOCAL rank, not the global rank.
def assign_gpus(device_count, rank, pp_size, gpus_per_stage):
    """Assign GPUs to a pipeline stage based on the local rank of the process.

    The stage index is ``rank % pp_size``; each stage gets a consecutive
    slice of ``gpus_per_stage`` GPU ids (e.g. stage 0 with 2 GPUs per
    stage -> ``[0, 1]``). Ranks beyond ``pp_size`` wrap around and share a
    stage's slice.

    :param device_count: Total number of GPUs available on this node.
    :param rank: Local rank of the current process.
    :param pp_size: Number of pipeline-parallel stages on this node.
    :param gpus_per_stage: Number of GPUs used by each pipeline stage.
    :return: List of consecutive GPU ids assigned to the current process.
    :raises ValueError: If ``pp_size * gpus_per_stage`` exceeds ``device_count``.
    """
    # All stages together must fit on this node's GPUs.
    total_gpus_needed = pp_size * gpus_per_stage
    if total_gpus_needed > device_count:
        raise ValueError("Not enough GPUs available for the configuration")

    # Which stage this local rank belongs to (wraps around past pp_size).
    stage_index = rank % pp_size

    # Consecutive GPU ids for this stage.
    start_gpu_id = stage_index * gpus_per_stage
    return list(range(start_gpu_id, start_gpu_id + gpus_per_stage))


