# -*- coding: utf-8 -*-


# """ PP
# Simulate Megatron-LM pipeline-parallel (PP) group construction.
# For each node, build an array of length gpus_per_node marking each local GPU:
#   0 = hosts the FIRST stage of some PP group,
#   2 = hosts the LAST stage of some PP group,
#   1 = hosts only middle stages.
print('PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP')
world_size: int = 4 * 8  # total number of GPUs
gpus_per_node = 8  # GPUs per node
pipeline_model_parallel_size = 4  # PP degree
num_pipeline_model_parallel_groups: int = world_size // pipeline_model_parallel_size
for node_ in range(world_size // gpus_per_node):
    # NOTE: the node-info array depends only on the node, not on the individual
    # rank, so it is computed once per node here. (The original inner loop over
    # all local ranks recomputed the identical array gpus_per_node times and
    # only the last result was printed — same output, 8x the work.)
    _PIPELINE_MODEL_PARALLEL_NODE_INFO = [1] * gpus_per_node
    for i in range(num_pipeline_model_parallel_groups):
        # PP group i holds ranks i, i+G, i+2G, ... where G = number of groups.
        ranks = range(i, world_size, num_pipeline_model_parallel_groups)
        if node_ == 0:
            print(list(ranks))  # dump every PP group's global ranks once
        # First stage of this PP group lives on this node.
        if ranks[0] // gpus_per_node == node_:
            _PIPELINE_MODEL_PARALLEL_NODE_INFO[ranks[0] % gpus_per_node] = 0
        # Last stage of this PP group lives on this node.
        if ranks[-1] // gpus_per_node == node_:
            _PIPELINE_MODEL_PARALLEL_NODE_INFO[ranks[-1] % gpus_per_node] = 2
    # Nodes whose result contains a 0 or a 2 need to generate data.
    print(node_, _PIPELINE_MODEL_PARALLEL_NODE_INFO)
print('PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP')
# """

# """ DP
# Simulate Megatron-LM data-parallel (DP) group construction: within each
# pipeline stage's contiguous rank block, ranks that occupy the same
# tensor/context-parallel slot belong to the same DP group.
print('DPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDP')
world_size: int = 4 * 8  # total number of GPUs
gpus_per_node = 8  # GPUs per node
pipeline_model_parallel_size = 4  # PP degree
tensor_model_parallel_size = 4  # TP degree
context_parallel_size = 1
num_pipeline_model_parallel_groups: int = world_size // pipeline_model_parallel_size
all_data_parallel_group_ranks_with_cp = []
# Stride between consecutive members of one DP group.
stride = context_parallel_size * tensor_model_parallel_size
for stage in range(pipeline_model_parallel_size):  # one rank block per PP stage
    block_lo = stage * num_pipeline_model_parallel_groups
    block_hi = block_lo + num_pipeline_model_parallel_groups
    for slot in range(stride):
        ranks = range(block_lo + slot, block_hi, stride)
        print('ranks', list(ranks))
        # In real Megatron code, torch.distributed.new_group(ranks, ...) plus a
        # gloo companion group would be created here, and the current rank would
        # record _DATA_PARALLEL_GROUP / _DATA_PARALLEL_GLOBAL_RANKS if it is a
        # member of `ranks`. A second pass (stride = TP only) would likewise fill
        # all_data_parallel_group_ranks_with_cp and the *_WITH_CP globals.
    print('---------------')
print('DPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDPDP')
# """
