import torch
import datetime
import torch.distributed     as dist
import torch.multiprocessing as mp 

def setup(world_size, rank):
    """Initialize the default NCCL process group via a TCPStore rendezvous.

    Args:
        world_size: total number of participating processes.
        rank: this process's rank; rank 0 hosts the TCPStore server.
    """
    # Bind this process to its own GPU before any NCCL work; otherwise every
    # rank defaults to cuda:0, which can deadlock or oversubscribe device 0.
    torch.cuda.set_device(rank)

    # All ranks must use the identical host:port (the master's address);
    # only rank 0 runs the store server (is_master flag).
    store = dist.TCPStore("127.0.0.1", 23456, world_size, rank == 0)
    dist.init_process_group(backend="nccl",
                            store=store,
                            # Blocks here until all world_size processes
                            # have called init_process_group.
                            world_size=world_size, rank=rank)
    # Backend of the default (global) process group.
    global_backend = dist.get_backend()
    # print(f"Global Backend: {global_backend} from rank {rank}")

    # Explicit subgroup containing every rank (same membership as the
    # default group), created just to demonstrate new_group().
    custom_group = dist.new_group(ranks=list(range(dist.get_world_size())))
    dist.barrier()   # synchronize all ranks before querying the new group
    custom_backend = dist.get_backend(custom_group)
    print(f"Custom Backend: {custom_backend} from rank {dist.get_rank(custom_group)}")

    dist.destroy_process_group(custom_group)


def destroy():
    """Tear down the default process group and print the finish timestamp."""
    dist.destroy_process_group()
    # Message (Chinese): "all processes have finished, finish time: ..."
    print(f"所有进程已结束，结束时间：{datetime.datetime.now()}")

def main(rank, world_size):
    """Per-process entry point executed by mp.spawn.

    Args:
        rank: this worker's rank, injected by mp.spawn as the first argument.
        world_size: total number of worker processes.
    """
    setup(world_size=world_size, rank=rank)
    # ... training / communication work would go here ...
    destroy()

if __name__ == "__main__":
    # Launch one worker process per visible GPU.
    ngpus = torch.cuda.device_count()
    #! mp.spawn injects each process's rank automatically as main's first
    #! positional argument, so args only needs to carry world_size (= ngpus).
    mp.spawn(main, args=(ngpus, ), nprocs=ngpus, join=True)  # block until all processes finish