import time
import torch
import datetime
import torch.multiprocessing as mp
import torch.distributed     as dist

def setup(world_size, rank):
    """Initialize the default NCCL process group via a TCPStore rendezvous,
    demonstrate key/value metadata exchange, and run a sample all_reduce.

    Args:
        world_size: total number of participating processes.
        rank: this process's rank in [0, world_size); rank 0 hosts the store.
    """
    # All processes must use the same host/port; only rank 0 runs the server.
    store = dist.TCPStore("127.0.0.1", 23456, world_size, rank == 0)
    # Blocks here until all world_size processes have called init_process_group.
    dist.init_process_group(backend="nccl",
                            store=store,
                            world_size=world_size, rank=rank)
    # At this point every process has joined the group.
    print(f"所有的 {world_size}个进程已启动，当前进程rank = {rank}")
    # Bind this process to its GPU before any NCCL collective — otherwise all
    # ranks may default to device 0 and NCCL can hang or error.
    torch.cuda.set_device(rank)
    if rank == 0:
        store.set("Key", "Value")
        print(f"rank {rank} set metadata ('Key', 'Value')")
    else:
        # NOTE: store.get returns bytes (b'Value'), not str.
        print(f"rank {rank} get metadata ('Key', {store.get('Key')})")
    tensor = torch.ones(3,3, dtype=torch.float32, device=f"cuda:{rank}")
    # Sum-reduce across all ranks; every element becomes world_size.
    dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
    print(f"rank {rank} tensor sum: {tensor}")

def destroy():
    """Tear down the default process group and log the shutdown time."""
    dist.destroy_process_group()
    finished_at = datetime.datetime.now()
    print(f"所有进程已结束，结束时间：{finished_at}")

def main(rank, world_size):
    """Per-worker entry point invoked by mp.spawn.

    Args:
        rank: worker index, injected automatically by mp.spawn.
        world_size: total number of workers (passed via spawn's args).
    """
    setup(world_size, rank)
    # ... actual workload would run here ...
    destroy()

if __name__ == "__main__":
    # One worker process per visible GPU.
    ngpus = torch.cuda.device_count()
    #! the rank argument is passed to main automatically by mp.spawn
    mp.spawn(main, args=(ngpus, ), nprocs=ngpus, join=True)  # block until all processes finish

