import os
import datetime
import torch
import torch.distributed as dist

def init_distributed(args):
    """Initialize torch.distributed (NCCL backend) from torchrun-style env vars.

    Reads ``WORLD_SIZE``, ``RANK`` and ``LOCAL_RANK`` from the environment
    (raises ``KeyError`` if any is missing, i.e. when not launched via
    torchrun/torch.distributed.launch) and mutates ``args`` in place, setting
    ``world_size``, ``rank``, ``local_rank`` and ``dist_backend``.

    Parameters
    ----------
    args : object
        Namespace-like object; must already carry ``dist_url`` (the
        ``init_method`` for the process group, e.g. ``'env://'``).
    """
    args.world_size = int(os.environ["WORLD_SIZE"])
    args.rank = int(os.environ["RANK"])
    args.local_rank = int(os.environ["LOCAL_RANK"])

    # Bind this process to its GPU before creating the NCCL process group.
    torch.cuda.set_device(args.local_rank)
    args.dist_backend = 'nccl'
    print(f'| distributed init (rank {args.rank}): {args.dist_url}, '
          f'gpu {args.local_rank}', flush=True)
    dist.init_process_group(
        backend=args.dist_backend, init_method=args.dist_url,
        world_size=args.world_size, rank=args.rank,
        # Generous 2-hour timeout for slow rendezvous / large clusters.
        timeout=datetime.timedelta(seconds=7200)
    )
    # Barrier so every rank reports readiness before training proceeds;
    # the prints make a hung rendezvous easy to spot in per-rank logs.
    print(f'before barrier (rank {args.rank})', flush=True)
    dist.barrier()
    print(f'after barrier (rank {args.rank})', flush=True)

# Example invocation.
class Args:
    # init_method URL for the process group; 'env://' reads MASTER_ADDR/PORT
    # from the environment (the torchrun default).
    dist_url = 'env://'


# Guard the entry point so importing this module does not trigger
# distributed initialization as a side effect.
if __name__ == "__main__":
    args = Args()
    init_distributed(args)