# -*- coding: utf-8 -*-
# @Time    : 2023/6/13 4:37 PM
# @Author  : Wu WanJie

import os
import torch
import torch.distributed as dist


def main():
    """Smoke-test a two-phase ``torch.distributed`` barrier hand-off.

    Non-zero ranks block at the first barrier; rank 0 falls through,
    prints, then enters the (single, matching) barrier call that releases
    the others. Every rank then prints "Success".

    Expects to be launched via ``torchrun`` (which sets ``LOCAL_RANK``)
    on a machine with NCCL-capable GPUs.

    Raises:
        RuntimeError: if ``LOCAL_RANK`` is not present in the environment.
    """
    local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if local_rank < 0:
        # torchrun injects LOCAL_RANK; without it set_device(-1) would fail
        # with an opaque CUDA error, so fail loudly here instead.
        raise RuntimeError("LOCAL_RANK not set; launch this script with torchrun")

    # Use the `dist` alias consistently (the file imports it but never used it).
    dist.init_process_group(backend="nccl")
    torch.cuda.set_device(local_rank)

    try:
        if local_rank != 0:
            # Non-zero ranks wait here until rank 0 reaches its barrier below.
            dist.barrier()
        print("mid distributed")
        if local_rank == 0:
            # Rank 0's single barrier call pairs with the one above, so each
            # rank enters the collective exactly once.
            dist.barrier()

        print("Success")
    finally:
        # Tear down the NCCL process group so the process exits cleanly.
        dist.destroy_process_group()


if __name__ == "__main__":
    os.environ["CUDA_VISIBLE_DEVICES"] = "6,7"
    main()
