import os
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
# Single-process rendezvous configuration: one rank, local TCP master.
os.environ.update({
    'RANK': "0",
    'WORLD_SIZE': "1",
    'MASTER_ADDR': "localhost",
    'MASTER_PORT': "10099",
})
def setup(rank: int, world_size: int, backend: str = "hccl") -> None:
    """Initialize the default distributed process group.

    Args:
        rank: This process's rank; must satisfy 0 <= rank < world_size.
        world_size: Total number of participating processes.
        backend: Collective backend name; defaults to "hccl" (Ascend NPU).
            Parameterized so the same helper works with e.g. "gloo"/"nccl".
    """
    dist.init_process_group(backend, rank=rank, world_size=world_size)

if __name__ == "__main__":
    setup(rank=6, world_size=1)
    tensor = torch.ones(10).npu(6)
    dist.all_reduce(tensor)
    print(f"Rank 6 result: {tensor}")
