import torch


def torch_distributed_get_info(group: torch.distributed.ProcessGroup | None = None) -> tuple[int, int]:
    try:
        rank = torch.distributed.get_rank(group)
        size = torch.distributed.get_world_size(group)
    except ValueError:
        rank = 0
        size = 1
    return rank, size


def torch_distributed_is_rank0(group: torch.distributed.ProcessGroup | None = None) -> bool:
    """Return ``True`` iff this process is rank 0 of *group* (or not distributed)."""
    return torch_distributed_get_info(group)[0] == 0


def torch_distributed_all_sum(
    value: int | float,
    device: torch.device | None,
    group: torch.distributed.ProcessGroup | None = None,
) -> int | float:
    """All-reduce a Python scalar with SUM across *group* and return the total.

    Args:
        value: Scalar contribution from this process.
        device: Device used for the staging tensor; must be valid for the
            group's collective backend (e.g. a CUDA device for NCCL).
        group: Process group to reduce over; ``None`` selects the default group.

    Returns:
        The sum of ``value`` across all ranks as a Python scalar. When the
        world size is 1 (including non-distributed runs), ``value`` is
        returned unchanged without any tensor round-trip.
    """
    _, size = torch_distributed_get_info(group)
    if size == 1:
        # Single process: nothing to reduce, skip the device round-trip.
        return value

    # Keep the tensor in its own variable rather than shadowing the
    # `int | float` parameter with a Tensor.
    buffer = torch.tensor(value, device=device)
    torch.distributed.all_reduce(buffer, torch.distributed.ReduceOp.SUM, group)
    # .item() already synchronizes and copies to host; an explicit .cpu()
    # first is redundant.
    return buffer.item()
