import os
from typing import Any, Callable, TypeVar

import torch

T = TypeVar("T")


def torch_distributed_get_info() -> tuple[int, int]:
    """Return ``(process_rank, processes_num)`` for the current process.

    Falls back to ``(0, 1)`` when torch.distributed is unavailable or the
    default process group has not been initialized.

    The original implementation called ``get_rank()``/``get_world_size()``
    and caught ``ValueError`` — but the "process group not initialized"
    error is a ``RuntimeError`` on older torch releases and ``ValueError``
    only on newer ones, so the fallback was version-dependent. Guarding
    with ``is_available()``/``is_initialized()`` is the stable idiom.
    """
    if torch.distributed.is_available() and torch.distributed.is_initialized():
        return torch.distributed.get_rank(), torch.distributed.get_world_size()
    return 0, 1


def torch_distributed_is_rank0() -> bool:
    """Return True when this process is rank 0 (also when not distributed)."""
    return torch_distributed_get_info()[0] == 0


def torch_distributed_load(
    f: str | os.PathLike, *,
    map_location: Callable[[torch.Tensor, str], torch.Tensor] | torch.device | str | dict[str, str] | None = None,
    process_rank: int = 0,
    process_group_size: int = 1,
    process_rank_src: int = 0,
    process_device: torch.device | str | None = None,
):
    if process_rank == process_rank_src:
        state = torch.load(f, map_location=map_location)
        state_list = [state]
    else:
        state_list = [None]
    if process_group_size > 1:
        torch.distributed.broadcast_object_list(state_list, src=process_rank_src, device=process_device)
    return state_list[0]


def torch_distributed_broadcast_object(
    obj: T,
    src: int = 0,
    group: Any = None,
    device: Any = None
) -> T:
    """Broadcast a picklable object from rank ``src`` to all ranks.

    Requires an initialized process group. On the source rank the return
    value is ``obj`` itself; on other ranks it is the received copy (the
    ``obj`` those ranks passed in is only a placeholder).
    """
    box = [obj]
    torch.distributed.broadcast_object_list(box, src=src, group=group, device=device)
    return box[0]


def torch_distributed_broadcast_bool(
    value: bool | None,
    src: int = 0,
    group: Any = None,
    device: Any = None
) -> bool:
    if value is None:
        value_tensor = torch.empty((), dtype=torch.bool, device=device)
    elif value:
        value_tensor = torch.ones((), dtype=torch.bool, device=device)
    else:
        value_tensor = torch.zeros((), dtype=torch.bool, device=device)
    torch.distributed.broadcast(value_tensor, src, group)
    return value_tensor.cpu().item()
