import torch

# Module-level state configured via config():
#   _plot    — whether global plotting is enabled.
#   _backend — computation backend name ("numpy", "cupy" or "torch").
# NOTE(review): the initial value "torch" differs from config()'s default
# of "numpy" — confirm which default is intended.
_plot = False
_backend = "torch"


def config(plot: bool = False, backend: str = "numpy", gpu_device: int = None):
    """Set the module-level global options.

    Args:
        plot (bool, optional): whether to enable global plotting.
            Defaults to False.
        backend (str, optional): computation backend, one of "numpy",
            "cupy", "torch". Note that "cupy" requires an extra install:
            "pip install cupy". Defaults to "numpy".
        gpu_device (int, optional): GPU index to activate when using a
            CUDA-capable backend. Defaults to None (leave device unchanged).
    """
    global _plot
    global _backend
    _plot = plot
    _backend = backend
    # Bug fix: the original code did `torch.device(f'cuda:{gpu_device}')`,
    # which only constructs (and discards) a device object and never selects
    # a GPU. torch.cuda.set_device() actually switches the active device.
    # gpu_device is checked first so no CUDA call happens when it is None.
    if gpu_device is not None and backend in ("cuda", "torch"):
        torch.cuda.set_device(gpu_device)


def set_device(gpu_device):
    """Select the active CUDA device.

    Args:
        gpu_device (int or None): GPU index to activate; no-op when None.
    """
    # Bug fix: the original condition `(_backend == "cuda" or "torch")` is
    # always truthy because the non-empty string "torch" is true, so the
    # backend check never took effect. Use a proper membership test, and
    # check gpu_device first so the function short-circuits to a no-op
    # when no device is requested.
    if gpu_device is not None and _backend in ("cuda", "torch"):
        torch.cuda.set_device(gpu_device)


def is_available():
    """Report whether a CUDA-capable GPU is usable by torch.

    Returns:
        bool: True if CUDA is available, False otherwise.
    """
    cuda_ok = torch.cuda.is_available()
    return cuda_ok


def device_count():
    """Return how many CUDA GPUs are visible to torch.

    Returns:
        int: the number of available CUDA devices (0 when none).
    """
    n_gpus = torch.cuda.device_count()
    return n_gpus


def current_device():
    """Return the index of the currently selected CUDA device.

    Returns:
        int: the active device index.

    Note:
        This triggers CUDA initialization and raises if CUDA is unavailable.
    """
    active_index = torch.cuda.current_device()
    return active_index


def get_device_name(device: int = 0):
    """Return the name of a CUDA device.

    Generalized: the device index was previously hard-coded to 0; it is now
    a parameter whose default (0) preserves the original behavior for
    existing callers.

    Args:
        device (int, optional): GPU index to query. Defaults to 0.

    Returns:
        str: the device name reported by torch (e.g. "NVIDIA A100...").
    """
    return torch.cuda.get_device_name(device)
