from pynvml import nvmlInit, nvmlDeviceGetHandleByIndex, nvmlDeviceGetMemoryInfo, nvmlSystemGetProcessName
import torch

def get_nvidia_memory(device_index: int = 0) -> int:
    """Return the used device memory in bytes for one NVIDIA GPU, as reported by NVML.

    NVML reports total device-wide usage (all processes), unlike the
    PyTorch allocator counters in ``get_pytorch_memory``.

    Args:
        device_index: Index of the GPU to query (defaults to GPU 0,
            matching the previous hard-coded behavior).

    Returns:
        Used device memory in bytes.
    """
    # nvmlInit() is idempotent in recent pynvml versions, so calling it on
    # every invocation is safe, if slightly redundant.
    nvmlInit()
    handle = nvmlDeviceGetHandleByIndex(device_index)
    info = nvmlDeviceGetMemoryInfo(handle)
    return info.used

def get_pytorch_memory(device=None) -> tuple[int, int]:
    """Return PyTorch CUDA allocator statistics for one device.

    These counters cover only memory managed by this process's PyTorch
    caching allocator, not total device usage (see ``get_nvidia_memory``).

    Args:
        device: Device to query (``torch.device``, int index, or ``None``
            for the current CUDA device — the previous behavior).

    Returns:
        Tuple ``(allocated, reserved)`` in bytes:
        - allocated: memory currently occupied by live tensors.
        - reserved: total memory reserved by the caching allocator
          (always >= allocated; the difference is cached for reuse).
    """
    allocated = torch.cuda.memory_allocated(device)
    reserved = torch.cuda.memory_reserved(device)
    return allocated, reserved
