import pynvml
import torch
import time

# GPU indices this script targets.
# NOTE(review): monitor_gpu_memory() re-declares its own local `devices`,
# so editing this list does not affect that function — confirm intent.
devices = [2]

def monitor_gpu_memory(device_indices=None):
    """Print NVML memory stats for each requested GPU.

    Args:
        device_indices: Iterable of GPU indices to query. Defaults to the
            module-level ``devices`` list (previously this was re-declared
            locally, silently shadowing the module constant).

    Returns:
        Free memory in GiB of the *last* device queried, or 0 if the
        index list is empty.
    """
    if device_indices is None:
        device_indices = devices
    pynvml.nvmlInit()
    free_memory = 0
    try:
        for i in device_indices:
            handle = pynvml.nvmlDeviceGetHandleByIndex(i)
            info = pynvml.nvmlDeviceGetMemoryInfo(handle)
            # NVML reports bytes; convert to GiB.
            total_memory = info.total / (1024 ** 3)
            used_memory = info.used / (1024 ** 3)
            free_memory = info.free / (1024 ** 3)
            print(f"GPU {i}: Total Memory: {total_memory:.2f} GB, Used Memory: {used_memory:.2f} GB, Free Memory: {free_memory:.2f} GB")
    finally:
        # Release NVML even if a device query raises.
        pynvml.nvmlShutdown()
    return free_memory


def occupy_gpu_memory(gb_to_occupy, device_index=2):
    """Allocate roughly ``gb_to_occupy`` GiB on a GPU and hold it forever.

    Args:
        gb_to_occupy: Amount of memory to reserve, in GiB.
        device_index: CUDA device to target (default 2, matching the
            previously hard-coded ``"cuda:2"``). Falls back to CPU when
            CUDA is unavailable.

    Note:
        This function never returns: once the allocation succeeds it
        sleeps forever to keep the tensor alive; on failure it retries
        with a backoff instead of hot-spinning the CPU.
    """
    device = torch.device(f"cuda:{device_index}" if torch.cuda.is_available() else "cpu")
    while True:
        try:
            # float32 = 4 bytes per element.
            num_elements = int(gb_to_occupy * 1024 ** 3 / 4)
            # Allocate directly on the target device — avoids a transient
            # CPU-side buffer of the same size.
            x = torch.randn(num_elements, dtype=torch.float32, device=device)
            print(f"Occupied {gb_to_occupy} GB of GPU memory.")
            while True:
                # Keep `x` referenced so the allocation is never freed.
                time.sleep(1)
        except RuntimeError as e:
            # CUDA OOM (torch.cuda.OutOfMemoryError) subclasses RuntimeError.
            print(f"Failed to occupy GPU memory: {e}")
            # Back off before retrying; the original looped with no delay,
            # busy-spinning and spamming output on persistent failure.
            time.sleep(1)


if __name__ == "__main__":
    # Report current usage, then grab all memory reported free on the
    # monitored GPU. (Removed dead `gb_to_occupy = 1` — it was assigned
    # but never used; `free_m` is what is actually passed.)
    free_m = monitor_gpu_memory()
    occupy_gpu_memory(free_m)