
from pynvml import *


def get_nv_smi(gpu_index=0):
    """Print memory statistics (in MiB) for one GPU plus the total GPU count.

    Args:
        gpu_index: index of the GPU to query (default 0, preserving the
            old hard-coded behaviour).

    NVML is initialized and shut down locally, so this function no longer
    crashes when called without a prior nvmlInit() (NVML init/shutdown is
    reference-counted, so a caller's own session is unaffected).
    """
    nvmlInit()
    try:
        handle = nvmlDeviceGetHandleByIndex(gpu_index)
        meminfo = nvmlDeviceGetMemoryInfo(handle)
        # NVML reports bytes; divide by 1024**2 to get MiB.
        print(meminfo.total / 1024 / 1024)  # total device memory
        print(meminfo.used / 1024 / 1024)   # memory currently in use
        print(meminfo.free / 1024 / 1024)   # memory still available
        print(nvmlDeviceGetCount())         # number of GPUs visible
    finally:
        nvmlShutdown()


def nvidia_info():
    """Collect NVIDIA driver version and per-GPU memory/temperature/power info.

    Returns:
        dict with keys:
            state (bool): False if any NVML call failed.
            nvidia_version (str): driver version ("" on failure).
            nvidia_count (int): number of GPUs detected (0 on failure).
            gpus (list[dict]): one entry per GPU with gpu_name,
                total/free/used memory in bytes, a temperature string,
                and powerStatus.

    Never raises; all failures are reported via state=False.
    """
    nvidia_dict = {
        "state": True,
        "nvidia_version": "",
        "nvidia_count": 0,
        "gpus": [],
    }
    try:
        nvmlInit()
        nvidia_dict["nvidia_version"] = nvmlSystemGetDriverVersion()
        nvidia_dict["nvidia_count"] = nvmlDeviceGetCount()
        for i in range(nvidia_dict["nvidia_count"]):
            handle = nvmlDeviceGetHandleByIndex(i)
            memory_info = nvmlDeviceGetMemoryInfo(handle)
            nvidia_dict["gpus"].append({
                "gpu_name": nvmlDeviceGetName(handle),
                "total": memory_info.total,
                "free": memory_info.free,
                "used": memory_info.used,
                # Named sensor constant instead of magic 0.
                "temperature": f"{nvmlDeviceGetTemperature(handle, NVML_TEMPERATURE_GPU)}℃",
                "powerStatus": nvmlDeviceGetPowerState(handle),
            })
    except Exception:
        # Single handler: NVMLError subclasses Exception, so the old pair
        # of identical except blocks collapses into one.
        nvidia_dict["state"] = False
    finally:
        try:
            nvmlShutdown()
        except Exception:
            # Shutdown can fail if init never succeeded; best-effort only.
            pass
    return nvidia_dict
import time

def check_gpu_mem_usedRate(samples=100, interval=0.2):
    """Poll GPU0 memory usage and track the peak used/total ratio.

    Args:
        samples: number of polls (default 100, matching old behaviour).
        interval: seconds to sleep between polls (default 0.2, as before).

    Returns:
        The maximum used/total ratio observed (0.0 if no data was
        collected). The original version computed this but never
        returned it.
    """
    max_rate = 0.0
    for _ in range(samples):
        info = nvidia_info()
        print(info)
        if not info["gpus"]:
            # NVML failed or no GPU present: skip this sample instead of
            # raising IndexError on the empty list.
            time.sleep(interval)
            continue
        gpu0 = info["gpus"][0]
        used = gpu0["used"]
        total = gpu0["total"]
        print(f"GPU0 used: {used}, total: {total}, 使用率：{used/total}")
        max_rate = max(max_rate, used / total)
        print("GPU0 最大使用率：", max_rate)
        time.sleep(interval)
    return max_rate


        # break
        # if __name__ == '__main__':
# nvmlInit()
# get_nv_smi()
# nvidia_info()
# check_gpu_mem_usedRate()


# import GPUtil
# import time
# time_x = []
# gpu_y = []

# def gpu_util_timer(self):
#     for n in range(10):
#         Graph_Util.gpu_y.append(GPUtil.showUtilization())
#         Graph_Util.time_x.append(n)
#         time.sleep(1)
#     print('gpu done')
import GPUtil
from threading import Thread
import time

class Monitor(Thread):
    def __init__(self, delay):
        super(Monitor, self).__init__()
        self.stopped = False
        self.delay = delay # Time between calls to GPUtil
        self.start()

    def run(self):
        while not self.stopped:
            GPUtil.showUtilization()
            time.sleep(self.delay)

    def stop(self):
        self.stopped = True
        
# Instantiate monitor with a 1-second delay between updates.
# NOTE: Monitor's constructor calls start(), so polling begins immediately.
monitor = Monitor(1)

# Training / workload would run here while the monitor prints in the background.

# Close monitor: signals the polling loop to exit after its current sleep.
monitor.stop()