#   /**
#   * Copyright (c) 2022 Beijing Jiaotong University
#   * PhotLab is licensed under [Open Source License].
#   * You can use this software according to the terms and conditions of the [Open Source License].
#   * You may obtain a copy of [Open Source License] at: [https://open.source.license/]
#   *
#   * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
#   * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
#   * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
#   *
#   * See the [Open Source License] for more details.
#   */
#   /**
#   * Author: Zheng Wang
#   * Created: Oct. 31, 2022
#   * Supported by: National Key Research and Development Program of China
#   */

import pynvml
import time
from multiprocessing import Process
from multiprocessing import Queue

UNIT = 1024 * 1024


class Monitor(Process):
    """Background process that periodically samples NVIDIA GPU memory
    status via NVML and publishes one status dict per GPU through a
    multiprocessing queue.

    Usage: call start(), then repeatedly call get_gpu_runtime_info()
    from the parent process to consume status dicts.
    """

    def __init__(self, interval=5):
        """Initialize NVML and discover the number of GPUs.

        Args:
            interval: seconds to sleep between sampling rounds
                (default 5, matching the original behavior).
        """
        super(Monitor, self).__init__()
        pynvml.nvmlInit()
        # Number of NVIDIA GPUs visible on this machine.
        self.__gpu_device_count = pynvml.nvmlDeviceGetCount()
        # Cross-process channel: the child run() loop produces status
        # dicts; the parent consumes them via get_gpu_runtime_info().
        self.__gpu_info_queue = Queue()
        self.__interval = interval

    def run(self) -> None:
        """Child-process loop: sample every GPU, then sleep."""
        # Re-initialize NVML once in the child process: NVML state from
        # the parent is not guaranteed to survive fork/spawn. (The
        # original re-initialized on every single sample instead.)
        pynvml.nvmlInit()
        while True:
            for gpu_id in range(self.__gpu_device_count):
                self.__gpu_info_queue.put(self.__get_one_gpu_status(gpu_id))
            time.sleep(self.__interval)

    def get_gpu_runtime_info(self):
        """Block until one GPU status dict is available and return it."""
        return self.__gpu_info_queue.get()

    def __get_one_gpu_status(self, gpu_id):
        """Query NVML for the memory status of GPU `gpu_id`.

        Returns:
            dict with keys:
              - "id": the GPU index
              - "free_memory_storage": free memory in MB
              - "free_util_rate": fraction of total memory that is free
        """
        # Get the handle for GPU gpu_id; subsequent queries go through it.
        handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
        memory_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
        return {
            "id": gpu_id,
            "free_memory_storage": memory_info.free / UNIT,  # MB
            # BUG FIX: the original computed free / free, which is
            # always 1.0. The free-memory ratio is free / total.
            "free_util_rate": memory_info.free / memory_info.total,
        }


if __name__ == '__main__':
    # Demo: launch the monitor process and dump GPU status forever.
    monitor = Monitor()
    monitor.start()
    while True:
        info = monitor.get_gpu_runtime_info()
        print(info)
        time.sleep(5)