# -*- coding: utf-8 -*-
#  /**
#  * Copyright (c) 2022 Beijing Jiaotong University
#  * PhotLab is licensed under [Open Source License].
#  * You can use this software according to the terms and conditions of the [Open Source License].
#  * You may obtain a copy of [Open Source License] at: [https://open.source.license/]
#  *
#  * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
#  * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
#  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
#  *
#  * See the [Open Source License] for more details.
#  */
#  /**
#  * Author: Yang Wang
#  * Created: Aug. 2, 2023
#  * Supported by: National Key Research and Development Program of China
#  */

import logging
import os
from phot_engine.globals.task_priority import TaskPriority
from phot_engine.globals.singletion import Singleton
import psutil
import subprocess
import torch
import time
@Singleton
class ResourceManager(object):
    """
    Monitor hardware runtime usage (CPU/GPU) and recommend the least-loaded
    resources so the engine can choose the current best execution strategy.
    """

    def __init__(self):
        # Logical CPU count (includes hyper-threads), not physical cores.
        self.__cpu_number = psutil.cpu_count()
        self.__cpu_list = list(range(self.__cpu_number))

        # Number of CUDA devices visible to torch.
        self.__gpu_number = torch.cuda.device_count()
        self.__gpu_list = list(range(self.__gpu_number))

    def get_cpu_usage(self):
        """
        Sample per-core CPU utilization over a 0.1 s window.

        :return: list of core indices sorted ascending by utilization
                 (least-loaded core first).
        """
        cpu_usage = psutil.cpu_percent(interval=0.1, percpu=True)
        return sorted(range(len(cpu_usage)), key=lambda i: cpu_usage[i])

    def get_gpu_usage(self):
        """
        Sample GPU utilization via ``nvidia-smi`` twice, 0.5 s apart, and
        return GPU indices sorted ascending by average utilization.

        :return: list of GPU indices as strings, least-loaded first; empty
                 list when ``nvidia-smi`` fails or is not installed.
        """
        samples = 2
        interval = 0.5  # seconds between the two samples
        gpu_usage_dict = {}
        try:
            for sample in range(samples):
                if sample:
                    # Sleep BETWEEN subprocess calls so the two samples are
                    # genuinely distinct readings (the original parsed one
                    # nvidia-smi output twice and slept per GPU line).
                    time.sleep(interval)
                result = subprocess.check_output(
                    ['nvidia-smi',
                     '--query-gpu=index,utilization.gpu',
                     '--format=csv,noheader'],
                    universal_newlines=True)
                for info in result.strip().split('\n'):
                    gpu_index, gpu_utilization = info.strip().split(',')
                    gpu_index = int(gpu_index)
                    # The utilization field looks like "42 %": keep the number.
                    gpu_utilization = int(gpu_utilization.split()[0])
                    gpu_usage_dict.setdefault(gpu_index, []).append(gpu_utilization)

            # Average the per-GPU samples, then sort least-loaded first.
            avg_gpu_usage = {index: sum(usages) / len(usages)
                             for index, usages in gpu_usage_dict.items()}
            sorted_gpus = sorted(avg_gpu_usage.items(), key=lambda item: item[1])
            return [str(index) for index, _ in sorted_gpus]
        except (subprocess.CalledProcessError, OSError):
            # CalledProcessError: nvidia-smi exited non-zero.
            # OSError (incl. FileNotFoundError): nvidia-smi is not installed —
            # previously this crashed instead of degrading to "no GPUs".
            return []

    def __limit_cpu_number_by_task_priority(self, priority: TaskPriority) -> int:
        """
        Limit the number of CPU cores used according to the task priority.
        If reserving cores would leave fewer than one, fall back to using
        all cores.

        :param priority: task priority level
        :return: number of CPU cores the task may use (>= 1 when cores exist)
        """
        reserved_cores = {
            TaskPriority.GOD: 0,
            TaskPriority.URGENT: 1,
            TaskPriority.MIDDLE: 2,
            TaskPriority.LOW: 3,
        }
        if priority not in reserved_cores:
            logging.debug("Unknown task priority or bugs in cpu allocation algorithm.priority:{}".format(priority))
            return 1
        limited = self.__cpu_number - reserved_cores[priority]
        return limited if limited >= 1 else self.__cpu_number

    def __limit_gpu_number_by_task_priority(self, priority: TaskPriority) -> int:
        """
        Limit the number of GPUs used according to the task priority.
        If reserving GPUs would leave fewer than one, fall back to using
        all GPUs.

        :param priority: task priority level
        :return: number of GPUs the task may use
        """
        reserved_gpus = {
            TaskPriority.GOD: 0,
            TaskPriority.URGENT: 1,
            TaskPriority.MIDDLE: 2,
            TaskPriority.LOW: 3,
        }
        if priority not in reserved_gpus:
            logging.debug("Unknown task priority or bugs in gpu allocation algorithm.priority:{}".format(priority))
            return 1
        limited = self.__gpu_number - reserved_gpus[priority]
        return limited if limited >= 1 else self.__gpu_number

    def select_elements_in_order(self, input_list, limit):
        """Return the first ``limit`` elements of ``input_list`` in order."""
        return input_list[:limit]

    def recommend_cpus(self, priority=TaskPriority.LOW) -> list:
        """
        Recommend spare CPU cores to the engine, least-loaded first,
        limited by the task priority.

        :param priority: task priority used to cap the core count
        :return: list of core indices; falls back to ``[0]`` when empty
        """
        cpu_limited = self.__limit_cpu_number_by_task_priority(priority)
        selected_list_cpu = self.select_elements_in_order(self.get_cpu_usage(), cpu_limited)
        return selected_list_cpu if selected_list_cpu else [0]

    def recommend_gpus(self, priority=TaskPriority.LOW) -> list:
        """
        Recommend spare GPUs to the engine, least-loaded first,
        limited by the task priority.

        :param priority: task priority used to cap the GPU count
        :return: list of GPU indices (strings); falls back to ``[0]`` when empty
        """
        limited = self.__limit_gpu_number_by_task_priority(priority)
        selected_list_gpu = self.select_elements_in_order(self.get_gpu_usage(), limited)
        return selected_list_gpu if selected_list_gpu else [0]

    def recommend_priority(self, priority: TaskPriority) -> int:
        """
        Map a task priority to an OS process priority value.

        :param priority: task priority level
        :return: on Windows, a psutil priority-class constant; on UNIX, a
                 nice value (-20..20, higher nice == lower priority)
        """
        if os.name == "nt":
            # Windows platform: psutil priority-class constants only exist
            # on Windows, so they must stay inside this branch.
            if priority == TaskPriority.LOW:
                return psutil.BELOW_NORMAL_PRIORITY_CLASS
            elif priority == TaskPriority.MIDDLE:
                return psutil.NORMAL_PRIORITY_CLASS
            elif priority == TaskPriority.URGENT:
                return psutil.ABOVE_NORMAL_PRIORITY_CLASS
            elif priority == TaskPriority.GOD:
                return psutil.REALTIME_PRIORITY_CLASS
            else:
                return psutil.NORMAL_PRIORITY_CLASS
        else:
            # UNIX platform: process priority (nice) ranges roughly from
            # -20 to 20; the higher the nice value, the lower the priority.
            if priority == TaskPriority.LOW:
                return 10
            elif priority == TaskPriority.MIDDLE:
                return 0
            elif priority == TaskPriority.URGENT:
                return -10
            elif priority == TaskPriority.GOD:
                return -20
            else:
                return 0

    def get_hardware_info(self):
        """Return a snapshot dict of CPU/GPU counts and current usage rankings."""
        res_dict = {
            "CPU number": self.__cpu_number,
            "CPU usage": self.get_cpu_usage(),
            "GPU number": self.__gpu_number,
            "GPU usage": self.get_gpu_usage(),
        }
        return res_dict

    def allocate_gpu_resource(self, taskpriority, task_num):
        """
        Evenly split ``task_num`` channel tasks across the recommended GPUs.

        :param taskpriority: TaskPriority used to limit the GPU count
        :param task_num: total number of tasks to distribute
        :return: dict mapping GPU id -> list of 1-based task ids
        """
        gpu_recommendation = self.recommend_gpus(priority=taskpriority)
        # Divide by the number of GPUs actually recommended (never empty —
        # recommend_gpus falls back to [0]); dividing by the priority limit
        # could be zero on GPU-less machines and could leave tasks unassigned.
        gpu_count = len(gpu_recommendation)
        base_tasks = task_num // gpu_count
        remainder = task_num % gpu_count
        gpu_allocation = {gpu_id: [] for gpu_id in gpu_recommendation}
        task_id = 1

        # The first ``remainder`` GPUs (the least loaded) take one extra task.
        # Using the list position avoids the original str-vs-int comparison
        # (TypeError) and the off-by-one that gave remainder+1 GPUs an extra.
        for position, gpu_id in enumerate(gpu_recommendation):
            tasks = base_tasks + (1 if position < remainder else 0)
            for _ in range(tasks):
                gpu_allocation[gpu_id].append(task_id)
                task_id += 1
        return gpu_allocation

if __name__ == '__main__':
    # NOTE: ``time`` is already imported at module level; the original
    # re-imported it here redundantly.
    rm_1 = ResourceManager()
    try:
        # Poll and print hardware usage once per second until interrupted.
        while True:
            print(rm_1.get_hardware_info())
            print("recommend cpu: ", rm_1.recommend_cpus())
            print("recommend gpu: ", rm_1.recommend_gpus())
            time.sleep(1)
    except KeyboardInterrupt:
        # Exit cleanly on Ctrl-C instead of dumping a traceback.
        pass
