from typing import List
import time
import multiprocessing as mp
import psutil
import numpy as np
import torch
from prettytable import PrettyTable
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties

def format_runTime(seconds: float):
    """Format a duration as ``HH:MM:SS``, prefixed with ``Dd `` past one day.

    :param seconds: elapsed time in seconds, typically the difference of two
        ``time.time()`` calls (may be a float; sub-second parts are truncated)
    :return: ``'HH:MM:SS'``, or ``'Dd HH:MM:SS'`` when the duration spans days

    >>> format_runTime(3661)
    '01:01:01'
    >>> format_runTime(90061.5)
    '1d 01:01:01'
    """
    # Truncate once up front: the original truncated h/m/s individually but
    # left the day count as a float (e.g. '1.0d 01:01:01' for float input).
    m, s = divmod(int(seconds), 60)
    h, m = divmod(m, 60)
    d, h = divmod(h, 24)
    # Zero-pad with format specs instead of manual '0' + str(...) concatenation.
    clock = f'{h:02d}:{m:02d}:{s:02d}'
    return clock if d == 0 else f'{d}d {clock}'


class ProcessStatus():
    """Record system-wide GPU/CPU/memory usage while a program runs.

    Adapted from:
    https://github.com/TingFree/NLPer-Arsenal/blob/9caa084ec7d2cb31534367ee9af2d28b51f9ee63/codes/nlper/utils/fn.py

    The numbers are global to the machine, not necessarily the usage of the
    monitored process itself (per-process tracking is not implemented yet).

    >>> gpu = 0  # monitor GPU 0, or pass None to skip GPU monitoring
    >>> processStatus = ProcessStatus(gpu)
    >>> p = mp.Process(target=processStatus.record_running_status, args=(1,))
    >>> p.start()  # start the monitoring subprocess
    >>> # ... run the main workload ...
    >>> p.terminate()  # stop the monitoring subprocess
    >>> processStatus.print_statisticAnalysis()  # print summary table
    >>> processStatus.plot_running_info()  # plot/save usage curves
    """

    def __init__(self, gpu: int = None):
        """
        :param gpu: index of the GPU to monitor, or None for CPU/memory only
        """
        self.start = time.time()
        # Manager-backed list so the monitoring subprocess and the main
        # process share the collected samples.
        self.running_info = mp.Manager().list()
        self.gpu = gpu
        # Bug fix: must be `is not None`, not truthiness — GPU index 0 is a
        # valid device but falsy, and skipping this branch left
        # device_total_memory/driver_version/device_name unset while the
        # other methods still took the GPU code path.
        if gpu is not None:
            import pynvml
            pynvml.nvmlInit()
            handle = pynvml.nvmlDeviceGetHandleByIndex(gpu)
            gpu_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
            self.device_total_memory = round(gpu_info.total / 1024 ** 2)  # MiB
            self.driver_version = self._to_str(pynvml.nvmlSystemGetDriverVersion())
            self.device_name = self._to_str(pynvml.nvmlDeviceGetName(handle))
            pynvml.nvmlShutdown()

    @staticmethod
    def _to_str(value):
        """Normalize pynvml string results: older nvidia-ml-py versions
        return ``bytes``, newer ones return ``str``."""
        return value.decode('utf-8') if isinstance(value, bytes) else value

    def record_running_status(self, interval=1):
        """Meant to run in a subprocess: sample GPU/CPU/memory usage forever.

        :param interval: seconds between samples, default 1
        :return: never returns; terminate the subprocess from the parent
        """
        # Sleep instead of busy-waiting on time.time(): the original spin
        # loop burned a full CPU core, which also skewed the cpu_util
        # readings it was recording.
        if self.gpu is not None:  # a GPU is being monitored
            import pynvml
            pynvml.nvmlInit()
            # The handle is loop-invariant; fetch it once.
            handle = pynvml.nvmlDeviceGetHandleByIndex(self.gpu)
            while True:
                time.sleep(interval)
                gpu_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
                mem = psutil.virtual_memory()
                self.running_info.append({
                    'cur_time': time.time(),
                    'gpu_used': round(gpu_info.used / 1024 ** 2, 2),  # GPU memory used (MiB)
                    'gpu_util': pynvml.nvmlDeviceGetUtilizationRates(handle).gpu,  # GPU utilization (0~100)
                    'cpu_util': psutil.cpu_percent(),  # CPU utilization (0.0~100.0)
                    'mem_util': mem.percent,  # memory utilization (0.0~100.0)
                    'mem_used': round(mem.used / 1024 ** 2)  # memory used (MiB)
                })
        else:  # no GPU monitored
            while True:
                time.sleep(interval)
                mem = psutil.virtual_memory()
                self.running_info.append({
                    'cur_time': time.time(),
                    'cpu_util': psutil.cpu_percent(),  # CPU utilization (0.0~100.0)
                    'mem_util': mem.percent,  # memory utilization (0.0~100.0)
                    'mem_used': round(mem.used / 1024 ** 2)  # memory used (MiB)
                })

    def print_statisticAnalysis(self):
        """Print a summary table of running time and GPU/CPU/memory usage."""
        start = self.start
        table = PrettyTable(['Param', 'Value'])
        if self.gpu is not None:  # a GPU was monitored
            table.add_row(['cuda version', torch.version.cuda])
            table.add_row(['driver version', self.driver_version])
            table.add_row(['device', self.device_name])
            table.add_row(['device id', self.gpu])
            table.add_row(['start time', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start))])
            table.add_row(['end time', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())])
            table.add_row(['running time', format_runTime(time.time() - start)])
            table.add_row(['device total memory', f'{self.device_total_memory} MiB'])
            table.add_row(['device max used memory', f"{round(np.max([t['gpu_used'] for t in self.running_info]), 2)} MiB"])
            table.add_row(['device avg util ratio', f"{round(np.mean([t['gpu_util'] for t in self.running_info]), 2)}%"])
        else:  # no GPU monitored
            table.add_row(['start time', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start))])
            table.add_row(['end time', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())])
            table.add_row(['running time', format_runTime(time.time() - start)])
        table.add_row(['cpu avg util ratio', f"{round(np.mean([t['cpu_util'] for t in self.running_info]), 2)}%"])
        table.add_row(['memory max used', f"{round(np.max([t['mem_used'] for t in self.running_info]), 2)} MiB"])
        table.add_row(['memory avg util ratio', f"{round(np.mean([t['mem_util'] for t in self.running_info]), 2)}%"])
        table.align['Param'] = 'l'
        table.align['Value'] = 'l'
        print(table)

    def plot_running_info(self, show=False, saved_path='./status.png'):
        """Plot GPU/CPU/memory usage over time; by default only save the figure.

        :param show: whether to call plt.show() to display the figure
        :param saved_path: where to save the figure; falsy value skips saving
        """
        font = FontProperties()
        font.set_family('serif')
        font.set_name('Times New Roman')
        font.set_style('normal')
        font.set_size(12)
        # NOTE(review): the 'science' style requires the SciencePlots package.
        plt.style.use(['science', 'no-latex'])
        plt.figure(figsize=(12, 12), dpi=300)

        cur_time = [item['cur_time'] - self.start for item in self.running_info]
        cpu_util = [item['cpu_util'] for item in self.running_info]
        mem_util = [item['mem_util'] for item in self.running_info]
        mem_used = [item['mem_used'] for item in self.running_info]

        # Top subplot: utilization ratios (percentages).
        ax = plt.subplot(2, 1, 1)
        if self.gpu is not None:
            ax.plot(cur_time, [item['gpu_util'] for item in self.running_info], label='gpu_util')
        ax.plot(cur_time, cpu_util, label='cpu_util')
        ax.plot(cur_time, mem_util, label='mem_util')
        plt.xticks(font_properties=font)
        plt.yticks(font_properties=font)
        plt.gca().set_ylabel('percentage', font_properties=font, fontsize=16)
        plt.legend()

        # Bottom subplot: absolute memory usage (MiB).
        ax = plt.subplot(2, 1, 2)
        if self.gpu is not None:
            ax.plot(cur_time, [item['gpu_used'] for item in self.running_info], label='gpu_used')
        ax.plot(cur_time, mem_used, label='mem_used')
        plt.xticks(font_properties=font)
        plt.yticks(font_properties=font)
        plt.gca().set_xlabel('time', font_properties=font, fontsize=16)
        plt.gca().set_ylabel('capacity', font_properties=font, fontsize=16)
        plt.legend()
        plt.title("status", font_properties=font, fontsize=20)

        if show:
            plt.show()
        if saved_path:
            # Bug fix: previously hard-coded './status.png', ignoring saved_path.
            plt.savefig(saved_path)