import subprocess

import psutil

def load_model():
    # nothing to initialise for this module
    pass

def run():
    # values read inside WSL differ from the host's, so they are not usable there
    try:
        # 1-minute load average relative to the CPU count, in %
        la = round((psutil.getloadavg()[0] / psutil.cpu_count()) * 100)

        # cpu utilization in % (averaged over all cores, since the previous call)
        cpu = round(psutil.cpu_percent(percpu=False))

        # ram
        ram = psutil.virtual_memory()
        rama = round(ram.available)               # available RAM in bytes
        ramt = round(ram.total)                   # total RAM in bytes
        ramp = 100 - ram.percent                  # RAM still available in %
        ramg = round(ram.available / 1073741824)  # available RAM in GiB

        # vram (first GPU only, via nvidia-smi; memory values are in MiB)
        cmd = 'nvidia-smi --query-gpu=utilization.gpu,memory.used,memory.total --format=csv,noheader,nounits'
        out = subprocess.run(cmd, shell=True, capture_output=True, text=True).stdout
        r = out.splitlines()[0].split(',')
        gpu = round(float(r[0]))            # GPU utilization in %
        vrama = round(float(r[1]))          # VRAM used in MiB
        vramt = round(float(r[2]))          # VRAM total in MiB
        vramp = round(vrama / vramt * 100)  # VRAM used in %
        vramg = round(float(r[1]) / 1024)   # VRAM used in GiB

        # network
        # wanted to capture just one second's worth, but these counters are cumulative since boot
        #net = psutil.net_io_counters()
        #ni = round((float(net.bytes_recv) / 1048576) * 100)
        #no = round((float(net.bytes_sent) / 1048576) * 100)
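        # a per-second figure could be approximated by sampling the counters
        # twice, one second apart (sketch only; this would block run() for ~1s)
        #import time
        #first = psutil.net_io_counters()
        #time.sleep(1)
        #second = psutil.net_io_counters()
        #ni = round((second.bytes_recv - first.bytes_recv) / 1048576)  # MiB/s received
        #no = round((second.bytes_sent - first.bytes_sent) / 1048576)  # MiB/s sent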

        res = {
            'la': la,
            'cpu': cpu,
            'ram': ramg,
            'gpu': gpu,
            'vram': vramg,
        }

        return res
    except Exception:
        # on any failure (e.g. nvidia-smi not available) return an empty result
        return {}
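

# minimal usage sketch (assumption: the module can also be exercised standalone);
# prints the metrics dict once when executed directly
if __name__ == '__main__':
    print(run())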