gobeldan committed on
Commit
6407f12
·
verified ·
1 Parent(s): 3fe2792
Files changed (1) hide show
  1. app.py +15 -6
app.py CHANGED
@@ -7,22 +7,31 @@ from languages import get_language_names, get_language_from_name
7
  from subtitle_manager import Subtitle
8
  from pathlib import Path
9
  import psutil
10
- import torch
11
 
12
  logging.basicConfig(level=logging.INFO)
13
  last_model = None
14
  model = None
15
 
16
 
 
 
 
 
 
 
 
17
  def get_workers_count():
18
- if torch.cuda.is_available():
19
- memory = torch.cuda.get_device_properties(0).total_memory
20
- else:
 
21
  memory = psutil.virtual_memory().total
22
-
 
 
23
  workers = int(memory / 2_000_000_000)
24
  logging.info(f"workers:{workers}")
25
- logging.info(f"memory:{memory/ 1_000_000_000} GB")
26
  return workers
27
 
28
  def transcribe_webui_simple_progress(modelName, languageName, urlData, multipleFiles, microphoneData, task,
 
7
  from subtitle_manager import Subtitle
8
  from pathlib import Path
9
  import psutil
10
+ import pynvml
11
 
12
  logging.basicConfig(level=logging.INFO)
13
  last_model = None
14
  model = None
15
 
16
 
17
+ def get_free_gpu_memory():
18
+ pynvml.nvmlInit()
19
+ handle = pynvml.nvmlDeviceGetHandleByIndex(0)
20
+ meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
21
+ pynvml.nvmlShutdown()
22
+ return meminfo.free
23
+
24
  def get_workers_count():
25
+ try:
26
+ memory = get_free_gpu_memory()
27
+ logging.info("CUDA memory")
28
+ except Exception:
29
  memory = psutil.virtual_memory().total
30
+ logging.info("RAM memory")
31
+
32
+ logging.info(f"memory:{memory/ 1_000_000_000} GB")
33
  workers = int(memory / 2_000_000_000)
34
  logging.info(f"workers:{workers}")
 
35
  return workers
36
 
37
  def transcribe_webui_simple_progress(modelName, languageName, urlData, multipleFiles, microphoneData, task,