artificialguybr committed
Commit 9e19d29 • 1 Parent(s): 45a4010

Update app.py

Files changed (1)
  1. app.py +28 -1
app.py CHANGED
@@ -19,6 +19,9 @@ import torch
 import torchvision
 from tqdm import tqdm
 from numba import jit
+import threading
+import time
+import GPUtil
 
 os.environ["COQUI_TOS_AGREED"] = "1"
 
@@ -26,6 +29,26 @@ ZipFile("ffmpeg.zip").extractall()
 st = os.stat('ffmpeg')
 os.chmod('ffmpeg', st.st_mode | stat.S_IEXEC)
 
+# Initialize peak usage variables
+peak_gpu_usage = 0.0
+peak_vram_usage = 0.0
+
+# Monitoring function
+def monitor_gpu_usage():
+    global peak_gpu_usage, peak_vram_usage
+    while True:
+        gpus = GPUtil.getGPUs()
+        for gpu in gpus:
+            peak_gpu_usage = max(peak_gpu_usage, gpu.load)
+            peak_vram_usage = max(peak_vram_usage, gpu.memoryUsed)
+        time.sleep(1) # Check every second
+
+# Start the monitoring thread
+monitor_thread = threading.Thread(target=monitor_gpu_usage)
+monitor_thread.daemon = True
+monitor_thread.start()
+
+#Whisper
 model_size = "small"
 model = WhisperModel(model_size, device="cuda", compute_type="int8")
 
@@ -112,7 +135,11 @@ def process_video(radio, video, target_language):
        print(f"File {file} not found for deletion.")
 
    return output_video_path
-
+
+# Display peak usages at the end
+print(f"Peak GPU usage: {peak_gpu_usage * 100}%")
+print(f"Peak VRAM usage: {peak_vram_usage}MB")
+
 def swap(radio):
    if(radio == "Upload"):
        return gr.update(source="upload")
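
For reference, a minimal standalone sketch of the polling pattern this commit adds. The stop_event and interval parameters are illustrative additions, not part of the committed code (which loops forever in a daemon thread); it assumes GPUtil is installed (pip install gputil) and at least one NVIDIA GPU is visible.

import threading
import time

import GPUtil

peak_gpu_usage = 0.0   # GPU load as a fraction in [0, 1], as reported by GPUtil
peak_vram_usage = 0.0  # VRAM in MB, as reported by GPUtil

def monitor_gpu_usage(stop_event, interval=1.0):
    # Poll every `interval` seconds and keep running maxima until signalled to stop.
    global peak_gpu_usage, peak_vram_usage
    while not stop_event.is_set():
        for gpu in GPUtil.getGPUs():
            peak_gpu_usage = max(peak_gpu_usage, gpu.load)
            peak_vram_usage = max(peak_vram_usage, gpu.memoryUsed)
        time.sleep(interval)

stop = threading.Event()
thread = threading.Thread(target=monitor_gpu_usage, args=(stop,), daemon=True)
thread.start()

# ... run the GPU workload (e.g. process_video) here ...

stop.set()
thread.join()
print(f"Peak GPU usage: {peak_gpu_usage * 100:.1f}%")
print(f"Peak VRAM usage: {peak_vram_usage:.0f}MB")

Signalling the worker with an Event and joining it makes shutdown explicit; the committed version instead relies on the daemon flag so the interpreter tears the thread down on exit.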