Fixed the issue causing 'ERROR: Exception in ASGI application' triggered by Gradio 5.x.
SSR will automatically be enabled on Spaces for apps that run Gradio 5! If you would like to disable it, you can set ssr_mode=False in launch().
https://github.com/gradio-app/gradio/issues/9463#issuecomment-2442967320
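For reference, a minimal sketch of the workaround described above, assuming any Gradio 5.x Interface or Blocks app (the echo interface here is just a placeholder; only the launch() argument matters):

    import gradio as gr

    # Placeholder app; the workaround is independent of what the app does.
    demo = gr.Interface(fn=lambda text: text, inputs="text", outputs="text")

    # ssr_mode=False opts out of the server-side rendering that Spaces enables
    # by default for Gradio 5 apps, avoiding the ASGI exception noted above.
    demo.launch(ssr_mode=False)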
- README.md +1 -1
- app.py +3 -1
- requirements-fasterWhisper.txt +2 -2
- requirements-whisper.txt +2 -2
- requirements.txt +1 -1
README.md
CHANGED
@@ -4,7 +4,7 @@ emoji: ✨
 colorFrom: blue
 colorTo: purple
 sdk: gradio
-sdk_version: 4.
+sdk_version: 5.4.0
 app_file: app.py
 pinned: false
 license: apache-2.0
app.py
CHANGED
@@ -1365,7 +1365,8 @@ def create_ui(app_config: ApplicationConfig):
     else:
         print("Queue mode disabled - progress bars will not be shown.")
 
-    demo.launch(inbrowser=app_config.autolaunch, share=app_config.share, server_name=app_config.server_name, server_port=find_free_port())
+    demo.launch(inbrowser=app_config.autolaunch, share=app_config.share, server_name=app_config.server_name, server_port=find_free_port(),
+                ssr_mode=False) # [Gradio 5.x] ERROR: Exception in ASGI application
 
     # Clean up
     ui.close()
@@ -1451,6 +1452,7 @@ if __name__ == '__main__':
         if torch.cuda.is_available():
             deviceId = torch.cuda.current_device()
             totalVram = torch.cuda.get_device_properties(deviceId).total_memory
+            print(f"Total Vram: {totalVram/(1024*1024*1024):.4f}G")
             if totalVram/(1024*1024*1024) <= 4: #VRAM <= 4 GB
                 updated_config.vad_process_timeout = 0
     except Exception as e:
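The second hunk only adds a diagnostic log. As a standalone sketch, the VRAM probe it instruments looks roughly like this (names follow the diff; vad_process_timeout stands in for the config field updated there):

    import torch

    if torch.cuda.is_available():
        deviceId = torch.cuda.current_device()
        # total_memory is reported in bytes; divide by 1024**3 for GiB.
        totalVram = torch.cuda.get_device_properties(deviceId).total_memory
        print(f"Total Vram: {totalVram / (1024 ** 3):.4f}G")
        if totalVram / (1024 ** 3) <= 4:  # 4 GB of VRAM or less
            vad_process_timeout = 0  # stand-in for updated_config.vad_process_timeout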
requirements-fasterWhisper.txt
CHANGED
@@ -2,7 +2,7 @@
 ctranslate2>=4.4.0
 faster-whisper>=1.0.3
 ffmpeg-python==0.2.0
-gradio==5.0
+gradio==5.4.0
 yt-dlp
 json5
 torch
@@ -23,4 +23,4 @@ optimum
 # Needed by ALMA-GGUL
 ctransformers[cuda]
 # Needed by load_in_4bit parameters in transformers
-bitsandbytes
+bitsandbytes
requirements-whisper.txt
CHANGED
@@ -2,7 +2,7 @@
 ctranslate2>=4.4.0
 git+https://github.com/openai/whisper.git
 ffmpeg-python==0.2.0
-gradio==5.0
+gradio==5.4.0
 yt-dlp
 json5
 torch
@@ -23,4 +23,4 @@ optimum
 # Needed by ALMA-GGUL
 ctransformers[cuda]
 # Needed by load_in_4bit parameters in transformers
-bitsandbytes
+bitsandbytes
requirements.txt
CHANGED
@@ -2,7 +2,7 @@
 ctranslate2>=4.4.0
 faster-whisper>=1.0.3
 ffmpeg-python==0.2.0
-gradio==4.
+gradio==5.4.0
 yt-dlp
 json5
 torch