Spaces: Running on T4
jhj0517 committed
Commit • 7b34aed
Parent(s): e4787f9

fix bug
Browse files
- app.py +6 -8
- modules/faster_whisper_inference.py +1 -1
- modules/nllb_inference.py +1 -1
- modules/whisper_Inference.py +1 -1
- requirements.txt +1 -2
app.py
CHANGED
@@ -1,6 +1,5 @@
 import spaces
 import torch
-import tensorflow as tf
 
 import gradio as gr
 import os
@@ -20,14 +19,13 @@ class App:
         self.app = gr.Blocks(css=CSS, theme=self.args.theme)
         #self.whisper_inf = WhisperInference() if self.args.disable_faster_whisper else FasterWhisperInference()
         self.whisper_inf = WhisperInference()
+        # NOTE: Faster whisper is not able to use in HuggingFace space. see more info : https://huggingface.co/spaces/jhj0517/Whisper-WebUI/discussions/1
         print("Use Open AI Whisper implementation")
-
-
-
-
-
-        # tf_version = tf.__version__
-        # print(f"tensorflow version: {tf_version}")
+        print(f"Device \"{self.whisper_inf.device}\" is detected")
+        cuda_version = torch.version.cuda
+        print(f"CUDA version: {cuda_version}")
+        cudnn_version = torch.backends.cudnn.version()
+        print(f"cuDNN version: {cudnn_version}")
         self.nllb_inf = NLLBInference()
         self.deepl_api = DeepLAPI()
 
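For context, the logging added above follows the standard torch device-detection pattern. A minimal standalone sketch (the function name report_runtime_device is illustrative, not part of the repo); on CPU-only builds torch.version.cuda and torch.backends.cudnn.version() both return None, so the CUDA details are only meaningful when a GPU is present:

    import torch

    def report_runtime_device() -> str:
        """Log the detected device, plus CUDA/cuDNN versions when a GPU is present."""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        print(f'Device "{device}" is detected')
        if device == "cuda":
            print(f"CUDA version: {torch.version.cuda}")               # e.g. "12.1"
            print(f"cuDNN version: {torch.backends.cudnn.version()}")  # e.g. 8902
        return device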
modules/faster_whisper_inference.py
CHANGED
@@ -26,7 +26,7 @@ class FasterWhisperInference(BaseInterface):
         self.available_models = whisper.available_models()
         self.available_langs = sorted(list(whisper.tokenizer.LANGUAGES.values()))
         self.translatable_models = ["large", "large-v1", "large-v2", "large-v3"]
-        self.device = "cuda"
+        self.device = "cuda" if torch.cuda.is_available() else "cpu"
         self.available_compute_types = ["int8_float16", "float32", "bfloat16", "float16", "int8", "int8_float32", "int8_bfloat16"]
         self.current_compute_type = "float16"
         self.default_beam_size = 1
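The fallback above keeps current_compute_type = "float16" even when the device ends up being "cpu". Since CTranslate2's float16 kernels are GPU-oriented, a common companion pattern is to pick the compute type together with the device; a sketch under that assumption (the int8 CPU default here is illustrative, not what the commit does):

    import torch

    def pick_device_and_compute_type() -> tuple[str, str]:
        """Choose a faster-whisper device and a compute type that suits it."""
        if torch.cuda.is_available():
            return "cuda", "float16"  # half precision is efficient on GPU
        return "cpu", "int8"          # assumed CPU-friendly default; float32 also works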
modules/nllb_inference.py
CHANGED
@@ -23,7 +23,7 @@ class NLLBInference(BaseInterface):
         self.available_models = NLLB_MODELS
         self.available_source_langs = list(NLLB_AVAILABLE_LANGS.keys())
         self.available_target_langs = list(NLLB_AVAILABLE_LANGS.keys())
-        self.device = 0
+        self.device = 0 if torch.cuda.is_available() else 1
         self.pipeline = None
 
     def translate_text(self, text):
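One caveat on the fallback value: transformers' pipeline(device=...) treats non-negative integers as GPU indices and -1 as CPU, so falling back to 1 selects a second GPU rather than the CPU. A sketch of the conventional fallback (the NLLB checkpoint name is only an example):

    import torch
    from transformers import pipeline

    device = 0 if torch.cuda.is_available() else -1  # -1 = CPU in the pipeline API
    translator = pipeline(
        "translation",
        model="facebook/nllb-200-distilled-600M",
        device=device,
    )
    print(translator("Hello, world!", src_lang="eng_Latn", tgt_lang="fra_Latn"))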
modules/whisper_Inference.py
CHANGED
@@ -23,7 +23,7 @@ class WhisperInference(BaseInterface):
         self.model = None
         self.available_models = whisper.available_models()
         self.available_langs = sorted(list(whisper.tokenizer.LANGUAGES.values()))
-        self.device = "cuda"
+        self.device = "cuda" if torch.cuda.is_available() else "cpu"
         self.available_compute_types = ["float16", "float32"]
         self.current_compute_type = "float16" if self.device == "cuda" else "float32"
         self.default_beam_size = 1
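With the CPU fallback in place, the existing current_compute_type line already pairs fp16 with GPU and fp32 with CPU. A minimal end-to-end sketch of that pairing with openai-whisper ("audio.mp3" is a placeholder path):

    import torch
    import whisper  # openai-whisper

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = whisper.load_model("base", device=device)
    # fp16 is GPU-only; on CPU whisper would warn and fall back to fp32 anyway.
    result = model.transcribe("audio.mp3", fp16=(device == "cuda"))
    print(result["text"])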
requirements.txt
CHANGED
@@ -4,5 +4,4 @@ git+https://github.com/jhj0517/jhj0517-whisper.git
 faster-whisper
 transformers
 gradio==4.14.0
-pytube
-tensorflow
+pytube