aadnk committed
Commit 8063dc1
2 Parent(s): 501db69 102d1ea

Merge branch 'main' of https://huggingface.co/spaces/aadnk/whisper-webui

README.md CHANGED
@@ -54,11 +54,15 @@ When more than one file is processed, the UI will also generate a "All_Output" z
 
 ## Diarization
 
-To detect different speakers in the audio, you can use the [whisper-diarization](https://gitlab.com/aadnk/whisper-diarization) application.
+To detect different speakers in the audio, you can use the [whisper-diarization](https://gitlab.com/aadnk/whisper-diarization) application, or check "Diarization" in the options.
 
 Download the JSON file after running Whisper on an audio file, and then run app.py in the
 whisper-diarization repository with the audio file and the JSON file as arguments.
 
+## Translation
+
+To translate the transcript to English, set the task to "Translate". You can also use ChatGPT for this task via my [translate-gpt](https://gitlab.com/aadnk/translate-gpt) CLI application.
+
 ## Whisper Implementation
 
 You can choose between using `whisper` or `faster-whisper`. [Faster Whisper](https://github.com/guillaumekln/faster-whisper) as a drop-in replacement for the
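
The README change above describes the diarization workflow only in prose. A minimal sketch of that workflow, with hypothetical file names and assuming app.py takes the audio file and the downloaded Whisper JSON as positional arguments (the argument order is not confirmed by this diff):

```python
# Hedged sketch of the diarization step described in the README. File names are
# placeholders, and the positional-argument order for app.py is an assumption.
import subprocess

audio_file = "interview.mp3"     # the audio file that was transcribed in the WebUI
whisper_json = "interview.json"  # the JSON file downloaded after transcription

# Run app.py from a local checkout of https://gitlab.com/aadnk/whisper-diarization
subprocess.run(
    ["python", "app.py", audio_file, whisper_json],
    cwd="whisper-diarization",
    check=True,
)
```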
config.json5 CHANGED
@@ -22,10 +22,18 @@
       "name": "large",
       "url": "large"
     },
+    {
+      "name": "large-v1",
+      "url": "large-v1"
+    },
     {
       "name": "large-v2",
       "url": "large-v2"
     },
+    {
+      "name": "large-v3",
+      "url": "large-v3"
+    },
     // Uncomment to add custom Japanese models
     // NOTE: For Faster-Whisper, the models must be converted to the CTranslate2 format,
     // see https://github.com/guillaumekln/faster-whisper#model-conversion
@@ -153,4 +161,4 @@
     "diarization_max_speakers": 8,
     // The number of seconds before inactivate processes are terminated. Use 0 to close processes immediately, or None for no timeout.
     "diarization_process_timeout": 60,
-}
+}
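
As a reading aid for the config change above, here is an illustrative sketch (not the application's actual lookup code) of how a selected name such as the newly added "large-v3" could be resolved against the model list in config.json5. The top-level `models` key and the use of the `json5` package are assumptions made for the example.

```python
# Illustrative only: resolve a model name from config.json5 to its "url" value.
# Assumes the entries shown in the diff live under a top-level "models" key.
import json5  # third-party parser for JSON5 files (pip install json5)

with open("config.json5", "r", encoding="utf-8") as f:
    config = json5.load(f)

def resolve_model_url(name: str) -> str:
    """Return the "url" configured for the given model name."""
    for model in config["models"]:
        if model["name"] == name:
            return model["url"]
    raise KeyError(f"No model named {name!r} in config.json5")

print(resolve_model_url("large-v3"))  # -> "large-v3" with the entries added above
```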
src/whisper/fasterWhisperContainer.py CHANGED
@@ -43,11 +43,11 @@ class FasterWhisperContainer(AbstractWhisperContainer):
         model_url = model_config.url
 
         if model_config.type == "whisper":
-            if model_url not in ["tiny", "base", "small", "medium", "large", "large-v1", "large-v2"]:
+            if model_url not in ["tiny", "base", "small", "medium", "large", "large-v1", "large-v2", "large-v3"]:
                 raise Exception("FasterWhisperContainer does not yet support Whisper models. Use ct2-transformers-converter to convert the model to a faster-whisper model.")
             if model_url == "large":
-                # large is an alias for large-v1
-                model_url = "large-v1"
+                # large is an alias for large-v3
+                model_url = "large-v3"
 
         device = self.device
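
For context on the code change above: after this commit the bare "large" alias resolves to "large-v3" before the model is handed to faster-whisper. A minimal sketch of that behaviour using the public faster-whisper API (the device and compute_type values are just example choices):

```python
# Minimal sketch of the alias handling after this commit, using faster-whisper directly.
from faster_whisper import WhisperModel

requested = "large"
# Mirrors the container change: "large" is now an alias for "large-v3".
model_url = "large-v3" if requested == "large" else requested

model = WhisperModel(model_url, device="cuda", compute_type="float16")
segments, info = model.transcribe("interview.mp3")
for segment in segments:
    print(f"[{segment.start:.2f} -> {segment.end:.2f}] {segment.text}")
```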