LAP-DEV committed on
Commit 22205d7 · verified · 1 Parent(s): 6132a73

Update app.py

Files changed (1): app.py (+113, -113)
app.py CHANGED
@@ -101,125 +101,125 @@ class App:
  cb_translate = gr.Checkbox(value=whisper_params["is_translate"], label="Translate to English",interactive=True)
  cb_timestamp = gr.Checkbox(value=whisper_params["add_timestamp"], label="Add timestamp to output file",interactive=True, visible=True)
  dd_file_format = gr.Dropdown(choices=["TXT","SRT"], value="TXT", label="Output format", visible=False, info="Output preview format")
-
- with gr.Accordion("test", open=False, visible=True):
- with gr.Accordion("test2", open=False, visible=True):
- cb_diarize = gr.Checkbox(value=diarization_params["is_diarize"], label="Speaker diarization (HuggingFace token required)",interactive=True)
-
- with gr.Accordion("Diarization options", open=False, visible=True):
+
+ with gr.Row():
+ cb_diarize = gr.Checkbox(value=diarization_params["is_diarize"], label="Speaker diarization (HuggingFace token required)",interactive=True)
  tb_hf_token = gr.Text(label="HuggingFace Token", value=diarization_params["hf_token"],
  info="This is only needed the first time you download the model. If you already have"
  " models, you don't need to enter. To download the model, you must manually go "
  "to https://huggingface.co/pyannote/speaker-diarization-3.1 and agree to"
  " their requirement.")
- dd_diarization_device = gr.Dropdown(label="Device",
- choices=self.whisper_inf.diarizer.get_available_device(),
- value=self.whisper_inf.diarizer.get_device())
-
- with gr.Accordion("Advanced options", open=False):
- nb_beam_size = gr.Number(label="Beam Size", value=whisper_params["beam_size"], precision=0, interactive=True,
- info="Beam size to use for decoding.")
- nb_log_prob_threshold = gr.Number(label="Log Probability Threshold", value=whisper_params["log_prob_threshold"], interactive=True,
- info="If the average log probability over sampled tokens is below this value, treat as failed.")
- nb_no_speech_threshold = gr.Number(label="No Speech Threshold", value=whisper_params["no_speech_threshold"], interactive=True,
- info="If the no speech probability is higher than this value AND the average log probability over sampled tokens is below 'Log Prob Threshold', consider the segment as silent.")
- dd_compute_type = gr.Dropdown(label="Compute Type", choices=self.whisper_inf.available_compute_types,
- value=self.whisper_inf.current_compute_type, interactive=True,
- allow_custom_value=True,
- info="Select the type of computation to perform.")
- nb_best_of = gr.Number(label="Best Of", value=whisper_params["best_of"], interactive=True,
- info="Number of candidates when sampling with non-zero temperature.")
- nb_patience = gr.Number(label="Patience", value=whisper_params["patience"], interactive=True,
- info="Beam search patience factor.")
- cb_condition_on_previous_text = gr.Checkbox(label="Condition On Previous Text", value=whisper_params["condition_on_previous_text"],
- interactive=True,
- info="Condition on previous text during decoding.")
- sld_prompt_reset_on_temperature = gr.Slider(label="Prompt Reset On Temperature", value=whisper_params["prompt_reset_on_temperature"],
- minimum=0, maximum=1, step=0.01, interactive=True,
- info="Resets prompt if temperature is above this value."
- " Arg has effect only if 'Condition On Previous Text' is True.")
- tb_initial_prompt = gr.Textbox(label="Initial Prompt", value=None, interactive=True,
- info="Initial prompt to use for decoding.")
- sd_temperature = gr.Slider(label="Temperature", value=whisper_params["temperature"], minimum=0.0,
- step=0.01, maximum=1.0, interactive=True,
- info="Temperature for sampling. It can be a tuple of temperatures, which will be successively used upon failures according to either `Compression Ratio Threshold` or `Log Prob Threshold`.")
- nb_compression_ratio_threshold = gr.Number(label="Compression Ratio Threshold", value=whisper_params["compression_ratio_threshold"],
- interactive=True,
- info="If the gzip compression ratio is above this value, treat as failed.")
- nb_chunk_length = gr.Number(label="Chunk Length (s)", value=lambda: whisper_params["chunk_length"],
- precision=0,
- info="The length of audio segments. If it is not None, it will overwrite the default chunk_length of the FeatureExtractor.")
- with gr.Group(visible=isinstance(self.whisper_inf, FasterWhisperInference)):
- nb_length_penalty = gr.Number(label="Length Penalty", value=whisper_params["length_penalty"],
- info="Exponential length penalty constant.")
- nb_repetition_penalty = gr.Number(label="Repetition Penalty", value=whisper_params["repetition_penalty"],
- info="Penalty applied to the score of previously generated tokens (set > 1 to penalize).")
- nb_no_repeat_ngram_size = gr.Number(label="No Repeat N-gram Size", value=whisper_params["no_repeat_ngram_size"],
- precision=0,
- info="Prevent repetitions of n-grams with this size (set 0 to disable).")
- tb_prefix = gr.Textbox(label="Prefix", value=lambda: whisper_params["prefix"],
- info="Optional text to provide as a prefix for the first window.")
- cb_suppress_blank = gr.Checkbox(label="Suppress Blank", value=whisper_params["suppress_blank"],
- info="Suppress blank outputs at the beginning of the sampling.")
- tb_suppress_tokens = gr.Textbox(label="Suppress Tokens", value=whisper_params["suppress_tokens"],
- info="List of token IDs to suppress. -1 will suppress a default set of symbols as defined in the model config.json file.")
- nb_max_initial_timestamp = gr.Number(label="Max Initial Timestamp", value=whisper_params["max_initial_timestamp"],
- info="The initial timestamp cannot be later than this.")
- cb_word_timestamps = gr.Checkbox(label="Word Timestamps", value=whisper_params["word_timestamps"],
- info="Extract word-level timestamps using the cross-attention pattern and dynamic time warping, and include the timestamps for each word in each segment.")
- tb_prepend_punctuations = gr.Textbox(label="Prepend Punctuations", value=whisper_params["prepend_punctuations"],
- info="If 'Word Timestamps' is True, merge these punctuation symbols with the next word.")
- tb_append_punctuations = gr.Textbox(label="Append Punctuations", value=whisper_params["append_punctuations"],
- info="If 'Word Timestamps' is True, merge these punctuation symbols with the previous word.")
- nb_max_new_tokens = gr.Number(label="Max New Tokens", value=lambda: whisper_params["max_new_tokens"],
- precision=0,
- info="Maximum number of new tokens to generate per-chunk. If not set, the maximum will be set by the default max_length.")
- nb_hallucination_silence_threshold = gr.Number(label="Hallucination Silence Threshold (sec)",
- value=lambda: whisper_params["hallucination_silence_threshold"],
- info="When 'Word Timestamps' is True, skip silent periods longer than this threshold (in seconds) when a possible hallucination is detected.")
- tb_hotwords = gr.Textbox(label="Hotwords", value=lambda: whisper_params["hotwords"],
- info="Hotwords/hint phrases to provide the model with. Has no effect if prefix is not None.")
- nb_language_detection_threshold = gr.Number(label="Language Detection Threshold", value=lambda: whisper_params["language_detection_threshold"],
- info="If the maximum probability of the language tokens is higher than this value, the language is detected.")
- nb_language_detection_segments = gr.Number(label="Language Detection Segments", value=lambda: whisper_params["language_detection_segments"],
- precision=0,
- info="Number of segments to consider for the language detection.")
- with gr.Group(visible=isinstance(self.whisper_inf, InsanelyFastWhisperInference)):
- nb_batch_size = gr.Number(label="Batch Size", value=whisper_params["batch_size"], precision=0)
-
- with gr.Accordion("Background Music Remover Filter", open=False):
- cb_bgm_separation = gr.Checkbox(label="Enable Background Music Remover Filter", value=uvr_params["is_separate_bgm"],
+
+ with gr.Accordion("Advanced options", open=False, visible=True):
+ with gr.Accordion("Advanced diarization options", open=False, visible=True):
+ dd_diarization_device = gr.Dropdown(label="Device",
+ choices=self.whisper_inf.diarizer.get_available_device(),
+ value=self.whisper_inf.diarizer.get_device())
+
+ with gr.Accordion("Advanced processing options", open=False):
+ nb_beam_size = gr.Number(label="Beam Size", value=whisper_params["beam_size"], precision=0, interactive=True,
+ info="Beam size to use for decoding.")
+ nb_log_prob_threshold = gr.Number(label="Log Probability Threshold", value=whisper_params["log_prob_threshold"], interactive=True,
+ info="If the average log probability over sampled tokens is below this value, treat as failed.")
+ nb_no_speech_threshold = gr.Number(label="No Speech Threshold", value=whisper_params["no_speech_threshold"], interactive=True,
+ info="If the no speech probability is higher than this value AND the average log probability over sampled tokens is below 'Log Prob Threshold', consider the segment as silent.")
+ dd_compute_type = gr.Dropdown(label="Compute Type", choices=self.whisper_inf.available_compute_types,
+ value=self.whisper_inf.current_compute_type, interactive=True,
+ allow_custom_value=True,
+ info="Select the type of computation to perform.")
+ nb_best_of = gr.Number(label="Best Of", value=whisper_params["best_of"], interactive=True,
+ info="Number of candidates when sampling with non-zero temperature.")
+ nb_patience = gr.Number(label="Patience", value=whisper_params["patience"], interactive=True,
+ info="Beam search patience factor.")
+ cb_condition_on_previous_text = gr.Checkbox(label="Condition On Previous Text", value=whisper_params["condition_on_previous_text"],
+ interactive=True,
+ info="Condition on previous text during decoding.")
+ sld_prompt_reset_on_temperature = gr.Slider(label="Prompt Reset On Temperature", value=whisper_params["prompt_reset_on_temperature"],
+ minimum=0, maximum=1, step=0.01, interactive=True,
+ info="Resets prompt if temperature is above this value."
+ " Arg has effect only if 'Condition On Previous Text' is True.")
+ tb_initial_prompt = gr.Textbox(label="Initial Prompt", value=None, interactive=True,
+ info="Initial prompt to use for decoding.")
+ sd_temperature = gr.Slider(label="Temperature", value=whisper_params["temperature"], minimum=0.0,
+ step=0.01, maximum=1.0, interactive=True,
+ info="Temperature for sampling. It can be a tuple of temperatures, which will be successively used upon failures according to either `Compression Ratio Threshold` or `Log Prob Threshold`.")
+ nb_compression_ratio_threshold = gr.Number(label="Compression Ratio Threshold", value=whisper_params["compression_ratio_threshold"],
+ interactive=True,
+ info="If the gzip compression ratio is above this value, treat as failed.")
+ nb_chunk_length = gr.Number(label="Chunk Length (s)", value=lambda: whisper_params["chunk_length"],
+ precision=0,
+ info="The length of audio segments. If it is not None, it will overwrite the default chunk_length of the FeatureExtractor.")
+ with gr.Group(visible=isinstance(self.whisper_inf, FasterWhisperInference)):
+ nb_length_penalty = gr.Number(label="Length Penalty", value=whisper_params["length_penalty"],
+ info="Exponential length penalty constant.")
+ nb_repetition_penalty = gr.Number(label="Repetition Penalty", value=whisper_params["repetition_penalty"],
+ info="Penalty applied to the score of previously generated tokens (set > 1 to penalize).")
+ nb_no_repeat_ngram_size = gr.Number(label="No Repeat N-gram Size", value=whisper_params["no_repeat_ngram_size"],
+ precision=0,
+ info="Prevent repetitions of n-grams with this size (set 0 to disable).")
+ tb_prefix = gr.Textbox(label="Prefix", value=lambda: whisper_params["prefix"],
+ info="Optional text to provide as a prefix for the first window.")
+ cb_suppress_blank = gr.Checkbox(label="Suppress Blank", value=whisper_params["suppress_blank"],
+ info="Suppress blank outputs at the beginning of the sampling.")
+ tb_suppress_tokens = gr.Textbox(label="Suppress Tokens", value=whisper_params["suppress_tokens"],
+ info="List of token IDs to suppress. -1 will suppress a default set of symbols as defined in the model config.json file.")
+ nb_max_initial_timestamp = gr.Number(label="Max Initial Timestamp", value=whisper_params["max_initial_timestamp"],
+ info="The initial timestamp cannot be later than this.")
+ cb_word_timestamps = gr.Checkbox(label="Word Timestamps", value=whisper_params["word_timestamps"],
+ info="Extract word-level timestamps using the cross-attention pattern and dynamic time warping, and include the timestamps for each word in each segment.")
+ tb_prepend_punctuations = gr.Textbox(label="Prepend Punctuations", value=whisper_params["prepend_punctuations"],
+ info="If 'Word Timestamps' is True, merge these punctuation symbols with the next word.")
+ tb_append_punctuations = gr.Textbox(label="Append Punctuations", value=whisper_params["append_punctuations"],
+ info="If 'Word Timestamps' is True, merge these punctuation symbols with the previous word.")
+ nb_max_new_tokens = gr.Number(label="Max New Tokens", value=lambda: whisper_params["max_new_tokens"],
+ precision=0,
+ info="Maximum number of new tokens to generate per-chunk. If not set, the maximum will be set by the default max_length.")
+ nb_hallucination_silence_threshold = gr.Number(label="Hallucination Silence Threshold (sec)",
+ value=lambda: whisper_params["hallucination_silence_threshold"],
+ info="When 'Word Timestamps' is True, skip silent periods longer than this threshold (in seconds) when a possible hallucination is detected.")
+ tb_hotwords = gr.Textbox(label="Hotwords", value=lambda: whisper_params["hotwords"],
+ info="Hotwords/hint phrases to provide the model with. Has no effect if prefix is not None.")
+ nb_language_detection_threshold = gr.Number(label="Language Detection Threshold", value=lambda: whisper_params["language_detection_threshold"],
+ info="If the maximum probability of the language tokens is higher than this value, the language is detected.")
+ nb_language_detection_segments = gr.Number(label="Language Detection Segments", value=lambda: whisper_params["language_detection_segments"],
+ precision=0,
+ info="Number of segments to consider for the language detection.")
+ with gr.Group(visible=isinstance(self.whisper_inf, InsanelyFastWhisperInference)):
+ nb_batch_size = gr.Number(label="Batch Size", value=whisper_params["batch_size"], precision=0)
+
+ with gr.Accordion("Background Music Remover Filter", open=False):
+ cb_bgm_separation = gr.Checkbox(label="Enable Background Music Remover Filter", value=uvr_params["is_separate_bgm"],
+ interactive=True,
+ info="Enabling this will remove background music by submodel before"
+ " transcribing ")
+ dd_uvr_device = gr.Dropdown(label="Device", value=self.whisper_inf.music_separator.device,
+ choices=self.whisper_inf.music_separator.available_devices)
+ dd_uvr_model_size = gr.Dropdown(label="Model", value=uvr_params["model_size"],
+ choices=self.whisper_inf.music_separator.available_models)
+ nb_uvr_segment_size = gr.Number(label="Segment Size", value=uvr_params["segment_size"], precision=0)
+ cb_uvr_save_file = gr.Checkbox(label="Save separated files to output", value=uvr_params["save_file"])
+ cb_uvr_enable_offload = gr.Checkbox(label="Offload sub model after removing background music",
+ value=uvr_params["enable_offload"])
+
+ with gr.Accordion("Voice Detection Filter", open=False):
+ cb_vad_filter = gr.Checkbox(label="Enable Silero VAD Filter", value=vad_params["vad_filter"],
  interactive=True,
- info="Enabling this will remove background music by submodel before"
- " transcribing ")
- dd_uvr_device = gr.Dropdown(label="Device", value=self.whisper_inf.music_separator.device,
- choices=self.whisper_inf.music_separator.available_devices)
- dd_uvr_model_size = gr.Dropdown(label="Model", value=uvr_params["model_size"],
- choices=self.whisper_inf.music_separator.available_models)
- nb_uvr_segment_size = gr.Number(label="Segment Size", value=uvr_params["segment_size"], precision=0)
- cb_uvr_save_file = gr.Checkbox(label="Save separated files to output", value=uvr_params["save_file"])
- cb_uvr_enable_offload = gr.Checkbox(label="Offload sub model after removing background music",
- value=uvr_params["enable_offload"])
-
- with gr.Accordion("Voice Detection Filter", open=False):
- cb_vad_filter = gr.Checkbox(label="Enable Silero VAD Filter", value=vad_params["vad_filter"],
- interactive=True,
- info="Enable this to transcribe only detected voice parts by submodel.")
- sd_threshold = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="Speech Threshold",
- value=vad_params["threshold"],
- info="Lower it to be more sensitive to small sounds.")
- nb_min_speech_duration_ms = gr.Number(label="Minimum Speech Duration (ms)", precision=0,
- value=vad_params["min_speech_duration_ms"],
- info="Final speech chunks shorter than this time are thrown out")
- nb_max_speech_duration_s = gr.Number(label="Maximum Speech Duration (s)",
- value=vad_params["max_speech_duration_s"],
- info="Maximum duration of speech chunks in \"seconds\".")
- nb_min_silence_duration_ms = gr.Number(label="Minimum Silence Duration (ms)", precision=0,
- value=vad_params["min_silence_duration_ms"],
- info="In the end of each speech chunk wait for this time"
- " before separating it")
- nb_speech_pad_ms = gr.Number(label="Speech Padding (ms)", precision=0, value=vad_params["speech_pad_ms"],
- info="Final speech chunks are padded by this time each side")
+ info="Enable this to transcribe only detected voice parts by submodel.")
+ sd_threshold = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="Speech Threshold",
+ value=vad_params["threshold"],
+ info="Lower it to be more sensitive to small sounds.")
+ nb_min_speech_duration_ms = gr.Number(label="Minimum Speech Duration (ms)", precision=0,
+ value=vad_params["min_speech_duration_ms"],
+ info="Final speech chunks shorter than this time are thrown out")
+ nb_max_speech_duration_s = gr.Number(label="Maximum Speech Duration (s)",
+ value=vad_params["max_speech_duration_s"],
+ info="Maximum duration of speech chunks in \"seconds\".")
+ nb_min_silence_duration_ms = gr.Number(label="Minimum Silence Duration (ms)", precision=0,
+ value=vad_params["min_silence_duration_ms"],
+ info="In the end of each speech chunk wait for this time"
+ " before separating it")
+ nb_speech_pad_ms = gr.Number(label="Speech Padding (ms)", precision=0, value=vad_params["speech_pad_ms"],
+ info="Final speech chunks are padded by this time each side")

  #dd_model.change(fn=self.on_change_models, inputs=[dd_model], outputs=[cb_translate])
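
To make the net effect of the diff easier to see, here is a minimal, self-contained sketch of the reorganized layout: the placeholder "test"/"test2" accordions and the separate "Diarization options" accordion are replaced by a gr.Row holding the diarization checkbox, plus an "Advanced options" accordion that nests "Advanced diarization options" and "Advanced processing options". This is an illustration only, not the actual app.py: the stub parameter dictionaries, the hard-coded device choices, the placement of the HuggingFace token textbox (its nesting is not visible in the flattened diff), and the reduced set of controls are all assumptions made so the sketch runs on its own.

import gradio as gr

# Stub values standing in for the real whisper_params and diarization_params
# dictionaries used by app.py; they exist only so this sketch is runnable.
whisper_params = {"beam_size": 5, "log_prob_threshold": -1.0}
diarization_params = {"is_diarize": False, "hf_token": ""}

with gr.Blocks() as demo:
    with gr.Row():
        cb_diarize = gr.Checkbox(value=diarization_params["is_diarize"],
                                 label="Speaker diarization (HuggingFace token required)",
                                 interactive=True)
    # The token textbox is an unchanged context line in the diff, so its exact
    # nesting is not visible; placing it right after the row is an assumption.
    tb_hf_token = gr.Text(label="HuggingFace Token", value=diarization_params["hf_token"])

    with gr.Accordion("Advanced options", open=False, visible=True):
        with gr.Accordion("Advanced diarization options", open=False, visible=True):
            # app.py fills these choices from self.whisper_inf.diarizer; hard-coded here.
            dd_diarization_device = gr.Dropdown(label="Device",
                                                choices=["cpu", "cuda"],
                                                value="cpu")
        with gr.Accordion("Advanced processing options", open=False):
            # Two representative controls; the real accordion keeps all the decoding
            # options shown in the diff (beam size, thresholds, temperature, etc.).
            nb_beam_size = gr.Number(label="Beam Size",
                                     value=whisper_params["beam_size"],
                                     precision=0, interactive=True)
            nb_log_prob_threshold = gr.Number(label="Log Probability Threshold",
                                              value=whisper_params["log_prob_threshold"],
                                              interactive=True)

if __name__ == "__main__":
    demo.launch()

As the matching "-" and "+" lines show, the individual controls and their info strings are carried over unchanged; what the commit reworks is the container structure and the accordion titles. Because the extracted diff loses indentation, the exact nesting of the "Background Music Remover Filter" and "Voice Detection Filter" accordions should be confirmed against the full app.py.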