sanchit-gandhi (HF staff) committed on
Commit b4928b3
1 Parent(s): 6d71260

use bare except

Files changed (1)
  1. app.py +5 -3
app.py CHANGED
@@ -18,7 +18,7 @@ title = "Whisper JAX: The Fastest Whisper API ⚡️"
 
 description = """Whisper JAX is an optimised implementation of the [Whisper model](https://huggingface.co/openai/whisper-large-v2) by OpenAI. It runs on JAX with a TPU v4-8 in the backend. Compared to PyTorch on an A100 GPU, it is over [**70x faster**](https://github.com/sanchit-gandhi/whisper-jax#benchmarks), making it the fastest Whisper API available.
 
-Note that at peak times, you may find yourself in the queue for this demo. When you submit a request, your queue position will be shown in the top right-hand side of the demo pane. Once you reach the front of the queue, your audio file will be transcribed, with the progress displayed through a progress bar.
+Note that at peak times, you may find yourself in the queue for this demo. When you submit a request, your queue position will be shown in the top right-hand side of the demo pane. Once you reach the front of the queue, your audio file will be sent to the TPU and then transcribed, with the progress displayed through a progress bar.
 
 To skip the queue, you may wish to create your own inference endpoint, details for which can be found in the [Whisper JAX repository](https://github.com/sanchit-gandhi/whisper-jax#creating-an-endpoint).
 """
@@ -101,7 +101,9 @@ if __name__ == "__main__":
 
         dataloader = processor.preprocess_batch(inputs, chunk_length_s=CHUNK_LENGTH_S, batch_size=BATCH_SIZE)
         progress(0, desc="Sending audio to TPU...")
-        batch_id = np.random.randint(1000000)  # TODO(SG): swap to an iterator
+        batch_id = np.random.randint(
+            1000000
+        )  # TODO(SG): swap to an iterator - currently taking our 1 in a million chances
         pool.map(partial(send_chunks, batch_id=batch_id), dataloader)
 
         model_outputs = []
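The TODO points at the real fix: `np.random.randint(1000000)` gives any two concurrent requests a roughly 1-in-a-million chance of colliding on the same `batch_id`, whereas an iterator hands out ids that can never repeat. A minimal sketch of that swap, assuming a process-wide `itertools.count` (the `send_chunks` stand-in and dataloader below are illustrative, not the app's real ones):

```python
import itertools
from concurrent.futures import ThreadPoolExecutor
from functools import partial

# Sketch of the TODO (an assumption, not the project's actual fix):
# a monotonically increasing counter yields a unique batch_id per request,
# removing the ~1-in-a-million collision risk of np.random.randint(1000000).
batch_ids = itertools.count()

def send_chunks(chunk, batch_id):
    # Hypothetical stand-in for app.py's send_chunks, which forwards a
    # chunk of audio to the TPU endpoint.
    print(f"batch {batch_id}: sending {chunk}")

dataloader = ["chunk-0", "chunk-1"]  # stand-in for the real preprocessed batches

with ThreadPoolExecutor() as pool:
    batch_id = next(batch_ids)  # unique per request; no random collisions
    list(pool.map(partial(send_chunks, batch_id=batch_id), dataloader))
```

In CPython, `next()` on an `itertools.count` is atomic enough for threaded use under the GIL; if requests were instead handled in separate processes, a shared counter (e.g. `multiprocessing.Value`) would be needed.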
@@ -154,7 +156,7 @@ if __name__ == "__main__":
     try:
         yt = pytube.YouTube(yt_url)
         stream = yt.streams.filter(only_audio=True)[0]
-    except KeyError:
+    except:
         raise gr.Error("An error occurred while loading the YouTube video. Please try again.")
 
     if stream.filesize_mb > max_filesize:
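The rationale matches the commit message: pytube can fail with more than a `KeyError` (network errors, extraction errors), so the handler is widened to catch everything. One caveat is that a bare `except:` also swallows `SystemExit` and `KeyboardInterrupt`. A minimal alternative sketch, assuming the same surrounding code, catches `Exception` instead, which still converts any pytube failure into a `gr.Error` while letting interpreter-level exits propagate (the helper name `load_audio_stream` is hypothetical):

```python
import gradio as gr
import pytube

def load_audio_stream(yt_url: str):
    # Same body as the diff above, but catching Exception rather than
    # using a bare except, so SystemExit and KeyboardInterrupt are not
    # silently converted into a user-facing error.
    try:
        yt = pytube.YouTube(yt_url)
        stream = yt.streams.filter(only_audio=True)[0]
    except Exception:
        raise gr.Error("An error occurred while loading the YouTube video. Please try again.")
    return stream
```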
 