Files changed (1)
  1. app.py +5 -5
app.py CHANGED
@@ -51,7 +51,7 @@ torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
 use_flash_attention_2 = is_flash_attn_2_available()
 
 model = AutoModelForSpeechSeq2Seq.from_pretrained(
-    "openai/whisper-small", torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True, use_flash_attention_2=use_flash_attention_2
+    "models/kanyekuthi/AfriSpeech-whisper-tiny", torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True, use_flash_attention_2=use_flash_attention_2
 )
 distilled_model = AutoModelForSpeechSeq2Seq.from_pretrained(
     "models/kanyekuthi/AfriSpeech-whisper-tiny", torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True, use_flash_attention_2=use_flash_attention_2
@@ -62,7 +62,7 @@ if not use_flash_attention_2:
     model = model.to_bettertransformer()
     distilled_model = distilled_model.to_bettertransformer()
 
-processor = AutoProcessor.from_pretrained("openai/whisper-small")
+processor = AutoProcessor.from_pretrained("models/kanyekuthi/AfriSpeech-whisper-tiny")
 
 model.to(device)
 distilled_model.to(device)
@@ -205,10 +205,10 @@ torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
 use_flash_attention_2 = is_flash_attn_2_available()
 
 model = AutoModelForSpeechSeq2Seq.from_pretrained(
-    "openai/whisper-small", torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True, use_flash_attention_2=use_flash_attention_2
+    "models/kanyekuthi/AfriSpeech-whisper-tiny", torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True, use_flash_attention_2=use_flash_attention_2
 )
 distilled_model = AutoModelForSpeechSeq2Seq.from_pretrained(
-    "distil-whisper/distil-small", torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True, use_flash_attention_2=use_flash_attention_2
+    "models/kanyekuthi/AfriSpeech-whisper-tiny", torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True, use_flash_attention_2=use_flash_attention_2
 )
 
 if not use_flash_attention_2:
@@ -216,7 +216,7 @@ if not use_flash_attention_2:
     model = model.to_bettertransformer()
     distilled_model = distilled_model.to_bettertransformer()
 
-processor = AutoProcessor.from_pretrained("openai/whisper-small")
+processor = AutoProcessor.from_pretrained("models/kanyekuthi/AfriSpeech-whisper-tiny")
 
 model.to(device)
 distilled_model.to(device)
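
For reference, the loading pattern that results from this change looks roughly like the sketch below. It is reconstructed from the hunks above, not copied from the full app.py: the imports and the device assignment are assumptions about the surrounding code, and to_bettertransformer() additionally requires the optimum package to be installed.

import torch
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor
from transformers.utils import is_flash_attn_2_available

# Assumed setup; the hunk headers only show the torch_dtype line.
device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
use_flash_attention_2 = is_flash_attn_2_available()

# After this change, the checkpoint points at the fine-tuned model
# instead of openai/whisper-small (and distil-whisper/distil-small
# for the distilled copy, loaded the same way).
model = AutoModelForSpeechSeq2Seq.from_pretrained(
    "models/kanyekuthi/AfriSpeech-whisper-tiny",
    torch_dtype=torch_dtype,
    low_cpu_mem_usage=True,
    use_safetensors=True,
    use_flash_attention_2=use_flash_attention_2,
)

if not use_flash_attention_2:
    # Fall back to BetterTransformer kernels when Flash Attention 2
    # is not available on this machine.
    model = model.to_bettertransformer()

# Loading the processor from the same checkpoint keeps the tokenizer
# and feature extractor consistent with the fine-tuned weights.
processor = AutoProcessor.from_pretrained("models/kanyekuthi/AfriSpeech-whisper-tiny")

model.to(device)

Pairing the processor with the model's own repository is the important part of the last hunk: if the fine-tuned checkpoint ships a modified tokenizer or feature extractor, a processor still loaded from openai/whisper-small would silently mismatch the new weights.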