taham655 committed on
Commit
6fffbf7
1 Parent(s): ae30e6a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -22
app.py CHANGED
@@ -9,33 +9,34 @@ from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
9
  # Initialize the model outside the main app function to load it only once
10
 
11
 
12
- device = "cuda:0" if torch.cuda.is_available() else "cpu"
13
- torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
14
 
15
- model_id = "distil-whisper/distil-large-v2"
16
-
17
- model = AutoModelForSpeechSeq2Seq.from_pretrained(
18
- model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
19
- )
20
- model.to(device)
21
-
22
- processor = AutoProcessor.from_pretrained(model_id)
23
-
24
- pipe = pipeline(
25
- "automatic-speech-recognition",
26
- model=model,
27
- tokenizer=processor.tokenizer,
28
- feature_extractor=processor.feature_extractor,
29
- max_new_tokens=128,
30
- chunk_length_s=15,
31
- batch_size=16,
32
- torch_dtype=torch_dtype,
33
- device=device,
34
- )
35
 
36
 
37
 
38
  def transcribe_audio(audio_file):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
  # Save the audio file to a temporary file
40
  with open("temp_audio_file", "wb") as f:
41
  f.write(audio_file.getbuffer())
 
9
  # Initialize the model outside the main app function to load it only once
10
 
11
 
 
 
12
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
 
14
 
15
 
16
  def transcribe_audio(audio_file):
17
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
18
+ torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
19
+
20
+ model_id = "distil-whisper/distil-large-v2"
21
+
22
+ model = AutoModelForSpeechSeq2Seq.from_pretrained(
23
+ model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
24
+ )
25
+ model.to(device)
26
+
27
+ processor = AutoProcessor.from_pretrained(model_id)
28
+
29
+ pipe = pipeline(
30
+ "automatic-speech-recognition",
31
+ model=model,
32
+ tokenizer=processor.tokenizer,
33
+ feature_extractor=processor.feature_extractor,
34
+ max_new_tokens=128,
35
+ chunk_length_s=15,
36
+ batch_size=16,
37
+ torch_dtype=torch_dtype,
38
+ device=device,
39
+ )
40
  # Save the audio file to a temporary file
41
  with open("temp_audio_file", "wb") as f:
42
  f.write(audio_file.getbuffer())