S-Fry committed
Commit ea421c5 (1 parent: e9716f0)

Update handler.py

Files changed (1): handler.py (+8, -16)
handler.py CHANGED

@@ -1,8 +1,7 @@
-from typing import Dict
+from typing import Dict
 from transformers import WhisperProcessor, WhisperForConditionalGeneration
-from transformers.pipelines.audio_utils import ffmpeg_read
-#from datasets import load_dataset
-
+from transformers.pipelines.audio import AudioClassificationPipeline
+from datasets import load_dataset
 import torch
 
 SAMPLE_RATE = 16000
@@ -10,11 +9,10 @@ SAMPLE_RATE = 16000
 class EndpointHandler():
     def __init__(self, path=""):
         # load the model
-        #self.model = whisper.load_model("medium")
         self.processor = WhisperProcessor.from_pretrained("openai/whisper-large-v2")
         self.model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v2")
-        self.forced_decoder_ids = processor.get_decoder_prompt_ids(language="french", task="transcribe")
-
+        self.classifier = AudioClassificationPipeline(model=self.model, processor=self.processor, device=0)
+        self.forced_decoder_ids = self.processor.get_decoder_prompt_ids(language="Danish", task="transcribe")
 
     def __call__(self, data: Dict[str, bytes]) -> Dict[str, str]:
         """
@@ -26,17 +24,11 @@
         """
         # process input
         inputs = data.pop("inputs", data)
-        audio_nparray = ffmpeg_read(inputs, SAMPLE_RATE)
+        audio_nparray = ffmpeg_read(inputs, sample_rate=SAMPLE_RATE)
         audio_tensor= torch.from_numpy(audio_nparray)
 
-        #ds = load_dataset("common_voice", "fr", split="test", streaming=True)
-        #ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
-        #input_speech = next(iter(ds))["audio"]
-        #input_features = processor(input_speech["array"], sampling_rate=input_speech["sampling_rate"], return_tensors="pt").input_features
-
-
         # run inference pipeline
-        result = self.model.transcribe(audio_nparray)
+        result = self.classifier(audio_nparray)
 
         # postprocess the prediction
-        return {"text": result["text"]}
+        return {"txt": result[0]["transcription"]}