keess committed
Commit
e5983bf
1 Parent(s): 1d5d732

- add custom endpoint handler

Files changed (1): handler.py (+13 -13)
handler.py CHANGED
@@ -16,23 +16,23 @@ from datasets import Audio, Dataset
 
 class EndpointHandler():
 
-    model, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad',
-                                  model='silero_vad', force_reload=False, onnx=True)
+    # model, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad',
+    #                               model='silero_vad', force_reload=False, onnx=True)
 
-    (get_speech_timestamps,
-     _, read_audio,
-     *_) = utils
+    # (get_speech_timestamps,
+    #  _, read_audio,
+    #  *_) = utils
 
 
 
     def __init__(self, path=""):
         device = 0 if torch.cuda.is_available() else "cpu"
-        self.pipe = pipeline(
-            task="automatic-speech-recognition",
-            model="openai/whisper-large",
-            chunk_length_s=30,
-            device=device,
-        )
+        # self.pipe = pipeline(
+        #     task="automatic-speech-recognition",
+        #     model="openai/whisper-large",
+        #     # chunk_length_s=30,
+        #     device=device,
+        # )
         self.processor = WhisperProcessor.from_pretrained("openai/whisper-large")
         self.model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large")
         self.model.config.forced_decoder_ids = self.processor.get_decoder_prompt_ids(language="nl", task="transcribe")
@@ -59,10 +59,10 @@ class EndpointHandler():
         ds = pd.DataFrame(data, columns=['audio'])
         ds = Dataset.from_pandas(ds)
         # load dummy dataset and read soundfiles
-        ds = ds.cast_column("audio", Audio(sampling_rate=32_000))
+        ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
         input_speech = next(iter(ds))["audio"]["array"]
         input_features = self.processor(input_speech, return_tensors="pt").input_features
-        predicted_ids = self.model.generate(input_features)
+        predicted_ids = self.model.generate(input_features, forced_decoder_ids=self.model.config.forced_decoder_ids)
         transcription = self.processor.batch_decode(predicted_ids)
         print("this is the description")
         print(transcription)
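
For context, a custom handler like this follows the Hugging Face Inference Endpoints convention: the runtime instantiates EndpointHandler(path) once, then calls the instance with each request payload. The two substantive fixes in this commit line up with that: Whisper's feature extractor expects 16 kHz input, which the sampling_rate change restores, and passing forced_decoder_ids to generate() pins decoding to Dutch transcription. A minimal local smoke test might look like the sketch below; note that the payload shape ({"audio": [...]}) is an assumption, since __call__ itself is not part of this diff.

# Hypothetical smoke test for handler.py; the request payload shape is
# an assumption, not shown in this commit.
import numpy as np

from handler import EndpointHandler

handler = EndpointHandler(path="")

# One second of silent audio at 16 kHz, matching the
# Audio(sampling_rate=16_000) cast inside the handler.
dummy_audio = np.zeros(16_000, dtype=np.float32)

# Assumed shape: a dict whose "audio" value becomes the 'audio'
# column of the DataFrame built inside __call__.
result = handler({"audio": [dummy_audio]})
print(result)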