from typing import Dict

from transformers import WhisperProcessor, WhisperForConditionalGeneration
from transformers.pipelines.audio_utils import ffmpeg_read

SAMPLE_RATE = 16000


class EndpointHandler():
    def __init__(self, path=""):
        # load the Whisper model and its processor
        self.processor = WhisperProcessor.from_pretrained("openai/whisper-large-v2")
        self.model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v2")
        # force French transcription regardless of the language detected in the audio
        self.forced_decoder_ids = self.processor.get_decoder_prompt_ids(
            language="french", task="transcribe"
        )

    def __call__(self, data: Dict[str, bytes]) -> Dict[str, str]:
        """
        Args:
            data (:obj:`dict`): includes the raw audio file bytes under the "inputs" key
        Return:
            A :obj:`dict` with the transcription under the "text" key
        """
        # decode the raw bytes into a 16 kHz mono waveform
        inputs = data.pop("inputs", data)
        audio_nparray = ffmpeg_read(inputs, SAMPLE_RATE)

        # extract log-Mel input features for the model
        input_features = self.processor(
            audio_nparray, sampling_rate=SAMPLE_RATE, return_tensors="pt"
        ).input_features

        # run inference, then decode the predicted token ids back to text
        predicted_ids = self.model.generate(
            input_features, forced_decoder_ids=self.forced_decoder_ids
        )
        transcription = self.processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]

        return {"text": transcription}
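
# A minimal local smoke-test sketch for the handler above. The file name
# "sample.flac" is illustrative, not part of the handler contract; any audio
# format that ffmpeg can decode should work, since ffmpeg_read handles decoding.
#
# if __name__ == "__main__":
#     handler = EndpointHandler()
#     with open("sample.flac", "rb") as f:
#         payload = {"inputs": f.read()}
#     print(handler(payload))  # -> {"text": "..."}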