from typing import Dict

import torch
from transformers import WhisperProcessor, WhisperForConditionalGeneration
from transformers.pipelines.audio_utils import ffmpeg_read

SAMPLE_RATE = 16000


class EndpointHandler:
    def __init__(self, path=""):
        # load the model and processor
        self.processor = WhisperProcessor.from_pretrained("openai/whisper-large-v2")
        self.model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v2")
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model.to(self.device)
        # force Danish transcription regardless of the language Whisper detects
        self.forced_decoder_ids = self.processor.get_decoder_prompt_ids(language="danish", task="transcribe")

    def __call__(self, data: Dict[str, bytes]) -> Dict[str, str]:
        """
        Args:
            data (:obj:`dict`):
                includes the raw audio file content as bytes under the "inputs" key
        Return:
            A :obj:`dict` with the transcription under the "txt" key
        """
        # process input: decode the raw bytes into a 16 kHz mono float32 waveform
        inputs = data.pop("inputs", data)
        audio_nparray = ffmpeg_read(inputs, sampling_rate=SAMPLE_RATE)

        # convert the waveform into log-mel input features for Whisper
        input_features = self.processor(
            audio_nparray, sampling_rate=SAMPLE_RATE, return_tensors="pt"
        ).input_features.to(self.device)

        # run inference, forcing the Danish transcription prompt
        predicted_ids = self.model.generate(input_features, forced_decoder_ids=self.forced_decoder_ids)

        # postprocess the prediction: decode token ids into text
        transcription = self.processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
        return {"txt": transcription}
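
# --- Local smoke test (sketch) ---
# A minimal way to exercise the handler outside the Inference Endpoints
# runtime; the "sample.flac" path is hypothetical, so substitute any
# local audio file that ffmpeg can decode (wav, mp3, flac, ...).
if __name__ == "__main__":
    handler = EndpointHandler()

    with open("sample.flac", "rb") as f:
        audio_bytes = f.read()

    # the endpoint runtime delivers the request body under the "inputs" key
    print(handler({"inputs": audio_bytes})["txt"])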