from typing import Any, Dict
from subprocess import run

# install pyannote on the fly since it is incompatible with huggingface_hub > 0.9
run("pip install pyannote.audio==2.0.1", shell=True, check=True)

from pyannote.audio import Pipeline
from transformers.pipelines.audio_utils import ffmpeg_read
import torch

SAMPLE_RATE = 16000


class EndpointHandler:
    def __init__(self, path=""):
        # load the pretrained speaker-diarization pipeline
        self.pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization")

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Args:
            data (:obj:`dict`): includes the deserialized audio file as bytes under "inputs"
                and optional pipeline keyword arguments under "parameters".
        Return:
            A :obj:`dict` with a "diarization" key holding the speaker segments.
        """
        # process input
        inputs = data.pop("inputs", data)
        parameters = data.pop("parameters", None)  # e.g. min_speakers=2, max_speakers=5

        # prepare pyannote input
        audio_nparray = ffmpeg_read(inputs, SAMPLE_RATE)
        audio_tensor = torch.from_numpy(audio_nparray).unsqueeze(0)
        pyannote_input = {"waveform": audio_tensor, "sample_rate": SAMPLE_RATE}

        # apply pretrained pipeline, forwarding any kwargs passed in "parameters"
        if parameters is not None:
            diarization = self.pipeline(pyannote_input, **parameters)
        else:
            diarization = self.pipeline(pyannote_input)

        # postprocess the prediction into a JSON-serializable list of segments
        processed_diarization = [
            {"label": str(label), "start": str(segment.start), "stop": str(segment.end)}
            for segment, _, label in diarization.itertracks(yield_label=True)
        ]

        return {"diarization": processed_diarization}
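
# A minimal local-usage sketch (not part of the endpoint contract), assuming an
# audio file named "sample.wav" is available and ffmpeg is installed; the file
# name is illustrative only, and the min_speakers / max_speakers values mirror
# the example parameters mentioned in the handler above.
if __name__ == "__main__":
    handler = EndpointHandler()

    # read the raw audio bytes exactly as the endpoint would receive them
    with open("sample.wav", "rb") as f:
        audio_bytes = f.read()

    result = handler(
        {"inputs": audio_bytes, "parameters": {"min_speakers": 2, "max_speakers": 5}}
    )

    # each entry holds a speaker label plus start/stop times in seconds
    for segment in result["diarization"]:
        print(segment["label"], segment["start"], segment["stop"])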