ataiii committed
Commit 032e68c
1 Parent(s): c58448f

Upload handler.py with huggingface_hub

Files changed (1)
  1. handler.py +47 -0
handler.py ADDED
@@ -0,0 +1,47 @@
+ from typing import Any, Dict
+
+ import torch
+ from pyannote.audio import Pipeline
+ from transformers.pipelines.audio_utils import ffmpeg_read
+
+ SAMPLE_RATE = 16000
+
+
+ class EndpointHandler:
+     def __init__(self, path=""):
+         # load the pretrained speaker-diarization pipeline once at startup
+         self.pipeline = Pipeline.from_pretrained("philschmid/pyannote-speaker-diarization-endpoint")
+
+     def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
+         """
+         Args:
+             data (:obj:`dict`):
+                 includes the deserialized audio file as bytes under "inputs"
+                 and optional pipeline kwargs under "parameters"
+         Return:
+             A :obj:`dict` containing the diarization segments
+         """
+         # process input
+         inputs = data.pop("inputs", data)
+         parameters = data.pop("parameters", None)  # e.g. min_speakers=2, max_speakers=5
+
+         # prepare pyannote input: decode the raw bytes into a mono waveform tensor
+         audio_nparray = ffmpeg_read(inputs, SAMPLE_RATE)
+         audio_tensor = torch.from_numpy(audio_nparray).unsqueeze(0)
+         pyannote_input = {"waveform": audio_tensor, "sample_rate": SAMPLE_RATE}
+
+         # apply pretrained pipeline, forwarding any kwargs from the request
+         if parameters is not None:
+             diarization = self.pipeline(pyannote_input, **parameters)
+         else:
+             diarization = self.pipeline(pyannote_input)
+
+         # postprocess the prediction into JSON-serializable segments
+         processed_diarization = [
+             {"label": str(label), "start": str(segment.start), "stop": str(segment.end)}
+             for segment, _, label in diarization.itertracks(yield_label=True)
+         ]
+
+         return {"diarization": processed_diarization}
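
For local testing, the handler can be exercised directly before deployment. A minimal sketch, not part of the committed file, assuming a local audio file named sample.wav (the filename is hypothetical), an installed ffmpeg binary, and access to the pyannote model above:

# local smoke test for EndpointHandler -- a sketch, not the committed code
# assumes: "sample.wav" exists (hypothetical name), ffmpeg is installed,
# and the pyannote model can be downloaded (it may require authentication)
handler = EndpointHandler()

with open("sample.wav", "rb") as f:
    audio_bytes = f.read()

# "parameters" is optional; its kwargs are forwarded to the pyannote pipeline
payload = {"inputs": audio_bytes, "parameters": {"min_speakers": 2, "max_speakers": 5}}
result = handler(payload)

for segment in result["diarization"]:
    print(segment["label"], segment["start"], segment["stop"])

The data.pop("inputs", data) fallback lets the same handler accept either a dict payload like the one above or the raw deserialized bytes directly.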