init
Browse files
model_pyannote_embedding.py
CHANGED
@@ -14,11 +14,14 @@ from pyannote.audio.core.inference import fix_reproducibility, map_with_specific
|
|
14 |
class PyannoteSE:
|
15 |
|
16 |
def __init__(self):
|
17 |
-
|
18 |
-
self.
|
|
|
|
|
|
|
19 |
|
20 |
def get_speaker_embedding(self, wav: np.ndarray, sampling_rate: Optional[int] = None) -> np.ndarray:
|
21 |
-
wav = torch.as_tensor(wav.reshape(1, -1))
|
22 |
fix_reproducibility(self.inference.device)
|
23 |
if self.inference.window == "sliding":
|
24 |
return self.inference.slide(wav, sampling_rate, hook=None)
|
@@ -29,5 +32,5 @@ class PyannoteSE:
|
|
29 |
return outputs[0]
|
30 |
|
31 |
return map_with_specifications(
|
32 |
-
self.model.specifications, __first_sample, outputs
|
33 |
)
|
|
|
14 |
class PyannoteSE:
|
15 |
|
16 |
def __init__(self):
    """Load the pretrained pyannote speaker-embedding model.

    The model is moved to the best available device (CUDA when present,
    otherwise CPU), switched to eval mode, and wrapped in a whole-window
    ``Inference`` object stored on ``self.inference``.
    """
    # NOTE(review): Model.from_pretrained may need a HuggingFace auth
    # token depending on the pyannote release — confirm in deployment.
    embedding_model = Model.from_pretrained("pyannote/embedding")

    # Prefer GPU when one is present; fall back to CPU.
    if torch.cuda.is_available():
        self.device = torch.device("cuda")
    else:
        self.device = torch.device("cpu")

    embedding_model.to(self.device)
    # eval() disables dropout / freezes batch-norm statistics so
    # repeated calls yield deterministic embeddings.
    embedding_model.eval()

    # window="whole": one embedding per full input waveform.
    self.inference = Inference(embedding_model, window="whole")
|
22 |
|
23 |
# NOTE(review): rendered diff of the updated get_speaker_embedding. It
# converts the 1-D numpy waveform to a (1, -1) torch tensor on
# self.device, then delegates to the pyannote Inference object.
def get_speaker_embedding(self, wav: np.ndarray, sampling_rate: Optional[int] = None) -> np.ndarray:
|
24 |
+
wav = torch.as_tensor(wav.reshape(1, -1)).to(self.device)
|
25 |
fix_reproducibility(self.inference.device)
|
26 |
if self.inference.window == "sliding":
|
27 |
return self.inference.slide(wav, sampling_rate, hook=None)
|
# NOTE(review): diff hunk gap — file lines 28-31 are not shown here.
# `outputs` and the nested `__first_sample` used below are presumably
# defined in those hidden lines (pyannote's whole-window infer path) —
# do not reconstruct this method without the full file. TODO confirm.
|
|
32 |
return outputs[0]
|
33 |
|
34 |
return map_with_specifications(
|
35 |
+
# NOTE(review): changed from `self.model.specifications` to go through
# the Inference wrapper; `__first_sample` comes from the hidden lines.
self.inference.model.specifications, __first_sample, outputs
|
36 |
)
|