init
- .DS_Store +0 -0
- experiment_cache/.DS_Store +0 -0
- experiment_cache/cluster/xlsr_se.expresso.speaker_id.csv +0 -3
- experiment_cache/cluster/xlsr_se.expresso.style.csv +0 -3
- experiment_cache/cluster/xlsr_se.voxceleb1-test-split.speaker_id.csv +0 -3
- experiment_cache/embeddings/xlsr_se.expresso.json +0 -3
- experiment_cache/embeddings/xlsr_se.voxceleb1-test-split.json +0 -3
- experiment_cache/figure/2d.latent_space.xlsr_se.expresso.speaker_id.png +0 -3
- experiment_cache/figure/2d.latent_space.xlsr_se.expresso.style.png +0 -3
- experiment_cache/figure/2d.latent_space.xlsr_se.voxceleb1-test-split.speaker_id.png +0 -3
- experiment_cache/tsne/xlsr_se.expresso.speaker_id.npy +0 -3
- experiment_cache/tsne/xlsr_se.expresso.style.npy +0 -3
- experiment_cache/tsne/xlsr_se.voxceleb1-test-split.speaker_id.npy +0 -3
- experiment_speaker_verification.py +32 -20
- model_clap.py +2 -2
- model_hubert.py +34 -0
- model_meta_voice.py +1 -1
- model_pyannote_embedding.py +1 -1
- model_w2v_bert.py +1 -1
- model_xls.py +2 -2
- test.py +16 -13
.DS_Store
ADDED
Binary file (6.15 kB)
experiment_cache/.DS_Store
CHANGED
Binary files a/experiment_cache/.DS_Store and b/experiment_cache/.DS_Store differ
experiment_cache/cluster/xlsr_se.expresso.speaker_id.csv
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:2342672f92b65d5671dfb487b78bc5e9cdba199026fa97bcadd778a64a80a210
-size 297705
experiment_cache/cluster/xlsr_se.expresso.style.csv
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ffbf6f8cae2bae0a5175f9f87f4043f34ac672325a96410c472ec54518cec974
-size 331118
experiment_cache/cluster/xlsr_se.voxceleb1-test-split.speaker_id.csv
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:cc2b92ab66dd62bda0ccd046ec27ea1c738acfe718adbc748b289f6cab1dc630
-size 184308
experiment_cache/embeddings/xlsr_se.expresso.json
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:c4c78cadd070fab6f469defae141507f0307dbb4f59990757c533631fd6addb8
-size 192055814
experiment_cache/embeddings/xlsr_se.voxceleb1-test-split.json
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5e34ad588ede7c38ac3591c97e1fc9a8057ca46f61a83d1ad2806601acc775e8
-size 80340078
experiment_cache/figure/2d.latent_space.xlsr_se.expresso.speaker_id.png
DELETED
Git LFS Details
experiment_cache/figure/2d.latent_space.xlsr_se.expresso.style.png
DELETED
Git LFS Details
experiment_cache/figure/2d.latent_space.xlsr_se.voxceleb1-test-split.speaker_id.png
DELETED
Git LFS Details
experiment_cache/tsne/xlsr_se.expresso.speaker_id.npy
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3eb6fcb69c1efb925dabce36b1aefa24f4a87a66029336257d64300590646ba6
-size 93048
experiment_cache/tsne/xlsr_se.expresso.style.npy
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3eb6fcb69c1efb925dabce36b1aefa24f4a87a66029336257d64300590646ba6
-size 93048
experiment_cache/tsne/xlsr_se.voxceleb1-test-split.speaker_id.npy
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e165cffb996e8df023cb1c2a0f4d7173a89b62acdb8b65b9ee33406855248322
-size 39120
experiment_speaker_verification.py
CHANGED
@@ -14,11 +14,13 @@ from sklearn.manifold import TSNE
 import pandas as pd
 from datasets import load_dataset
 
-from model_meta_voice import
-from model_pyannote_embedding import
-from model_w2v_bert import
-from model_clap import
-from model_xls import
+from model_meta_voice import MetaVoiceEmbedding
+from model_pyannote_embedding import PyannoteEmbedding
+from model_w2v_bert import W2VBERTEmbedding
+from model_clap import CLAPEmbedding, CLAPGeneralEmbedding
+from model_xls import XLSREmbedding
+from model_hubert import HuBERTXLEmbedding, HuBERTLargeEmbedding
+
 
 
 def get_embedding(model_class, model_name: str, dataset_name: str, data_split: str):
@@ -116,19 +118,23 @@ def analyze_embedding(model_name: str, dataset_name: str, n_shot: int = 5, n_cro
 
 
 if __name__ == '__main__':
-    # get_embedding(
-    # get_embedding(
-    # get_embedding(
-    # get_embedding(
-    # get_embedding(
-    get_embedding(
-
-
-
-    # get_embedding(
-    # get_embedding(
-    # get_embedding(
-    get_embedding(
+    # get_embedding(MetaVoiceEmbedding, "meta_voice_se", "asahi417/voxceleb1-test-split", "test")
+    # get_embedding(PyannoteEmbedding, "pyannote_se", "asahi417/voxceleb1-test-split", "test")
+    # get_embedding(W2VBERTEmbedding, "w2v_bert_se", "asahi417/voxceleb1-test-split", "test")
+    # get_embedding(CLAPEmbedding, "clap_se", "asahi417/voxceleb1-test-split", "test")
+    # get_embedding(CLAPGeneralEmbedding, "clap_general_se", "asahi417/voxceleb1-test-split", "test")
+    get_embedding(XLSREmbedding, "xlsr_se", "asahi417/voxceleb1-test-split", "test")
+    get_embedding(HuBERTLargeEmbedding, "hubert_large_se", "asahi417/voxceleb1-test-split", "test")
+    get_embedding(HuBERTXLEmbedding, "hubert_xl_se", "asahi417/voxceleb1-test-split", "test")
+
+    # get_embedding(MetaVoiceEmbedding, "meta_voice_se", "ylacombe/expresso", "train")
+    # get_embedding(PyannoteEmbedding, "pyannote_se", "ylacombe/expresso", "train")
+    # get_embedding(W2VBERTEmbedding, "w2v_bert_se", "ylacombe/expresso", "train")
+    # get_embedding(CLAPEmbedding, "clap_se", "ylacombe/expresso", "train")
+    # get_embedding(CLAPGeneralEmbedding, "clap_general_se", "ylacombe/expresso", "train")
+    get_embedding(XLSREmbedding, "xlsr_se", "ylacombe/expresso", "train")
+    get_embedding(HuBERTLargeEmbedding, "hubert_large_se", "ylacombe/expresso", "train")
+    get_embedding(HuBERTXLEmbedding, "hubert_xl_se", "ylacombe/expresso", "train")
 
     # cluster_embedding("meta_voice_se", "asahi417/voxceleb1-test-split", "speaker_id")
     # cluster_embedding("pyannote_se", "asahi417/voxceleb1-test-split", "speaker_id")
@@ -136,20 +142,26 @@ if __name__ == '__main__':
     # cluster_embedding("clap_se", "asahi417/voxceleb1-test-split", "speaker_id")
    # cluster_embedding("clap_general_se", "asahi417/voxceleb1-test-split", "speaker_id")
     cluster_embedding("xlsr_se", "asahi417/voxceleb1-test-split", "speaker_id")
-
+    cluster_embedding("hubert_large_se", "asahi417/voxceleb1-test-split", "speaker_id")
+    cluster_embedding("hubert_xl_se", "asahi417/voxceleb1-test-split", "speaker_id")
+
     # cluster_embedding("meta_voice_se", "ylacombe/expresso", "speaker_id")
     # cluster_embedding("pyannote_se", "ylacombe/expresso", "speaker_id")
     # cluster_embedding("w2v_bert_se", "ylacombe/expresso", "speaker_id")
     # cluster_embedding("clap_se", "ylacombe/expresso", "speaker_id")
     # cluster_embedding("clap_general_se", "ylacombe/expresso", "speaker_id")
     cluster_embedding("xlsr_se", "ylacombe/expresso", "speaker_id")
+    cluster_embedding("hubert_large_se", "ylacombe/expresso", "speaker_id")
+    cluster_embedding("hubert_xl_se", "ylacombe/expresso", "speaker_id")
+
     # cluster_embedding("meta_voice_se", "ylacombe/expresso", "style")
     # cluster_embedding("pyannote_se", "ylacombe/expresso", "style")
     # cluster_embedding("w2v_bert_se", "ylacombe/expresso", "style")
     # cluster_embedding("clap_se", "ylacombe/expresso", "style")
     # cluster_embedding("clap_general_se", "ylacombe/expresso", "style")
     cluster_embedding("xlsr_se", "ylacombe/expresso", "style")
+    cluster_embedding("hubert_large_se", "ylacombe/expresso", "style")
+    cluster_embedding("hubert_xl_se", "ylacombe/expresso", "style")
 
 
 
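Note: the bodies of get_embedding and cluster_embedding are outside this diff, so the following is only a minimal sketch of how the caching convention implied by the call sites above and by the deleted experiment_cache/ files (embeddings/<model>.<dataset>.json, tsne/<model>.<dataset>.<label>.npy) could be implemented. The audio column name, JSON layout, and t-SNE settings are assumptions, not the repository's actual code.

# Hypothetical sketch only; mirrors the call signatures and cache paths seen in this commit.
import json
import os

import numpy as np
from datasets import load_dataset
from sklearn.manifold import TSNE


def get_embedding(model_class, model_name: str, dataset_name: str, data_split: str):
    # e.g. experiment_cache/embeddings/xlsr_se.expresso.json
    cache_file = f"experiment_cache/embeddings/{model_name}.{os.path.basename(dataset_name)}.json"
    os.makedirs(os.path.dirname(cache_file), exist_ok=True)
    model = model_class()
    dataset = load_dataset(dataset_name, split=data_split)
    embeddings = [
        model.get_speaker_embedding(d["audio"]["array"], d["audio"]["sampling_rate"]).tolist()
        for d in dataset
    ]
    with open(cache_file, "w") as f:
        json.dump(embeddings, f)


def cluster_embedding(model_name: str, dataset_name: str, label: str):
    # Reduce the cached embeddings to 2D, as in the deleted tsne/ and figure/ artifacts.
    cache_file = f"experiment_cache/embeddings/{model_name}.{os.path.basename(dataset_name)}.json"
    with open(cache_file) as f:
        embeddings = np.array(json.load(f))
    points_2d = TSNE(n_components=2).fit_transform(embeddings)
    out_file = f"experiment_cache/tsne/{model_name}.{os.path.basename(dataset_name)}.{label}.npy"
    os.makedirs(os.path.dirname(out_file), exist_ok=True)
    np.save(out_file, points_2d)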
model_clap.py
CHANGED
@@ -10,7 +10,7 @@ import numpy as np
 from transformers import ClapModel, ClapProcessor
 
 
-class
+class CLAPEmbedding:
     def __init__(self, ckpt: str = "laion/larger_clap_music_and_speech"):
         self.model = ClapModel.from_pretrained(ckpt)
         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -29,7 +29,7 @@ class ClapSE:
         return outputs.cpu().numpy()[0]
 
 
-class
+class CLAPGeneralEmbedding(CLAPEmbedding):
 
     def __init__(self):
         super().__init__(ckpt="laion/larger_clap_general")
model_hubert.py
ADDED
@@ -0,0 +1,34 @@
+"""Meta's HuBERT based speaker embedding.
+- feature dimension: 1024
+- source: https://huggingface.co/facebook/hubert-large-ll60k
+"""
+from typing import Optional
+
+import torch
+import librosa
+import numpy as np
+from transformers import AutoFeatureExtractor, AutoModel
+
+
+class HuBERTXLEmbedding:
+
+    def __init__(self, ckpt: str = "facebook/hubert-xlarge-ll60k"):
+        self.processor = AutoFeatureExtractor.from_pretrained(ckpt)
+        self.model = AutoModel.from_pretrained(ckpt)
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.model.to(self.device)
+        self.model.eval()
+
+    def get_speaker_embedding(self, wav: np.ndarray, sampling_rate: Optional[int] = None) -> np.ndarray:
+        # audio file is decoded on the fly
+        if sampling_rate != self.processor.sampling_rate:
+            wav = librosa.resample(wav, orig_sr=sampling_rate, target_sr=self.processor.sampling_rate)
+        inputs = self.processor(wav, sampling_rate=self.processor.sampling_rate, return_tensors="pt")
+        with torch.no_grad():
+            outputs = self.model(**{k: v.to(self.device) for k, v in inputs.items()})
+        return outputs.last_hidden_state.mean(1).cpu().numpy()[0]
+
+
+class HuBERTLargeEmbedding(HuBERTXLEmbedding):
+    def __init__(self):
+        super().__init__("facebook/hubert-large-ll60k")
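The new HuBERTXLEmbedding mean-pools the encoder's last hidden state over time to produce one utterance-level vector, and HuBERTLargeEmbedding only swaps in the facebook/hubert-large-ll60k checkpoint. Note that sampling_rate defaults to None but is compared against processor.sampling_rate, so callers should always pass the real source rate, as test.py does. A minimal usage sketch, assuming a local sample.wav as in test.py:

import librosa

from model_hubert import HuBERTLargeEmbedding, HuBERTXLEmbedding

# librosa returns a mono float waveform plus its sampling rate; the class
# resamples to the feature extractor's expected rate internally.
wav, sr = librosa.load("sample.wav")

model = HuBERTLargeEmbedding()  # facebook/hubert-large-ll60k
print(model.get_speaker_embedding(wav, sr).shape)

model = HuBERTXLEmbedding()     # default ckpt: facebook/hubert-xlarge-ll60k
print(model.get_speaker_embedding(wav, sr).shape)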
model_meta_voice.py
CHANGED
@@ -25,7 +25,7 @@ def wget(url: str, output_file: Optional[str] = None):
         raise ValueError(f"failed to download {url}")
 
 
-class
+class MetaVoiceEmbedding(nn.Module):
 
     mel_window_length = 25
     mel_window_step = 10
model_pyannote_embedding.py
CHANGED
@@ -11,7 +11,7 @@ from pyannote.audio import Inference
 from pyannote.audio.core.inference import fix_reproducibility, map_with_specifications
 
 
-class
+class PyannoteEmbedding:
 
     def __init__(self):
         model = Model.from_pretrained("pyannote/embedding")
model_w2v_bert.py
CHANGED
@@ -10,7 +10,7 @@ import numpy as np
 from transformers import Wav2Vec2BertModel, AutoFeatureExtractor
 
 
-class
+class W2VBERTEmbedding:
     def __init__(self):
         self.processor = AutoFeatureExtractor.from_pretrained("facebook/w2v-bert-2.0")
         self.model = Wav2Vec2BertModel.from_pretrained("facebook/w2v-bert-2.0")
model_xls.py
CHANGED
@@ -1,6 +1,7 @@
 """Meta's XLS-R based speaker embedding.
 - feature dimension: 768
 - source: https://huggingface.co/facebook/wav2vec2-large-xlsr-53
+https://huggingface.co/docs/transformers/en/model_doc/wav2vec2#transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput
 """
 from typing import Optional
 
@@ -10,7 +11,7 @@ import numpy as np
 from transformers import AutoFeatureExtractor, AutoModelForPreTraining
 
 
-class
+class XLSREmbedding:
     def __init__(self):
         self.processor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-large-xlsr-53")
         self.model = AutoModelForPreTraining.from_pretrained("facebook/wav2vec2-large-xlsr-53")
@@ -26,4 +27,3 @@ class XLSRSE:
         with torch.no_grad():
             outputs = self.model(**{k: v.to(self.device) for k, v in inputs.items()})
         return outputs.projected_states.mean(1).cpu().numpy()[0]
-        # return outputs.projected_quantized_states.mean(1).cpu().numpy()[0]
test.py
CHANGED
@@ -1,36 +1,39 @@
 import librosa
-from model_clap import
-from model_meta_voice import
-from model_pyannote_embedding import
-from model_w2v_bert import
-from model_xls import
+from model_clap import CLAPEmbedding
+from model_meta_voice import MetaVoiceEmbedding
+from model_pyannote_embedding import PyannoteEmbedding
+from model_w2v_bert import W2VBERTEmbedding
+from model_xls import XLSREmbedding
+from model_hubert import HuBERTXLEmbedding
 
 
 def test():
     wav, sr = librosa.load("sample.wav")
     print("XLS-R")
-    model =
-    v = model.get_speaker_embedding(wav, sr)
-    print(v.shape)
-    model = ClapSE()
+    model = XLSREmbedding()
     v = model.get_speaker_embedding(wav, sr)
     print(v.shape)
     print("CLAP")
-    model =
+    model = CLAPEmbedding()
     v = model.get_speaker_embedding(wav, sr)
     print(v.shape)
     print("MetaVoiceSE")
-    model =
+    model = MetaVoiceEmbedding()
     v = model.get_speaker_embedding(wav, sr)
     print(v.shape)
     print("PyannoteSE")
-    model =
+    model = PyannoteEmbedding()
     v = model.get_speaker_embedding(wav, sr)
     print(v.shape)
     print("W2VBertSE")
-    model =
+    model = W2VBERTEmbedding()
     v = model.get_speaker_embedding(wav, sr)
     print(v.shape)
+    print("huBERT")
+    model = HuBERTXLEmbedding()
+    v = model.get_speaker_embedding(wav, sr)
+    print(v.shape)
+
 
 
 if __name__ == '__main__':
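test.py only checks the embedding shapes; for the speaker-verification experiment these vectors would typically be compared with cosine similarity. A small illustration (the two wav paths are placeholders, and cosine scoring is a common choice rather than something this commit prescribes):

import librosa
import numpy as np

from model_xls import XLSREmbedding

model = XLSREmbedding()


def embed(path: str) -> np.ndarray:
    wav, sr = librosa.load(path)
    return model.get_speaker_embedding(wav, sr)


a = embed("speaker_a.wav")  # placeholder paths
b = embed("speaker_b.wav")
# Cosine similarity: values near 1 suggest the same speaker, lower values a different one.
score = float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
print(score)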