asahi417 committed
Commit fe9e72f
Parent: 2d28075
Files changed (3):
  1. experiment_speaker_verification.py +16 -7
  2. model_clap.py +29 -0
  3. test.py +15 -10
experiment_speaker_verification.py CHANGED
@@ -17,6 +17,7 @@ from datasets import load_dataset
 from model_meta_voice import MetaVoiceSE
 from model_pyannote_embedding import PyannoteSE
 from model_w2v_bert import W2VBertSE
+from model_clap import ClapSE


 def get_embedding(model_class, model_name: str, dataset_name: str, data_split: str):
@@ -114,22 +115,30 @@ def analyze_embedding(model_name: str, dataset_name: str, n_shot: int = 5, n_cro


 if __name__ == '__main__':
-    # cache embedding
-    # get_embedding(MetaVoiceSE, "meta_voice_se", "asahi417/voxceleb1-test-split", "test")
-    # get_embedding(PyannoteSE, "pyannote_se", "asahi417/voxceleb1-test-split", "test")
-    # get_embedding(W2VBertSE, "w2v_bert_se", "asahi417/voxceleb1-test-split", "test")
-    # get_embedding(MetaVoiceSE, "meta_voice_se", "ylacombe/expresso", "train")
-    # get_embedding(PyannoteSE, "pyannote_se", "ylacombe/expresso", "train")
-    # get_embedding(W2VBertSE, "w2v_bert_se", "ylacombe/expresso", "train")
+    get_embedding(MetaVoiceSE, "meta_voice_se", "asahi417/voxceleb1-test-split", "test")
+    get_embedding(PyannoteSE, "pyannote_se", "asahi417/voxceleb1-test-split", "test")
+    get_embedding(W2VBertSE, "w2v_bert_se", "asahi417/voxceleb1-test-split", "test")
+    get_embedding(ClapSE, "clap_se", "asahi417/voxceleb1-test-split", "test")
+
+    get_embedding(MetaVoiceSE, "meta_voice_se", "ylacombe/expresso", "train")
+    get_embedding(PyannoteSE, "pyannote_se", "ylacombe/expresso", "train")
+    get_embedding(W2VBertSE, "w2v_bert_se", "ylacombe/expresso", "train")
+    get_embedding(ClapSE, "clap_se", "ylacombe/expresso", "train")
+
     cluster_embedding("meta_voice_se", "asahi417/voxceleb1-test-split", "speaker_id")
     cluster_embedding("pyannote_se", "asahi417/voxceleb1-test-split", "speaker_id")
     cluster_embedding("w2v_bert_se", "asahi417/voxceleb1-test-split", "speaker_id")
+    cluster_embedding("clap_se", "asahi417/voxceleb1-test-split", "speaker_id")
+
     cluster_embedding("meta_voice_se", "ylacombe/expresso", "speaker_id")
     cluster_embedding("pyannote_se", "ylacombe/expresso", "speaker_id")
     cluster_embedding("w2v_bert_se", "ylacombe/expresso", "speaker_id")
+    cluster_embedding("clap_se", "ylacombe/expresso", "speaker_id")
+
     cluster_embedding("meta_voice_se", "ylacombe/expresso", "style")
     cluster_embedding("pyannote_se", "ylacombe/expresso", "style")
     cluster_embedding("w2v_bert_se", "ylacombe/expresso", "style")
+    cluster_embedding("clap_se", "ylacombe/expresso", "style")
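The driver script treats every embedding backend uniformly: the class is handed to get_embedding for caching and its string name is handed to cluster_embedding. The sketch below spells out the interface a backend appears to need in order to be added the same way ClapSE is here; it is an illustration only, and the names MyNewSE / "my_new_se" are hypothetical placeholders, not part of this commit.

from typing import Optional

import numpy as np


class MyNewSE:
    """Hypothetical backend, mirroring the shape of MetaVoiceSE, PyannoteSE, W2VBertSE and ClapSE."""

    def __init__(self):
        # constructed with no arguments by get_embedding; load the pretrained model here
        ...

    def get_speaker_embedding(self, wav: np.ndarray, sampling_rate: Optional[int] = None) -> np.ndarray:
        # return a fixed-size 1-D embedding for a single utterance
        ...


# Registration then mirrors the ClapSE lines added in __main__ above, e.g.:
# get_embedding(MyNewSE, "my_new_se", "asahi417/voxceleb1-test-split", "test")
# cluster_embedding("my_new_se", "asahi417/voxceleb1-test-split", "speaker_id")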
model_clap.py ADDED
@@ -0,0 +1,29 @@
+"""CLAP embedding.
+- feature dimension: 512
+- source: https://huggingface.co/laion/larger_clap_music_and_speech
+"""
+from typing import Optional
+
+import torch
+import librosa
+import numpy as np
+from transformers import ClapModel, ClapProcessor
+
+
+class ClapSE:
+    def __init__(self):
+        self.model = ClapModel.from_pretrained("laion/larger_clap_music_and_speech")
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.model.to(self.device)
+        self.model.eval()
+        self.processor = ClapProcessor.from_pretrained("laion/larger_clap_music_and_speech")
+
+    def get_speaker_embedding(self, wav: np.ndarray, sampling_rate: Optional[int] = None) -> np.ndarray:
+        if sampling_rate != self.processor.feature_extractor.sampling_rate:
+            wav = librosa.resample(wav, orig_sr=sampling_rate, target_sr=self.processor.feature_extractor.sampling_rate)
+        inputs = self.processor(
+            audios=wav, sampling_rate=self.processor.feature_extractor.sampling_rate, return_tensors="pt"
+        )
+        with torch.no_grad():
+            outputs = self.model.get_audio_features(**{k: v.to(self.device) for k, v in inputs.items()})
+        return outputs.cpu().numpy()[0]
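For reference, a minimal usage sketch of the new ClapSE class, assuming a local sample.wav exactly as the updated test.py below does; the printed shape should be (512,), matching the feature dimension stated in the module docstring.

import librosa

from model_clap import ClapSE

# librosa.load returns mono float32 audio plus its sampling rate
wav, sr = librosa.load("sample.wav")

model = ClapSE()  # downloads laion/larger_clap_music_and_speech on first use
v = model.get_speaker_embedding(wav, sr)  # resampled internally to the CLAP processor's sampling rate
print(v.shape)  # expected: (512,)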
test.py CHANGED
@@ -1,4 +1,5 @@
 import librosa
+from model_clap import ClapSE
 from model_meta_voice import MetaVoiceSE
 from model_pyannote_embedding import PyannoteSE
 from model_w2v_bert import W2VBertSE
@@ -6,18 +7,22 @@ from model_w2v_bert import W2VBertSE

 def test():
     wav, sr = librosa.load("sample.wav")
-    print("MetaVoiceSE")
-    model = MetaVoiceSE()
-    v = model.get_speaker_embedding(wav, sr)
-    print(v.shape)
-    print("PyannoteSE")
-    model = PyannoteSE()
-    v = model.get_speaker_embedding(wav, sr)
-    print(v.shape)
-    print("W2VBertSE")
-    model = W2VBertSE()
+    print("CLAP")
+    model = ClapSE()
     v = model.get_speaker_embedding(wav, sr)
     print(v.shape)
+    # print("MetaVoiceSE")
+    # model = MetaVoiceSE()
+    # v = model.get_speaker_embedding(wav, sr)
+    # print(v.shape)
+    # print("PyannoteSE")
+    # model = PyannoteSE()
+    # v = model.get_speaker_embedding(wav, sr)
+    # print(v.shape)
+    # print("W2VBertSE")
+    # model = W2VBertSE()
+    # v = model.get_speaker_embedding(wav, sr)
+    # print(v.shape)


 if __name__ == '__main__':