asahi417 committed on
Commit
7974918
1 Parent(s): 2e2b2f2
Files changed (1) hide show
  1. experiment_voxceleb1.py +43 -0
experiment_voxceleb1.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ from os.path import join as p_join
4
+ from tqdm import tqdm
5
+
6
+ from datasets import load_dataset
7
+
8
+ from model_meta_voice import MetaVoiceSE
9
+ from model_pyannote_embedding import PyannoteSE
10
+ from model_w2v_bert import W2VBertSE
11
+
12
+
13
# Module-level experiment setup: cache location and evaluation data.
# NOTE(review): `load_dataset` downloads from the HuggingFace Hub on first run —
# this module has network/disk side effects at import time.
cache_dir = p_join("experiment_cache", "voxceleb1")
voxceleb1_dataset = load_dataset("asahi417/voxceleb1-test-split", split="test")
print(f"voxceleb1_dataset: {len(voxceleb1_dataset)}")
16
+
17
+
18
def get_embedding(model_class, model_name: str) -> None:
    """Compute speaker embeddings for every VoxCeleb1 test sample and cache them.

    Writes one JSON object per line (JSONL) to
    ``<cache_dir>/embedding.<model_name>.json``. If that file already exists,
    the computation is skipped entirely.

    Args:
        model_class: Speaker-embedding model class, instantiated with no
            arguments; assumed to expose
            ``get_speaker_embedding(array, sampling_rate)`` returning an
            array-like with ``.tolist()`` — confirm against the model modules.
        model_name: Identifier used both in the cache file name and in the
            ``"model"`` field of each stored record.
    """
    file_path = p_join(cache_dir, f"embedding.{model_name}.json")
    if os.path.exists(file_path):
        return  # already cached; nothing to do
    # Fix: the cache directory is never created elsewhere — without this,
    # open(file_path, "w") below raises FileNotFoundError on a fresh checkout.
    os.makedirs(cache_dir, exist_ok=True)
    model = model_class()
    embeddings = []
    for sample in tqdm(voxceleb1_dataset, total=len(voxceleb1_dataset)):
        v = model.get_speaker_embedding(sample["audio"]["array"], sample["audio"]["sampling_rate"])
        embeddings.append({
            "model": model_name,
            "embedding": v.tolist(),
            "sampling_rate": sample["audio"]["sampling_rate"],
            "id": sample["id"],
            "speaker_id": sample["speaker_id"]
        })
    with open(file_path, "w") as f:
        f.write("\n".join([json.dumps(record) for record in embeddings]))
35
+
36
+
37
if __name__ == '__main__':
    # Cache embeddings for each speaker-embedding backend in turn.
    experiments = [
        (MetaVoiceSE, "meta_voice_se"),
        (PyannoteSE, "pyannote_se"),
        (W2VBertSE, "w2v_bert_se"),
    ]
    for model_cls, name in experiments:
        get_embedding(model_cls, name)
42
+
43
+