asahi417 committed on
Commit
43b7c1f
1 Parent(s): 6b734ef
Files changed (1) hide show
  1. experiment_speaker_verification.py +32 -18
experiment_speaker_verification.py CHANGED
@@ -10,18 +10,8 @@ from model_pyannote_embedding import PyannoteSE
10
  from model_w2v_bert import W2VBertSE
11
 
12
 
13
- # def to_json_serializable(val):
14
- # if type(val) is list:
15
- # return val
16
- # if "float" in str(type(val)):
17
- # return float(val)
18
- # if "int" in str(type(val)):
19
- # return int(val)
20
- # return str(val)
21
-
22
-
23
- def get_embedding(model_class, model_name: str, dataset_name: str):
24
- dataset = load_dataset(dataset_name, split="test")
25
  file_path = p_join("experiment_cache", "embeddings", f"{model_name}.{os.path.basename(dataset_name)}.json")
26
  os.makedirs(os.path.dirname(file_path), exist_ok=True)
27
  if os.path.exists(file_path):
@@ -44,14 +34,38 @@ def get_embedding(model_class, model_name: str, dataset_name: str):
44
  f.write("\n".join([json.dumps(i) for i in embeddings]))
45
 
46
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
  if __name__ == '__main__':
48
  # cache embedding
49
- get_embedding(MetaVoiceSE, "meta_voice_se", "asahi417/voxceleb1-test-split")
50
- # get_embedding(PyannoteSE, "pyannote_se", "asahi417/voxceleb1-test-split")
51
- get_embedding(W2VBertSE, "w2v_bert_se", "asahi417/voxceleb1-test-split")
52
- get_embedding(MetaVoiceSE, "meta_voice_se", "ylacombe/expresso")
53
- # get_embedding(PyannoteSE, "pyannote_se", "ylacombe/expresso")
54
- get_embedding(W2VBertSE, "w2v_bert_se", "ylacombe/expresso")
 
55
 
56
 
57
 
 
10
  from model_w2v_bert import W2VBertSE
11
 
12
 
13
+ def get_embedding(model_class, model_name: str, dataset_name: str, data_split: str):
14
+ dataset = load_dataset(dataset_name, split=data_split)
 
 
 
 
 
 
 
 
 
 
15
  file_path = p_join("experiment_cache", "embeddings", f"{model_name}.{os.path.basename(dataset_name)}.json")
16
  os.makedirs(os.path.dirname(file_path), exist_ok=True)
17
  if os.path.exists(file_path):
 
34
  f.write("\n".join([json.dumps(i) for i in embeddings]))
35
 
36
 
37
def anlyze_embedding(model_class, model_name: str, dataset_name: str):
    """Load the cached speaker embeddings produced by ``get_embedding``.

    Reads the JSON-lines cache file written for ``model_name`` on
    ``dataset_name`` and returns its records.

    NOTE(review): the original body re-ran embedding extraction over an
    undefined ``dataset`` variable (a copy-paste of ``get_embedding`` that
    raised ``NameError``) and then overwrote the cache file it had just
    asserted exists. That unreachable section is removed; ``model_class``
    is kept in the signature for backward compatibility but is unused —
    presumably the analysis logic was unfinished; confirm intended metrics.

    Parameters:
        model_class: unused (kept for interface compatibility).
        model_name: name used when the cache file was written.
        dataset_name: dataset identifier; only its basename is used in the path.

    Returns:
        list[dict]: one dict per cached embedding record.

    Raises:
        AssertionError: if the cache file does not exist.
    """
    file_path = p_join("experiment_cache", "embeddings", f"{model_name}.{os.path.basename(dataset_name)}.json")
    assert os.path.exists(file_path), f"missing embedding cache: {file_path}"
    with open(file_path) as f:
        # one JSON object per line, as written by get_embedding
        embeddings = [json.loads(line) for line in f if line.strip()]
    return embeddings
58
+
59
+
60
if __name__ == '__main__':
    # cache embedding: every model on every dataset/split pair,
    # in the same order as the original explicit call list.
    dataset_splits = [
        ("asahi417/voxceleb1-test-split", "test"),
        ("ylacombe/expresso", "train"),
    ]
    models = [
        (MetaVoiceSE, "meta_voice_se"),
        (PyannoteSE, "pyannote_se"),
        (W2VBertSE, "w2v_bert_se"),
    ]
    for dataset_name, split in dataset_splits:
        for model_class, model_name in models:
            get_embedding(model_class, model_name, dataset_name, split)
68
+
69
 
70
 
71