asahi417 committed on
Commit
f1c7699
1 Parent(s): 6f32cea
Files changed (1)
  1. experiment_speaker_verification.py +20 -6
experiment_speaker_verification.py CHANGED
@@ -10,9 +10,19 @@ from model_pyannote_embedding import PyannoteSE
 from model_w2v_bert import W2VBertSE
 
 
+# def to_json_serializable(val):
+#     if type(val) is list:
+#         return val
+#     if "float" in str(type(val)):
+#         return float(val)
+#     if "int" in str(type(val)):
+#         return int(val)
+#     return str(val)
+
+
 def get_embedding(model_class, model_name: str, dataset_name: str):
     dataset = load_dataset(dataset_name, split="test")
-    file_path = p_join("experiment_cache", "embeddings", f"{model_name}.{dataset_name}.json")
+    file_path = p_join("experiment_cache", "embeddings", f"{model_name}.{os.path.basename(dataset_name)}.json")
     os.makedirs(os.path.dirname(file_path), exist_ok=True)
     if os.path.exists(file_path):
         return
@@ -21,15 +31,15 @@ def get_embedding(model_class, model_name: str, dataset_name: str):
     for i in tqdm(dataset, total=len(dataset)):
         start = time()
         v = model.get_speaker_embedding(i["audio"]["array"], i["audio"]["sampling_rate"])
-        embeddings.append({
+        tmp = {
             "model": model_name,
             "embedding": v.tolist(),
             "sampling_rate": i["audio"]["sampling_rate"],
-            "id": i["id"],
-            "speaker_id": i["speaker_id"],
             "process_time": time() - start,
             "dataset_name": os.path.basename(dataset_name)
-        })
+        }
+        tmp.update({k: v for k, v in i.items() if k != "audio"})
+        embeddings.append(tmp)
     with open(file_path, "w") as f:
         f.write("\n".join([json.dumps(i) for i in embeddings]))
 
@@ -37,7 +47,11 @@ def get_embedding(model_class, model_name: str, dataset_name: str):
 if __name__ == '__main__':
     # cache embedding
     get_embedding(MetaVoiceSE, "meta_voice_se", "asahi417/voxceleb1-test-split")
-    get_embedding(PyannoteSE, "pyannote_se", "asahi417/voxceleb1-test-split")
+    # get_embedding(PyannoteSE, "pyannote_se", "asahi417/voxceleb1-test-split")
     get_embedding(W2VBertSE, "w2v_bert_se", "asahi417/voxceleb1-test-split")
+    get_embedding(MetaVoiceSE, "meta_voice_se", "ylacombe/expresso")
+    get_embedding(PyannoteSE, "pyannote_se", "ylacombe/expresso")
+    get_embedding(W2VBertSE, "w2v_bert_se", "ylacombe/expresso")
+
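
Note on the cache-path change: Hub dataset ids contain a "/" (e.g. "asahi417/voxceleb1-test-split"), so interpolating the raw id into the filename splits the cache file across an extra directory level; taking os.path.basename keeps one flat file per (model, dataset) pair. A minimal sketch of the difference, assuming p_join is os.path.join (as the script's usage suggests) and an illustrative model name:

import os
from os.path import join as p_join  # assumption: the script aliases os.path.join this way

dataset_name = "asahi417/voxceleb1-test-split"

# Before: the "/" in the dataset id introduces an unintended subdirectory.
print(p_join("experiment_cache", "embeddings", f"meta_voice_se.{dataset_name}.json"))
# -> experiment_cache/embeddings/meta_voice_se.asahi417/voxceleb1-test-split.json

# After: only the repository name is kept, yielding a flat cache filename.
print(p_join("experiment_cache", "embeddings", f"meta_voice_se.{os.path.basename(dataset_name)}.json"))
# -> experiment_cache/embeddings/meta_voice_se.voxceleb1-test-split.json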
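Note on the record construction: the hard-coded "id" and "speaker_id" keys are replaced by a copy of every non-audio column in the row, presumably so the same loop works for the newly added ylacombe/expresso runs, whose columns need not match the VoxCeleb split's. A minimal sketch with a hypothetical row (column names are illustrative, not expresso's actual schema):

# Hypothetical row without a "speaker_id" column; the old hard-coded
# i["speaker_id"] lookup would raise KeyError on such a dataset.
row = {"audio": {"array": [0.0], "sampling_rate": 16000}, "speaker": "ex01", "style": "happy"}

tmp = {"model": "w2v_bert_se"}
tmp.update({k: v for k, v in row.items() if k != "audio"})  # copy every non-audio column
assert tmp == {"model": "w2v_bert_se", "speaker": "ex01", "style": "happy"}

One caveat: the generic copy can pull in values json.dumps cannot serialise (e.g. numpy scalars), which is plausibly what the still-commented-out to_json_serializable helper was drafted to coerce.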
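Note on the cache format: despite the .json extension, the file is written as JSON Lines, one json.dumps record per line. A sketch of reading it back (the path is illustrative):

import json

with open("experiment_cache/embeddings/meta_voice_se.voxceleb1-test-split.json") as f:
    embeddings = [json.loads(line) for line in f if line.strip()]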