asahi417 committed
Commit 85fe221
1 Parent(s): dcc9bd4
Files changed (2)
  1. experiment_voxceleb1.py +8 -8
  2. model_meta_voice.py +2 -2
experiment_voxceleb1.py CHANGED
@@ -11,17 +11,16 @@ from model_w2v_bert import W2VBertSE
 
 
 cache_dir = p_join("experiment_cache", "voxceleb1")
-voxceleb1_dataset = load_dataset("asahi417/voxceleb1-test-split", split="test")
-print(f"voxceleb1_dataset: {len(voxceleb1_dataset)}")
 
 
-def get_embedding(model_class, model_name: str):
+def get_embedding(model_class, model_name: str, dataset_name: str):
+    dataset = load_dataset(dataset_name, split="test")
     file_path = p_join(cache_dir, f"embedding.{model_name}.json")
     if os.path.exists(file_path):
         return
     model = model_class()
     embeddings = []
-    for i in tqdm(voxceleb1_dataset, total=len(voxceleb1_dataset)):
+    for i in tqdm(dataset, total=len(dataset)):
         start = time()
         v = model.get_speaker_embedding(i["audio"]["array"], i["audio"]["sampling_rate"])
         embeddings.append({
@@ -30,7 +29,8 @@ def get_embedding(model_class, model_name: str):
             "sampling_rate": i["audio"]["sampling_rate"],
             "id": i["id"],
             "speaker_id": i["speaker_id"],
-            "process_time": time() - start
+            "process_time": time() - start,
+            "dataset_name": os.path.basename(dataset_name)
         })
     with open(file_path, "w") as f:
         f.write("\n".join([json.dumps(i) for i in embeddings]))
@@ -38,8 +38,8 @@ def get_embedding(model_class, model_name: str):
 
 if __name__ == '__main__':
     # cache embedding
-    get_embedding(MetaVoiceSE, "meta_voice_se")
-    get_embedding(PyannoteSE, "pyannote_se")
-    get_embedding(W2VBertSE, "w2v_bert_se")
+    get_embedding(MetaVoiceSE, "meta_voice_se", "asahi417/voxceleb1-test-split")
+    get_embedding(PyannoteSE, "pyannote_se", "asahi417/voxceleb1-test-split")
+    get_embedding(W2VBertSE, "w2v_bert_se", "asahi417/voxceleb1-test-split")
 
 
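A minimal usage sketch (not part of the commit): after this change, get_embedding takes the dataset repository as an explicit argument, so the same caching loop can be pointed at any HuggingFace dataset whose "test" split exposes "audio", "id", and "speaker_id" columns. It only restates the three calls already in __main__; iterating over (model_class, alias) pairs is an illustrative rearrangement, not code from the commit.

# Sketch only: assumes the refactored get_embedding(model_class, model_name, dataset_name)
# and the model classes imported at the top of experiment_voxceleb1.py.
if __name__ == '__main__':
    dataset_repo = "asahi417/voxceleb1-test-split"
    for model_class, alias in [(MetaVoiceSE, "meta_voice_se"),
                               (PyannoteSE, "pyannote_se"),
                               (W2VBertSE, "w2v_bert_se")]:
        # embeddings are cached to experiment_cache/voxceleb1/embedding.<alias>.json
        get_embedding(model_class, alias, dataset_repo)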
model_meta_voice.py CHANGED
@@ -14,8 +14,8 @@ import torch
 from torch import nn
 
 
-checkpoint_url = "https://huggingface.co/datasets/asahi417/experiment-speaker-embedding/resolve/main/meta_voice_speaker_encoder.pt.pt"
-model_weight = p_join(os.path.expanduser('~'), ".cache", "experiment_speaker_embedding", "meta_voice_speaker_encoder.pt.pt")
+checkpoint_url = "https://huggingface.co/datasets/asahi417/experiment-speaker-embedding/resolve/main/meta_voice_speaker_encoder.pt"
+model_weight = p_join(os.path.expanduser('~'), ".cache", "experiment_speaker_embedding", "meta_voice_speaker_encoder.pt")
 
 
 def wget(url: str, output_file: Optional[str] = None):
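For context, a minimal sketch of how the corrected ".pt" constants would typically be combined with the wget helper declared above. Only the helper's signature appears in this hunk, so the download-then-load flow and the function name load_speaker_encoder_state below are assumptions for illustration, not code from model_meta_voice.py.

# Sketch only: assumes checkpoint_url, model_weight, and wget() from model_meta_voice.py.
import os
import torch

def load_speaker_encoder_state(device: str = "cpu"):
    # download the checkpoint to the local cache path on first use
    if not os.path.exists(model_weight):
        os.makedirs(os.path.dirname(model_weight), exist_ok=True)
        wget(checkpoint_url, output_file=model_weight)
    # map_location keeps the load device-agnostic
    return torch.load(model_weight, map_location=device)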