import argparse
import json
import os
from os.path import join as p_join
from tqdm import tqdm
from time import time
import hdbscan
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.manifold import TSNE
import pandas as pd
from datasets import load_dataset
from model_meta_voice import MetaVoiceEmbedding
from model_pyannote_embedding import PyannoteEmbedding
from model_clap import CLAPEmbedding, CLAPGeneralEmbedding
from model_speaker_embedding import (
W2VBERTEmbedding, Wav2VecEmbedding, XLSR300MEmbedding, XLSR1BEmbedding, XLSR2BEmbedding,
HuBERTBaseEmbedding, HuBERTLargeEmbedding, HuBERTXLEmbedding
)
def get_embedding(model_class, model_name: str, dataset_name: str, data_split: str):
    """Compute a speaker embedding for every sample of a HF dataset and cache the result.

    Records are written as JSON-lines to
    ``experiment_cache/embeddings/{model_name}.{basename(dataset_name)}.json``.
    If that cache file already exists the function returns immediately, without
    downloading the dataset or instantiating the model.

    Parameters
    ----------
    model_class : type
        Embedding model class exposing ``get_speaker_embedding(array, sampling_rate)``.
    model_name : str
        Identifier used in the cache filename and stored in each record.
    dataset_name : str
        HF dataset repository name passed to ``load_dataset``.
    data_split : str
        Dataset split to process (e.g. ``"test"``).
    """
    file_path = p_join("experiment_cache", "embeddings", f"{model_name}.{os.path.basename(dataset_name)}.json")
    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    # Check the cache BEFORE load_dataset(): the original loaded the (potentially
    # large) dataset even when the cached file made the whole call a no-op.
    if os.path.exists(file_path):
        return
    dataset = load_dataset(dataset_name, split=data_split)
    model = model_class()
    embeddings = []
    for sample in tqdm(dataset, total=len(dataset)):
        start = time()
        vector = model.get_speaker_embedding(sample["audio"]["array"], sample["audio"]["sampling_rate"])
        record = {
            "model": model_name,
            "embedding": vector.tolist(),
            "sampling_rate": sample["audio"]["sampling_rate"],
            "process_time": time() - start,
            "dataset_name": os.path.basename(dataset_name)
        }
        # Carry over every metadata column except the raw audio payload.
        record.update({k: v for k, v in sample.items() if k != "audio"})
        embeddings.append(record)
    with open(file_path, "w") as f:
        f.write("\n".join(json.dumps(r) for r in embeddings))
def _read_jsonl(file_path):
    """Load a JSON-lines file into a list of dicts (one dict per line)."""
    with open(file_path) as f:
        return [json.loads(line) for line in f]


def cluster_embedding(model_name, dataset_name, label_name: str):
    """Cluster cached embeddings, reduce them to 2-D, and plot the latent space.

    Three cached stages, each skipped when its output file already exists:
    1. HDBSCAN clustering -> ``experiment_cache/cluster/....csv``
    2. t-SNE 2-D reduction -> ``experiment_cache/tsne/....npy``
    3. Scatter plot colored by ``label_name`` -> ``experiment_cache/figure/....png``

    Parameters
    ----------
    model_name : str
        Identifier used in the cache filenames (must match ``get_embedding``).
    dataset_name : str
        HF dataset repository name; only its basename is used in filenames.
    label_name : str
        Key of the per-sample metadata field used to label/color the plot.
    """
    dataset_id = os.path.basename(dataset_name)
    file_path_embedding = p_join("experiment_cache", "embeddings", f"{model_name}.{dataset_id}.json")
    file_path_cluster = p_join("experiment_cache", "cluster", f"{model_name}.{dataset_id}.{label_name}.csv")
    if not os.path.exists(file_path_cluster):
        print('CLUSTERING')
        os.makedirs(os.path.dirname(file_path_cluster), exist_ok=True)
        assert os.path.exists(file_path_embedding)
        data = _read_jsonl(file_path_embedding)
        clusterer = hdbscan.HDBSCAN()
        clusterer.fit(np.stack([i["embedding"] for i in data]))  # data x dimension
        # HDBSCAN labels run 0..max (with -1 for noise), so the count is max + 1;
        # the original printed labels_.max() and under-reported by one.
        print(f'{clusterer.labels_.max() + 1} clusters found from {len(data)} data points')
        print(f"generating report for {label_name}")
        cluster_info = [
            {"id": k, "cluster": c, f"label.{label_name}": row[label_name]}
            for k, (c, row) in enumerate(zip(clusterer.labels_, data))
            if c != -1  # drop noise points
        ]
        pd.DataFrame(cluster_info).to_csv(file_path_cluster, index=False)
    file_path_tsne = p_join("experiment_cache", "tsne", f"{model_name}.{dataset_id}.{label_name}.npy")
    if not os.path.exists(file_path_tsne):
        os.makedirs(os.path.dirname(file_path_tsne), exist_ok=True)
        print('DIMENSION REDUCTION')
        assert os.path.exists(file_path_embedding)
        data = np.stack([i['embedding'] for i in _read_jsonl(file_path_embedding)])  # data x dimension
        print(f'Dimension reduction: {data.shape}')
        np.save(file_path_tsne, TSNE(n_components=2, random_state=0).fit_transform(data))
    embedding_2d = np.load(file_path_tsne)
    print('PLOT')
    figure_path = p_join("experiment_cache", "figure", f"2d.latent_space.{model_name}.{dataset_id}.{label_name}.png")
    os.makedirs(os.path.dirname(figure_path), exist_ok=True)
    label = [i[label_name] for i in _read_jsonl(file_path_embedding)]
    label_type = sorted(set(label))
    label2id = {v: n for n, v in enumerate(label_type)}
    plt.figure()
    scatter = plt.scatter(
        embedding_2d[:, 0],
        embedding_2d[:, 1],
        s=8,
        c=[label2id[i] for i in label],
        cmap=sns.color_palette('Spectral', len(label_type), as_cmap=True)
    )
    plt.gca().set_aspect('equal', 'datalim')
    plt.legend(handles=scatter.legend_elements(num=len(label_type))[0],
               labels=label_type,
               bbox_to_anchor=(1.04, 1),
               borderaxespad=0,
               loc='upper left',
               ncol=3 if len(label2id) > 12 else 1)
    plt.savefig(figure_path, bbox_inches='tight', dpi=600)
    # Close the figure: main() calls this once per model, and un-closed figures
    # accumulate (matplotlib warns after 20 open figures and memory grows).
    plt.close()
def main(dataset_name, data_split, label_name):
    """Run the embed -> cluster -> plot pipeline for every embedding model.

    Parameters
    ----------
    dataset_name : str
        HF dataset repository name.
    data_split : str
        Dataset split to process.
    label_name : str
        Metadata field used to label the cluster report and the plot.
    """
    # (model class, cache-file identifier) pairs, processed in the same order
    # as the original hand-written call sequence.
    models = [
        (MetaVoiceEmbedding, "meta_voice_se"),
        (PyannoteEmbedding, "pyannote_se"),
        (CLAPEmbedding, "clap_se"),
        (CLAPGeneralEmbedding, "clap_general_se"),
        (HuBERTBaseEmbedding, "hubert_base_se"),
        (HuBERTXLEmbedding, "hubert_xl_se"),
        (HuBERTLargeEmbedding, "hubert_large_se"),
        (Wav2VecEmbedding, "wav2vec_se"),
        (W2VBERTEmbedding, "w2v_bert_se"),
        (XLSR300MEmbedding, "xlsr_300m_se"),
        (XLSR1BEmbedding, "xlsr_1b_se"),
        (XLSR2BEmbedding, "xlsr_2b_se"),
    ]
    for model_class, model_name in models:
        get_embedding(model_class, model_name, dataset_name, data_split)
        cluster_embedding(model_name, dataset_name, label_name)
if __name__ == '__main__':
    # Previously analysed configurations, kept for reference:
    # main("asahi417/voxceleb1-test-split", "test", "speaker_id")
    # main("ylacombe/expresso", "train", "speaker_id")
    # main("ylacombe/expresso", "train", "style")
    # main("asahi417/j-tube-speech", "test", "speaker_id")
    for target_label in ("style", "speaker_id"):
        main("asahi417/jvnv-emotional-speech-corpus", "test", target_label)