experiment-speaker-embedding / experiment_speaker_verification.py
asahi417's picture
init
14d62eb
raw
history blame
6.86 kB
import argparse
import json
import os
from os.path import join as p_join
from tqdm import tqdm
from time import time
import hdbscan
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.manifold import TSNE
import pandas as pd
from datasets import load_dataset
from model_meta_voice import MetaVoiceEmbedding
from model_pyannote_embedding import PyannoteEmbedding
from model_clap import CLAPEmbedding, CLAPGeneralEmbedding
from model_speaker_embedding import (
W2VBERTEmbedding, Wav2VecEmbedding, XLSR300MEmbedding, XLSR1BEmbedding, XLSR2BEmbedding,
HuBERTBaseEmbedding, HuBERTLargeEmbedding, HuBERTXLEmbedding
)
def get_embedding(model_class, model_name: str, dataset_name: str, data_split: str):
    """Compute a speaker embedding for every sample of a dataset and cache them as JSONL.

    :param model_class: embedding model class; instantiated with no arguments and must
        expose ``get_speaker_embedding(array, sampling_rate)`` returning a numpy vector.
    :param model_name: short identifier used in the cache file name.
    :param dataset_name: HuggingFace dataset repo id (basename is used in the cache name).
    :param data_split: dataset split to process (e.g. "test").
    """
    file_path = p_join("experiment_cache", "embeddings", f"{model_name}.{os.path.basename(dataset_name)}.json")
    # Check the cache BEFORE loading the dataset: the original loaded the full
    # dataset first, paying the download/load cost even on a cache hit.
    if os.path.exists(file_path):
        return
    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    dataset = load_dataset(dataset_name, split=data_split)
    model = model_class()
    embeddings = []
    for sample in tqdm(dataset, total=len(dataset)):
        start = time()
        v = model.get_speaker_embedding(sample["audio"]["array"], sample["audio"]["sampling_rate"])
        record = {
            "model": model_name,
            "embedding": v.tolist(),
            "sampling_rate": sample["audio"]["sampling_rate"],
            "process_time": time() - start,
            "dataset_name": os.path.basename(dataset_name)
        }
        # Carry over all metadata columns except the raw audio payload.
        record.update({k: val for k, val in sample.items() if k != "audio"})
        embeddings.append(record)
    # One JSON object per line (JSONL), matching what cluster_embedding reads back.
    with open(file_path, "w") as f:
        f.write("\n".join(json.dumps(e) for e in embeddings))
def cluster_embedding(model_name, dataset_name, label_name: str):
    """Cluster cached embeddings with HDBSCAN, project them to 2-D with t-SNE, and plot.

    Reads the JSONL cache written by ``get_embedding``. The cluster CSV and the t-SNE
    array are cached under ``experiment_cache`` and recomputed only when missing;
    the scatter plot is re-drawn on every call.

    :param model_name: short model identifier matching the embedding cache file.
    :param dataset_name: dataset repo id (basename is used in cache file names).
    :param label_name: metadata column used to color/report the clusters.
    """
    file_path_embedding = p_join("experiment_cache", "embeddings", f"{model_name}.{os.path.basename(dataset_name)}.json")
    file_path_cluster = p_join("experiment_cache", "cluster", f"{model_name}.{os.path.basename(dataset_name)}.{label_name}.csv")
    if not os.path.exists(file_path_cluster):
        print('CLUSTERING')
        os.makedirs(os.path.dirname(file_path_cluster), exist_ok=True)
        assert os.path.exists(file_path_embedding)
        with open(file_path_embedding) as f:
            data = [json.loads(i) for i in f.readlines()]
        clusterer = hdbscan.HDBSCAN()
        embeddings = [i["embedding"] for i in data]
        keys = list(range(len(data)))
        clusterer.fit(np.stack(embeddings))  # data x dimension
        # HDBSCAN labels run 0..max with -1 reserved for noise, so the number of
        # clusters is labels_.max() + 1 (the original under-reported by one).
        print(f'{clusterer.labels_.max() + 1} clusters found from {len(data)} data points')
        print(f"generating report for {label_name}")
        label = [i[label_name] for i in data]
        # Noise points (cluster == -1) are excluded from the report.
        cluster_info = [
            {"id": k, "cluster": c, f"label.{label_name}": l} for c, k, l in zip(clusterer.labels_, keys, label) if c != -1
        ]
        cluster_df = pd.DataFrame(cluster_info)
        cluster_df.to_csv(file_path_cluster, index=False)
    file_path_tsne = p_join("experiment_cache", "tsne", f"{model_name}.{os.path.basename(dataset_name)}.{label_name}.npy")
    if not os.path.exists(file_path_tsne):
        os.makedirs(os.path.dirname(file_path_tsne), exist_ok=True)
        print('DIMENSION REDUCTION')
        assert os.path.exists(file_path_embedding)
        with open(file_path_embedding) as f:
            data = np.stack([json.loads(i)['embedding'] for i in f.readlines()])  # data x dimension
        print(f'Dimension reduction: {data.shape}')
        # Fixed random_state so repeated runs give a reproducible projection.
        embedding_2d = TSNE(n_components=2, random_state=0).fit_transform(data)
        np.save(file_path_tsne, embedding_2d)
    embedding_2d = np.load(file_path_tsne)
    print('PLOT')
    figure_path = p_join("experiment_cache", "figure", f"2d.latent_space.{model_name}.{os.path.basename(dataset_name)}.{label_name}.png")
    os.makedirs(os.path.dirname(figure_path), exist_ok=True)
    with open(file_path_embedding) as f:
        label = np.stack([json.loads(i)[label_name] for i in f.readlines()])  # data x dimension
    label_type = sorted(list(set(label)))
    label2id = {v: n for n, v in enumerate(label_type)}
    plt.figure()
    scatter = plt.scatter(
        embedding_2d[:, 0],
        embedding_2d[:, 1],
        s=8,
        c=[label2id[i] for i in label],
        cmap=sns.color_palette('Spectral', len(label_type), as_cmap=True)
    )
    plt.gca().set_aspect('equal', 'datalim')
    plt.legend(handles=scatter.legend_elements(num=len(label_type))[0],
               labels=label_type,
               bbox_to_anchor=(1.04, 1),
               borderaxespad=0,
               loc='upper left',
               ncol=3 if len(label2id) > 12 else 1)
    plt.savefig(figure_path, bbox_inches='tight', dpi=600)
    # Close the figure: this function is called once per model and the original
    # leaked one open matplotlib figure per call.
    plt.close()
def main(dataset_name, data_split, label_name):
    """Extract embeddings and run the clustering pipeline for every registered model.

    Models are processed sequentially in a fixed order; each one first populates
    (or reuses) its embedding cache, then clusters/plots against ``label_name``.
    """
    # (embedding class, cache identifier) pairs, in the original execution order.
    pipeline = [
        (MetaVoiceEmbedding, "meta_voice_se"),
        (PyannoteEmbedding, "pyannote_se"),
        (CLAPEmbedding, "clap_se"),
        (CLAPGeneralEmbedding, "clap_general_se"),
        (HuBERTBaseEmbedding, "hubert_base_se"),
        (HuBERTXLEmbedding, "hubert_xl_se"),
        (HuBERTLargeEmbedding, "hubert_large_se"),
        (Wav2VecEmbedding, "wav2vec_se"),
        (W2VBERTEmbedding, "w2v_bert_se"),
        (XLSR300MEmbedding, "xlsr_300m_se"),
        (XLSR1BEmbedding, "xlsr_1b_se"),
        (XLSR2BEmbedding, "xlsr_2b_se"),
    ]
    for model_class, model_name in pipeline:
        get_embedding(model_class, model_name, dataset_name, data_split)
        cluster_embedding(model_name, dataset_name, label_name)
if __name__ == '__main__':
    # Previously-run dataset/label combinations, kept for reference:
    # main("asahi417/voxceleb1-test-split", "test", "speaker_id")
    # main("ylacombe/expresso", "train", "speaker_id")
    # main("ylacombe/expresso", "train", "style")
    # main("asahi417/j-tube-speech", "test", "speaker_id")
    # Current run: JVNV emotional-speech corpus, clustered by style then by speaker.
    main("asahi417/jvnv-emotional-speech-corpus", "test", "style")
    main("asahi417/jvnv-emotional-speech-corpus", "test", "speaker_id")