# experiment-speaker-embedding / experiment_speaker_verification.py
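"""Speaker-embedding experiments: compute and cache speaker embeddings from
several models, cluster them with HDBSCAN, visualise the latent space with
t-SNE, and collect basic statistics."""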
import json
import os
from os.path import join as p_join
from tqdm import tqdm
from time import time
import hdbscan
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.manifold import TSNE
import pandas as pd
from datasets import load_dataset
from model_meta_voice import MetaVoiceEmbedding
from model_pyannote_embedding import PyannoteEmbedding
from model_clap import CLAPEmbedding, CLAPGeneralEmbedding
from model_speaker_embedding import (
W2VBERTEmbedding, Wav2VecEmbedding, XLSR300MEmbedding, XLSR1BEmbedding, XLSR2BEmbedding,
HuBERTBaseEmbedding, HuBERTLargeEmbedding, HuBERTXLEmbedding
)


def get_embedding(model_class, model_name: str, dataset_name: str, data_split: str):
    """Compute a speaker embedding for every sample in a dataset and cache the
    results as JSON Lines under experiment_cache/embeddings."""
    file_path = p_join("experiment_cache", "embeddings", f"{model_name}.{os.path.basename(dataset_name)}.json")
    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    if os.path.exists(file_path):  # cache hit: skip dataset download and model load
        return
    dataset = load_dataset(dataset_name, split=data_split)
model = model_class()
embeddings = []
for i in tqdm(dataset, total=len(dataset)):
start = time()
v = model.get_speaker_embedding(i["audio"]["array"], i["audio"]["sampling_rate"])
tmp = {
"model": model_name,
"embedding": v.tolist(),
"sampling_rate": i["audio"]["sampling_rate"],
"process_time": time() - start,
"dataset_name": os.path.basename(dataset_name)
}
tmp.update({k: v for k, v in i.items() if k != "audio"})
embeddings.append(tmp)
with open(file_path, "w") as f:
f.write("\n".join([json.dumps(i) for i in embeddings]))
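

# Note: despite the ".json" suffix, the embedding cache written above is JSON
# Lines (one record per line); it can be read back with, e.g.,
# pd.read_json(file_path, lines=True).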


def cluster_embedding(model_name: str, dataset_name: str, label_name: str):
    """Cluster cached embeddings with HDBSCAN, write a cluster/label report as
    CSV, and plot the t-SNE-reduced latent space coloured by `label_name`."""
file_path_embedding = p_join("experiment_cache", "embeddings", f"{model_name}.{os.path.basename(dataset_name)}.json")
file_path_cluster = p_join("experiment_cache", "cluster", f"{model_name}.{os.path.basename(dataset_name)}.{label_name}.csv")
if not os.path.exists(file_path_cluster):
print('CLUSTERING')
os.makedirs(os.path.dirname(file_path_cluster), exist_ok=True)
assert os.path.exists(file_path_embedding)
with open(file_path_embedding) as f:
data = [json.loads(i) for i in f.readlines()]
clusterer = hdbscan.HDBSCAN()
embeddings = [i["embedding"] for i in data]
keys = [i["id"] for i in data]
clusterer.fit(np.stack(embeddings)) # data x dimension
        print(f'{clusterer.labels_.max() + 1} clusters found from {len(data)} data points (HDBSCAN labels noise as -1)')
        print(f"generating report for {label_name}")
        label = [i[label_name] for i in data]
        cluster_info = [  # noise points (cluster -1) are excluded from the report
            {"id": k, "cluster": c, f"label.{label_name}": l} for c, k, l in zip(clusterer.labels_, keys, label) if c != -1
        ]
cluster_df = pd.DataFrame(cluster_info)
cluster_df.to_csv(file_path_cluster, index=False)
file_path_tsne = p_join("experiment_cache", "tsne", f"{model_name}.{os.path.basename(dataset_name)}.{label_name}.npy")
if not os.path.exists(file_path_tsne):
os.makedirs(os.path.dirname(file_path_tsne), exist_ok=True)
print('DIMENSION REDUCTION')
assert os.path.exists(file_path_embedding)
with open(file_path_embedding) as f:
data = np.stack([json.loads(i)['embedding'] for i in f.readlines()]) # data x dimension
print(f'Dimension reduction: {data.shape}')
embedding_2d = TSNE(n_components=2, random_state=0).fit_transform(data)
np.save(file_path_tsne, embedding_2d)
embedding_2d = np.load(file_path_tsne)
print('PLOT')
figure_path = p_join("experiment_cache", "figure", f"2d.latent_space.{model_name}.{os.path.basename(dataset_name)}.{label_name}.png")
os.makedirs(os.path.dirname(figure_path), exist_ok=True)
    with open(file_path_embedding) as f:
        label = [json.loads(i)[label_name] for i in f.readlines()]
label_type = sorted(list(set(label)))
label2id = {v: n for n, v in enumerate(label_type)}
plt.figure()
scatter = plt.scatter(
embedding_2d[:, 0],
embedding_2d[:, 1],
s=8,
c=[label2id[i] for i in label],
cmap=sns.color_palette('Spectral', len(label_type), as_cmap=True)
)
plt.gca().set_aspect('equal', 'datalim')
plt.legend(handles=scatter.legend_elements(num=len(label_type))[0],
labels=label_type,
bbox_to_anchor=(1.04, 1),
borderaxespad=0,
loc='upper left',
ncol=3 if len(label2id) > 12 else 1)
    plt.savefig(figure_path, bbox_inches='tight', dpi=600)
    plt.close()  # free the figure before the next plot
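

# A minimal sketch, an addition rather than part of the original pipeline, of one
# way to score the report written above: majority-label purity over the non-noise
# HDBSCAN clusters. `cluster_purity` is an illustrative name; the CSV columns
# match those written by cluster_embedding.
def cluster_purity(file_path_cluster: str, label_name: str) -> float:
    df = pd.read_csv(file_path_cluster)
    # size of the most frequent label within each cluster
    majority = df.groupby("cluster")[f"label.{label_name}"].agg(lambda s: s.value_counts().iloc[0])
    return majority.sum() / len(df)  # fraction of points matching their cluster's majority label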


def analyze_embedding(model_name: str, dataset_name: str, n_shot: int = 5, n_cross_validation: int = 5):
    """Load cached embeddings and report basic statistics.

    `n_shot` and `n_cross_validation` are currently unused placeholders."""
    file_path = p_join("experiment_cache", "embeddings", f"{model_name}.{os.path.basename(dataset_name)}.json")
    assert os.path.exists(file_path)
    with open(file_path) as f:
        embeddings = [json.loads(i) for i in f.readlines()]
    df = pd.DataFrame(embeddings)
    process_time = df["process_time"].mean()  # mean embedding time per sample
    speaker_ids = sorted(df["speaker_id"].unique())
    return {"process_time": process_time, "speaker_ids": speaker_ids}
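

# A minimal sketch, not part of the original experiment, of the few-shot speaker
# identification that the unused `n_shot` parameter above suggests: enroll each
# speaker on the mean of `n_shot` embeddings, then classify the remaining
# embeddings by cosine similarity. `few_shot_identification` and its arguments
# are illustrative names; `df` is the DataFrame built in analyze_embedding.
def few_shot_identification(df: pd.DataFrame, n_shot: int = 5, seed: int = 0) -> float:
    rng = np.random.default_rng(seed)
    centroids, queries = {}, []
    for speaker_id, group in df.groupby("speaker_id"):
        vectors = np.stack(group["embedding"].tolist())  # samples x dimension
        index = rng.permutation(len(vectors))
        enroll, rest = vectors[index[:n_shot]], vectors[index[n_shot:]]
        centroids[speaker_id] = enroll.mean(axis=0)
        queries += [(speaker_id, v) for v in rest]
    speaker_ids = list(centroids)
    matrix = np.stack([centroids[s] for s in speaker_ids])  # speakers x dimension
    matrix /= np.linalg.norm(matrix, axis=1, keepdims=True)  # unit-normalise centroids
    correct = 0
    for true_speaker, v in queries:
        similarity = matrix @ (v / np.linalg.norm(v))  # cosine similarity to each centroid
        correct += speaker_ids[int(similarity.argmax())] == true_speaker
    return correct / len(queries)  # top-1 identification accuracy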


if __name__ == '__main__':
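    # Experiment grid: only the w2v_bert_se runs are active; uncomment the other
    # lines to cache embeddings and cluster with the remaining models.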
# get_embedding(MetaVoiceEmbedding, "meta_voice_se", "asahi417/voxceleb1-test-split", "test")
# get_embedding(PyannoteEmbedding, "pyannote_se", "asahi417/voxceleb1-test-split", "test")
# get_embedding(CLAPEmbedding, "clap_se", "asahi417/voxceleb1-test-split", "test")
# get_embedding(CLAPGeneralEmbedding, "clap_general_se", "asahi417/voxceleb1-test-split", "test")
# get_embedding(HuBERTBaseEmbedding, "hubert_base_se", "asahi417/voxceleb1-test-split", "test")
# get_embedding(HuBERTLargeEmbedding, "hubert_large_se", "asahi417/voxceleb1-test-split", "test")
# get_embedding(HuBERTXLEmbedding, "hubert_xl_se", "asahi417/voxceleb1-test-split", "test")
get_embedding(W2VBERTEmbedding, "w2v_bert_se", "asahi417/voxceleb1-test-split", "test")
# get_embedding(Wav2VecEmbedding, "wav2vec_se", "asahi417/voxceleb1-test-split", "test")
# get_embedding(XLSR300MEmbedding, "xlsr_300m_se", "asahi417/voxceleb1-test-split", "test")
# get_embedding(XLSR1BEmbedding, "xlsr_1b_se", "asahi417/voxceleb1-test-split", "test")
# get_embedding(XLSR2BEmbedding, "xlsr_2b_se", "asahi417/voxceleb1-test-split", "test")
# get_embedding(MetaVoiceEmbedding, "meta_voice_se", "ylacombe/expresso", "train")
# get_embedding(PyannoteEmbedding, "pyannote_se", "ylacombe/expresso", "train")
# get_embedding(CLAPEmbedding, "clap_se", "ylacombe/expresso", "train")
# get_embedding(CLAPGeneralEmbedding, "clap_general_se", "ylacombe/expresso", "train")
# get_embedding(HuBERTBaseEmbedding, "hubert_base_se", "ylacombe/expresso", "train")
# get_embedding(HuBERTLargeEmbedding, "hubert_large_se", "ylacombe/expresso", "train")
# get_embedding(HuBERTXLEmbedding, "hubert_xl_se", "ylacombe/expresso", "train")
get_embedding(W2VBERTEmbedding, "w2v_bert_se", "ylacombe/expresso", "train")
# get_embedding(Wav2VecEmbedding, "wav2vec_se", "ylacombe/expresso", "train")
# get_embedding(XLSR300MEmbedding, "xlsr_300m_se", "ylacombe/expresso", "train")
# get_embedding(XLSR1BEmbedding, "xlsr_1b_se", "ylacombe/expresso", "train")
# get_embedding(XLSR2BEmbedding, "xlsr_2b_se", "ylacombe/expresso", "train")
# cluster_embedding("meta_voice_se", "asahi417/voxceleb1-test-split", "speaker_id")
# cluster_embedding("pyannote_se", "asahi417/voxceleb1-test-split", "speaker_id")
# cluster_embedding("clap_se", "asahi417/voxceleb1-test-split", "speaker_id")
# cluster_embedding("clap_general_se", "asahi417/voxceleb1-test-split", "speaker_id")
# cluster_embedding("hubert_base_se", "asahi417/voxceleb1-test-split", "speaker_id")
# cluster_embedding("hubert_large_se", "asahi417/voxceleb1-test-split", "speaker_id")
# cluster_embedding("hubert_xl_se", "asahi417/voxceleb1-test-split", "speaker_id")
cluster_embedding("w2v_bert_se", "asahi417/voxceleb1-test-split", "speaker_id")
# cluster_embedding("wav2vec_se", "asahi417/voxceleb1-test-split", "speaker_id")
# cluster_embedding("xlsr_300m_se", "asahi417/voxceleb1-test-split", "speaker_id")
# cluster_embedding("xlsr_1b_se", "asahi417/voxceleb1-test-split", "speaker_id")
# cluster_embedding("xlsr_2b_se", "asahi417/voxceleb1-test-split", "speaker_id")
# cluster_embedding("meta_voice_se", "ylacombe/expresso", "speaker_id")
# cluster_embedding("pyannote_se", "ylacombe/expresso", "speaker_id")
# cluster_embedding("clap_se", "ylacombe/expresso", "speaker_id")
# cluster_embedding("clap_general_se", "ylacombe/expresso", "speaker_id")
# cluster_embedding("hubert_base_se", "ylacombe/expresso", "speaker_id")
# cluster_embedding("hubert_large_se", "ylacombe/expresso", "speaker_id")
# cluster_embedding("hubert_xl_se", "ylacombe/expresso", "speaker_id")
cluster_embedding("w2v_bert_se", "ylacombe/expresso", "speaker_id")
# cluster_embedding("wav2vec_se", "ylacombe/expresso", "speaker_id")
# cluster_embedding("xlsr_300m_se", "ylacombe/expresso", "speaker_id")
# cluster_embedding("xlsr_1b_se", "ylacombe/expresso", "speaker_id")
# cluster_embedding("xlsr_2b_se", "ylacombe/expresso", "speaker_id")
# cluster_embedding("meta_voice_se", "ylacombe/expresso", "style")
# cluster_embedding("pyannote_se", "ylacombe/expresso", "style")
# cluster_embedding("clap_se", "ylacombe/expresso", "style")
# cluster_embedding("clap_general_se", "ylacombe/expresso", "style")
# cluster_embedding("hubert_base_se", "ylacombe/expresso", "style")
# cluster_embedding("hubert_large_se", "ylacombe/expresso", "style")
# cluster_embedding("hubert_xl_se", "ylacombe/expresso", "style")
cluster_embedding("w2v_bert_se", "ylacombe/expresso", "style")
# cluster_embedding("wav2vec_se", "ylacombe/expresso", "style")
# cluster_embedding("xlsr_300m_se", "ylacombe/expresso", "style")
# cluster_embedding("xlsr_1b_se", "ylacombe/expresso", "style")
# cluster_embedding("xlsr_2b_se", "ylacombe/expresso", "style")