"""
Voices were cloned using metadata-balanced.csv, but not all voices were cloned successfully.
Here, we create a JSON file mapping each available audio file to the TTS models that
generated it.

The JSON file is currently used in the clone-guesser gradio app, so we keep it for now.

We then use the mapping to keep only the audio files that are available for all models.
The filtered metadata is then saved to a CSV file, "metadata-valid.csv".
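
The output CSV keeps the original columns ("path", "age", "gender", "accents", "sentence")
and adds "source", "cloned_or_human", and "filename"; "path" is rewritten to
"<source>/<filename>" so it points at each source's copy of the clip.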
"""

from collections import defaultdict
import json
import os

import pandas as pd


def create_json(df, models, output_json):
    """Create a dictionary mapping each available audio file to the TTS models that
    generated it, and save it to output_json as JSON.

    The output JSON has the following structure:
    {
        "path1.mp3": [
            "model1",
            "model2",
            ...
        ],
        "path2.mp3": [
            "model1",
            ...
        ],
        ...
    }
    """
    data = defaultdict(list)

    for path in df.path:
        for model in models:
            if os.path.exists(os.path.join(model, path)):
                data[path].append(model)

    with open(output_json, "w") as json_file:
        json.dump(data, json_file, indent=4)
    return data


if __name__ == "__main__":
    output_json = "files.json"
    output_csv = "metadata-valid.csv"
    metadata_csv = "metadata-balanced.csv"
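
    # Each source is assumed to be a directory, relative to the working directory,
    # holding that source's audio clips named exactly as in the metadata "path" column;
    # "commonvoice" holds the original human recordings, the rest hold cloned voices.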
    sources = ["commonvoice", "metavoice", "playht", "stylettsv2", "xttsv2"]
    columns_to_keep = ["path", "age", "gender", "accents", "sentence"]

    df = pd.read_csv(metadata_csv)
    df = df[columns_to_keep]

    data = create_json(df, sources, output_json)
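
    # Keep only the clips for which every source (the human commonvoice recordings and
    # all of the TTS models) has an audio file.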
    valid_paths = [path for path in data if len(data[path]) == len(sources)]
    valid_df = df[df.path.isin(valid_paths)]
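
    # Build one copy of the filtered metadata per source, labelled human or cloned,
    # with "path" pointing into that source's directory.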
    all_dfs = []
    for source in sources:
        valid_df_source = valid_df.copy()
        valid_df_source["source"] = source

        is_cloned_voice = source != "commonvoice"
        valid_df_source["cloned_or_human"] = "cloned" if is_cloned_voice else "human"

        # Keep the bare filename and prefix "path" with the source directory.
        valid_df_source["filename"] = valid_df_source["path"]
        valid_df_source["path"] = valid_df_source["path"].apply(
            lambda path: os.path.join(source, path)
        )

        all_dfs.append(valid_df_source)
    all_df = pd.concat(all_dfs, ignore_index=True)
    all_df.to_csv(output_csv, index=False)
    print(f"Saved to {output_csv}.")
    print()

    print("Statistics:")
    print("Number of human voices: ", len(all_df[all_df.cloned_or_human == "human"]))
    print("Number of cloned voices: ", len(all_df[all_df.cloned_or_human == "cloned"]))
    print("Number of TOTAL voices: ", len(all_df))
    print()

    print("Breakdown by source:")
    print(all_df.source.value_counts())
    print()

    print("Gender distribution (total):")
    print(all_df.gender.value_counts())
    print()

    print("Gender distribution (human):")
    print(all_df[all_df.cloned_or_human == "human"].gender.value_counts())
    print()

    print("Gender distribution (cloned):")
    print(all_df[all_df.cloned_or_human == "cloned"].gender.value_counts())
    print()