# vox-cloned-data/create_valid_csv.py
"""
Voices were cloned using metadata-balanced.csv, but not all voices were cloned successfully.
Here, we create a JSON file mapping of available audio files to the TTS models that generated them.
The JSON file is currently used in the clone-guesser gradio app, so we keep it for now.
We then use the mapping to filter out for audio files that are available for all models.
The filtered metadata is then saved to a CSV file, "metadata-valid.csv".
"""
from collections import defaultdict
import json
import os

import pandas as pd


def create_json(df, models, output_json):
"""Create a dictionary file mapping of available audio files to the TTS models that generated them.
The output JSON has the following structure:
{
"path1.mp3": [
"model1",
"model2",
...
],
"path2.mp3": [
"model1",
...
],
...
}
"""
data = defaultdict(list)
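    # defaultdict(list) lets us append without initializing keys; a path that
    # exists under none of the model directories never appears in the mapping.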
# For each path, add the model if the file exists
for path in df.path:
for model in models:
if os.path.exists(os.path.join(model, path)):
data[path].append(model)
# Save to JSON file (Currently used in gradio app, keep it for now)
with open(output_json, "w") as json_file:
json.dump(data, json_file, indent=4)
return data
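

# Illustrative example (hypothetical filename): if "clip_123.mp3" exists under
# commonvoice/ and xttsv2/ but nowhere else, create_json maps it to
# ["commonvoice", "xttsv2"]; such incomplete clips are filtered out below.
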
if __name__ == "__main__":
output_json = "files.json"
output_csv = "metadata-valid.csv"
metadata_csv = "metadata-balanced.csv"
sources = ["commonvoice", "metavoice", "playht", "stylettsv2", "xttsv2"]
    columns_to_keep = ["path", "age", "gender", "accents", "sentence"]  # columns kept from the original Common Voice metadata
# Load the metadata
df = pd.read_csv(metadata_csv)
df = df[columns_to_keep]
# Create the JSON file
data = create_json(df, sources, output_json)
    # Keep only the paths that are available for all models
valid_paths = [path for path in data if len(data[path]) == len(sources)]
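    # Each model is checked at most once per path, so data[path] contains no
    # duplicates and the length check means "present under every source".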
# Filter dataframe to only include valid paths
valid_df = df[df.path.isin(valid_paths)]
    # Create one entry per (clip, source) pair in the CSV
    all_dfs = []
    for source in sources:
        valid_df_source = valid_df.copy()
        valid_df_source["source"] = source

        # Add cloned_or_human column: everything except commonvoice is a clone
        is_cloned_voice = source != "commonvoice"
        valid_df_source["cloned_or_human"] = "cloned" if is_cloned_voice else "human"

        # Add filename column: keep the bare filename before prefixing the path
        valid_df_source["filename"] = valid_df_source["path"]

        # Prefix path with the source directory, e.g. "xttsv2/<clip>.mp3"
        valid_df_source["path"] = valid_df_source["path"].apply(
            lambda path: os.path.join(source, path)
        )

        all_dfs.append(valid_df_source)
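
    # Illustrative resulting row (hypothetical values) for source="xttsv2":
    #   path="xttsv2/clip_123.mp3", filename="clip_123.mp3",
    #   source="xttsv2", cloned_or_human="cloned"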
all_df = pd.concat(all_dfs, ignore_index=True)
all_df.to_csv(output_csv, index=False)
print(f"Saved to {output_csv}.")
print()
print("Statistics:")
print("Number of human voices: ", len(all_df[all_df.cloned_or_human == "human"]))
print("Number of cloned voices: ", len(all_df[all_df.cloned_or_human == "cloned"]))
print("Number of TOTAL voices: ", len(all_df))
print()
print("Breakdown by source:")
print(all_df.source.value_counts())
print()
print("Gender distribution (total):")
print(all_df.gender.value_counts())
print()
print("Gender distribution (human):")
print(all_df[all_df.cloned_or_human == "human"].gender.value_counts())
print()
print("Gender distribution (cloned):")
print(all_df[all_df.cloned_or_human == "cloned"].gender.value_counts())
print()