references / convert_datasets_to_json.py
lewtun's picture
lewtun HF staff
Refactor
8af4987
raw
history blame
No virus
5 kB
import os
import subprocess
import urllib.request

import typer
from datasets import (DatasetDict, get_dataset_config_names,
get_dataset_split_names, load_dataset)
from huggingface_hub import list_datasets
# Typer application object; `main` below is registered as its single command.
app = typer.Typer()
# A handful of reference files disagree with the GitHub sources only through
# small inconsistencies in `gem_id` conventions; their content is otherwise
# identical, so any reference whose name starts with one of these prefixes is
# skipped during the validation step.
validation_blocklist = [
    "dart_test",
    "schema_guided_dialog_test",
    "web_nlg",  # Prefix covering several datasets
    "wiki_auto",  # Prefix covering several datasets
]
def convert(dataset_id: str):
    """Convert the evaluation splits of one GEM dataset to JSON reference files.

    For every config of ``dataset_id`` this loads the dataset, keeps the splits
    that are not plain ``train``/``validation`` (i.e. test and challenge
    splits), normalises the ``target`` column so it always holds a list of
    references, and writes one ``{reference_name}.json`` file per split. Each
    written file is then checked against the official GitHub release via
    :func:`validate`, unless its name matches ``validation_blocklist``.

    Args:
        dataset_id: Hub dataset identifier, e.g. ``"GEM/xsum"``.
    """
    dataset_name = dataset_id.split("/")[-1]
    configs = get_dataset_config_names(dataset_id)
    skipped_validation = []
    for config in configs:
        typer.echo(dataset_id)
        raw_datasets = load_dataset(dataset_id, name=config)
        # Keep only the evaluation splits (test + challenge sets).
        datasets_to_convert = DatasetDict()
        for split, dataset in raw_datasets.items():
            if split not in ["train", "validation"]:
                datasets_to_convert[split] = dataset
        for split, dataset in datasets_to_convert.items():
            columns_to_keep = ["gem_id", "gem_parent_id", "target"]
            # For non-train splits we use the `references` column as the target.
            # NOTE: challenge splits may contain "train" in their name (e.g.
            # "challenge_train_sample"), so the else branch is reachable here.
            if "train" not in split:
                dataset = dataset.map(lambda x: {"target": x["references"]})
            else:
                # Wrap references in list to match GEM schema
                dataset = dataset.map(lambda x: {"target": [x["target"]]})
            # The test split doesn't have a parent ID
            if split == "test":
                columns_to_keep.remove("gem_parent_id")
            # The `datasets` JSON serializer is buggy - use `pandas` for now
            df = dataset.to_pandas()
            # Exclude dummy config names for comparison with GitHub source dataset
            if config in ["default", "xsum", "totto"]:
                reference_name = f"{dataset_name}_{split}"
            else:
                reference_name = f"{dataset_name}_{config}_{split}"
            df[columns_to_keep].to_json(f"{reference_name}.json", orient="records")
            # Exclude known datasets from validation
            do_validation = not any(
                reference_name.startswith(prefix) for prefix in validation_blocklist
            )
            if do_validation and not validate(reference_name):
                skipped_validation.append(reference_name)
    typer.echo(f"Skipped validation for {skipped_validation}")
def validate(reference_name: str) -> bool:
    """Compare a converted JSON file against the official GitHub release.

    Downloads ``{reference_name}.json`` from the GEM-metrics GitHub release
    into ``github_references/`` and diffs it (key-order insensitive, via
    ``jq``) against the locally written ``{reference_name}.json``.

    Args:
        reference_name: Base name of the JSON reference file, without suffix.

    Returns:
        ``False`` when the GitHub file cannot be downloaded (validation is
        skipped), ``True`` when the two files agree.

    Raises:
        ValueError: If the downloaded and locally generated references differ.

    Requires ``jq`` and ``/bin/bash`` to be available on the machine.
    """
    # Download original references from GitHub repo
    url = f"https://github.com/GEM-benchmark/GEM-metrics/releases/download/data/{reference_name}.json"
    # Ensure the target directory exists, otherwise urlretrieve always fails
    # and validation is silently skipped.
    os.makedirs("github_references", exist_ok=True)
    try:
        urllib.request.urlretrieve(
            url,
            f"github_references/{reference_name}.json",
        )
    except Exception:
        typer.echo(f"β›” Could not download {reference_name} dataset from GitHub. Skipping validation ...")
        return False
    # Run diff - requires `jq`. Process substitution `<(...)` needs bash,
    # hence shell=True with an explicit executable.
    process = subprocess.run(
        f"diff <(jq --sort-keys . {reference_name}.json) <(jq --sort-keys . ./github_references/{reference_name}.json)",
        shell=True,
        stdout=subprocess.PIPE,
        executable="/bin/bash",
    )
    # Any diff output means the files disagree.
    if process.stdout:
        raise ValueError(f"❌ Validation failed for {reference_name}! New and original references do not agree 😭")
    typer.echo(f"βœ… Validation successful for {reference_name}!")
    return True
@app.command()
def main():
    """Convert all GEM datasets on the Hub to JSON reference files.

    Lists every dataset on the Hugging Face Hub, keeps those under the
    ``GEM/`` namespace, drops the ones with no matching GitHub release, and
    converts each remaining dataset via :func:`convert`.
    """
    all_datasets = list_datasets()
    # Filter for GEM datasets
    gem_datasets = [dataset for dataset in all_datasets if dataset.id.startswith("GEM/")]
    # Filter for blocklist - currently exclude all datasets not found on GitHub release
    blocklist = [
        "ART",
        "mlb_data_to_text",
        "OrangeSum",
        "split_and_rephrase",
        "wiki_cat_sum",
        "viggo",
        "CrossWOZ",
        "RiSAWOZ",
        "indonlg",
        "squad_v2",
        "BiSECT",
        "surface_realisation_st_2020",
        "SciDuet",
        "cochrane-simplification",
        "turku_paraphrase_corpus",
        "turku_hockey_data2text",
        "sportsett_basketball",
        "Taskmaster",
        "wiki_lingua",
        "SIMPITIKI",
        "conversational_weather",
        "RotoWire_English-German",
        "dstc10_track2_task2",
        "opusparcus",
        "xlsum",
        "references",
    ]
    # Blocklist entries are bare names; prepend the Hub namespace for matching.
    blocklist = ["GEM/" + dataset for dataset in blocklist]
    gem_datasets = [dataset for dataset in gem_datasets if dataset.id not in blocklist]
    for dataset in gem_datasets:
        typer.echo(f"Converting {dataset.id} ...")
        convert(dataset.id)
    # No placeholders here, so a plain string is sufficient (was an f-string).
    typer.echo("πŸ₯³ All datasets converted!")
# Run the Typer CLI when this file is executed as a script.
if __name__ == "__main__":
    app()