"""Convert GEM datasets from the Hugging Face Hub into reference JSON files.

For every (config, split) pair of each GEM dataset, the reference targets are
serialized to ``{dataset}_{config}_{split}.json`` and validated against the
original references published on the GEM-metrics GitHub releases page.
Requires ``jq`` and ``bash`` on the PATH for validation.
"""
import subprocess
import urllib.request  # `import urllib` alone does not expose the `request` submodule

import typer
from datasets import (
    get_dataset_config_names,
    get_dataset_split_names,
    load_dataset,
)
from huggingface_hub import list_datasets

app = typer.Typer()


def convert(dataset_id: str) -> None:
    """Export the reference columns of every evaluation split of *dataset_id*.

    For each config, every split except ``train`` and ``validation`` is
    loaded, reduced to the GEM reference columns, written to
    ``{dataset}_{config}_{split}.json``, and validated against GitHub.
    """
    dataset_name = dataset_id.split("/")[-1]
    configs = get_dataset_config_names(dataset_id)
    for config in configs:
        splits = get_dataset_split_names(dataset_id, config)
        # Only evaluation splits carry the references we need to export.
        splits = [split for split in splits if split not in ["train", "validation"]]
        for split in splits:
            columns_to_keep = ["gem_id", "gem_parent_id", "target"]
            dataset = load_dataset(dataset_id, name=config, split=split)
            # For non-train splits, it seems we use the references column as the target
            if "train" not in split:
                dataset = dataset.map(lambda x: {"target": x["references"]})
            else:
                # Wrap references in list to match GEM schema
                dataset = dataset.map(lambda x: {"target": [x["target"]]})
            # Delete unused columns
            # The test split doesn't have a parent ID
            if split == "test":
                columns_to_keep.remove("gem_parent_id")
            # The `datasets` JSON serializer is buggy - use `pandas` for now
            df = dataset.to_pandas()
            reference_name = f"{dataset_name}_{config}_{split}"
            df[columns_to_keep].to_json(f"{reference_name}.json", orient="records")
            validate(reference_name)


def validate(reference_name: str) -> None:
    """Diff a generated reference file against the original published on GitHub.

    Raises:
        ValueError: if the two JSON files differ after key-sorting with `jq`.
    """
    # Download original references from GitHub repo
    url = f"https://github.com/GEM-benchmark/GEM-metrics/releases/download/data/{reference_name}.json"
    urllib.request.urlretrieve(
        url,
        f"github_references/{reference_name}.json",
    )
    # Run diff - requires `jq`. Process substitution `<(...)` is a bash
    # feature, hence shell=True with an explicit bash executable.
    process = subprocess.run(
        f"diff <(jq --sort-keys . {reference_name}.json) <(jq --sort-keys . ./github_references/{reference_name}.json)",
        shell=True,
        stdout=subprocess.PIPE,
        executable="/bin/bash",
    )
    if process.stdout:
        raise ValueError(
            f"❌ Validation failed for {reference_name}! "
            "New and original references do not agree 😭"
        )
    else:
        typer.echo(f"✅ Validation successful for {reference_name}!")


@app.command()
def main() -> None:
    """Convert all GEM datasets (currently restricted to MLSUM) and validate them."""
    all_datasets = list_datasets()
    # Filter for GEM datasets
    gem_datasets = [dataset for dataset in all_datasets if dataset.id.startswith("GEM/")]
    # Filter for blocklist. Hub IDs are namespaced (e.g. "GEM/test"), so the
    # bare names in the blocklist must be compared against the part after
    # the "GEM/" prefix — comparing against the full ID would never match.
    blocklist = ["test", "test-transform"]
    gem_datasets = [
        dataset for dataset in gem_datasets if dataset.id.split("/")[-1] not in blocklist
    ]
    # Focus on MLSUM for now - later remove this line to evaluate all datasets
    mlsum_datasets = [dataset for dataset in gem_datasets if dataset.id.startswith("GEM/mlsum")]
    for dataset in mlsum_datasets:
        convert(dataset.id)
    typer.echo("🥳 All datasets converted!")


if __name__ == "__main__":
    app()