#!/usr/bin/env python
"""
This script clones the `datasets` repository into your current directory
and parses all currently available metadata, from the `README.md` YAML
headers and the automatically generated JSON files. It dumps the results
in a `metadata_{current-commit-of-datasets}.json` file.
"""
import json
from pathlib import Path
from subprocess import check_call, check_output
from typing import Dict, Optional

import yaml


def metadata_from_readme(f: Path) -> Optional[Dict]:
    """Extract the YAML front matter (between the leading `---` markers) of a README."""
    with f.open() as fi:
        content = [line.strip() for line in fi]

    if content and content[0] == "---" and "---" in content[1:]:
        yamlblock = "\n".join(content[1 : content[1:].index("---") + 1])
        return yaml.safe_load(yamlblock) or dict()
    return None


def load_ds_datas():
    drepo = Path("datasets")

    # Update an existing clone, otherwise clone the repository from scratch.
    if drepo.exists() and drepo.is_dir():
        check_call(["git", "pull"], cwd=str(drepo.absolute()))
    else:
        check_call(["git", "clone", "https://github.com/huggingface/datasets.git"])

    # Record the commit being parsed; run `rev-parse` inside the clone so the
    # SHA refers to the `datasets` repository, not to the current directory.
    head_sha = check_output(["git", "rev-parse", "HEAD"], cwd=str(drepo.absolute()))

    datasets_md = dict()
    for ddir in sorted((drepo / "datasets").iterdir(), key=lambda d: d.name):
        # YAML header of the dataset card, if present and well-formed.
        try:
            metadata = metadata_from_readme(ddir / "README.md")
        except Exception:
            metadata = None

        # Automatically generated dataset infos, if present and valid JSON.
        try:
            with (ddir / "dataset_infos.json").open() as fi:
                infos = json.load(fi)
        except Exception:
            infos = None

        # Only keep datasets that carry at least some README metadata.
        if metadata is not None and len(metadata) > 0:
            datasets_md[ddir.name] = dict(metadata=metadata, infos=infos)

    return head_sha.decode().strip(), datasets_md


if __name__ == "__main__":
    head_sha, datas = load_ds_datas()
    with open(f"metadata_{head_sha}.json", "w") as fi:
        fi.write(json.dumps(datas))
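
# Usage sketch for consuming the dump (illustrative only: the `<head-sha>`
# placeholder, the "glue" dataset name, and the "languages" metadata key are
# assumptions and may not be present in every dump):
#
#     import json
#     with open("metadata_<head-sha>.json") as f:
#         datas = json.load(f)
#     # Each entry maps a dataset directory name to its parsed README
#     # metadata and its (possibly None) dataset_infos.json content.
#     print(datas["glue"]["metadata"].get("languages"))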