P3 / print_data_split_sizes.py
VictorSanh – hardcoded data_split_sizes and split_infos to avoid downloading (commit c76acf3)
import glob
import json
import os
from collections import defaultdict

_DATA_PATH = "data"

# Map: task name -> {split name -> number of examples}
data_split_sizes = defaultdict(dict)

# Each task folder under data/ contains one stats.<split>.json file per split.
for stats in glob.glob(f"{_DATA_PATH}/*/stats.*.json"):
    folder_path = os.path.dirname(stats)
    task_name = folder_path.split("/")[-1]
    split_name = os.path.basename(stats).split(".")[1]

    # Skip tasks whose processing did not finish (no COMPLETED marker file).
    if not os.path.exists(f"{folder_path}/COMPLETED"):
        continue

    with open(stats, "r") as f:
        split_stats = json.load(f)
        nb_examples = split_stats["examples"]

    if nb_examples > 0:
        data_split_sizes[task_name][split_name] = nb_examples

# Write a pipe-separated summary: one row per task, split sizes encoded as JSON.
with open("data_split_sizes.csv", "w", encoding="utf-8") as f:
    # The file is now merged into `tasks_splits_and_features.py`
    f.write("Data(sub)set|Number of examples per splits\n")
    for task_name in sorted(list(data_split_sizes.keys())):
        split_sizes_dict = json.dumps(data_split_sizes[task_name])
        f.write(f"{task_name}|{split_sizes_dict}\n")