"""Collect per-split example counts for completed tasks into data_split_sizes.csv."""

import glob
import json
import os
from collections import defaultdict

_DATA_PATH = "data"

# Maps task name -> {split name: number of examples}.
data_split_sizes = defaultdict(dict)
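# Assumed on-disk layout (illustrative, inferred from the glob pattern and keys used below):
#   data/<task_name>/stats.<split_name>.json  -> {"examples": <int>, ...}
#   data/<task_name>/COMPLETED                -> marker file present once the task is done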
# Gather example counts from every task's per-split stats files.
for stats in glob.glob(f"{_DATA_PATH}/*/stats.*.json"):
    folder_path = os.path.dirname(stats)
    task_name = folder_path.split("/")[-1]
    # Stats files are named stats.<split_name>.json.
    split_name = os.path.basename(stats).split(".")[1]

    # Skip tasks that have not finished processing (no COMPLETED marker file).
    if not os.path.exists(f"{folder_path}/COMPLETED"):
        continue

    with open(stats, "r") as f:
        split_stats = json.load(f)
        nb_examples = split_stats["examples"]

    # Only record non-empty splits.
    if nb_examples > 0:
        data_split_sizes[task_name][split_name] = nb_examples
# The output is pipe-delimited; each task's split sizes are serialized as a JSON object.
with open("data_split_sizes.csv", "w", encoding="utf-8") as f:
    f.write("Data(sub)set|Number of examples per split\n")
    for task_name in sorted(data_split_sizes.keys()):
        split_sizes_dict = json.dumps(data_split_sizes[task_name])
        f.write(f"{task_name}|{split_sizes_dict}\n")
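# Example output row (hypothetical task name and counts):
#   my_task|{"train": 1000, "validation": 100}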