"""Aggregate per-split example counts for every completed task under
`data/` and write them to a pipe-separated `data_split_sizes.csv` file.

Layout implied by the glob pattern below: one folder per task, each
containing a `COMPLETED` marker file and one `stats.<split>.json` file
per split with an "examples" count.
"""

import glob
import json
import os

from collections import defaultdict

_DATA_PATH = "data"

data_split_sizes = defaultdict(dict)

for stats in glob.glob(f"{_DATA_PATH}/*/stats.*.json"):
    folder_path = os.path.dirname(stats)
    task_name = os.path.basename(folder_path)
    # "stats.<split>.json" -> "<split>"
    split_name = os.path.basename(stats).split(".")[1]

    # Skip tasks whose processing has not finished.
    if not os.path.exists(f"{folder_path}/COMPLETED"):
        continue

    with open(stats, "r") as f:
        split_stats = json.load(f)
        nb_examples = split_stats["examples"]

    # Only record non-empty splits.
    if nb_examples > 0:
        data_split_sizes[task_name][split_name] = nb_examples

# One row per task; split sizes are serialized as a JSON object.
with open("data_split_sizes.csv", "w", encoding="utf-8") as f:
    f.write("Data(sub)set|Number of examples per splits\n")
    for task_name in sorted(data_split_sizes):
        split_sizes_dict = json.dumps(data_split_sizes[task_name])
        f.write(f"{task_name}|{split_sizes_dict}\n")
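
# For reference, a minimal sketch of how the resulting data_split_sizes.csv
# could be read back into a {task: {split: size}} mapping. The pipe-separated
# layout and JSON-encoded sizes mirror the writer above; the function name
# `load_split_sizes` is an illustrative choice, not part of the original script.
def load_split_sizes(path="data_split_sizes.csv"):
    sizes = {}
    with open(path, "r", encoding="utf-8") as f:
        next(f)  # skip the header row
        for line in f:
            task_name, split_sizes_json = line.rstrip("\n").split("|", 1)
            sizes[task_name] = json.loads(split_sizes_json)
    return sizes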