import os.path
from glob import glob

from datasets import load_dataset

num_shards = 10

paths = glob("data/*/")
for path in paths:
    # Load every parquet file in this subdirectory as a single "train" split.
    dataset = load_dataset(
        "parquet",
        data_files={"train": glob(os.path.join(path, "*.parquet"))},
        streaming=False,
        split="train",
    )
    print(dataset)
    # Sort rows by their numeric id via a temporary integer column, then drop it.
    dataset = dataset.map(lambda x: {"n": int(x["id"])}).sort("n")
    dataset = dataset.remove_columns(["n"])
    # Rewrite the split as equally sized parquet shards; contiguous=True gives
    # shard i the i-th consecutive block of rows, preserving the sorted order.
    for i in range(num_shards):
        dataset.shard(num_shards=num_shards, index=i, contiguous=True).to_parquet(
            os.path.join(path, f"train-{i:05d}-of-{num_shards:05d}.parquet")
        )
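
# Optional sanity check: a sketch, not part of the original script. It reloads the
# shards written for the last processed `path` in shard order and checks that the
# ids come back in ascending order (this relies on contiguous=True above).
check_files = [
    os.path.join(path, f"train-{i:05d}-of-{num_shards:05d}.parquet")
    for i in range(num_shards)
]
check = load_dataset("parquet", data_files={"train": check_files}, split="train")
ids = [int(x) for x in check["id"]]
assert ids == sorted(ids), "re-sharded split is not globally sorted by id"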