import os.path
from glob import glob
from multiprocessing import cpu_count

from datasets import load_dataset
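# Each subdirectory of data/ holds one dataset's parquet files: sort each by
# numeric id and rewrite it as a fixed number of shards.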
paths = glob("data/*/")

for path in paths:
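    # Load every parquet file in this directory as a single "train" split.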
    dataset = load_dataset(
        "parquet",
        data_files={"train": glob(os.path.join(path, "*.parquet"))},
        streaming=False,
        split="train",
    )
    print(dataset)
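
    # Sort rows by numeric id: add a temporary integer column, sort on it,
    # then drop it again.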
    dataset = dataset.map(lambda x: {"n": int(x["id"])}, num_proc=cpu_count()).sort("n")
    dataset = dataset.remove_columns(["n"])
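
    # Rewrite the directory as equally sized, sorted parquet shards.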
    num_shards = 10
    for i in range(num_shards):
        shard = dataset.shard(num_shards=num_shards, index=i)
        shard.to_parquet(os.path.join(path, f"train-{i:05d}-of-{num_shards:05d}.parquet"))