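"""Export the LADaS dataset to Parquet shards.

Loads each split through the local loading script (src/LADaS.py), embeds any
external files into the Arrow tables, and writes the result to data/ as
Parquet files capped at roughly 500 MB per shard.
"""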
import os

from datasets import config, load_dataset
from datasets.table import embed_table_storage
from datasets.utils.py_utils import convert_file_size_to_int
from tqdm import tqdm


def build_parquet(split):
    # Load one split of LADaS through its local loading script.
    dataset = load_dataset("./src/LADaS.py", split=split, trust_remote_code=True)
    max_shard_size = "500MB"

    # Estimate the dataset size and derive how many contiguous shards are
    # needed so that each Parquet file stays under max_shard_size.
    dataset_nbytes = dataset._estimate_nbytes()
    max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
    num_shards = int(dataset_nbytes / max_shard_size) + 1
    num_shards = max(num_shards, 1)
    shards = (
        dataset.shard(num_shards=num_shards, index=i, contiguous=True)
        for i in range(num_shards)
    )
    def shards_with_embedded_external_files(shards):
        # Embed any external files (e.g. images referenced by path) directly
        # into the Arrow storage so each Parquet shard is self-contained.
        for shard in shards:
            format = shard.format
            shard = shard.with_format("arrow")
            shard = shard.map(
                embed_table_storage,
                batched=True,
                batch_size=1000,
                keep_in_memory=True,
            )
            shard = shard.with_format(**format)
            yield shard

    shards = shards_with_embedded_external_files(shards)
    os.makedirs("data", exist_ok=True)

    for index, shard in tqdm(
        enumerate(shards),
        desc="Save the dataset shards",
        total=num_shards,
    ):
        # File names follow the <split>-<index>-of-<num_shards>.parquet convention.
        shard_path = f"data/{split}-{index:05d}-of-{num_shards:05d}.parquet"
        shard.to_parquet(shard_path)


if __name__ == "__main__":
    build_parquet("train")
    build_parquet("validation")
    build_parquet("test")
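
# Optional sanity check (a sketch, not part of the original script): the
# exported shards can be read back with the generic "parquet" builder, e.g.
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "data/train-*.parquet"}, split="train")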