# normalize_raw_data/normalize_scrolls.py
import os
import shutil

from fire import Fire
from datasets import load_dataset
from icecream import ic  # used only by the commented-out debug line below


def normalize_example(example):
    # Rename SCROLLS' "input"/"output" fields to the generic "source"/"target".
    return {"source": example["input"], "target": example["output"]}


def main(dataset_name, num_proc=5, data_dir="../data/"):
    dataset = load_dataset("tau/scrolls", dataset_name)
    dataset = dataset.map(normalize_example, num_proc=num_proc, remove_columns=["input", "output"])
    # ic(dataset_name, dataset["train"][0])
    dir_name = os.path.join(data_dir, dataset_name)
    os.makedirs(dir_name, exist_ok=True)
    # Write each split as JSON Lines, then ship the whole directory as one zip
    # and remove the unzipped copy.
    for split in dataset:
        dataset[split].to_json(os.path.join(dir_name, f"{split}.jsonl"))
    shutil.make_archive(base_name=dir_name, format="zip", root_dir=dir_name)
    shutil.rmtree(dir_name)


if __name__ == '__main__':
    Fire(main)
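
# Usage sketch (an assumption, not from the script itself): Fire exposes
# main's parameters as CLI arguments, so a run might look like the line
# below; config names such as "gov_report" are defined by the tau/scrolls
# dataset, not by this script.
#
#   python normalize_scrolls.py gov_report --num_proc=8 --data_dir=../data/
#
# The resulting <dataset_name>.zip holds one JSON Lines file per split;
# after unzipping, a split can be read back with the datasets JSON loader:
#
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files="../data/gov_report/train.jsonl")
#   print(ds["train"][0]["source"])  # normalized field names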