Datasets:
tau
/

Modalities:
Text
Libraries:
Datasets
File size: 854 Bytes
523bcc3
 
 
 
ae7a3e8
523bcc3
 
ae7a3e8
523bcc3
 
802326d
523bcc3
ae7a3e8
e7bb7b0
802326d
523bcc3
 
 
802326d
 
e7bb7b0
802326d
523bcc3
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
import os
import shutil
from fire import Fire
from datasets import load_dataset
from icecream import ic

def normalize_example(example):
    """Map a SCROLLS record's ``input``/``output`` fields to ``source``/``target``."""
    source, target = example["input"], example["output"]
    return {"source": source, "target": target}


def main(dataset_name, num_proc=5, data_dir="../data/"):
    """Download one SCROLLS sub-dataset, normalize it, and pack it into a zip.

    Each split is written as ``<split>.jsonl`` with ``source``/``target``
    fields, all split files are zipped into ``<data_dir>/<dataset_name>.zip``,
    and the intermediate directory is removed afterwards.

    Args:
        dataset_name: Name of the SCROLLS subset to fetch (e.g. ``"qasper"``).
        num_proc: Number of worker processes for the ``map`` step.
        data_dir: Directory where the final zip archive is placed.
    """
    dataset = load_dataset("tau/scrolls", dataset_name)
    # Rename input/output -> source/target and drop the original columns.
    dataset = dataset.map(normalize_example, num_proc=num_proc, remove_columns=["input", "output"])
    dir_name = os.path.join(data_dir, dataset_name)
    os.makedirs(dir_name, exist_ok=True)
    # One JSON-lines file per split (train/validation/test as provided).
    for split in dataset:
        dataset[split].to_json(os.path.join(dir_name, f"{split}.jsonl"))
    # Zip the split files, then remove the now-redundant working directory.
    shutil.make_archive(base_name=dir_name,
                        format='zip',
                        root_dir=dir_name)
    shutil.rmtree(dir_name)


if __name__ == '__main__':
    # Expose `main` as a CLI via python-fire, e.g.:
    #   python this_script.py qasper --num_proc=8 --data_dir=../data/
    Fire(main)