File size: 811 Bytes
a1d99b9 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 |
from datasets import load_dataset, DatasetDict

# Settings for loading the raw policies dataset via the local loading script.
dataset_config = {
    "LOADING_SCRIPT_FILES": "policies.py",
    "CONFIG_NAME": "plain_text",
    "DATA_DIR": "data",
    "CACHE_DIR": "cache_policies",
}

# Load the dataset described by the local script, caching under cache_policies.
ds = load_dataset(
    path=dataset_config["LOADING_SCRIPT_FILES"],
    name=dataset_config["CONFIG_NAME"],
    data_dir=dataset_config["DATA_DIR"],
    cache_dir=dataset_config["CACHE_DIR"],
)

# Carve off 10% of the shuffled data as a holdout, then split that holdout
# in half, yielding 90% train / 5% test / 5% validation overall.
# NOTE(review): shuffle=True without a seed makes the split non-reproducible
# across runs — confirm whether that is intentional.
primary_split = ds["train"].train_test_split(shuffle=True, test_size=0.1)
holdout_split = primary_split["test"].train_test_split(test_size=0.5)

# Reassemble the three pieces into a single DatasetDict under the final keys.
ds = DatasetDict(
    {
        "train": primary_split["train"],
        "test": holdout_split["test"],
        "val": holdout_split["train"],
    }
)
print(ds)
|