odinsynth_dataset/odinsynth_dataset.py
import csv
import json
from collections import defaultdict
import datasets
from tqdm import tqdm
_DESCRIPTION = """\
Supervised training data for odinsynth
"""
class OdinsynthDatasetBuilder(datasets.GeneratorBasedBuilder):
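    """Builder for the Odinsynth supervised training data.

    The spec of each rule (a collection of supporting sentences) is pre-computed
    from a gzipped JSONL file, and the per-split examples are read from TSV files
    packed inside tar.bz2 archives.
    """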
VERSION = datasets.Version("1.0.0")
# This is an example of a dataset with multiple configurations.
# If you don't want/need to define several sub-sets in your dataset,
# just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
# If you need to make complex sub-parts in the datasets with configurable options
# You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
# BUILDER_CONFIG_CLASS = MyBuilderConfig
# You will be able to load one or the other configurations in the following list with
# data = datasets.load_dataset('my_dataset', 'first_domain')
# data = datasets.load_dataset('my_dataset', 'second_domain')
# BUILDER_CONFIGS = [
# datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
# datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
# ]
#
    # DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
def _info(self):
        # TODO: This method specifies the datasets.DatasetInfo object, which contains the information and typings for the dataset
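        # Each example represents one rule-synthesis step: a parent rule and the
        # child it is rewritten into (formerly named "source" and "destination"),
        # a negative_child alternative, and the spec sentences for the rule.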
features = datasets.Features(
{
"rule_id": datasets.Value("int32"),
"parent": datasets.Value("string"),
"child": datasets.Value("string"),
"negative_child": datasets.Value("string"),
"spec": datasets.Sequence(datasets.Value("string")),
"step": datasets.Value("int8"),
"length": datasets.Value("int8")
}
)
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
            features=features,  # defined above in this method
# If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
# specify them. They'll be used if as_supervised=True in builder.as_dataset.
# supervised_keys=("sentence", "label"),
# Homepage of the dataset for documentation
# homepage=_HOMEPAGE,
# # License for the dataset if available
# license=_LICENSE,
# # Citation for the dataset
# citation=_CITATION,
)
    def _build_specs(self, path: str):
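        """Pre-compute the spec of every rule from a JSONL file.

        Each line is expected to hold an ``id`` (rule id), a ``question`` (the rule)
        and a ``context`` (a supporting sentence). Returns a dict mapping each rule
        id to the sentences collected for that rule.
        """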
id_to_rule = {}
specs = defaultdict(set)
with open(path) as f:
for l in tqdm(f, desc="Pre-computing specs"):
try:
instance = json.loads(l)
rule_id = int(instance['id'])
rule = instance['question']
sent = instance['context']
specs[rule].add(sent)
id_to_rule[rule_id] = rule
                except (json.JSONDecodeError, KeyError, ValueError):
                    # Skip malformed or incomplete lines. TODO: log them instead of silently dropping
                    pass
        # Materialize each spec as a sorted list so it has a deterministic order
        # and serializes cleanly as a Sequence feature
        return {rule_id: sorted(specs[rule]) for rule_id, rule in id_to_rule.items()}
def _split_generators(self, dl_manager):
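        """Download the spec JSONL and the per-split tar.bz2 archives, then build
        one SplitGenerator per split. The same spec mapping is shared by all splits.
        """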
# If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
JSON_PATH = dl_manager.download_and_extract('merged_train_split_train.jsonl.gz')
TRAIN_ARCHIVE_PATH = dl_manager.download('train.tar.bz2')
VAL_ARCHIVE_PATH = dl_manager.download('val.tar.bz2')
TEST_ARCHIVE_PATH = dl_manager.download('test.tar.bz2')
specs = self._build_specs(JSON_PATH)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"archive_iter": dl_manager.iter_archive(TRAIN_ARCHIVE_PATH),
"specs": specs,
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"archive_iter": dl_manager.iter_archive(TEST_ARCHIVE_PATH),
"specs": specs,
"split": "test",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"archive_iter": dl_manager.iter_archive(VAL_ARCHIVE_PATH),
"specs": specs,
"split": "val",
},
),
]
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
def _generate_examples(self, archive_iter, specs, split):
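        """Yield (key, example) pairs from the TSV files found in the archive.

        Each TSV row is expected to contain, in order: rule_id, parent, child,
        negative_child, step, length. Rows whose rule id has no pre-computed
        spec are skipped.
        """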
# The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
key = 0
for tsv_path, file in archive_iter:
if tsv_path.endswith(".tsv"):
# Read the lines
reader = csv.reader((l.decode() for l in file), delimiter='\t')
for row in reader:
rule_id = int(row[0])
if rule_id in specs:
yield key, {
"rule_id": rule_id,
"parent": row[1],
"child": row[2],
"negative_child": row[3],
"spec": specs[rule_id],
"step": int(row[4]),
"length": int(row[5]),
}
                        # Increase the key after yielding the instance
key += 1
if __name__ == "__main__":
ds = OdinsynthDatasetBuilder()
ds.download_and_prepare()
print(ds.cache_dir)
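
# A minimal usage sketch (an assumption, not something this script ships): if the
# file is saved locally as ./odinsynth_dataset.py next to the data files that
# _split_generators downloads, the dataset can be loaded through the standard
# `datasets` API, e.g.:
#
#     from datasets import load_dataset
#     ds = load_dataset("./odinsynth_dataset.py", split="train")
#     print(ds[0]["parent"], "->", ds[0]["child"])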