odinsynth_dataset / odinsynth_dataset.py

import csv
import json
from collections import defaultdict

import datasets
from tqdm import tqdm

_DESCRIPTION = """\
Supervised training data for odinsynth
"""


class OdinsynthDatasetBuilder(datasets.GeneratorBasedBuilder):
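    """Builder for the odinsynth supervised training data.

    Each example pairs a parent rule with a correct and a negative child, plus the
    specification sentences for that rule and the token interval matched in each
    sentence.
    """
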
    VERSION = datasets.Version("1.0.0")

    def _info(self):
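        """Declares the dataset schema; see the feature definitions below."""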
        features = datasets.Features(
            {
                "rule_id": datasets.Value("int32"),
                "parent": datasets.Value("string"),
                "child": datasets.Value("string"),
                "negative_child": datasets.Value("string"),
                "spec": datasets.Sequence(datasets.Value("string")),
                "matches": datasets.Sequence(datasets.Sequence(datasets.Value("int16"))),
                "step": datasets.Value("int8"),
                "length": datasets.Value("int8"),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below
            # and specify them. They'll be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            # homepage=_HOMEPAGE,
            # License for the dataset if available
            # license=_LICENSE,
            # Citation for the dataset
            # citation=_CITATION,
        )

    def _build_specs(self, path: str):
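        """Reads the spec JSONL file at ``path`` and returns a dict mapping each rule id
        to a tuple ``(spec_sentences, match_intervals)``, with one
        ``[match_start, match_end]`` pair per sentence."""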
        id_to_rule = {}
        specs = defaultdict(list)
        matches = defaultdict(list)
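        # Assumed shape of each JSONL line, based on the fields read below
        # (values are illustrative only):
        # {"id": 1, "question": "<rule>", "context": "<sentence>",
        #  "match": true, "match_start": 3, "match_end": 5}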
        with open(path) as f:
            for l in tqdm(f, desc="Pre-computing specs"):
                try:
                    instance = json.loads(l)
                    if instance['match']:
                        rule_id = int(instance['id'])
                        rule = instance['question']
                        sent = instance['context']
                        if sent not in specs[rule]:
                            specs[rule].append(sent)
                            matches[rule].append([instance['match_start'], instance['match_end']])
                        id_to_rule[rule_id] = rule
                except Exception:
                    # TODO log malformed lines instead of silently skipping them
                    pass
        return {rule_id: (specs[rule], matches[rule]) for rule_id, rule in id_to_rule.items()}

    def _split_generators(self, dl_manager):
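        """Downloads the spec JSONL plus the per-split TSV archives and wires them into
        one split generator each for train, validation, and test."""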
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
        JSON_PATH = dl_manager.download_and_extract('merged_train_split_train.jsonl.gz')
        TRAIN_ARCHIVE_PATH = dl_manager.download('train.tar.bz2')
        VAL_ARCHIVE_PATH = dl_manager.download('val.tar.bz2')
        TEST_ARCHIVE_PATH = dl_manager.download('test.tar.bz2')

        specs = self._build_specs(JSON_PATH)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "archive_iter": dl_manager.iter_archive(TRAIN_ARCHIVE_PATH),
                    "specs": specs,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "archive_iter": dl_manager.iter_archive(TEST_ARCHIVE_PATH),
                    "specs": specs,
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "archive_iter": dl_manager.iter_archive(VAL_ARCHIVE_PATH),
                    "specs": specs,
                    "split": "val",
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, archive_iter, specs, split):
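        """Walks the TSV files in the split archive and yields one example per row
        whose rule id has a pre-computed spec."""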
        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
        key = 0
        for tsv_path, file in archive_iter:
            if tsv_path.endswith(".tsv"):
                # Read the lines
                reader = csv.reader((l.decode() for l in file), delimiter='\t')
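                # Expected column order, inferred from the indexing below:
                # rule_id, parent, child, negative_child, step, length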
                for row in reader:
                    rule_id = int(row[0])
                    if rule_id in specs:
                        spec, matches = specs[rule_id]
                        assert len(spec) == len(matches), f"Rule id {rule_id} has different number of sentences and matches"
                        yield key, {
                            "rule_id": rule_id,
                            "parent": row[1],
                            "child": row[2],
                            "negative_child": row[3],
                            "spec": spec,
                            "matches": matches,
                            "step": int(row[4]),
                            "length": int(row[5]),
                        }
                        # Increase the key after yielding the instance
                        key += 1


if __name__ == "__main__":
    ds = OdinsynthDatasetBuilder()
    ds.download_and_prepare()
    print(ds.cache_dir)
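    # After preparation, a split can be materialized with the builder API, e.g.:
    # train = ds.as_dataset(split="train")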