syntactic_transformations / syntactic_transformations.py
import dataclasses
import datasets
import json

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{mueller-etal-2022-coloring,
title = "Coloring the Blank Slate: Pre-training Imparts a Hierarchical Inductive Bias to Sequence-to-sequence Models",
author = "Mueller, Aaron and
Frank, Robert and
Linzen, Tal and
Wang, Luheng and
Schuster, Sebastian",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2022",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.findings-acl.106",
doi = "10.18653/v1/2022.findings-acl.106",
pages = "1352--1368",
}
"""
_DESCRIPTION = """\
Syntactic transformation datasets (English and German passivization and
question formation, plus zero-shot English-to-German transfer) used in
"Coloring the Blank Slate: Pre-training Imparts a Hierarchical Inductive
Bias to Sequence-to-sequence Models" (Mueller et al., Findings of ACL 2022).
"""


class SyntacticTransformationsConfig(datasets.BuilderConfig):
def __init__(self, description, features, data_url, citation, url, **kwargs):
super(SyntacticTransformationsConfig, self).__init__(version=datasets.Version("1.18.3"), **kwargs)
self.description = description
self.text_features = features
self.citation = citation
self.data_url = data_url
self.url = url


class SyntacticTransformations(datasets.GeneratorBasedBuilder):
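    # Every config exposes the same three string fields: the source sentence
    # ("src"), the transformed target sentence ("tgt"), and a task prefix
    # ("prefix") indicating which transformation to perform.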
standard_features = datasets.Features(
{
"src": datasets.Value("string"),
"tgt": datasets.Value("string"),
"prefix": datasets.Value("string")
}
)
BUILDER_CONFIGS = [
SyntacticTransformationsConfig(
name="passiv-en-nps",
description="English passivization transformations.",
features=standard_features,
data_url="https://raw.githubusercontent.com/sebschu/multilingual-transformations/main/data/passiv_en_nps/",
url="https://github.com/sebschu/multilingual-transformations/",
citation=_CITATION
),
SyntacticTransformationsConfig(
name="passiv-de-nps",
description="German passivization transformations.",
features=standard_features,
data_url="https://raw.githubusercontent.com/sebschu/multilingual-transformations/main/data/passiv_de_nps/",
url="https://github.com/sebschu/multilingual-transformations/",
citation=_CITATION
),
SyntacticTransformationsConfig(
name="question-en",
description="English question formation transformations.",
features=standard_features,
data_url="https://raw.githubusercontent.com/sebschu/multilingual-transformations/main/data/question_have-havent_en/",
url="https://github.com/sebschu/multilingual-transformations/",
citation=_CITATION
),
SyntacticTransformationsConfig(
name="question-de",
description="German question formation transformations.",
features=standard_features,
data_url="https://raw.githubusercontent.com/sebschu/multilingual-transformations/main/data/question_have-can_withquest_de/",
url="https://github.com/sebschu/multilingual-transformations/",
citation=_CITATION
),
SyntacticTransformationsConfig(
name="passiv-en_de-nps",
description="Zero-shot English-to-German passivization transformations.",
features=standard_features,
data_url="https://raw.githubusercontent.com/sebschu/multilingual-transformations/main/data/passiv_en-de_nps/",
url="https://github.com/sebschu/multilingual-transformations/",
citation=_CITATION
),
SyntacticTransformationsConfig(
name="question-en_de",
description="Zero-shot English-to-German question formation transformations.",
features=standard_features,
data_url="https://raw.githubusercontent.com/sebschu/multilingual-transformations/main/data/question_have-can_de/",
url="https://github.com/sebschu/multilingual-transformations/",
citation=_CITATION
)
]

    def _split_generators(self, dl_manager):
if self.config.name == "passiv-en-nps":
template = "passiv_en_nps.{}.json"
_URLS = {
"train": self.config.data_url + template.format("train"),
"dev": self.config.data_url + template.format("dev"),
"test": self.config.data_url + template.format("test"),
"gen": self.config.data_url + template.format("gen"),
}
elif self.config.name == "passiv-de-nps":
template = "passiv_de_nps.{}.json"
_URLS = {
"train": self.config.data_url + template.format("train"),
"dev": self.config.data_url + template.format("dev"),
"test": self.config.data_url + template.format("test"),
"gen": self.config.data_url + template.format("gen"),
}
elif self.config.name == "question-en":
template = "question_have.{}.json"
_URLS = {
"train": self.config.data_url + template.format("train"),
"dev": self.config.data_url + template.format("dev"),
"test": self.config.data_url + template.format("test"),
"gen": self.config.data_url + template.format("gen"),
}
elif self.config.name == "question-de":
template = "question_have_can.de.{}.json"
_URLS = {
"train": self.config.data_url + template.format("train"),
"dev": self.config.data_url + template.format("dev"),
"test": self.config.data_url + template.format("test"),
"gen": self.config.data_url + template.format("gen"),
}
elif self.config.name == "question-en_de":
template = "question_have_can.de.{}.json"
_URLS = {
"train": self.config.data_url + "question_have_can.en-de.train.json",
"dev": self.config.data_url + template.format("dev"),
"test": self.config.data_url + template.format("test"),
"gen_rc_s": self.config.data_url + template.format("gen_rc_s"),
"gen_rc_o": self.config.data_url + template.format("gen_rc_o"),
}
elif self.config.name == "passiv-en_de-nps":
template = "passiv_de_nps.{}.json"
_URLS = {
"train": self.config.data_url + "passiv_en-de_nps.train.json",
"dev": self.config.data_url + template.format("dev"),
"test": self.config.data_url + template.format("test"),
"gen_pp_s": self.config.data_url + template.format("gen_pp_s"),
"gen_pp_o": self.config.data_url + template.format("gen_pp_o"),
}
data_files = dl_manager.download(_URLS)
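        # The monolingual configs expose a single out-of-distribution
        # "generalization" split, while the zero-shot en_de configs expose two
        # ("generalization_s" and "generalization_o"), built from the
        # gen_*_s / gen_*_o files downloaded above.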
if "en_de" not in self.config.name:
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
datasets.SplitGenerator(name=datasets.NamedSplit('generalization'), gen_kwargs={"filepath": data_files["gen"]}),
]
else:
gen_s = "gen_pp_s" if "passiv" in self.config.name else "gen_rc_s"
gen_o = "gen_pp_o" if "passiv" in self.config.name else "gen_rc_o"
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
datasets.SplitGenerator(name=datasets.NamedSplit('generalization_s'), gen_kwargs={"filepath": data_files[gen_s]}),
datasets.SplitGenerator(name=datasets.NamedSplit('generalization_o'), gen_kwargs={"filepath": data_files[gen_o]}),
]

    def _info(self):
features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(features),
homepage=self.config.url,
citation=_CITATION,
)

    def _generate_examples(self, filepath):
        """Yields examples in their raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
id_ = 0
for line in f:
example = json.loads(line)
src = example["translation"]["src"]
tgt = example["translation"]["tgt"]
prefix = example["translation"]["prefix"]
                # The only features exposed by this dataset are "src", "tgt",
                # and "prefix"; they are yielded as plain strings.
yield id_, {
"src": src,
"tgt": tgt,
"prefix": prefix,
}
id_ += 1
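

if __name__ == "__main__":
    # Minimal usage sketch, not part of the loader itself. It assumes a
    # `datasets` version that still supports loading from a local script path
    # (script-based loading was removed in datasets 3.0) and that the
    # raw.githubusercontent.com URLs above are reachable.
    from datasets import load_dataset

    dataset = load_dataset(__file__, "question-en")
    # Expected splits: train, validation, test, generalization.
    print(dataset)
    # Each example is a dict with "src", "tgt", and "prefix" string fields.
    print(dataset["generalization"][0])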