# coding=utf-8
"""Multi-domain German-English parallel dataset for Domain Adapted Machine Translation."""
import datasets
_CITATION = """\
@inproceedings{koehn-knowles-2017-six,
title = "Six Challenges for Neural Machine Translation",
author = "Koehn, Philipp and
Knowles, Rebecca",
booktitle = "Proceedings of the First Workshop on Neural Machine Translation",
month = aug,
year = "2017",
address = "Vancouver",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-3204",
doi = "10.18653/v1/W17-3204",
pages = "28--39",
}
@inproceedings{aharoni2020unsupervised,
title={Unsupervised domain clusters in pretrained language models},
author={Aharoni, Roee and Goldberg, Yoav},
booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
year={2020},
url={https://arxiv.org/abs/2004.02105},
publisher = "Association for Computational Linguistics"
}
"""
_URL = "https://drive.google.com/file/d/1yvB-pvlojtT2UpOX1JvwtD6rw9joQ49A/view"
_HOMEPAGE = "https://github.com/roeeaharoni/unsupervised-domain-clusters"
_DOMAIN = ["it", "koran", "law", "medical", "subtitles"]
class DAMTConfig(datasets.BuilderConfig):
    """BuilderConfig for the DAMT dataset."""

    def __init__(self, domain=None, **kwargs):
        """
        Args:
            domain: domain name; must be one of the entries in `_DOMAIN`.
            **kwargs: keyword arguments forwarded to super.
        """
        # Validate the domain name before building the config.
        if domain not in _DOMAIN:
            raise ValueError(f"domain must be one of {_DOMAIN}, got {domain!r}")
        super().__init__(
            name=domain,
            description="Multi-domain German-English parallel dataset for Domain Adapted Machine Translation.",
            version=datasets.Version("1.0.0", ""),
            **kwargs,
        )
        self.domain = domain


class DAMT(datasets.GeneratorBasedBuilder):
    """Multi-domain German-English parallel dataset for Domain Adapted Machine Translation."""

    # One configuration per domain.
    BUILDER_CONFIGS = [DAMTConfig(domain=d) for d in _DOMAIN]

    def _info(self):
        return datasets.DatasetInfo(
            # Description shown on the dataset page.
            description="Multi-domain German-English parallel dataset for Domain Adapted Machine Translation.",
            # Each example holds a single English-German translation pair.
            features=datasets.Features(
                {"translation": datasets.features.Translation(languages=("en", "de"))}
            ),
            # There is no canonical (input, target) tuple, so nothing is used
            # when as_supervised=True is passed to builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train, validation, and test splits."""
        domain = self.config.domain

        def _get_drive_url(url):
            # Turn a Google Drive share link (.../file/d/<id>/view) into a
            # direct-download URL (.../uc?id=<id>).
            return f"https://drive.google.com/uc?id={url.split('/')[5]}"

        dl_dir = dl_manager.download_and_extract(_get_drive_url(_URL))
files = {
"train": {
"en_file": f"{dl_dir}/{domain}/train.en",
"de_file": f"{dl_dir}/{domain}/train.de",
},
"validation": {
"en_file": f"{dl_dir}/{domain}/dev.en",
"de_file": f"{dl_dir}/{domain}/dev.de",
},
"test": {
"en_file": f"{dl_dir}/{domain}/test.en",
"de_file": f"{dl_dir}/{domain}/test.de",
},
}
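
        # Expected layout of the extracted archive (an assumption inferred
        # from the paths above): one sub-directory per domain, each holding
        # line-aligned train/dev/test files:
        #
        #     <dl_dir>/<domain>/{train,dev,test}.{en,de}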
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=files["train"]),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs=files["validation"]),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=files["test"]),
]
    def _generate_examples(self, en_file, de_file):
        """Yields (key, example) tuples from a pair of line-aligned files."""
        with open(en_file, "r", encoding="utf-8") as en_f, open(
            de_file, "r", encoding="utf-8"
        ) as de_f:
            # Line i of the .en file is the translation of line i of the
            # .de file, so zipping the two handles the alignment.
            for id_, (en, de) in enumerate(zip(en_f, de_f)):
                yield id_, {"translation": {"en": en.strip(), "de": de.strip()}}
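

# Example usage (a minimal sketch; assumes the `datasets` library is installed
# and that this script is loaded from a local path -- the config name selects
# one of the domains listed in `_DOMAIN`):
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("./damt.py", "medical")  # hypothetical local path
    print(ds["train"][0])  # {"translation": {"en": "...", "de": "..."}}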