"""IWSLT 2017 dataset."""

import os

import datasets
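
# Illustrative usage (an assumption, not part of this script): if this loading
# script is exposed on the Hugging Face Hub under the name "iwslt2017", a language
# pair can be loaded via the config named "iwslt2017-<source>-<target>", e.g.:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("iwslt2017", "iwslt2017-de-en")
#     print(ds["train"][0]["translation"])  # {"de": "...", "en": "..."}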

_CITATION = """\
@inproceedings{cettoloEtAl:EAMT2012,
  Address = {Trento, Italy},
  Author = {Mauro Cettolo and Christian Girardi and Marcello Federico},
  Booktitle = {Proceedings of the 16$^{th}$ Conference of the European Association for Machine Translation (EAMT)},
  Date = {28-30},
  Month = {May},
  Pages = {261--268},
  Title = {WIT$^3$: Web Inventory of Transcribed and Translated Talks},
  Year = {2012}}
"""

_DESCRIPTION = """\
The IWSLT 2017 Evaluation Campaign includes a multilingual TED Talks MT task. The languages involved are five:
German, English, Italian, Dutch, Romanian.

For each language pair, training and development sets are available; each downloaded archive contains the
parallel data and a README file.
"""

MULTI_URL = "https://huggingface.co/datasets/iwslt2017/resolve/ebd7c60d9800c2a1be010a227e5f0a2363730f7a/data/2017-01-trnmted/texts/DeEnItNlRo/DeEnItNlRo/DeEnItNlRo-DeEnItNlRo.tgz"


class IWSLT2017Config(datasets.BuilderConfig):
    """BuilderConfig for IWSLT2017."""

    def __init__(self, pair, is_multilingual, **kwargs):
        """
        Args:
            pair: the language pair to consider.
            is_multilingual: whether the pair is part of the multilingual dataset (its download source differs).
            **kwargs: keyword arguments forwarded to super.
        """
        self.pair = pair
        self.is_multilingual = is_multilingual
        super().__init__(**kwargs)


MULTI_LANGUAGES = ["de", "en", "it", "nl", "ro"]
BI_LANGUAGES = ["ar", "de", "en", "fr", "ja", "ko", "zh"]
MULTI_PAIRS = [f"{source}-{target}" for source in MULTI_LANGUAGES for target in MULTI_LANGUAGES if source != target]
BI_PAIRS = [
    f"{source}-{target}"
    for source in BI_LANGUAGES
    for target in BI_LANGUAGES
    if source != target and (source == "en" or target == "en")
]

PAIRS = MULTI_PAIRS + BI_PAIRS
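
# For illustration: MULTI_PAIRS holds every ordered pair among the five multilingual
# languages (e.g. "de-en", "it-nl", "ro-de"), while BI_PAIRS keeps only the
# English-centric pairs (e.g. "ar-en", "en-zh"). Each pair "xx-yy" becomes a builder
# config named "iwslt2017-xx-yy" below.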


class IWSLT2017(datasets.GeneratorBasedBuilder):
    """The IWSLT 2017 Evaluation Campaign includes a multilingual TED Talks MT task."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = IWSLT2017Config
    BUILDER_CONFIGS = [
        IWSLT2017Config(
            name="iwslt2017-" + pair,
            description=f"IWSLT 2017 {pair} translation task",
            version=datasets.Version("1.0.0"),
            pair=pair,
            is_multilingual=pair in MULTI_PAIRS,
        )
        for pair in PAIRS
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"translation": datasets.features.Translation(languages=self.config.pair.split("-"))}
            ),
            supervised_keys=None,
            homepage="https://sites.google.com/site/iwsltevaluation2017/TED-tasks",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        source, target = self.config.pair.split("-")
        if self.config.is_multilingual:
            dl_dir = dl_manager.download_and_extract(MULTI_URL)
            data_dir = os.path.join(dl_dir, "DeEnItNlRo-DeEnItNlRo")
            years = [2010]
        else:
            bi_url = f"https://huggingface.co/datasets/iwslt2017/resolve/ebd7c60d9800c2a1be010a227e5f0a2363730f7a/data/2017-01-trnted/texts/{source}/{target}/{source}-{target}.tgz"
            dl_dir = dl_manager.download_and_extract(bi_url)
            data_dir = os.path.join(dl_dir, f"{source}-{target}")
            years = [2010, 2011, 2012, 2013, 2014, 2015]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "source_files": [os.path.join(data_dir, f"train.tags.{self.config.pair}.{source}")],
                    "target_files": [os.path.join(data_dir, f"train.tags.{self.config.pair}.{target}")],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "source_files": [
                        os.path.join(data_dir, f"IWSLT17.TED.tst{year}.{self.config.pair}.{source}.xml")
                        for year in years
                    ],
                    "target_files": [
                        os.path.join(data_dir, f"IWSLT17.TED.tst{year}.{self.config.pair}.{target}.xml")
                        for year in years
                    ],
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "source_files": [
                        os.path.join(data_dir, f"IWSLT17.TED.dev2010.{self.config.pair}.{source}.xml")
                    ],
                    "target_files": [
                        os.path.join(data_dir, f"IWSLT17.TED.dev2010.{self.config.pair}.{target}.xml")
                    ],
                    "split": "dev",
                },
            ),
        ]
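
    # Note on the raw files (illustrative): the training data are tagged text files
    # such as "train.tags.de-en.de", in which metadata lines start with "<" and all
    # other lines are sentences; the dev/test data are XML files in which each
    # sentence is wrapped in a <seg id="..."> ... </seg> element. _generate_examples
    # handles both layouts.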

    def _generate_examples(self, source_files, target_files, split):
        """Yields examples."""
        id_ = 0
        source, target = self.config.pair.split("-")
        for source_file, target_file in zip(source_files, target_files):
            with open(source_file, "r", encoding="utf-8") as sf:
                with open(target_file, "r", encoding="utf-8") as tf:
                    for source_row, target_row in zip(sf, tf):
                        source_row = source_row.strip()
                        target_row = target_row.strip()

                        if source_row.startswith("<"):
                            if source_row.startswith("<seg"):
                                # XML segment line: keep only the text between the
                                # opening <seg ...> tag and the closing </seg> tag.
                                part1 = source_row.split(">")[1]
                                source_row = part1.split("<")[0]
                                part1 = target_row.split(">")[1]
                                target_row = part1.split("<")[0]

                                source_row = source_row.strip()
                                target_row = target_row.strip()
                            else:
                                # Other tag lines (e.g. <url>, <talkid>) carry no
                                # sentence text, so skip them.
                                continue
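
                        # Illustrative shape of one yielded example for the "iwslt2017-de-en"
                        # config (values are placeholders, not actual dataset content):
                        #     {"translation": {"de": "<German sentence>", "en": "<English sentence>"}}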

                        yield id_, {"translation": {source: source_row, target: target_row}}
                        id_ += 1