|
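"""TED talks parallel corpus: a `datasets` loading script with one config per
ordered language pair, based on http://phontron.com/data/ted_talks.tar.gz."""
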
import datasets


_DESCRIPTION = """\
Train, validation and test splits for TED talks as in http://phontron.com/data/ted_talks.tar.gz (detokenized).
"""

_CITATION = """\
@inproceedings{Ye2018WordEmbeddings,
    author = {Qi, Ye and Sachan, Devendra and Felix, Matthieu and Padmanabhan, Sarguna and Neubig, Graham},
    title = {When and Why Are Pre-Trained Word Embeddings Useful for Neural Machine Translation?},
    booktitle = {HLT-NAACL},
    year = {2018},
}
"""

# Path of the TED archive inside this repository. The tarball is expected to
# contain `train/`, `validation/` and `test/` directories with one file per
# language and split, e.g. `train/ted.train.en`.
_DATA_URL = "data/TED.tar"

# The 59 available languages, including regional variants such as fr-ca,
# pt-br, zh-cn and zh-tw.
_LANGUAGES = [
    "ar", "az", "be", "bg", "bn", "bs", "cs", "da", "de", "el", "en", "eo",
    "es", "et", "eu", "fa", "fi", "fr", "fr-ca", "gl", "he", "hi", "hr", "hu",
    "hy", "id", "it", "ja", "ka", "kk", "ko", "ku", "lt", "mk", "mn", "mr",
    "ms", "my", "nb", "nl", "pl", "pt", "pt-br", "ro", "ru", "sk", "sl", "sq",
    "sr", "sv", "ta", "th", "tr", "uk", "ur", "vi", "zh", "zh-cn", "zh-tw",
]
|
|
class TedTalksConfig(datasets.BuilderConfig):
    """BuilderConfig for the TED talks dataset."""

    def __init__(self, language_pair=(None, None), **kwargs):
        """Config for one `(source, target)` language pair."""
        self.language_pair = language_pair
        self.source, self.target = language_pair

        name = f"{self.source}_{self.target}"
        description = f"Parallel sentences in `{self.source}` and `{self.target}`."
        super().__init__(name=name, description=description, **kwargs)
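
# For example (hypothetical values), TedTalksConfig(language_pair=("de", "en"))
# is named "de_en", with `de` as the source language and `en` as the target.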
|
|
class TedTalks(datasets.GeneratorBasedBuilder):
    """TED talk data from http://phontron.com/data/ted_talks.tar.gz."""

    unique_pairs = [(l1, l2) for l1 in _LANGUAGES for l2 in _LANGUAGES if l1 != l2]

    BUILDER_CONFIGS = [
        TedTalksConfig(language_pair=pair, version=datasets.Version("1.0.0", ""))
        for pair in unique_pairs
    ]
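
    # With the 59 entries in _LANGUAGES this builds 59 * 58 = 3,422 ordered
    # language-pair configs (source != target).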
|
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    self.config.source: datasets.features.Value("string"),
                    self.config.target: datasets.features.Value("string"),
                }
            ),
            homepage="https://github.com/neulab/word-embeddings-for-nmt",
            citation=_CITATION,
        )
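
    # Each generated example is a plain dict keyed by the two language codes
    # of the active config, e.g. {"de": "...", "en": "..."} (hypothetical pair).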
|
    def _split_generators(self, dl_manager):
        archive = dl_manager.download(_DATA_URL)

        def _get_overlap(source_file, target_file):
            """Count aligned pairs in which both sides are non-empty."""
            # Initialize to empty lists so that a missing file yields an
            # overlap of 0 instead of an unbound-variable error.
            source_sentences, target_sentences = [], []
            for path, f in dl_manager.iter_archive(archive):
                if path == source_file:
                    source_sentences = f.read().decode("utf-8").split("\n")
                elif path == target_file:
                    target_sentences = f.read().decode("utf-8").split("\n")
            return sum(
                1
                for src, tgt in zip(source_sentences, target_sentences)
                if src != "" and tgt != ""
            )

        # The archive names its validation files `dev`.
        split2tedsplit = {"train": "train", "validation": "dev", "test": "test"}

        # NOTE: this streams the archive once per split, so building the split
        # list makes several passes over the tar file.
        overlap = {
            split: _get_overlap(
                f"{split}/ted.{split2tedsplit[split]}.{self.config.source}",
                f"{split}/ted.{split2tedsplit[split]}.{self.config.target}",
            )
            for split in ["train", "validation", "test"]
        }

        # Only emit a split if it contains at least one non-empty sentence
        # pair; many language pairs have no data for some splits.
        split_names = {
            "train": datasets.Split.TRAIN,
            "validation": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        generators = []
        for split, split_name in split_names.items():
            if overlap[split] > 0:
                generators.append(
                    datasets.SplitGenerator(
                        name=split_name,
                        gen_kwargs={
                            "source_file": f"{split}/ted.{split2tedsplit[split]}.{self.config.source}",
                            "target_file": f"{split}/ted.{split2tedsplit[split]}.{self.config.target}",
                            "files": dl_manager.iter_archive(archive),
                        },
                    )
                )

        return generators
|
    def _generate_examples(self, source_file, target_file, files):
        """Yields parallel sentence pairs as raw text."""
        source_sentences, target_sentences = None, None
        for path, f in files:
            if path == source_file:
                source_sentences = f.read().decode("utf-8").split("\n")
            elif path == target_file:
                target_sentences = f.read().decode("utf-8").split("\n")
            # Stop scanning the archive once both files have been read.
            if source_sentences is not None and target_sentences is not None:
                break

        assert source_sentences is not None, f"File not found in archive: {source_file}"
        assert target_sentences is not None, f"File not found in archive: {target_file}"
        assert len(source_sentences) == len(target_sentences), (
            f"Sizes do not match: {len(source_sentences)} vs {len(target_sentences)}."
        )

        # Drop pairs in which either side is an empty line.
        source_target_pairs = [
            (src, tgt)
            for src, tgt in zip(source_sentences, target_sentences)
            if src != "" and tgt != ""
        ]

        for idx, (src, tgt) in enumerate(source_target_pairs):
            yield idx, {self.config.source: src, self.config.target: tgt}
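

# Minimal usage sketch (assumptions: this script is saved as `ted_talks.py`
# next to `data/TED.tar`, and the chosen pair exists in the archive):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("./ted_talks.py", "de_en")
#   print(ds["train"][0])  # {"de": "...", "en": "..."}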