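"""Hugging Face `datasets` loading script for the TED talks parallel corpus."""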
import datasets
_DESCRIPTION = """\
Train, validation, and test splits for TED talks as in http://phontron.com/data/ted_talks.tar.gz (detokenized)
"""
_CITATION = """\
@inproceedings{Ye2018WordEmbeddings,
author = {Qi, Ye and Sachan, Devendra and Felix, Matthieu and Padmanabhan, Sarguna and Neubig, Graham},
title = {When and Why Are Pre-trained Word Embeddings Useful for Neural Machine Translation?},
booktitle = {HLT-NAACL},
year = {2018},
}
"""
_DATA_URL = "data/TED.tar"
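# Language codes available in the TED archive; regional variants such as
# "fr-ca", "pt-br", "zh-cn", and "zh-tw" are treated as distinct languages.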
_LANGUAGES = ["ar", "az", "be", "bg", "bn", "bs", "cs", "da", "de", "el", "en", "eo", "es", "et", "eu", "fa", "fi", "fr", "fr-ca", "gl", "he", "hi", "hr", "hu", "hy", "id", "it", "ja", "ka", "kk", "ko", "ku", "lt", "mk", "mn", "mr", "ms", "my", "nb", "nl", "pl", "pt", "pt-br", "ro", "ru", "sk", "sl", "sq", "sr", "sv", "ta", "th", "tr", "uk", "ur", "vi", "zh", "zh-cn", "zh-tw"]


class TedTalksConfig(datasets.BuilderConfig):
    """BuilderConfig for the TED talks dataset."""

    def __init__(self, language_pair=(None, None), **kwargs):
        self.language_pair = language_pair
        self.source, self.target = self.language_pair
        name = f"{self.source}_{self.target}"
        description = f"Parallel sentences in `{self.source}` and `{self.target}`."
        super().__init__(name=name, description=description, **kwargs)


class TedTalks(datasets.GeneratorBasedBuilder):
    """TED talk data from http://phontron.com/data/ted_talks.tar.gz."""

    unique_pairs = [
        "_".join([l1, l2])
        for l1 in _LANGUAGES
        for l2 in _LANGUAGES
        if l1 != l2
    ]

    BUILDER_CONFIGS = [
        TedTalksConfig(
            language_pair=tuple(pair.split("_")),
            version=datasets.Version("1.0.0", ""),
        )
        for pair in unique_pairs
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    self.config.source: datasets.features.Value("string"),
                    self.config.target: datasets.features.Value("string"),
                }
            ),
            homepage="https://github.com/neulab/word-embeddings-for-nmt",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        archive = dl_manager.download(_DATA_URL)

        def _get_overlap(source_file, target_file):
            """Count aligned pairs where both sides are non-empty; 0 if a file is missing."""
            source_sentences, target_sentences = None, None
            for path, f in dl_manager.iter_archive(archive):
                if path == source_file:
                    source_sentences = f.read().decode("utf-8").split("\n")
                elif path == target_file:
                    target_sentences = f.read().decode("utf-8").split("\n")
                if source_sentences is not None and target_sentences is not None:
                    break
            if source_sentences is None or target_sentences is None:
                return 0
            return len([
                (src, tgt)
                for src, tgt in zip(source_sentences, target_sentences)
                if src != "" and tgt != ""
            ])

        # The archive names the validation split "dev".
        split2tedsplit = {"train": "train", "validation": "dev", "test": "test"}
        overlap = {
            split: _get_overlap(
                f"{split}/ted.{split2tedsplit[split]}.{self.config.source}",
                f"{split}/ted.{split2tedsplit[split]}.{self.config.target}",
            )
            for split in ["train", "validation", "test"]
        }
        # Not every language pair has data in every split; only emit splits
        # that contain at least one aligned sentence pair.
        split2name = {
            "train": datasets.Split.TRAIN,
            "validation": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        return [
            datasets.SplitGenerator(
                name=split2name[split],
                gen_kwargs={
                    "source_file": f"{split}/ted.{split2tedsplit[split]}.{self.config.source}",
                    "target_file": f"{split}/ted.{split2tedsplit[split]}.{self.config.target}",
                    "files": dl_manager.iter_archive(archive),
                },
            )
            for split in ["train", "validation", "test"]
            if overlap[split] > 0
        ]

    def _generate_examples(self, source_file, target_file, files):
        """Yields parallel sentence pairs as raw text."""
        source_sentences, target_sentences = None, None
        for path, f in files:
            if path == source_file:
                source_sentences = f.read().decode("utf-8").split("\n")
            elif path == target_file:
                target_sentences = f.read().decode("utf-8").split("\n")
            if source_sentences is not None and target_sentences is not None:
                break
        assert len(source_sentences) == len(target_sentences), (
            f"Sizes do not match: {len(source_sentences)} vs {len(target_sentences)}."
        )
        # Skip pairs where either side is empty.
        source_target_pairs = [
            (src, tgt)
            for src, tgt in zip(source_sentences, target_sentences)
            if src != "" and tgt != ""
        ]
        for idx, (src, tgt) in enumerate(source_target_pairs):
            yield idx, {self.config.source: src, self.config.target: tgt}
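

if __name__ == "__main__":
    # Minimal usage sketch, assuming a local copy of this script; the config
    # name is "<source>_<target>". Recent versions of `datasets` may also
    # require trust_remote_code=True to run script-based datasets.
    ds = datasets.load_dataset(__file__, "de_en")
    print(ds["train"][0])  # e.g. {"de": "...", "en": "..."}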