Multilinguality:
translation
Size Categories:
1K<n<10K
Language Creators:
found
Annotations Creators:
found
ArXiv: 1902.01382
License:
system (HF staff) committed on
Commit b68957a
0 Parent(s):

Update files from the datasets library (from 1.0.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
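
These patterns route matching files through Git LFS, which is why the dummy `*.zip` archives added below appear in the repo as small pointer files rather than raw bytes. As a rough, illustrative sketch (not part of the commit), the basename rules behave like shell globs; Git's real matcher is more elaborate, and the one path-based rule, `saved_model/**/*`, is deliberately left out of this simplified check:

```python
# Illustrative sketch only: approximate the .gitattributes LFS rules above
# with fnmatch globs (Git's own matcher has more rules, e.g. ** path patterns).
from fnmatch import fnmatch

LFS_PATTERNS = [
    "*.7z", "*.arrow", "*.bin", "*.bin.*", "*.bz2", "*.ftz", "*.gz", "*.h5",
    "*.joblib", "*.lfs.*", "*.model", "*.msgpack", "*.onnx", "*.ot",
    "*.parquet", "*.pb", "*.pt", "*.pth", "*.rar", "*.tar.*", "*.tflite",
    "*.tgz", "*.xz", "*.zip", "*.zstandard", "*tfevents*",
]

def is_lfs_tracked(path: str) -> bool:
    """True if the file's basename matches any of the glob patterns."""
    basename = path.rsplit("/", 1)[-1]
    return any(fnmatch(basename, pattern) for pattern in LFS_PATTERNS)

print(is_lfs_tracked("dummy/neen/1.1.0/dummy_data.zip"))  # True
print(is_lfs_tracked("flores.py"))                        # False
```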
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"neen": {"description": "Evaluation datasets for low-resource machine translation: Nepali-English and Sinhala-English.\n", "citation": "@misc{guzmn2019new,\n title={Two New Evaluation Datasets for Low-Resource Machine Translation: Nepali-English and Sinhala-English},\n author={Francisco Guzman and Peng-Jen Chen and Myle Ott and Juan Pino and Guillaume Lample and Philipp Koehn and Vishrav Chaudhary and Marc'Aurelio Ranzato},\n year={2019},\n eprint={1902.01382},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://github.com/facebookresearch/flores/", "license": "", "features": {"translation": {"languages": ["ne", "en"], "id": null, "_type": "Translation"}}, "supervised_keys": {"input": "ne", "output": "en"}, "builder_name": "flores", "config_name": "neen", "version": {"version_str": "1.1.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1000483, "num_examples": 2836, "dataset_name": "flores"}, "validation": {"name": "validation", "num_bytes": 850660, "num_examples": 2560, "dataset_name": "flores"}}, "download_checksums": {"https://github.com/facebookresearch/flores/raw/master/data/wikipedia_en_ne_si_test_sets.tgz": {"num_bytes": 1542781, "checksum": "7a0245bb29fd03b46a1129831c183dfba0efc8452a9739d962759f25141aa648"}}, "download_size": 1542781, "dataset_size": 1851143, "size_in_bytes": 3393924}, "sien": {"description": "Evaluation datasets for low-resource machine translation: Nepali-English and Sinhala-English.\n", "citation": "@misc{guzmn2019new,\n title={Two New Evaluation Datasets for Low-Resource Machine Translation: Nepali-English and Sinhala-English},\n author={Francisco Guzman and Peng-Jen Chen and Myle Ott and Juan Pino and Guillaume Lample and Philipp Koehn and Vishrav Chaudhary and Marc'Aurelio Ranzato},\n year={2019},\n eprint={1902.01382},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://github.com/facebookresearch/flores/", "license": "", "features": {"translation": {"languages": ["si", "en"], "id": null, "_type": "Translation"}}, "supervised_keys": {"input": "si", "output": "en"}, "builder_name": "flores", "config_name": "sien", "version": {"version_str": "1.1.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 984947, "num_examples": 2767, "dataset_name": "flores"}, "validation": {"name": "validation", "num_bytes": 1032610, "num_examples": 2899, "dataset_name": "flores"}}, "download_checksums": {"https://github.com/facebookresearch/flores/raw/master/data/wikipedia_en_ne_si_test_sets.tgz": {"num_bytes": 1542781, "checksum": "7a0245bb29fd03b46a1129831c183dfba0efc8452a9739d962759f25141aa648"}}, "download_size": 1542781, "dataset_size": 2017557, "size_in_bytes": 3560338}}
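
The two configs declared in this JSON, "neen" and "sien", are the names consumers pass to the datasets library; the split sizes and checksums above are what the library verifies at download time. A minimal loading sketch (assuming a datasets release of the 1.0.0 era, per the release notes above):

```python
# Minimal sketch: load the "neen" config and inspect the splits declared in
# dataset_infos.json (validation: 2560 examples, test: 2836 examples).
from datasets import load_dataset

flores_neen = load_dataset("flores", "neen")
print(flores_neen)                   # DatasetDict with "validation" and "test"
print(flores_neen["validation"][0])  # {"translation": {"ne": "...", "en": "..."}}
```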
dummy/neen/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b082518357e95e2b1cea911b02a8760e99f9018367c49ec24095077b29344a9e
+ size 1718
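
The three `+` lines above are not the zip archive itself but a Git LFS pointer: the actual bytes live in LFS storage, addressed by the sha256 oid. A small illustrative parser (`parse_lfs_pointer` is a hypothetical helper, not part of the commit):

```python
# Illustrative sketch: split a Git LFS pointer file into key/value fields.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:b082518357e95e2b1cea911b02a8760e99f9018367c49ec24095077b29344a9e\n"
    "size 1718\n"
)
info = parse_lfs_pointer(pointer)
print(info["oid"], int(info["size"]))  # sha256:b08251... 1718
```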
dummy/sien/1.1.0/dummy_data-zip-extracted/dummy_data/wikipedia_en_ne_si_test_sets/wikipedia.dev.si-en.en ADDED
@@ -0,0 +1 @@
+ This is the wrong translation!
dummy/sien/1.1.0/dummy_data-zip-extracted/dummy_data/wikipedia_en_ne_si_test_sets/wikipedia.dev.si-en.si ADDED
@@ -0,0 +1 @@
+ එවැනි ආවරණයක් ලබාදීමට රක්ෂණ සපයන්නෙකු කැමති වුවත් ඒ සාමාන්‍යයෙන් බොහෝ රටවල පොදු ප්‍රතිපත්තියට විරුද්ධය.
dummy/sien/1.1.0/dummy_data-zip-extracted/dummy_data/wikipedia_en_ne_si_test_sets/wikipedia.devtest.si-en.en ADDED
@@ -0,0 +1 @@
+ This is the wrong translation!
dummy/sien/1.1.0/dummy_data-zip-extracted/dummy_data/wikipedia_en_ne_si_test_sets/wikipedia.devtest.si-en.si ADDED
@@ -0,0 +1 @@
+ එවැනි ආවරණයක් ලබාදීමට රක්ෂණ සපයන්නෙකු කැමති වුවත් ඒ සාමාන්‍යයෙන් බොහෝ රටවල පොදු ප්‍රතිපත්තියට විරුද්ධය.
dummy/sien/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d022fff4af79488c93f480c11a4dc1744fb1450bd29884cf32d9e18aa164e7a8
+ size 1772
flores.py ADDED
@@ -0,0 +1,143 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """Facebook Low Resource (FLoRes) machine translation benchmark dataset."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import collections
+
+ import datasets
+
+
+ _DESCRIPTION = """\
+ Evaluation datasets for low-resource machine translation: Nepali-English and Sinhala-English.
+ """
+
+ _CITATION = """\
+ @misc{guzmn2019new,
+     title={Two New Evaluation Datasets for Low-Resource Machine Translation: Nepali-English and Sinhala-English},
+     author={Francisco Guzman and Peng-Jen Chen and Myle Ott and Juan Pino and Guillaume Lample and Philipp Koehn and Vishrav Chaudhary and Marc'Aurelio Ranzato},
+     year={2019},
+     eprint={1902.01382},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ """
+
+ _DATA_URL = "https://github.com/facebookresearch/flores/raw/master/data/wikipedia_en_ne_si_test_sets.tgz"
+
+ # Tuple that describes a single pair of files with matching translations.
+ # language_to_file is the map from language (2 letter string: example 'en')
+ # to the file path in the extracted directory.
+ TranslateData = collections.namedtuple("TranslateData", ["url", "language_to_file"])
+
+
+ class FloresConfig(datasets.BuilderConfig):
+     """BuilderConfig for FLoRes."""
+
+     def __init__(self, language_pair=(None, None), **kwargs):
+         """BuilderConfig for FLoRes.
+
+         Args:
+             language_pair: pair of languages that will be used for translation. Should
+                 contain 2-letter coded strings. First will be used as source and second
+                 as target in supervised mode. For example: ("si", "en").
+             **kwargs: keyword arguments forwarded to super.
+         """
+         name = "%s%s" % (language_pair[0], language_pair[1])
+
+         description = "Translation dataset from %s to %s" % (language_pair[0], language_pair[1])
+         super(FloresConfig, self).__init__(
+             name=name,
+             description=description,
+             version=datasets.Version("1.1.0", ""),
+             **kwargs,
+         )
+
+         # Validate language pair.
+         assert "en" in language_pair, "Config language pair must contain `en`, got: %s" % (language_pair,)
+         source, target = language_pair
+         non_en = source if target == "en" else target
+         assert non_en in ["ne", "si"], "Invalid non-en language in pair: %s" % non_en
+
+         self.language_pair = language_pair
+
+
+ class Flores(datasets.GeneratorBasedBuilder):
+     """FLoRes machine translation dataset."""
+
+     BUILDER_CONFIGS = [
+         FloresConfig(
+             language_pair=("ne", "en"),
+         ),
+         FloresConfig(
+             language_pair=("si", "en"),
+         ),
+     ]
+
+     def _info(self):
+         source, target = self.config.language_pair
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {"translation": datasets.features.Translation(languages=self.config.language_pair)}
+             ),
+             supervised_keys=(source, target),
+             homepage="https://github.com/facebookresearch/flores/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         dl_dir = dl_manager.download_and_extract(_DATA_URL)
+
+         source, target = self.config.language_pair
+         non_en = source if target == "en" else target
+         path_tmpl = "{dl_dir}/wikipedia_en_ne_si_test_sets/wikipedia.{split}.{non_en}-en.{lang}"
+
+         files = {}
+         for split in ("dev", "devtest"):
+             files[split] = {
+                 "source_file": path_tmpl.format(dl_dir=dl_dir, split=split, non_en=non_en, lang=source),
+                 "target_file": path_tmpl.format(dl_dir=dl_dir, split=split, non_en=non_en, lang=target),
+             }
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs=files["dev"]),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=files["devtest"]),
+         ]
+
+     def _generate_examples(self, source_file, target_file):
+         """This function returns the examples in the raw (text) form."""
+         with open(source_file, encoding="utf-8") as f:
+             source_sentences = f.read().split("\n")
+         with open(target_file, encoding="utf-8") as f:
+             target_sentences = f.read().split("\n")
+
+         assert len(target_sentences) == len(source_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
+             len(source_sentences),
+             len(target_sentences),
+             source_file,
+             target_file,
+         )
+
+         source, target = self.config.language_pair
+         for idx, (l1, l2) in enumerate(zip(source_sentences, target_sentences)):
+             result = {"translation": {source: l1, target: l2}}
+             # Make sure that both translations are non-empty.
+             if all(result["translation"].values()):
+                 yield idx, result
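
For completeness, a usage sketch of the builder above: load_dataset also accepts a local script path, so the file can be exercised without publishing it to the Hub (example counts per dataset_infos.json):

```python
# Minimal sketch: run flores.py as a local dataset script. Assumes the file
# sits in the current working directory and a datasets 1.x-era release.
from datasets import load_dataset

sien_test = load_dataset("./flores.py", "sien", split="test")
print(len(sien_test))                      # 2767 examples, per dataset_infos.json
print(sien_test[0]["translation"].keys())  # dict_keys(['si', 'en'])
```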