Commit 29fac8c · system (HF staff) committed · 0 parent(s)

Update files from the datasets library (from 1.0.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
dataset_infos.json ADDED
The diff for this file is too large to render. See raw diff
 
dummy/mlqa-translate-train.ar/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7efeef9a69b6deec2005a46a08c227c773c31075dd870555bef9dc5fe447e0d8
+ size 2376
dummy/mlqa-translate-train.de/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:96f3e66815a06bfbbacebcfbffa2c77e0d4eafd2438b7cfb57f958c9762e7a88
+ size 2226
dummy/mlqa-translate-train.es/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:308704cd187f455120e11b8e616adcfcbc9624ab67c0e3b407763c5bb18e458e
+ size 2242
dummy/mlqa-translate-train.hi/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc432efcfdbd6d3c544459a608a2ddeada0ec04f16ef86002818287321ba6d1b
+ size 2670
dummy/mlqa-translate-train.vi/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c62335f6d88d7b976a429cf5d41e90c27bc7a0c50c788be23f06a8f6046b5d52
+ size 2430
dummy/mlqa-translate-train.zh/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:36e6b479c4f8d36ab950e812ae41d8455beeda4c3ff60b0ffe255163c865622f
+ size 2222
dummy/mlqa.en.ar/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:990692ce4bfc8d381f355deb5ea48b5fb86e0e9b2d9309d1f425444ee1e20429
+ size 2736
dummy/mlqa.en.de/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79351f97b800279f218f9a27b7134a47f4db7a42509ddb4eb3c7f05a0c8841a0
+ size 2806
dummy/mlqa.en.en/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b364bf29b16f4f3888b432c0bba0d33f21eaa804cea41be5d2721f42c6c04c7
+ size 2650
dummy/mlqa.en.es/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:176d98d301de90a03da7b5dec2c5524ab1ba11e6fdce38ee1402f27702b68577
+ size 2678
dummy/mlqa.en.hi/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b60ccd3cc2c1d93631fafc3fed06442ecf5ca91f57a3b0eef8322d9bfa9c0ac4
+ size 3138
dummy/mlqa.en.vi/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e0e98d15adbeff967f8471bfead318dfdf4b6bf45a44a2785c2cfda63c1f2d1
+ size 2158
dummy/mlqa.en.zh/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b72beb6d43b8e2de8e960fb3b872aee5345d5cf9cfbf69eaa9d8a80ba02069be
+ size 2558
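
Note on the entries above: the .gitattributes rules added in this commit route these archives through Git LFS, so the repository itself stores only pointer files (a spec version line, a sha256 oid, and a byte size) rather than the zip contents. A minimal sketch of reading such a pointer from a checkout where LFS has not yet replaced it with the real file; the helper name and the example path are illustrative only, not part of the commit:

# Illustrative only: parse a Git LFS pointer file (version / oid / size lines),
# such as the dummy_data.zip pointers added in this commit.
def parse_lfs_pointer(path):
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    oid = fields.get("oid", "")
    if oid.startswith("sha256:"):
        oid = oid[len("sha256:"):]
    return {"version": fields.get("version"), "oid": oid, "size": int(fields.get("size", 0))}

# Hypothetical usage (path assumed, values taken from the pointer above):
# parse_lfs_pointer("dummy/mlqa-translate-train.ar/1.0.0/dummy_data.zip")
# -> {"version": "https://git-lfs.github.com/spec/v1", "oid": "7efeef9a...", "size": 2376}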
mlqa.py ADDED
@@ -0,0 +1,208 @@
+ """TODO(mlqa): Add a description here."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import json
+ import os
+
+ import datasets
+
+
+ # TODO(mlqa): BibTeX citation
+ _CITATION = """\
+ @article{lewis2019mlqa,
+   title={MLQA: Evaluating Cross-lingual Extractive Question Answering},
+   author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},
+   journal={arXiv preprint arXiv:1910.07475},
+   year={2019}
+ }
+ """
+
+ # TODO(mlqa):
+ _DESCRIPTION = """\
+ MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.
+ MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,
+ German, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between
+ 4 different languages on average.
+ """
+ _URL = "https://dl.fbaipublicfiles.com/MLQA/"
+ _DEV_TEST_URL = "MLQA_V1.zip"
+ _TRANSLATE_TEST_URL = "mlqa-translate-test.tar.gz"
+ _TRANSLATE_TRAIN_URL = "mlqa-translate-train.tar.gz"
+ _LANG = ["ar", "de", "vi", "zh", "en", "es", "hi"]
+ _TRANSLATE_LANG = ["ar", "de", "vi", "zh", "es", "hi"]
+
+
+ class MlqaConfig(datasets.BuilderConfig):
+     def __init__(self, data_url, **kwargs):
+         """BuilderConfig for MLQA
+
+         Args:
+             data_url: `string`, url to the dataset
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(MlqaConfig, self).__init__(
+             version=datasets.Version(
+                 "1.0.0",
+             ),
+             **kwargs,
+         )
+         self.data_url = data_url
+
+
+ class Mlqa(datasets.GeneratorBasedBuilder):
+     """TODO(mlqa): Short description of my dataset."""
+
+     # TODO(mlqa): Set up version.
+     VERSION = datasets.Version("1.0.0")
+     BUILDER_CONFIGS = (
+         [
+             MlqaConfig(
+                 name="mlqa-translate-train." + lang,
+                 data_url=_URL + _TRANSLATE_TRAIN_URL,
+                 description="Machine-translated data for Translate-train (SQuAD Train and Dev sets machine-translated into "
+                 "Arabic, German, Hindi, Vietnamese, Simplified Chinese and Spanish)",
+             )
+             for lang in _LANG
+             if lang != "en"
+         ]
+         + [
+             MlqaConfig(
+                 name="mlqa-translate-test." + lang,
+                 data_url=_URL + _TRANSLATE_TEST_URL,
+                 description="Machine-translated data for Translate-Test (MLQA-test set machine-translated into English) ",
+             )
+             for lang in _LANG
+             if lang != "en"
+         ]
+         + [
+             MlqaConfig(
+                 name="mlqa." + lang1 + "." + lang2,
+                 data_url=_URL + _DEV_TEST_URL,
+                 description="development and test splits",
+             )
+             for lang1 in _LANG
+             for lang2 in _LANG
+         ]
+     )
+
+     def _info(self):
+         # TODO(mlqa): Specifies the datasets.DatasetInfo object
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # datasets.features.FeatureConnectors
+             features=datasets.Features(
+                 {
+                     "context": datasets.Value("string"),
+                     "questions": datasets.Value("string"),
+                     "answers": datasets.features.Sequence(
+                         {"start": datasets.Value("int32"), "text": datasets.Value("string")}
+                     ),
+                     "ids": datasets.Value("string"),
+                     # These are the features of your dataset like images, labels ...
+                 }
+             ),
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="https://github.com/facebookresearch/MLQA",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # TODO(mlqa): Downloads the data and defines the splits
+         # dl_manager is a datasets.download.DownloadManager that can be used to
+         # download and extract URLs
+         if self.config.name.startswith("mlqa-translate-train"):
+             dl_file = dl_manager.download_and_extract(self.config.data_url)
+             lang = self.config.name.split(".")[-1]
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={
+                         "filepath": os.path.join(
+                             os.path.join(dl_file, "mlqa-translate-train"),
+                             "{}_squad-translate-train-train-v1.1.json".format(lang),
+                         )
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={
+                         "filepath": os.path.join(
+                             os.path.join(dl_file, "mlqa-translate-train"),
+                             "{}_squad-translate-train-dev-v1.1.json".format(lang),
+                         )
+                     },
+                 ),
+             ]
+
+         else:
+             if self.config.name.startswith("mlqa."):
+                 dl_file = dl_manager.download_and_extract(self.config.data_url)
+                 name = self.config.name.split(".")
+                 l1, l2 = name[1:]
+                 return [
+                     datasets.SplitGenerator(
+                         name=datasets.Split.TEST,
+                         # These kwargs will be passed to _generate_examples
+                         gen_kwargs={
+                             "filepath": os.path.join(
+                                 os.path.join(dl_file, "MLQA_V1/test"),
+                                 "test-context-{}-question-{}.json".format(l1, l2),
+                             )
+                         },
+                     ),
+                     datasets.SplitGenerator(
+                         name=datasets.Split.VALIDATION,
+                         # These kwargs will be passed to _generate_examples
+                         gen_kwargs={
+                             "filepath": os.path.join(
+                                 os.path.join(dl_file, "MLQA_V1/dev"), "dev-context-{}-question-{}.json".format(l1, l2)
+                             )
+                         },
+                     ),
+                 ]
+             else:
+                 if self.config.name.startswith("mlqa-translate-test"):
+                     dl_file = dl_manager.download_and_extract(self.config.data_url)
+                     lang = self.config.name.split(".")[-1]
+                     return [
+                         datasets.SplitGenerator(
+                             name=datasets.Split.TEST,
+                             # These kwargs will be passed to _generate_examples
+                             gen_kwargs={
+                                 "filepath": os.path.join(
+                                     os.path.join(dl_file, "mlqa-translate-test"),
+                                     "translate-test-context-{}-question-{}.json".format(lang, lang),
+                                 )
+                             },
+                         ),
+                     ]
+
+     def _generate_examples(self, filepath):
+         """Yields examples."""
+         # TODO(mlqa): Yields (key, example) tuples from the dataset
+         with open(filepath, encoding="utf-8") as f:
+             data = json.load(f)
+             for examples in data["data"]:
+                 for example in examples["paragraphs"]:
+                     context = example["context"]
+                     for qa in example["qas"]:
+                         question = qa["question"]
+                         id_ = qa["id"]
+                         answers = qa["answers"]
+                         answers_start = [answer["answer_start"] for answer in answers]
+                         answers_text = [answer["text"] for answer in answers]
+                         yield id_, {
+                             "context": context,
+                             "questions": question,
+                             "answers": {"start": answers_start, "text": answers_text},
+                             "ids": id_,
+                         }
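
The script above defines three families of builder configs: mlqa-translate-train.<lang> and mlqa-translate-test.<lang> for the machine-translated data, and mlqa.<context_lang>.<question_lang> for the cross-lingual dev/test pairs. A minimal usage sketch, assuming the script is exposed through the datasets library under the "mlqa" identifier (the identifier and environment setup are assumptions, not part of this commit):

# Illustrative usage only; assumes `pip install datasets` and that this builder
# is available under the "mlqa" identifier.
from datasets import load_dataset

# Cross-lingual test/validation pair: contexts in German, questions in English.
mlqa_de_en = load_dataset("mlqa", "mlqa.de.en")
print(mlqa_de_en["test"][0]["questions"])

# Machine-translated training data for Arabic (train and validation splits).
translate_train_ar = load_dataset("mlqa", "mlqa-translate-train.ar")
print(translate_train_ar["train"][0]["answers"])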