Datasets:
GEM
/

Multilinguality:
yes
Size Categories:
unknown
Language Creators:
unknown
Annotations Creators:
none
Source Datasets:
original
ArXiv:
Tags:
data-to-text
License:
Sebastian Gehrmann committed on
Commit
e8cb727
1 Parent(s): a560f16

initial data loader

Browse files
Files changed (2) hide show
  1. README.md +37 -0
  2. TaTA.py +169 -0
README.md CHANGED
@@ -1,3 +1,40 @@
1
  ---
2
  license: cc-by-sa-4.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  ---
1
  ---
2
  license: cc-by-sa-4.0
3
+ dataset_info:
4
+ features:
5
+ - name: gem_id
6
+ dtype: string
7
+ - name: example_id
8
+ dtype: string
9
+ - name: title
10
+ dtype: string
11
+ - name: unit_of_measure
12
+ dtype: string
13
+ - name: chart_type
14
+ dtype: string
15
+ - name: was_translated
16
+ dtype: string
17
+ - name: table_data
18
+ dtype: string
19
+ - name: linearized_input
20
+ dtype: string
21
+ - name: table_text
22
+ sequence: string
23
+ - name: target
24
+ dtype: string
25
+ splits:
26
+ - name: ru
27
+ num_bytes: 308435
28
+ num_examples: 210
29
+ - name: test
30
+ num_bytes: 1691383
31
+ num_examples: 763
32
+ - name: train
33
+ num_bytes: 10019272
34
+ num_examples: 6962
35
+ - name: validation
36
+ num_bytes: 1598442
37
+ num_examples: 754
38
+ download_size: 18543506
39
+ dataset_size: 13617532
40
  ---
TaTA.py ADDED
@@ -0,0 +1,169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Dataloader for TaTA: A Multilingual Table-to-Text Dataset for African Languages."""
16
+
17
+ import json
18
+ import os
19
+
20
+ import datasets
21
+ import re
22
+
23
+ # Find for instance the citation on arxiv or on the dataset repo/website
24
+ _CITATION = """\
25
+ @misc{gehrmann2022TaTA,
26
+ Author = {Sebastian Gehrmann and Sebastian Ruder and Vitaly Nikolaev and Jan A. Botha and Michael Chavinda and Ankur Parikh and Clara Rivera},
27
+ Title = {TaTa: A Multilingual Table-to-Text Dataset for African Languages},
28
+ Year = {2022},
29
+ Eprint = {arXiv:2211.00142},
30
+ }
31
+ """
32
+
33
+ # You can copy an official description
34
+ _DESCRIPTION = """\
35
+ Dataset loader for TaTA: A Multilingual Table-to-Text Dataset for African Languages
36
+ """
37
+
38
+ _HOMEPAGE = "https://github.com/google-research/url-nlp/tree/main/tata"
39
+
40
+ _LICENSE = "CC-BY-SA 4.0"
41
+
42
+ _URLs = {
43
+ "train": "https://raw.githubusercontent.com/google-research/url-nlp/main/tata/train.json",
44
+ "validation": "https://raw.githubusercontent.com/google-research/url-nlp/main/tata/dev.json",
45
+ "test": "https://raw.githubusercontent.com/google-research/url-nlp/main/tata/test.json",
46
+ "ru": "https://raw.githubusercontent.com/google-research/url-nlp/main/tata/ru.json"
47
+ }
48
+
49
+
50
class TaTA(datasets.GeneratorBasedBuilder):
    """Dataset builder for TaTA, a multilingual table-to-text dataset for African languages.

    Downloads the train/validation/test splits plus an extra "ru" (Russian)
    split from the google-research/url-nlp GitHub repository and yields one
    example per table, with the first reference exposed as `target`.
    """

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return the DatasetInfo: feature schema, supervised keys, and metadata."""
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "example_id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "unit_of_measure": datasets.Value("string"),
                "chart_type": datasets.Value("string"),
                "was_translated": datasets.Value("string"),
                # Kept as the raw JSON string rather than a nested sequence
                # of table rows.
                "table_data": datasets.Value("string"),
                "linearized_input": datasets.Value("string"),
                # All references for an example, as a list of strings.
                "table_text": datasets.Sequence(datasets.Value("string")),
                # Only use `target` as supervised key, not for evaluation!
                "target": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # (input, target) pair used when as_supervised=True in
            # builder.as_dataset.
            supervised_keys=("linearized_input", "target"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download every split's JSON file and return one SplitGenerator per split."""
        data_dir = dl_manager.download_and_extract(_URLs)
        # (URL key, split name) pairs; order matches the original loader.
        # "ru" is a custom split name, not a canonical datasets.Split value.
        splits = [
            ("train", datasets.Split.TRAIN),
            ("test", datasets.Split.TEST),
            ("validation", datasets.Split.VALIDATION),
            ("ru", "ru"),
        ]
        return [
            datasets.SplitGenerator(
                name=name,
                gen_kwargs={"filepath": data_dir[key], "split": key},
            )
            for key, name in splits
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) tuples from one split's JSON file.

        Each record gains a `gem_id` (mirroring `example_id`) and a
        single-reference `target` (the first entry of `table_text`, or ""
        when the example has no references).
        """
        with open(filepath, encoding="utf-8") as f:
            all_data = json.load(f)
        for id_, data in enumerate(all_data):
            data["gem_id"] = data["example_id"]
            # First reference becomes the supervised target; examples with
            # an empty reference list get an empty-string target.
            data["target"] = data["table_text"][0] if data["table_text"] else ""
            yield id_, data