VictorSanh committed
Commit da0475a
1 Parent(s): c16ac20

first attempt

Files changed (1)
  1. p3.py +244 -0
p3.py ADDED
@@ -0,0 +1,244 @@
+ # coding=utf-8
+ # Copyright 2020 BigScience Contributors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """P3"""
+
+
+ import glob
+ import json
+ import os
+ from collections import defaultdict
+
+ import datasets
+ import tensorflow as tf
+
+
+ _CITATION = """\
+ TODO"""
+
+ _DESCRIPTION = """\
+ TODO
+ """
+
+ _LICENSE = "Apache License 2.0"
+
+ _HOMEPAGE = "https://github.com/bigscience-workshop/promptsource"
+
+ _DATA_PATH = "./data/"
+
+
+ def load_cached_task(cache_dir, split):
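+     """Load one cached task split from its TFRecord shards into a tf.data.Dataset."""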
+     # TODO(Victor): this info.*.json read is actually done twice... -> factorize
+     with tf.io.gfile.GFile(os.path.join(cache_dir, f"info.{split}.json")) as f:
+         split_info = json.load(f)
+     features = split_info["features"]
+
+     # Use `FixedLenSequenceFeature` for sequences with variable length.
+     def _feature_config(shape, dtype):
+         if dtype in ("int32", "bool"):
+             # int32 and bool are stored as int64 in the tf.train.Example protobuf.
+             dtype = "int64"
+         if shape and shape[0] is None:
+             return tf.io.FixedLenSequenceFeature(
+                 shape[1:], dtype, allow_missing=True
+             )
+         return tf.io.FixedLenFeature(shape, dtype)
+
+     feature_description = {
+         feat: _feature_config(**desc) for feat, desc in features.items()
+     }
+
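+     # Match every shard of this split, e.g. "train.tfrecord-00000-of-00042"
+     # when num_shards is 42.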
+     tfrecords = os.path.join(
+         cache_dir, f"{split}.tfrecord-*-of-*{split_info['num_shards']}"
+     )
+     ds = tf.data.TFRecordDataset(tf.io.gfile.glob(tfrecords))
+     ds = ds.map(
+         lambda pb: tf.io.parse_single_example(pb, feature_description),
+         num_parallel_calls=tf.data.experimental.AUTOTUNE
+     )
+     # Cast features back to the types from the info JSON since some features
+     # must be cast for storage (e.g., int32 is stored as int64).
+     ds = ds.map(
+         lambda x: {k: tf.cast(v, features[k]["dtype"]) for k, v in x.items()},
+         num_parallel_calls=tf.data.experimental.AUTOTUNE
+     )
+     return ds
+
+
+ def find_task_splits_and_features():
+     """Find the available tasks under ./data and their available splits and features."""
+     task_and_their_splits = defaultdict(dict)
+     for stats in glob.glob(f"{_DATA_PATH}/*/stats.*.json"):
+         folder_path = os.path.dirname(stats)
+         task_name = folder_path.split("/")[-1]
+         split_name = os.path.basename(stats).split(".")[1]
+
+         if not os.path.exists(f"{folder_path}/COMPLETED"):
+             continue
+
+         with open(stats, "r") as f:
+             split_stats = json.load(f)
+             nb_examples = split_stats["examples"]
+
+         if nb_examples > 0:
+             with open(os.path.join(folder_path, f"info.{split_name}.json")) as f:
+                 split_info = json.load(f)
+                 features = split_info["features"]
+
+             # All splits under the same task have the same features dictionary (and thus the same features list)
+             if task_and_their_splits[task_name] == {}:
+                 task_and_their_splits[task_name] = {
+                     "splits": [],
+                     "features": [],
+                 }
+
+             task_and_their_splits[task_name]["splits"].append(split_name)
+             if task_and_their_splits[task_name]["features"] == []:
+                 task_and_their_splits[task_name]["features"] = sorted(list(features.keys()))
+             else:
+                 assert task_and_their_splits[task_name]["features"] == sorted(list(features.keys()))
+     return task_and_their_splits
+
+
+ TASK_SPLITS_AND_FEATURES = find_task_splits_and_features()
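+ # Computed once at import time; the builder configs below are derived from this mapping.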
+
+
+ class P3Config(datasets.BuilderConfig):
+     """BuilderConfig for P3."""
+
+     def __init__(self, splits, features, score_eval, **kwargs):
+         """BuilderConfig for P3.
+
+         Args:
+             splits: `List[str]`, the list of splits available for this task
+             features: `List[str]`, the list of features for this task
+             score_eval: `bool`, whether this task is formulated as a rank classification problem
+             **kwargs: keyword arguments forwarded to super.
+         """
+         # Version history:
+         # 0.1 initial commit
+         super(P3Config, self).__init__(version=datasets.Version("0.1.0"), **kwargs)
+         self.splits = splits
+         self.features = features
+         self.score_eval = score_eval
+
+
+ class P3(datasets.GeneratorBasedBuilder):
+     """Subset of P3 used in `Multitask Prompted Training Enables Zero-Shot Task Generalization`"""
+
+     BUILDER_CONFIGS = [
+         P3Config(
+             name=task_name,
+             splits=splits_and_features["splits"],
+             features=splits_and_features["features"],
+             score_eval=task_name.endswith("score_eval")
+         )
+         for task_name, splits_and_features in TASK_SPLITS_AND_FEATURES.items()
+     ]
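+     # Configs whose names end in "score_eval" are the rank-classification variants
+     # of a task; the score_eval flag above is inferred from that name suffix.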
+
+     def _info(self):
+         # All features available are: 'inputs', 'inputs_pretokenized', 'targets',
+         # 'targets_pretokenized', 'idx', 'is_correct', 'weight', and 'answer_choices'
+         _FEAT_MAPPING = {
+             "answer_choices": datasets.Sequence(datasets.Value("string")),
+             "inputs": datasets.Sequence(datasets.Value("int32")),
+             "inputs_pretokenized": datasets.Value("string"),
+             "targets": datasets.Sequence(datasets.Value("int32")),
+             "targets_pretokenized": datasets.Value("string"),
+             "idx": datasets.Sequence(datasets.Value("int32")),
+             "weight": datasets.Value("float32"),
+             "is_correct": datasets.Value("bool"),
+         }
+
+         features = {}
+         for feat_name in self.config.features:
+             features[feat_name] = _FEAT_MAPPING[feat_name]
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(features),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+             license=_LICENSE,
+         )
+
+     def _split_generators(self, dl_manager):
+         split_generators = []
+         if "train" in self.config.splits:
+             split_generators.append(
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={
+                         "data_folder": os.path.join(_DATA_PATH, self.config.name),
+                         "split": "train",
+                     }
+                 )
+             )
+         if "validation" in self.config.splits:
+             split_generators.append(
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={
+                         "data_folder": os.path.join(_DATA_PATH, self.config.name),
+                         "split": "validation",
+                     }
+                 )
+             )
+         if "test" in self.config.splits:
+             split_generators.append(
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={
+                         "data_folder": os.path.join(_DATA_PATH, self.config.name),
+                         "split": "test",
+                     }
+                 )
+             )
+         # Handle splits that are not train, validation or test
+         special_splits = set(self.config.splits) - set(["train", "validation", "test"])
+         for special_split_name in special_splits:
+             split_generators.append(
+                 datasets.SplitGenerator(
+                     name=datasets.Split(special_split_name),
+                     gen_kwargs={
+                         "data_folder": os.path.join(_DATA_PATH, self.config.name),
+                         "split": special_split_name,
+                     }
+                 )
+             )
+         return split_generators
+
+     def _generate_examples(self, data_folder, split):
+         """Yields the examples in the raw (text) form."""
+         _FEAT_MAPPING_FUNCTIONS = {
+             "answer_choices": lambda x: [choice.decode("utf-8") for choice in x],
+             "inputs": lambda x: x.tolist(),
+             "inputs_pretokenized": lambda x: x.decode("utf-8"),
+             "targets": lambda x: x.tolist(),
+             "targets_pretokenized": lambda x: x.decode("utf-8"),
+             "idx": lambda x: x.tolist(),
+             "weight": lambda x: float(x),
+             "is_correct": lambda x: x,
+         }
+
+         key = 0
+         ds = load_cached_task(data_folder, split)
+         for ex in ds.as_numpy_iterator():
+             ex_dict = {}
+             for feat_name, feat_value in ex.items():
+                 ex_dict[feat_name] = _FEAT_MAPPING_FUNCTIONS[feat_name](feat_value)
+             yield key, ex_dict
+             key += 1
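
For reference, a minimal usage sketch once the script and its ./data cache are in place. The config name "super_glue_copa" is a hypothetical example; the real names are generated at import time from the task folders found under ./data:

    from datasets import load_dataset

    # Build one task config from the local script and its ./data cache.
    # "super_glue_copa" is a placeholder for an actual cached task name.
    dataset = load_dataset("./p3.py", "super_glue_copa", split="train")
    print(dataset[0]["inputs_pretokenized"])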