Tasks: Other · Modalities: Text · Formats: parquet · Languages: English · Libraries: Datasets, Dask
Commit 54bc4c0 by mariosasko (parent: 248047a)

Streaming support
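What the commit does: P3.py no longer depends on TensorFlow to read the cached TFRecord shards. Record framing is handled by the pure-Python helpers added in io_utils.py, and the protobuf payloads are decoded with the pre-compiled _tfrecord_example_pb2 module, which is what makes the loader usable in the datasets streaming mode. A minimal usage sketch (the repository id "bigscience/P3" and the config name are illustrative assumptions, not taken from this commit):

from itertools import islice

from datasets import load_dataset

# Stream one task config lazily instead of downloading and preparing every shard up front.
ds = load_dataset("bigscience/P3", "some_task_some_prompt", split="train", streaming=True)
for example in islice(ds, 3):
    print(example["inputs_pretokenized"])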
P3.py CHANGED
@@ -14,10 +14,14 @@
 # limitations under the License.
 """P3 (Public Pool of Prompts)"""
 
+import os
+
+import google.protobuf as _protobuf
 
 import datasets
-import tensorflow as tf
 
+from ._tfrecord_example_pb2 import SequenceExample
+from .io_utils import iterate_tfrecord_file, parse_tfrecord_sequence_example
 from .tasks_splits_and_features import _TASK_SPLITS_AND_FEATURES_DICT
 
 
@@ -44,44 +48,14 @@ _HOMEPAGE = "https://github.com/bigscience-workshop/promptsource"
 
 _DATA_PATH = "data"
 
-
 logger = datasets.logging.get_logger(__name__)
 
-
-def load_cached_task(features_dict, tfrecord):
-    # Use `FixedLenSequenceFeature` for sequences with variable length.
-    def _feature_config(shape, dtype):
-        if dtype in ("int32", "bool"):
-            # int32 and bool are stored as int64 in the tf.train.Example protobuf.
-            dtype = "int64"
-        if shape and shape[0] is None:
-            return tf.io.FixedLenSequenceFeature(
-                shape[1:], dtype, allow_missing=True
-            )
-        return tf.io.FixedLenFeature(shape, dtype)
-
-    feature_description = {
-        feat: _feature_config(**desc) for feat, desc in features_dict.items()
-    }
-
-    ds = tf.data.TFRecordDataset(tfrecord)
-    ds = ds.map(
-        lambda pb: tf.io.parse_single_example(pb, feature_description),
-        num_parallel_calls=tf.data.experimental.AUTOTUNE
-    )
-    # Cast features back to the types from the info JSON since some features
-    # must be cast for storage (e.g., int32 is stored as int64).
-    ds = ds.map(
-        lambda x: {k: tf.cast(v, features_dict[k]["dtype"]) for k, v in x.items()},
-        num_parallel_calls=tf.data.experimental.AUTOTUNE
-    )
-    return ds
-
-
 _URLs = {
     task_name: {
         split_name: [
-            f"{_DATA_PATH}/{task_name}/{split_name}.tfrecord-00000-of-00001",  # TODO -> handle multiple shards
+            os.path.join(
+                _DATA_PATH, task_name, split_name + ".tfrecord-00000-of-00001"
+            ),  # TODO -> handle multiple shards
         ]
         for split_name in splits_and_features_dict["splits"]
     }
@@ -117,7 +91,7 @@ class P3(datasets.GeneratorBasedBuilder):
             name=task_name,
             splits=splits_and_features_dict["splits"],
            features_dict=splits_and_features_dict["features_dict"],
-            score_eval=task_name.endswith("score_eval")
+            score_eval=task_name.endswith("score_eval"),
        )
        for task_name, splits_and_features_dict in _TASK_SPLITS_AND_FEATURES_DICT.items()
    ]
@@ -136,10 +110,7 @@ class P3(datasets.GeneratorBasedBuilder):
             "is_correct": datasets.Value("bool"),
         }
 
-        features = {}
-        for feat_name in self.config.features_dict.keys():
-            features[feat_name] = _FEAT_MAPPING[feat_name]
-
+        features = {feat_name: _FEAT_MAPPING[feat_name] for feat_name in self.config.features_dict.keys()}
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
             features=datasets.Features(features),
@@ -158,8 +129,8 @@ class P3(datasets.GeneratorBasedBuilder):
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
                     gen_kwargs={
-                        "tfrecord": data_dir[split_name],
-                    }
+                        "tfrecord_files": data_dir[split_name],
+                    },
                 )
             )
         if "validation" in self.config.splits:
@@ -168,8 +139,8 @@ class P3(datasets.GeneratorBasedBuilder):
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     gen_kwargs={
-                        "tfrecord": data_dir[split_name],
-                    }
+                        "tfrecord_files": data_dir[split_name],
+                    },
                 )
             )
         if "test" in self.config.splits:
@@ -178,8 +149,8 @@ class P3(datasets.GeneratorBasedBuilder):
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     gen_kwargs={
-                        "tfrecord": data_dir[split_name],
-                    }
+                        "tfrecord_files": data_dir[split_name],
+                    },
                 )
             )
         # Handle splits that are not train, validation or test
@@ -190,32 +161,42 @@ class P3(datasets.GeneratorBasedBuilder):
                     name=datasets.Split(special_split_name),
                     gen_kwargs={
                         "tfrecord": data_dir[special_split_name],
-                    }
+                    },
                 )
             )
         return split_generators
 
-
-    def _generate_examples(self, tfrecord):
+    def _generate_examples(self, tfrecord_files):
         """This function returns the examples in the raw (text) form."""
-        _FEAT_MAPPING_FUNCTIONS = {
+        _POST_PROC_FUNCTIONS = {
             "answer_choices": lambda x: [choice.decode("utf-8") for choice in x],
             "inputs": lambda x: x.tolist(),
-            "inputs_pretokenized": lambda x: x.decode("utf-8"),
+            "inputs_pretokenized": lambda x: x[0].decode("utf-8"),
             "targets": lambda x: x.tolist(),
-            "targets_pretokenized": lambda x: x.decode("utf-8"),
+            "targets_pretokenized": lambda x: x[0].decode("utf-8"),
             "idx": lambda x: x.tolist(),
             "weight": lambda x: float(x),
             "is_correct": lambda x: x,
         }
 
-        key = 0
-        features_dict = self.config.features_dict
-        ds = load_cached_task(features_dict, tfrecord)
-
-        for ex in ds.as_numpy_iterator():
-            ex_dict = {}
-            for feat_name, feat_value in ex.items():
-                ex_dict[feat_name] = _FEAT_MAPPING_FUNCTIONS[feat_name](feat_value)
-            yield key, ex_dict
-            key += 1
+        def _prepare_col_spec(shape, dtype):
+            if dtype in ("int32", "bool"):
+                # int32 and bool are stored as int64 in the tf.train.Example protobuf.
+                dtype = "int64"
+            elif dtype == "string":
+                dtype = "str"
+            if shape and shape[0] is None:
+                shape = (-1, *shape[1:])
+            return (shape, dtype)
+
+        spec = {k: _prepare_col_spec(**v) for k, v in self.config.features_dict.items()}
+        idx = 0
+        for tfrecord_file in tfrecord_files:
+            with open(tfrecord_file, "rb") as f:
+                for example_bytes in iterate_tfrecord_file(f):
+                    example = SequenceExample()
+                    example.ParseFromString(example_bytes)
+                    example = parse_tfrecord_sequence_example(example, spec)
+                    example = {k: _POST_PROC_FUNCTIONS[k](v) for k, v in example.items()}
+                    yield idx, example
+                    idx += 1
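For reference, the spec passed to parse_tfrecord_sequence_example is derived from the config's features_dict by _prepare_col_spec above: int32/bool columns are read back as int64, strings become "str", and a leading None dimension becomes -1 (variable length). A small illustration with hypothetical feature descriptions (not copied from the dataset's metadata):

def _prepare_col_spec(shape, dtype):
    # Same conversion as in P3.py above.
    if dtype in ("int32", "bool"):
        dtype = "int64"
    elif dtype == "string":
        dtype = "str"
    if shape and shape[0] is None:
        shape = (-1, *shape[1:])
    return (shape, dtype)

features_dict = {
    "inputs": {"shape": [None], "dtype": "int32"},            # hypothetical entry
    "inputs_pretokenized": {"shape": [], "dtype": "string"},  # hypothetical entry
}
print({k: _prepare_col_spec(**v) for k, v in features_dict.items()})
# {'inputs': ((-1,), 'int64'), 'inputs_pretokenized': ([], 'str')}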
_tfrecord_example_pb2.py ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50e227d1c6e389901c2ec71b36b8d73b0b7711b14c42962a837f01c197056f2c
+size 21378
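The new _tfrecord_example_pb2.py is committed as a Git LFS pointer; the actual module is Python code generated from the tf.train.SequenceExample protobuf definitions, which is what lets records be decoded without importing TensorFlow. A hedged inspection sketch, run from the repository root and using only the attribute accesses that io_utils.py and P3.py rely on (the shard path is hypothetical):

from _tfrecord_example_pb2 import SequenceExample
from io_utils import iterate_tfrecord_file

with open("data/some_task/train.tfrecord-00000-of-00001", "rb") as f:  # hypothetical path
    raw = next(iterate_tfrecord_file(f))
    example = SequenceExample()
    example.ParseFromString(bytes(raw))
    # A SequenceExample splits its features between a context map and per-step feature lists.
    print("context keys:", list(example.context.feature.keys()))
    print("feature_lists keys:", list(example.feature_lists.feature_list.keys()))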
io_utils.py ADDED
@@ -0,0 +1,166 @@
+# Code copied from: https://github.com/pytorch/data/blob/d9bbbecf64d0149795dc65ba390b50bc9e176e95/torchdata/datapipes/iter/util/tfrecordloader.py
+
+import struct
+from functools import partial
+from io import BufferedIOBase
+from typing import Any, Dict, Iterator, List, NamedTuple, Optional, Tuple, Union, cast
+
+import numpy as np
+
+
+try:
+    from math import prod
+except ImportError:
+    import operator
+    from functools import reduce
+
+    def prod(xs):
+        return reduce(operator.mul, xs, 1)
+
+
+U = Union[bytes, bytearray, str]
+TFRecordFeatureSpec = Tuple[Tuple[int, ...], Union[str, np.dtype]]
+TFRecordExampleSpec = Dict[str, TFRecordFeatureSpec]
+
+# Note: recursive types are not supported by mypy at the moment
+# TODO(640): uncomment as soon as it becomes supported
+# https://github.com/python/mypy/issues/731
+# BinaryData = Union[str, List['BinaryData']]
+TFRecordBinaryData = Union[str, List[str], List[List[str]], List[List[List[Any]]]]
+TFRecordExampleFeature = Union[np.ndarray, List[np.ndarray], TFRecordBinaryData]
+TFRecordExample = Dict[str, TFRecordExampleFeature]
+
+
+class SequenceExampleSpec(NamedTuple):
+    context: TFRecordExampleSpec
+    feature_lists: TFRecordExampleSpec
+
+
+def iterate_tfrecord_file(data: BufferedIOBase) -> Iterator[memoryview]:
+    length_bytes = bytearray(8)
+    crc_bytes = bytearray(4)
+    data_bytes = bytearray(1024)
+
+    while True:
+        bytes_read = data.readinto(length_bytes)
+        if bytes_read == 0:
+            break
+        elif bytes_read != 8:
+            raise RuntimeError("Invalid tfrecord file: failed to read the record size.")
+        if data.readinto(crc_bytes) != 4:
+            raise RuntimeError("Invalid tfrecord file: failed to read the start token.")
+        (length,) = struct.unpack("<Q", length_bytes)
+        if length > len(data_bytes):
+            data_bytes = data_bytes.zfill(int(length * 1.5))
+        data_bytes_view = memoryview(data_bytes)[:length]
+        if data.readinto(data_bytes_view) != length:
+            raise RuntimeError("Invalid tfrecord file: failed to read the record.")
+        if data.readinto(crc_bytes) != 4:
+            raise RuntimeError("Invalid tfrecord file: failed to read the end token.")
+
+        # TODO(641): check CRC
+        yield data_bytes_view
+
+
+def process_feature(feature) -> np.ndarray:
+    # NOTE: We assume that each key in the example has only one field
+    # (either "bytes_list", "float_list", or "int64_list")!
+    field = feature.ListFields()[0]
+    inferred_typename, value = field[0].name, field[1].value
+    if inferred_typename == "bytes_list":
+        pass
+    elif inferred_typename == "float_list":
+        value = np.array(value, dtype=np.float32)
+    elif inferred_typename == "int64_list":
+        value = np.array(value, dtype=np.int64)
+    return value
+
+
+def _reshape_list(value, shape):
+    # Flatten list
+    flat_list = []
+
+    def flatten(value):
+        if isinstance(value, (str, bytes)):
+            flat_list.append(value)
+        else:
+            for x in value:
+                flatten(x)
+
+    flatten(value)
+
+    # Compute correct shape
+    common_divisor = prod(x for x in shape if x != -1)
+    if sum(1 for x in shape if x == -1) > 1:
+        raise RuntimeError("Shape can contain at most one dynamic dimension (-1).")
+    if len(flat_list) % max(common_divisor, 1) != 0:
+        raise RuntimeError(f"Cannot reshape {len(flat_list)} values into shape {shape}")
+    shape = [x if x != -1 else (len(flat_list) // common_divisor) for x in shape]
+
+    # Reshape list into the correct shape
+    def _reshape(value, shape):
+        if len(shape) == 0:
+            assert len(value) == 1
+            return value[0]
+        elif len(shape) == 1:  # To make the recursion faster
+            assert len(value) == shape[0]
+            return value
+        dim_size = len(value) // shape[0]
+        return [_reshape(value[i * dim_size : (i + 1) * dim_size], shape[1:]) for i in range(dim_size)]
+
+    return _reshape(flat_list, shape)
+
+
+def _apply_feature_spec(value, feature_spec):
+    if isinstance(value, np.ndarray):
+        if feature_spec is not None:
+            shape, dtype = feature_spec
+            if isinstance(dtype, (str, np.dtype)):
+                if shape:
+                    value = value.reshape(shape)
+                value = value.astype(dtype)
+            elif shape:
+                # Manual list reshape
+                value = _reshape_list(value, shape)
+    return value
+
+
+def _parse_tfrecord_features(features, spec: Optional[TFRecordExampleSpec]) -> Dict[str, np.ndarray]:
+    result = {}
+    features = features.feature
+    for key in features.keys():
+        if spec is not None and key not in spec:
+            continue
+        feature_spec = None if spec is None else spec[key]
+        feature = features[key]
+        result[key] = _apply_feature_spec(process_feature(feature), feature_spec)
+    return result
+
+
+def parse_tfrecord_sequence_example(example, spec: Optional[TFRecordExampleSpec]) -> TFRecordExample:
+    # Parse context features
+    result = cast(TFRecordExample, _parse_tfrecord_features(example.context, spec))
+
+    # Parse feature lists
+    feature_lists_keys = None if spec is None else set(spec.keys()) - set(result.keys())
+    features = example.feature_lists.feature_list
+    for key in features.keys():
+        if feature_lists_keys is not None and key not in feature_lists_keys:
+            continue
+        feature_spec = None if spec is None else spec[key]
+        feature = features[key].feature
+        if key in result:
+            raise RuntimeError(
+                f"TFRecord example's key {key} is contained in both the context and feature lists. This is not supported."
+            )
+
+        value: Union[np.ndarray, List[Any]] = list(map(partial(process_feature), feature))
+
+        # For known numpy dtypes, we stack the list features
+        if feature_spec is not None and isinstance(feature_spec[1], (str, np.dtype)):
+            value = np.stack(cast(List[np.ndarray], value), 0)
+        value = _apply_feature_spec(value, feature_spec)
+        result[key] = value
+    if spec is not None and len(result.keys()) != len(spec.keys()):
+        raise RuntimeError(f"Example is missing some required keys: {sorted(result.keys())} != {sorted(spec.keys())}")
+    return result
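For context, each record that iterate_tfrecord_file yields is framed on disk as an 8-byte little-endian payload length, a 4-byte masked CRC of that length, the payload, and a 4-byte masked CRC of the payload. Since the reader above deliberately skips CRC validation (see the TODO), a test shard for it can be produced with zeroed checksums; TensorFlow's own readers do verify the masked CRC32C values and would reject such a file. A minimal round-trip sketch under that assumption:

import struct
from io import BytesIO

from io_utils import iterate_tfrecord_file  # the helper added in this commit

def write_unchecked_record(fp, payload: bytes) -> None:
    # length (uint64, little-endian), zeroed length CRC, payload, zeroed payload CRC
    fp.write(struct.pack("<Q", len(payload)))
    fp.write(b"\x00" * 4)
    fp.write(payload)
    fp.write(b"\x00" * 4)

buf = BytesIO()
for payload in (b"first record", b"second record"):
    write_unchecked_record(buf, payload)
buf.seek(0)
print([bytes(view) for view in iterate_tfrecord_file(buf)])
# [b'first record', b'second record']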
print_data_split_sizes.py CHANGED
@@ -1,9 +1,9 @@
 import glob
 import json
 import os
-
 from collections import defaultdict
 
+
 _DATA_PATH = "data"
 
 data_split_sizes = defaultdict(dict)
tasks_splits_and_features.py CHANGED
The diff for this file is too large to render. See raw diff
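tasks_splits_and_features.py is too large to render here, but the way P3.py consumes _TASK_SPLITS_AND_FEATURES_DICT implies its per-task layout: a list of available splits plus a features_dict that maps each feature name to a shape/dtype description (the arguments unpacked by _prepare_col_spec). An illustrative entry, assumed for the sake of example rather than copied from the file:

_TASK_SPLITS_AND_FEATURES_DICT = {
    "some_task_some_prompt": {  # hypothetical task/prompt name
        "splits": ["train", "validation"],
        "features_dict": {
            "inputs": {"shape": [None], "dtype": "int32"},
            "inputs_pretokenized": {"shape": [], "dtype": "string"},
            "targets": {"shape": [None], "dtype": "int32"},
            "targets_pretokenized": {"shape": [], "dtype": "string"},
        },
    },
}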