# Code copied from: https://github.com/pytorch/data/blob/d9bbbecf64d0149795dc65ba390b50bc9e176e95/torchdata/datapipes/iter/util/tfrecordloader.py

import struct
from io import BufferedIOBase
from typing import Any, Dict, Iterator, List, NamedTuple, Optional, Tuple, Union, cast

import numpy as np


try:
    from math import prod
except ImportError:
    import operator
    from functools import reduce

    def prod(xs):
        return reduce(operator.mul, xs, 1)


U = Union[bytes, bytearray, str]
TFRecordFeatureSpec = Tuple[Tuple[int, ...], Union[str, np.dtype]]
TFRecordExampleSpec = Dict[str, TFRecordFeatureSpec]

#  Note: recursive types are not supported by mypy at the moment
# TODO(640): uncomment as soon as it becomes supported
#  https://github.com/python/mypy/issues/731
#  BinaryData = Union[str, List['BinaryData']]
TFRecordBinaryData = Union[str, List[str], List[List[str]], List[List[List[Any]]]]
TFRecordExampleFeature = Union[np.ndarray, List[np.ndarray], TFRecordBinaryData]
TFRecordExample = Dict[str, TFRecordExampleFeature]
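
# An illustrative example spec (not part of the original torchdata file): it
# maps feature names to (shape, dtype) pairs, where -1 marks a single dynamic
# dimension. The feature names below are hypothetical.
#
#   spec: TFRecordExampleSpec = {
#       "label": ((1,), np.int64),
#       "embedding": ((-1, 128), np.float32),
#   }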


class SequenceExampleSpec(NamedTuple):
    context: TFRecordExampleSpec
    feature_lists: TFRecordExampleSpec


def iterate_tfrecord_file(data: BufferedIOBase) -> Iterator[memoryview]:
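    # Each record in a TFRecord file is laid out as (integers little-endian):
    #   uint64 length            - payload size in bytes
    #   uint32 masked crc32c     - checksum of the length field
    #   bytes  data[length]      - the serialized example protobuf
    #   uint32 masked crc32c     - checksum of the payload
    # Both checksums are read but not verified here (see TODO(641) below).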
    length_bytes = bytearray(8)
    crc_bytes = bytearray(4)
    data_bytes = bytearray(1024)

    while True:
        bytes_read = data.readinto(length_bytes)
        if bytes_read == 0:
            break
        elif bytes_read != 8:
            raise RuntimeError("Invalid tfrecord file: failed to read the record size.")
        if data.readinto(crc_bytes) != 4:
            raise RuntimeError("Invalid tfrecord file: failed to read the start token.")
        (length,) = struct.unpack("<Q", length_bytes)
        if length > len(data_bytes):
            # Grow the read buffer with 1.5x headroom; zfill pads the bytearray
            # to the new size, and the padding is overwritten by readinto below.
            data_bytes = data_bytes.zfill(int(length * 1.5))
        data_bytes_view = memoryview(data_bytes)[:length]
        if data.readinto(data_bytes_view) != length:
            raise RuntimeError("Invalid tfrecord file: failed to read the record.")
        if data.readinto(crc_bytes) != 4:
            raise RuntimeError("Invalid tfrecord file: failed to read the end token.")

        # TODO(641): check CRC
        yield data_bytes_view


def process_feature(feature) -> np.ndarray:
    # NOTE: We assume that each key in the example has only one field
    # (either "bytes_list", "float_list", or "int64_list")!
    field = feature.ListFields()[0]
    inferred_typename, value = field[0].name, field[1].value
    if inferred_typename == "bytes_list":
        pass  # keep the raw repeated bytes; no numpy conversion for byte strings
    elif inferred_typename == "float_list":
        value = np.array(value, dtype=np.float32)
    elif inferred_typename == "int64_list":
        value = np.array(value, dtype=np.int64)
    return value
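# For instance (illustrative values): a float_list feature holding [1.0, 2.0]
# comes back as np.array([1.0, 2.0], dtype=np.float32), while a bytes_list
# feature is returned as the raw protobuf repeated container of byte strings.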


def _reshape_list(value, shape):
    # Flatten list
    flat_list = []

    def flatten(value):
        if isinstance(value, (str, bytes)):
            flat_list.append(value)
        else:
            for x in value:
                flatten(x)

    flatten(value)

    # Compute correct shape
    common_divisor = prod(x for x in shape if x != -1)
    if sum(1 for x in shape if x == -1) > 1:
        raise RuntimeError("Shape can contain at most one dynamic dimension (-1).")
    if len(flat_list) % max(common_divisor, 1) != 0:
        raise RuntimeError(f"Cannot reshape {len(flat_list)} values into shape {shape}")
    shape = [x if x != -1 else (len(flat_list) // common_divisor) for x in shape]

    # Reshape list into the correct shape
    def _reshape(value, shape):
        if len(shape) == 0:
            assert len(value) == 1
            return value[0]
        elif len(shape) == 1:  # To make the recursion faster
            assert len(value) == shape[0]
            return value
        dim_size = len(value) // shape[0]
        return [_reshape(value[i * dim_size : (i + 1) * dim_size], shape[1:]) for i in range(shape[0])]

    return _reshape(flat_list, shape)
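# Worked example (illustrative): _reshape_list([b"a", b"b", b"c", b"d", b"e", b"f"], (2, -1))
# flattens to six values, resolves the shape to [2, 3], and returns
# [[b"a", b"b", b"c"], [b"d", b"e", b"f"]].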


def _apply_feature_spec(value, feature_spec):
    if isinstance(value, np.ndarray):
        if feature_spec is not None:
            shape, dtype = feature_spec
            if isinstance(dtype, (str, np.dtype)):
                if shape:
                    value = value.reshape(shape)
                value = value.astype(dtype)
            elif shape:
                # Manual list reshape
                value = _reshape_list(value, shape)
    return value


def _parse_tfrecord_features(features, spec: Optional[TFRecordExampleSpec]) -> Dict[str, np.ndarray]:
    result = {}
    features = features.feature
    for key in features.keys():
        if spec is not None and key not in spec:
            continue
        feature_spec = None if spec is None else spec[key]
        feature = features[key]
        result[key] = _apply_feature_spec(process_feature(feature), feature_spec)
    return result


def parse_tfrecord_sequence_example(example, spec: Optional[TFRecordExampleSpec]) -> TFRecordExample:
    # Parse context features
    result = cast(TFRecordExample, _parse_tfrecord_features(example.context, spec))

    # Parse feature lists
    feature_lists_keys = None if spec is None else set(spec.keys()) - set(result.keys())
    features = example.feature_lists.feature_list
    for key in features.keys():
        if feature_lists_keys is not None and key not in feature_lists_keys:
            continue
        feature_spec = None if spec is None else spec[key]
        feature = features[key].feature
        if key in result:
            raise RuntimeError(
                f"TFRecord example's key {key} is contained in both the context and feature lists. This is not supported."
            )

        value: Union[np.ndarray, List[Any]] = list(map(process_feature, feature))

        # For known numpy dtypes, we stack the list features
        if feature_spec is not None and isinstance(feature_spec[1], (str, np.dtype)):
            value = np.stack(cast(List[np.ndarray], value), 0)
        value = _apply_feature_spec(value, feature_spec)
        result[key] = value
    if spec is not None and len(result.keys()) != len(spec.keys()):
        raise RuntimeError(f"Example is missing some required keys: {sorted(result.keys())} != {sorted(spec.keys())}")
    return result
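

# A minimal usage sketch, not part of the original torchdata file. It assumes
# the TensorFlow protobuf bindings are importable and that "data.tfrecord" (a
# hypothetical path) holds serialized tf.train.SequenceExample records.
if __name__ == "__main__":
    from tensorflow.core.example import example_pb2  # assumption: TensorFlow is installed

    with open("data.tfrecord", "rb") as f:  # hypothetical file
        for raw in iterate_tfrecord_file(f):
            example = example_pb2.SequenceExample()
            example.ParseFromString(bytes(raw))  # copy the memoryview into bytes
            # spec=None keeps every key and lets dtypes be inferred
            parsed = parse_tfrecord_sequence_example(example, spec=None)
            print({key: type(value) for key, value in parsed.items()})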