import struct
from io import BufferedIOBase
from typing import Any, Dict, Iterator, List, NamedTuple, Optional, Tuple, Union, cast

import numpy as np

# math.prod is only available on Python >= 3.8; fall back to functools.reduce.
try:
    from math import prod
except ImportError:
    import operator
    from functools import reduce

    def prod(xs):
        return reduce(operator.mul, xs, 1)


U = Union[bytes, bytearray, str]
TFRecordFeatureSpec = Tuple[Tuple[int, ...], Union[str, np.dtype]]
TFRecordExampleSpec = Dict[str, TFRecordFeatureSpec]

# Note: mypy does not support recursive type aliases, so the nesting of the
# binary-data type bottoms out at Any instead of referring to itself.
TFRecordBinaryData = Union[str, List[str], List[List[str]], List[List[List[Any]]]]
TFRecordExampleFeature = Union[np.ndarray, List[np.ndarray], TFRecordBinaryData]
TFRecordExample = Dict[str, TFRecordExampleFeature]
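
# An illustrative example spec (the feature names are hypothetical): each entry
# maps a feature key to a (shape, dtype) pair, where -1 marks the single
# dynamic dimension allowed per shape.
#
#     spec: TFRecordExampleSpec = {
#         "image": ((28, 28), np.dtype("float32")),
#         "label": ((-1,), np.dtype("int64")),
#     }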


class SequenceExampleSpec(NamedTuple):
    context: TFRecordExampleSpec
    feature_lists: TFRecordExampleSpec


def iterate_tfrecord_file(data: BufferedIOBase) -> Iterator[memoryview]:
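    """Yield one serialized record at a time from a TFRecord stream.

    On the wire, every TFRecord entry is laid out as:

        uint64 length (little-endian)
        uint32 masked CRC32 of the length
        bytes  data[length]
        uint32 masked CRC32 of the data

    The yielded ``memoryview`` aliases an internal buffer that is reused
    across iterations, so copy the bytes if they are needed later. Both CRC
    fields are consumed but not verified.
    """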
|
length_bytes = bytearray(8) |
|
crc_bytes = bytearray(4) |
|
data_bytes = bytearray(1024) |
|
|
|
while True: |
|
bytes_read = data.readinto(length_bytes) |
|
if bytes_read == 0: |
|
break |
|
elif bytes_read != 8: |
|
raise RuntimeError("Invalid tfrecord file: failed to read the record size.") |
|
if data.readinto(crc_bytes) != 4: |
|
raise RuntimeError("Invalid tfrecord file: failed to read the start token.") |
|
(length,) = struct.unpack("<Q", length_bytes) |
|
if length > len(data_bytes): |
|
data_bytes = data_bytes.zfill(int(length * 1.5)) |
|
data_bytes_view = memoryview(data_bytes)[:length] |
|
if data.readinto(data_bytes_view) != length: |
|
raise RuntimeError("Invalid tfrecord file: failed to read the record.") |
|
if data.readinto(crc_bytes) != 4: |
|
raise RuntimeError("Invalid tfrecord file: failed to read the end token.") |
|
|
|
|
|
yield data_bytes_view |


def process_feature(feature) -> np.ndarray:
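    """Convert a single protobuf ``Feature`` into a Python value.

    A ``Feature`` sets exactly one of ``bytes_list``, ``float_list``, or
    ``int64_list``; ``ListFields()`` reveals which one. Bytes values are
    returned as-is, numeric values as NumPy arrays.
    """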
    field = feature.ListFields()[0]
    inferred_typename, value = field[0].name, field[1].value
    if inferred_typename == "bytes_list":
        pass
    elif inferred_typename == "float_list":
        value = np.array(value, dtype=np.float32)
    elif inferred_typename == "int64_list":
        value = np.array(value, dtype=np.int64)
    return value


def _reshape_list(value, shape):
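    """Reshape a flat or nested list of bytes/str leaves into nested lists.

    ``shape`` may contain at most one dynamic dimension (-1), which is
    inferred from the number of leaves. For example, a flat list of four
    values reshaped with ``(2, -1)`` becomes two rows of two values each.
    """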
    # Flatten the (possibly nested) input into a single list of leaves.
    flat_list = []

    def flatten(value):
        if isinstance(value, (str, bytes)):
            flat_list.append(value)
        else:
            for x in value:
                flatten(x)

    flatten(value)

    # Resolve the dynamic dimension, if any, and validate the shape.
    common_divisor = prod(x for x in shape if x != -1)
    if sum(1 for x in shape if x == -1) > 1:
        raise RuntimeError("Shape can contain at most one dynamic dimension (-1).")
    if len(flat_list) % max(common_divisor, 1) != 0:
        raise RuntimeError(f"Cannot reshape {len(flat_list)} values into shape {shape}")
    shape = [x if x != -1 else (len(flat_list) // common_divisor) for x in shape]

    # Split the flat list recursively according to the resolved shape.
    def _reshape(value, shape):
        if len(shape) == 0:
            assert len(value) == 1
            return value[0]
        elif len(shape) == 1:
            assert len(value) == shape[0]
            return value
        dim_size = len(value) // shape[0]
        return [_reshape(value[i * dim_size : (i + 1) * dim_size], shape[1:]) for i in range(shape[0])]

    return _reshape(flat_list, shape)


def _apply_feature_spec(value, feature_spec):
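    """Cast and/or reshape a parsed feature according to its (shape, dtype) spec."""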
    if feature_spec is None:
        return value
    shape, dtype = feature_spec
    if isinstance(value, np.ndarray):
        if isinstance(dtype, (str, np.dtype)):
            if shape:
                value = value.reshape(shape)
            value = value.astype(dtype)
    elif shape:
        # Bytes/str features arrive as plain Python lists, so they are
        # reshaped manually rather than through NumPy.
        value = _reshape_list(value, shape)
    return value


def _parse_tfrecord_features(features, spec: Optional[TFRecordExampleSpec]) -> Dict[str, np.ndarray]:
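    """Parse a ``Features`` proto into a dict, keeping only the keys in ``spec``.

    When ``spec`` is None, every feature is parsed and returned unmodified.
    """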
    result = {}
    features = features.feature
    for key in features.keys():
        if spec is not None and key not in spec:
            continue
        feature_spec = None if spec is None else spec[key]
        feature = features[key]
        result[key] = _apply_feature_spec(process_feature(feature), feature_spec)
    return result


def parse_tfrecord_sequence_example(example, spec: Optional[TFRecordExampleSpec]) -> TFRecordExample:
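    """Parse a ``SequenceExample`` proto into a single flat dict.

    Context features and feature lists share one namespace: a key may appear
    in one or the other, but not both. Feature-list values are parsed per
    step and, when the spec names a NumPy dtype, stacked along a new first
    axis before the spec is applied.
    """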
    # Parse the context features first.
    result = cast(TFRecordExample, _parse_tfrecord_features(example.context, spec))

    # Then parse the feature lists, skipping spec keys the context already filled.
    feature_lists_keys = None if spec is None else set(spec.keys()) - set(result.keys())
    features = example.feature_lists.feature_list
    for key in features.keys():
        if feature_lists_keys is not None and key not in feature_lists_keys:
            continue
        feature_spec = None if spec is None else spec[key]
        feature = features[key].feature
        if key in result:
            raise RuntimeError(
                f"TFRecord example's key {key} is contained in both the context and feature lists. This is not supported."
            )

        value: Union[np.ndarray, List[Any]] = list(map(process_feature, feature))

        # For numeric dtypes, stack the per-step arrays along a new first axis.
        if feature_spec is not None and isinstance(feature_spec[1], (str, np.dtype)):
            value = np.stack(cast(List[np.ndarray], value), 0)
        value = _apply_feature_spec(value, feature_spec)
        result[key] = value
    if spec is not None and len(result.keys()) != len(spec.keys()):
        raise RuntimeError(f"Example is missing some required keys: {sorted(result.keys())} != {sorted(spec.keys())}")
    return result
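

# A minimal usage sketch (assumptions: the TensorFlow protobuf bindings are
# installed, and "data.tfrecord" is a hypothetical input path):
#
#     from tensorflow.core.example import example_pb2
#
#     with open("data.tfrecord", "rb") as f:
#         for record in iterate_tfrecord_file(f):
#             example = example_pb2.SequenceExample()
#             example.ParseFromString(bytes(record))
#             print(parse_tfrecord_sequence_example(example, spec=None))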