"""Zstd dataset based on Common Crawl."""
import gzip
import json
import datasets
import more_itertools
import numpy as np
import io
import json
import dataclasses
import pyzstd
from tensorflow.io import gfile
logger = datasets.logging.get_logger(__name__)
@dataclasses.dataclass
class State:
    """Resumable read position: which shard we are in and the byte offset inside it."""
    file_index: int
    file_position: int

    def to_int(self) -> int:
        # Pack both fields into a single integer: the byte offset occupies the high
        # bits and the shard index the low 17 bits (i.e. at most 2**17 shards).
        return (self.file_position << 17) | self.file_index

    @classmethod
    def from_int(cls, value: int):
        # Inverse of to_int: split the packed integer back into offset and index.
        file_position = value >> 17
        file_index = value & ((1 << 17) - 1)
        return cls(file_index, file_position)
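
# A quick sketch of the round trip (not part of the original module): packing a
# position and unpacking it again returns the same pair, as long as file_index
# stays below 2**17.
#
#   >>> s = State(file_index=3, file_position=1024)
#   >>> State.from_int(s.to_int()) == s
#   True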

class ZstdReader:
    """Iterates over newline-delimited, zstd-compressed shards, sharding the work across workers."""

    def __init__(self,
                 filepaths,
                 worker_id,
                 num_workers,
                 file_loc=0,
                 buffer_size=65536,
                 deserialize_func=None,
                 ):
        self.filepaths = filepaths
        n_shards = len(self.filepaths)
        if n_shards == 0:
            raise ValueError("No shards found")
        logger.info(f"Found {len(self.filepaths)} files")
        self.buffer_size = buffer_size
        self.state = State.from_int(file_loc)
        self.worker_id = worker_id
        self.num_workers = num_workers
        assert worker_id < num_workers, "worker_id must be less than num_workers"
        if n_shards < num_workers:
            # Fewer shards than workers: several workers share one shard and later
            # split its lines round-robin (see __iter__).
            self.workers_per_shard = num_workers // n_shards
            self.filepaths = [self.filepaths[worker_id % n_shards]]
            self.internal_worker_id = int(worker_id // n_shards)
        else:
            # At least one shard per worker: give each worker its own subset of files.
            self.workers_per_shard = None
            self.filepaths = list(more_itertools.distribute(num_workers, self.filepaths)[worker_id])
        logger.info(f"Using {len(self.filepaths)} files")
        if deserialize_func is None:
            self.deserialize_func = deserialize
        else:
            self.deserialize_func = deserialize_func
    def __iter__(self):
        """Yield deserialized examples from all assigned files, tagging each with a resumable `file_loc`."""
        while self.state.file_index < len(self.filepaths):
            filepath = self.filepaths[self.state.file_index]
            with gfile.GFile(filepath, 'rb') as f:
                with pyzstd.ZstdFile(f, 'rb') as ifo:
                    raw_reader = MultiBytesIOReader(ifo,
                                                    buffer_size=self.buffer_size,
                                                    file_position=self.state.file_position)
                    if self.workers_per_shard is not None:
                        # Several workers share this shard: deal its lines out round-robin.
                        reader = more_itertools.distribute(self.workers_per_shard, raw_reader)[self.internal_worker_id]
                    else:
                        reader = raw_reader
                    for example in reader:
                        self.state.file_position = raw_reader.tell()
                        example = self.deserialize_func(example)
                        example["file_loc"] = self.state.to_int()
                        yield example
            # Move on to the next shard and start reading it from the beginning.
            self.state.file_position = 0
            self.state.file_index += 1
        self.state = State.from_int(0)

def deserialize(data_point):
    """Default deserializer: each record is a UTF-8 encoded JSON object."""
    return json.loads(data_point.decode('utf-8'))
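
# A minimal usage sketch (not part of the original script). The shard path below is a
# hypothetical placeholder; any newline-delimited JSON file compressed with zstd works.
#
#   reader = ZstdReader(filepaths=["gs://my-bucket/shard-00000.jsonl.zst"],
#                       worker_id=0, num_workers=1)
#   for example in reader:
#       ...  # example is a dict carrying a "file_loc" key that can be fed back via `file_loc=`
#       break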

class MultiBytesIOReader:
    """Splits a decompressed byte stream into newline-delimited records while tracking the byte offset."""

    def __init__(self,
                 decompressedStream: io.BytesIO,  # typically a pyzstd.ZstdFile over a shard
                 buffer_size=65536,
                 file_position=0,
                 ):
        self.decompressedStream = decompressedStream
        self.buffer_size = buffer_size
        self.incomplete_line = bytearray()
        self.position = file_position

    def seek(self, position):
        self.position = position

    def tell(self):
        return self.position

    def __iter__(self):
        self.decompressedStream.seek(self.position)
        while True:
            buffer = self.decompressedStream.read(self.buffer_size)
            if not buffer:
                break
            # Prepend whatever was left over from the previous chunk.
            buffer = self.incomplete_line + buffer
            lines = buffer.split(b'\n')
            # The last element is whatever follows the final newline in this chunk
            # (possibly empty); keep it until the next read completes it.
            self.incomplete_line = lines.pop()
            for line in lines:
                # Every element left here was newline-terminated, so advance the offset
                # by its length plus one even when the line is blank.
                self.position += len(line) + 1
                if line:
                    yield line
        if self.incomplete_line:
            # Final record with no trailing newline.
            self.position += len(self.incomplete_line)
            yield self.incomplete_line
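
# A small self-contained sketch (not from the original script) of how the reader splits a
# byte stream into records; io.BytesIO stands in for the decompressed zstd stream here.
#
#   stream = io.BytesIO(b'{"id": 1}\n{"id": 2}\n')
#   r = MultiBytesIOReader(stream, buffer_size=8)
#   [deserialize(line) for line in r]   # -> [{'id': 1}, {'id': 2}]
#   r.tell()                            # -> 20, the number of bytes consumed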

class ZstdConfig(datasets.BuilderConfig):
    """BuilderConfig for Zstd."""

    def __init__(self,
                 data_url=None,
                 cluster_layout=None,
                 worker_id=None,
                 n_workers=None,
                 filepaths=None,
                 file_loc=None,
                 buffer_size=65536,
                 cluster_spec=None,
                 features=None,
                 **kwargs):
        """BuilderConfig for Zstd.
        Args:
          data_url: per-config format string used to build shard paths.
          cluster_layout: per-config tuple of dimension sizes the format string is expanded over.
          worker_id: index of this worker among `n_workers`.
          n_workers: total number of workers sharing the shards.
          filepaths: explicit list of shard paths; mutually exclusive with `cluster_spec`.
          file_loc: packed `State` integer to resume reading from.
          buffer_size: read chunk size in bytes.
          cluster_spec: fixes some dimensions of `cluster_layout` to a single value.
          features: optional `datasets.Features`; a default schema is used when omitted.
          **kwargs: keyword arguments forwarded to super.
        """
        super(ZstdConfig, self).__init__(**kwargs)
        self.cluster_layout = cluster_layout
        self.data_url = data_url
        self.worker_id = worker_id
        self.n_workers = n_workers
        self.filepaths = filepaths
        self.file_loc = file_loc
        self.buffer_size = buffer_size
        self.cluster_spec = cluster_spec
        self.features = features
        # At most one of `cluster_spec` and `filepaths` may be given.
        assert (self.cluster_spec is None) or (self.filepaths is None)

def get_cluster_tuples(cluster_layout, cluster_spec):
    """Expand `cluster_layout` into index tuples, fixing any dimensions given in `cluster_spec`."""
    if cluster_spec is None:
        cluster_spec = (None,) * len(cluster_layout)
    # If cluster_spec is shorter than cluster_layout (e.g. 2 entries vs. 3 dimensions),
    # pad it with Nones at the end so the two can be zipped.
    cluster_spec = cluster_spec + (None,) * (len(cluster_layout) - len(cluster_spec))
    dim_list = []
    for a, b in zip(cluster_layout, cluster_spec):
        if b is None:
            dim_list.append(range(a))  # free dimension: enumerate all indices
        else:
            dim_list.append([b])       # fixed dimension: only the given index
    return list(itertools.product(*dim_list))

def _get_filepaths(config):
    """Return explicit filepaths if given, otherwise build them from the config's URL template."""
    cluster_layout = config.cluster_layout[config.name]
    cluster_spec = config.cluster_spec
    if config.filepaths is not None:
        filepaths = config.filepaths
    else:
        tuples = get_cluster_tuples(cluster_layout, cluster_spec)
        filepaths = [config.data_url[config.name].format(*tup) for tup in tuples]
    return filepaths
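
# A worked illustration (not from the original script) of how shard paths get expanded.
# The URL template below is a hypothetical placeholder:
#
#   get_cluster_tuples((2, 2), None)       # -> [(0, 0), (0, 1), (1, 0), (1, 1)]
#   get_cluster_tuples((2, 2), (1, None))  # -> [(1, 0), (1, 1)]
#   # With data_url[name] = "gs://bucket/{}/{}.jsonl.zst", the second spec yields
#   # gs://bucket/1/0.jsonl.zst and gs://bucket/1/1.jsonl.zst.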

class Zstd(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [ZstdConfig()]

    def _info(self):
        self.data_url = self.config.data_url
        self.cluster_layout = self.config.cluster_layout
        assert self.cluster_layout is not None
        assert self.data_url is not None
        self.worker_id = self.config.worker_id
        self.n_workers = self.config.n_workers
        self.buffer_size = self.config.buffer_size
        self.filepaths = _get_filepaths(self.config)
        if self.config.features is None:
            # Default schema; matches the fields produced by _generate_examples.
            self.features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "source": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "id": datasets.Value("int32"),
                    "file_loc": datasets.Value("int64"),
                }
            )
        else:
            self.features = self.config.features
        if self.config.file_loc is not None:
            self.file_loc = self.config.file_loc
        else:
            self.file_loc = 0
        return datasets.DatasetInfo(
            features=self.features,
            supervised_keys=None,
        )

    def _split_generators(self, _):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": self.filepaths}),
        ]

    def _generate_examples(self, filepaths):
        """Yield (key, example) pairs by streaming all the assigned shard files."""
        id_ = 0
        dataset = ZstdReader(filepaths=list(filepaths),
                             worker_id=self.worker_id if self.worker_id is not None else 0,
                             num_workers=self.n_workers if self.n_workers is not None else 1,
                             file_loc=self.file_loc,
                             buffer_size=self.buffer_size,
                             )
        for example in dataset:
            # The raw records store the document URL under "id"; move it to "url"
            # and replace "id" with a running integer.
            url = example["id"]
            example["id"] = id_
            example["url"] = url
            if id_ == 0:
                print(f"{example.keys()=}")
            id_ += 1
            yield id_, example
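
# A hedged usage sketch (not part of the original script). How the builder is invoked depends
# on where this script and the shards live; the script path, URL template, and layout below
# are hypothetical placeholders, passed through as config kwargs to ZstdConfig.
#
#   import datasets
#   ds = datasets.load_dataset(
#       "path/to/zstd.py",  # this script
#       data_url={"default": "gs://bucket/{}/{}.jsonl.zst"},
#       cluster_layout={"default": (2, 2)},
#       worker_id=0,
#       n_workers=1,
#       split="train",
#       streaming=True,
#   )
#   next(iter(ds))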