File size: 1,776 Bytes
2b625b3 53a7514 2b625b3 e0b0bad 8bb2277 e0b0bad 2b625b3 53a7514 2b625b3 53a7514 2b625b3 2629cd7 8bb2277 e0b0bad 8bb2277 e0b0bad 2b625b3 2629cd7 ab73426 2629cd7 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 |
import datasets
import glob
import os
import numpy as np
# Each zipped training shard covers SHARD_SIZE segments; there are NUM_SHARDS of them.
SHARD_SIZE = 2500
NUM_SHARDS = 40

# Download locations on the HuggingFace Hub: 40 training shard archives plus
# a single validation archive.
_URLS = {
    'train': [
        f'https://huggingface.co/datasets/commaai/commavq/resolve/main/data_{shard * SHARD_SIZE}_to_{(shard + 1) * SHARD_SIZE}.zip'
        for shard in range(NUM_SHARDS)
    ],
    'val': 'https://huggingface.co/datasets/commaai/commavq/resolve/main/val.zip',
}

_DESCRIPTION = """\
TODO
"""
class CommaVQ(datasets.GeneratorBasedBuilder):
    """Loader for the commaVQ dataset.

    Each example exposes only the local path of one ``.npy`` token file
    (``{"path": str}``); decoding the arrays is left to the consumer.
    The dataset is published as 40 zipped training shards plus one
    validation zip (see ``_URLS``), and each shard becomes its own split
    (``train_0`` .. ``train_39`` and ``val``).
    """

    def _info(self):
        """Return dataset metadata: a single string feature ``path``."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"path": datasets.Value("string")}
            )
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators: one per training shard, plus 'val'."""
        dl_manager.download_config.ignore_url_params = True
        downloaded_files = dl_manager.download(_URLS)
        # In streaming mode nothing is extracted to disk; examples are then
        # read directly out of the downloaded archives via iter_archive.
        local_extracted_archive = dl_manager.extract(downloaded_files) if not dl_manager.is_streaming else None

        def _extracted(split, index=None):
            # Guard the streaming case: the original code subscripted None here,
            # raising TypeError as soon as streaming was enabled.
            if local_extracted_archive is None:
                return None
            entry = local_extracted_archive[split]
            return entry if index is None else entry[index]

        return [
            datasets.SplitGenerator(
                name=f'train_{i}',
                gen_kwargs={
                    "local_extracted_archive": _extracted('train', i),
                    "files": dl_manager.iter_archive(downloaded_files['train'][i]),
                },
            ) for i in range(len(downloaded_files['train']))
        ] + [
            datasets.SplitGenerator(
                name='val',
                gen_kwargs={
                    "local_extracted_archive": _extracted('val'),
                    "files": dl_manager.iter_archive(downloaded_files['val']),
                },
            )
        ]

    def _generate_examples(self, local_extracted_archive, files):
        """Yield ``(key, {"path": ...})`` pairs for one split.

        Non-streaming: list the ``.npy`` files in the extracted directory.
        Streaming (no extracted directory): fall back to iterating the
        archive members handed over in ``files`` — the original body ignored
        this parameter and crashed on ``os.path.join(None, ...)``.
        """
        if local_extracted_archive is not None:
            for path in glob.glob(os.path.join(local_extracted_archive, '*.npy')):
                file_name = os.path.basename(path)
                yield file_name, {'path': path}
        else:
            # iter_archive yields (path_inside_archive, file_object) tuples.
            for archive_path, _ in files:
                yield archive_path, {'path': archive_path}

    def _get_examples_iterable_for_split(self, split_generator):
        """Streaming path: yield examples straight from the archive iterator.

        NOTE(review): overriding this hook to return a plain generator relies
        on datasets-library internals — confirm against the pinned version.
        """
        for archive_path, _ in split_generator.gen_kwargs['files']:
            yield archive_path, {'path': archive_path}
|