import datasets
import glob
import os
import numpy as np  # NOTE(review): unused in this file; kept in case downstream code relies on it

# Each training shard zip covers SHARD_SIZE episodes; there are NUM_SHARDS shards.
SHARD_SIZE = 2500
NUM_SHARDS = 40

_URLS = {
    'train': [
        f'https://huggingface.co/datasets/commaai/commavq/resolve/main/data_{i*SHARD_SIZE}_to_{(i+1)*SHARD_SIZE}.zip'
        for i in range(NUM_SHARDS)
    ],
    # Plain string: the original used an f-string with no placeholders.
    'val': 'https://huggingface.co/datasets/commaai/commavq/resolve/main/val.zip',
}

_DESCRIPTION = """\
TODO
"""


class CommaVQ(datasets.GeneratorBasedBuilder):
    """Dataset builder for commaai/commavq.

    Episodes are stored as .npy files inside zip archives (NUM_SHARDS training
    shards plus one validation archive). Each example exposes only the path to
    an episode file; consumers load the array themselves.
    """

    def _info(self):
        """Return dataset metadata; every example is a single string feature 'path'."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"path": datasets.Value("string")}
            )
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators: one 'train_{i}' split per shard plus 'val'."""
        dl_manager.download_config.ignore_url_params = True
        downloaded_files = dl_manager.download(_URLS)
        # In streaming mode archives are not extracted up front; members are
        # read on the fly via iter_archive instead.
        local_extracted_archive = dl_manager.extract(downloaded_files) if not dl_manager.is_streaming else None
        return [
            datasets.SplitGenerator(
                name=f'train_{i}',
                gen_kwargs={
                    # BUGFIX: the original subscripted local_extracted_archive
                    # unconditionally, raising TypeError ('NoneType' is not
                    # subscriptable) when streaming. Guard with None.
                    "local_extracted_archive": local_extracted_archive['train'][i] if local_extracted_archive is not None else None,
                    "files": dl_manager.iter_archive(downloaded_files['train'][i]),
                },
            )
            for i in range(len(downloaded_files['train']))
        ] + [
            datasets.SplitGenerator(
                name='val',
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive['val'] if local_extracted_archive is not None else None,
                    "files": dl_manager.iter_archive(downloaded_files['val']),
                },
            )
        ]

    def _generate_examples(self, local_extracted_archive, files):
        """Yield (key, example) pairs for one extracted archive (non-streaming path).

        The `files` archive iterator is intentionally unused here: in the
        non-streaming case the archive has been extracted to disk and the .npy
        files are listed directly. Streaming goes through
        _get_examples_iterable_for_split instead.
        """
        for path in glob.glob(os.path.join(local_extracted_archive, '*.npy')):
            yield os.path.basename(path), {'path': path}

    def _get_examples_iterable_for_split(self, split_generator):
        """Streaming path: iterate archive members without extracting them."""
        # iter_archive yields (path_inside_archive, file_object) tuples; the
        # in-archive path serves as both the example key and its 'path' value.
        for member in split_generator.gen_kwargs['files']:
            yield member[0], {'path': member[0]}