import datasets
import pyarrow as pa
import pyarrow.parquet as pq

logger = datasets.utils.logging.get_logger(__name__)

_URLS = {
    "train": "https://huggingface.co/datasets/moska/test_parquet/resolve/main/data/example.parquet"
}


class ParquetDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for the test_parquet dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(ParquetDatasetConfig, self).__init__(**kwargs)


class ParquetDataset(datasets.ArrowBasedBuilder):
    BUILDER_CONFIGS = [
        ParquetDatasetConfig(
            name="parquet",
            description="test_parquet dataset.",
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description="reading parquet format.",
            # This defines the different columns of the dataset and their types.
            features=datasets.Features(
                {
                    "pop_est": datasets.Value(dtype="float64"),
                    "continent": datasets.Value(dtype="string"),
                    "name": datasets.Value(dtype="string"),
                    "iso_a3": datasets.Value(dtype="string"),
                    "gdp_md_est": datasets.Value(dtype="int64"),
                    "geometry": datasets.Value("binary"),
                }
            ),
        )

    def _split_generators(self, dl_manager: datasets.download.DownloadManager):
        # Download the remote parquet file(s) and hand each split its local path.
        downloaded_files = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            )
        ]

    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_tables(self, filepath):
        with open(filepath, mode="rb") as f:
            # Read from the opened file handle, not the path, so the `with`
            # block actually owns the file that ParquetFile consumes.
            parquet_file = pq.ParquetFile(f)
            for batch_idx, record_batch in enumerate(parquet_file.iter_batches()):
                pa_table = pa.Table.from_batches([record_batch])
                yield f"{batch_idx}", pa_table
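

if __name__ == "__main__":
    # Minimal usage sketch, not part of the loading script proper: it assumes
    # this file is run directly so that `__file__` points at the script, and
    # that your installed `datasets` version still supports script-based
    # builders (recent releases may also require `trust_remote_code=True`).
    from datasets import load_dataset

    # `load_dataset` resolves the ParquetDataset builder defined above and
    # streams the parquet batches through `_generate_tables`.
    ds = load_dataset(__file__, split="train")
    print(ds.features)  # should match the Features schema declared in _info()
    print(ds[0]["continent"], ds[0]["name"])  # first record's string columns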