import os

import datasets

_DESCRIPTION = """\
A segmentation dataset for [TODO: complete...]
"""

_HOMEPAGE = "https://huggingface.co/datasets/alkzar90/cell_benchmark"

# Accepted image file extensions for this dataset.
_EXTENSION = [".jpg", ".png"]

_URL_BASE = "https://huggingface.co/datasets/alkzar90/cell_benchmark/resolve/main/data/"

# Per-split archive URLs: images live at the top level, masks under masks/.
_SPLIT_URLS = {
    "train": _URL_BASE + "train.zip",
    "val": _URL_BASE + "val.zip",
    "test": _URL_BASE + "test.zip",
    "masks_train": _URL_BASE + "masks/train.zip",
    "masks_val": _URL_BASE + "masks/val.zip",
    "masks_test": _URL_BASE + "masks/test.zip",
}


class Cellsegmentation(datasets.GeneratorBasedBuilder):
    """Cell segmentation dataset builder.

    Each example pairs an input ``image`` with its segmentation ``masks``
    image, downloaded from the Hugging Face Hub archives in ``_SPLIT_URLS``.
    """

    def _info(self):
        """Return the :class:`datasets.DatasetInfo` describing the features."""
        features = datasets.Features({
            "image": datasets.Image(),
            "masks": datasets.Image(),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            # `features` is already a datasets.Features instance; no need to
            # wrap it a second time as the original code did.
            features=features,
            supervised_keys=("image", "masks"),
            homepage=_HOMEPAGE,
            citation="",
        )

    def _split_generators(self, dl_manager):
        """Download/extract every archive and define train/val/test splits."""
        data_files = dl_manager.download_and_extract(_SPLIT_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["train"]]),
                    "masks": dl_manager.iter_files([data_files["masks_train"]]),
                    "split": "training",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["val"]]),
                    "masks": dl_manager.iter_files([data_files["masks_val"]]),
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["test"]]),
                    "masks": dl_manager.iter_files([data_files["masks_test"]]),
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, files, masks, split):
        """Yield ``(key, example)`` pairs of image/mask file paths.

        Args:
            files: iterable of image file paths for this split.
            masks: iterable of mask file paths for this split.
            split: split name string (unused; kept for the gen_kwargs contract).
        """
        # Sort both sequences before zipping: the two iter_files iterators are
        # independent, so relying on their raw order risks pairing an image
        # with the wrong mask and makes output order nondeterministic.
        # NOTE(review): assumes image and mask filenames sort identically —
        # confirm against the archive layout.
        for idx, (image_path, mask_path) in enumerate(
            zip(sorted(files), sorted(masks))
        ):
            yield idx, {
                "image": image_path,
                "masks": mask_path,
            }