"""Loading script for PatchyImagenet: an ImageNet-1k subset whose images carry
patch-level segmentation annotations (per-patch category labels, confidence
scores, and boolean patch masks)."""

import json
import os

import datasets
import numpy as np
import PIL.Image

_CITATION = """\
@InProceedings{huggingface:dataset,
  title={A great new dataset},
  author={huggingface, Inc.},
  year={2020}
}
"""

_DESCRIPTION = """\
PatchyImagenet pairs a subset of ImageNet-1k images with patch-level segmentation
annotations: per-patch category labels, confidence scores, and boolean masks over
a grid of 8x8-pixel patches.
"""

_HOMEPAGE = ""

_LICENSE = ""

# URL order matters: (images archive, masks archive, metadata JSONL). The three
# entries are unpacked positionally in _split_generators.
_URLS = {
    "8x8": [
        "https://huggingface.co/datasets/Prisma-Multimodal/segmented-imagenet1k-subset/resolve/main/images.tar.gz?download=true",
        "https://huggingface.co/datasets/manuel-delverme/test_repo/resolve/main/annotations/{split}_annotations/mask.tar.gz?download=true",
        "https://huggingface.co/datasets/manuel-delverme/test_repo/resolve/main/{split}.jsonl?download=true",
    ]
}
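
# With split="train", for example, the two templated entries above expand to
# ".../annotations/train_annotations/mask.tar.gz?download=true" and
# ".../train.jsonl?download=true"; the images URL has no {split} placeholder, so
# the same archive is fetched for every split.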


class PatchyImagenet(datasets.GeneratorBasedBuilder):
    """Builder for the PatchyImagenet dataset."""

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="8x8",
            version=VERSION,
            description="Patchy Imagenet with 8x8-pixel patches (a 28x28 patch grid on 224x224 images)",
        ),
    ]
    DEFAULT_CONFIG_NAME = "8x8"

    def _info(self):
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "patches": datasets.Features(
                    {
                        # One entry per annotated region in the image.
                        "categories": datasets.Sequence(datasets.Value("string")),
                        "scores": datasets.Sequence(datasets.Value("float32")),
                        # One boolean mask per region over the 28x28 patch grid
                        # (224x224 images divided into 8x8-pixel patches).
                        "mask": datasets.Sequence(
                            datasets.Array2D(shape=(224 // 8, 224 // 8), dtype="bool")
                        ),
                    }
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
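
    # A decoded example under this schema looks like the following (values are
    # hypothetical, for illustration only):
    #   {"image": <PIL.Image.Image>,
    #    "patches": {"categories": ["tench", ...],
    #                "scores": [0.93, ...],
    #                "mask": [<28x28 bool array>, ...]}}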

    def _split_generators(self, dl_manager):
        url_templates = _URLS[self.config.name]

        split_kwargs = {}
        for split in ["train", "test", "val"]:
            urls = [url.format(split=split) for url in url_templates]
            # Local paths come back in the same order as the URLs in _URLS.
            image_dir, mask_dir, metadata_file = dl_manager.download_and_extract(urls)
            split_kwargs[split] = {
                "meta_path": metadata_file,
                "image_dir": image_dir,
                "mask_dir": mask_dir,
                "split": split,
            }

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=split_kwargs["train"]),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs=split_kwargs["val"]),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=split_kwargs["test"]),
        ]
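
    # Each line of the {split}.jsonl metadata file is a single JSON object. Judging
    # by the fields read below, a row presumably looks like (hypothetical values):
    #   {"file_name": "n01440764_0001.JPEG",
    #    "patches": {"categories": ["tench"], "scores": [0.93]}}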

    def _generate_examples(self, meta_path, image_dir, mask_dir, split):
        with open(meta_path, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                image_path = os.path.join(image_dir, "images", f"{split}_images", data["file_name"])
                # Masks are stored as one .npy file per image, named after the image file.
                sample_name, _extension = os.path.splitext(data["file_name"])
                mask_file = os.path.join(mask_dir, "masks", sample_name + ".npy")
                mask = np.load(mask_file).astype(bool)

                yield key, {
                    "image": PIL.Image.open(image_path),
                    "patches": {
                        "categories": data["patches"]["categories"],
                        "scores": data["patches"]["scores"],
                        "mask": mask,
                    },
                }
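
# Minimal usage sketch, not part of the builder itself. It assumes this file is
# saved locally as "patchy_imagenet.py" and a `datasets` version that still
# supports script-based datasets (some versions require trust_remote_code=True).
# The first run downloads and prepares all three splits.
if __name__ == "__main__":
    dataset = datasets.load_dataset("patchy_imagenet.py", "8x8", split="train")
    example = dataset[0]
    print(example["image"])                  # decoded PIL.Image.Image
    print(example["patches"]["categories"])  # per-patch category labels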