# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# TODO: Address all TODOs and remove all explanatory comments
"""TODO: Add a description here."""
import json
import os

import datasets
import numpy as np
import PIL.Image

# Debug marker: printed repeatedly whenever this loading script is imported/executed.
for _ in range(10):
    print("LOADING SCRIPT")
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""
# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
This dataset pairs images from an ImageNet-1k subset with patch-level annotations:
segmentation masks on a (224 // 8, 224 // 8) patch grid, category labels, and scores.
"""
# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLS = {
"8x8": [
"https://huggingface.co/datasets/Prisma-Multimodal/segmented-imagenet1k-subset/resolve/main/images.tar.gz?download=true",
"https://huggingface.co/datasets/manuel-delverme/test_repo/resolve/main/annotations/{split}_annotations/mask.tar.gz?download=true",
"https://huggingface.co/datasets/manuel-delverme/test_repo/resolve/main/{split}.jsonl?download=true"
]
}
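
# A minimal illustrative sketch (not called by the builder below; the helper name
# `_resolve_split_urls` is hypothetical): `dl_manager.download_and_extract` accepts
# the same nested structure as `_URLS` and returns local paths in that structure,
# so formatting the per-split templates and passing the resulting list yields the
# image archive, mask archive, and metadata file paths in the same order.
def _resolve_split_urls(dl_manager, config_name="8x8", split="train"):
    urls = [url.format(split=split) for url in _URLS[config_name]]
    image_dir, mask_dir, metadata_file = dl_manager.download_and_extract(urls)
    return image_dir, mask_dir, metadata_file
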
# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
class PatchyImagenet(datasets.GeneratorBasedBuilder):
"""TODO: Short description of my dataset."""
VERSION = datasets.Version("0.0.1")
# This is an example of a dataset with multiple configurations.
# If you don't want/need to define several sub-sets in your dataset,
# just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
BUILDER_CONFIGS = [
# datasets.BuilderConfig(name="1x1", version=VERSION, description="Patchy Imagenet with 1x1 resolution (this is the original resolution)"),
datasets.BuilderConfig(name="8x8", version=VERSION, description="Patchy Imagenet with 8x8 resolution"),
# datasets.BuilderConfig(name="16x16", version=VERSION, description="Patchy Imagenet with 16x16 resolution"),
# datasets.BuilderConfig(name="32x32", version=VERSION, description="Patchy Imagenet with 32x32 resolution"),
# datasets.BuilderConfig(name="64x64", version=VERSION, description="Patchy Imagenet with 64x64 resolution"),
]
DEFAULT_CONFIG_NAME = "8x8"
def _info(self):
features = datasets.Features(
{
"image": datasets.Image(),
"patches": datasets.Features(
{
# "categories": datasets.Sequence(datasets.ClassLabel(names=_IMAGENET_CLASSES)),
"categories": datasets.Value("string"),
"scores": datasets.Sequence(datasets.Value("float32")),
"mask": datasets.Sequence(
datasets.Array2D(shape=(224 // 8, 224 // 8), dtype="bool")
),
# "mask": datasets.Sequence(datasets.Image()),
}
),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=features,
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(self, dl_manager):
# TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
# If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
# dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
# It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
# By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
url_templates = _URLS[self.config.name]
split_kwargs = {}
for split in ["train", "test", "val"]:
            urls = [url.format(split=split) for url in url_templates]
            # download_and_extract mirrors the structure of its input, so the list of
            # three URLs comes back as three local paths: the images archive, the masks
            # archive, and the metadata JSONL for this split.
            image_dir, mask_dir, metadata_file = dl_manager.download_and_extract(urls)
split_kwargs[split] = {
"meta_path": metadata_file,
"image_dir": image_dir, "mask_dir": mask_dir,
"split": split
}
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=split_kwargs["train"]),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs=split_kwargs["val"]),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=split_kwargs["test"]),
]
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
def _generate_examples(self, meta_path, image_dir, mask_dir, split):
# TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
# The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
with open(meta_path, encoding="utf-8") as f:
for key, row in enumerate(f):
data = json.loads(row)
image_path = os.path.join(image_dir, "images", f"{split}_images", data["file_name"])
sample_name, _extension = os.path.splitext(data["file_name"])
mask_file = os.path.join(mask_dir, "masks", sample_name + ".npy")
# mask = np.load(mask_file).astype(bool)
mask = np.load(mask_file).astype(np.uint8)
pil_image = PIL.Image.open(image_path)
yield key, {
"image": pil_image,
"patches": {
"categories": data["patches"]["categories"],
"scores": data["patches"]["scores"],
"mask": list(mask),
}
}
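
# Minimal usage sketch, assuming a `datasets` version that still executes loading
# scripts (hence `trust_remote_code=True`) and that the archives referenced in
# `_URLS` are reachable. It loads the default "8x8" config and expands the first
# patch mask from its (224 // 8, 224 // 8) grid back to 224 x 224 pixels by
# repeating each entry 8 times along both axes.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, "8x8", split="train", trust_remote_code=True)
    example = dataset[0]
    print(example["image"].size, example["patches"]["categories"])

    # Expand the first patch mask to pixel resolution (purely illustrative).
    patch_mask = np.asarray(example["patches"]["mask"][0], dtype=bool)
    full_mask = patch_mask.repeat(8, axis=0).repeat(8, axis=1)
    print(full_mask.shape)  # expected: (224, 224)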