# Copyright 2022 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Caltech 101 loading script"""
from __future__ import annotations
from pathlib import Path
import datasets
import numpy as np
import scipy.io
_CITATION = """\
@article{FeiFei2004LearningGV,
title={Learning Generative Visual Models from Few Training Examples: An Incremental Bayesian Approach Tested on 101 Object Categories},
author={Li Fei-Fei and Rob Fergus and Pietro Perona},
journal={Computer Vision and Pattern Recognition Workshop},
year={2004},
}
"""
_DESCRIPTION = """\
Pictures of objects belonging to 101 categories.
About 40 to 800 images per category.
Most categories have about 50 images.
Collected in September 2003 by Fei-Fei Li, Marco Andreetto, and Marc'Aurelio Ranzato.
The size of each image is roughly 300 x 200 pixels.
"""
_HOMEPAGE = "https://data.caltech.edu/records/20086"
_LICENSE = "CC BY 4.0"
_DATA_URL = "caltech-101.zip"
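# NOTE: this is a relative path, not a full URL. A hedged assumption based on how
# the datasets download manager handles relative paths: it is resolved against the
# dataset repository (or directory) that contains this script, so `caltech-101.zip`
# is expected to ship alongside the loading script.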
_NAMES = [
"accordion",
"airplanes",
"anchor",
"ant",
"background_google",
"barrel",
"bass",
"beaver",
"binocular",
"bonsai",
"brain",
"brontosaurus",
"buddha",
"butterfly",
"camera",
"cannon",
"car_side",
"ceiling_fan",
"cellphone",
"chair",
"chandelier",
"cougar_body",
"cougar_face",
"crab",
"crayfish",
"crocodile",
"crocodile_head",
"cup",
"dalmatian",
"dollar_bill",
"dolphin",
"dragonfly",
"electric_guitar",
"elephant",
"emu",
"euphonium",
"ewer",
"faces",
"faces_easy",
"ferry",
"flamingo",
"flamingo_head",
"garfield",
"gerenuk",
"gramophone",
"grand_piano",
"hawksbill",
"headphone",
"hedgehog",
"helicopter",
"ibis",
"inline_skate",
"joshua_tree",
"kangaroo",
"ketch",
"lamp",
"laptop",
"leopards",
"llama",
"lobster",
"lotus",
"mandolin",
"mayfly",
"menorah",
"metronome",
"minaret",
"motorbikes",
"nautilus",
"octopus",
"okapi",
"pagoda",
"panda",
"pigeon",
"pizza",
"platypus",
"pyramid",
"revolver",
"rhino",
"rooster",
"saxophone",
"schooner",
"scissors",
"scorpion",
"sea_horse",
"snoopy",
"soccer_ball",
"stapler",
"starfish",
"stegosaurus",
"stop_sign",
"strawberry",
"sunflower",
"tick",
"trilobite",
"umbrella",
"watch",
"water_lilly",
"wheelchair",
"wild_cat",
"windsor_chair",
"wrench",
"yin_yang",
]
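# Illustrative sketch (not executed here) of how the names above behave once they
# are wrapped in the ClassLabel feature defined in _info():
#   datasets.features.ClassLabel(names=_NAMES).str2int("accordion")  # -> 0
#   datasets.features.ClassLabel(names=_NAMES).int2str(0)            # -> "accordion"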
# The category names in "101_ObjectCategories" and "Annotations" do not always
# match. This is a manual map between the two; categories not listed here use
# the same name in both folders.
_ANNOTATION_NAMES_MAP = {
"Faces": "Faces_2",
"Faces_easy": "Faces_3",
"Motorbikes": "Motorbikes_16",
"airplanes": "Airplanes_Side_2",
}
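# For example, a sketch of the intended lookup (mirroring its use in
# _generate_examples below):
#   _ANNOTATION_NAMES_MAP.get("airplanes", "airplanes")  # -> "Airplanes_Side_2"
#   _ANNOTATION_NAMES_MAP.get("ant", "ant")              # -> "ant" (no remapping)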
_TRAIN_POINTS_PER_CLASS = 30
class Caltech101(datasets.GeneratorBasedBuilder):
"""Caltech 101 dataset."""
VERSION = datasets.Version("1.0.0")
_BUILDER_CONFIG_WITH_BACKGROUND = datasets.BuilderConfig(
name="with_background_category",
version=VERSION,
description="Dataset containing the 101 categories and the additonnal background one. "
"No annotations.",
)
_BUILDER_CONFIG_WITHOUT_BACKGROUND = datasets.BuilderConfig(
name="without_background_category",
version=VERSION,
description="Dataset containing only the 101 categories and their annotations "
"(object contours and box position).",
)
BUILDER_CONFIGS = [
_BUILDER_CONFIG_WITH_BACKGROUND,
_BUILDER_CONFIG_WITHOUT_BACKGROUND,
]
def _info(self):
if self.config.name == self._BUILDER_CONFIG_WITHOUT_BACKGROUND.name:
features = datasets.Features(
{
"image": datasets.Image(),
"label": datasets.features.ClassLabel(names=_NAMES),
"annotation": {
"obj_contour": datasets.features.Array2D(
shape=(2, None), dtype="float64"
),
"box_coord": datasets.features.Array2D(
shape=(1, 4), dtype="int64"
),
},
}
)
else:
features = datasets.Features(
{
"image": datasets.Image(),
"label": datasets.features.ClassLabel(names=_NAMES),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
data_root_dir = dl_manager.download_and_extract(_DATA_URL)
img_folder_compress_path = [
file
for file in dl_manager.iter_files(data_root_dir)
if Path(file).name == "101_ObjectCategories.tar.gz"
][0]
annotations_folder_compress_path = [
file
for file in dl_manager.iter_files(data_root_dir)
if Path(file).name == "Annotations.tar"
][0]
img_dir = dl_manager.extract(img_folder_compress_path)
annotation_dir = dl_manager.extract(annotations_folder_compress_path)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"img_dir": Path(img_dir) / "101_ObjectCategories",
"annotation_dir": Path(annotation_dir) / "Annotations",
"split": "train",
"config_name": self.config.name,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"img_dir": Path(img_dir) / "101_ObjectCategories",
"annotation_dir": Path(annotation_dir) / "Annotations",
"split": "test",
"config_name": self.config.name,
},
),
]
def _generate_examples(self, img_dir, annotation_dir, split, config_name):
        # Same strategy as the one proposed in TF datasets: 30 random examples from each class are added to the train
# split, and the remainder are added to the test split.
# Source: https://github.com/tensorflow/datasets/blob/1106d587f97c4fca68c5b593dc7dc48c790ffa8c/tensorflow_datasets/image_classification/caltech.py#L88-L140
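        # Illustrative arithmetic (not from the source): a class with 80 .jpg files
        # contributes its 30 sampled images to the "train" split and the remaining
        # 50 to "test". The fixed seed below makes the per-class sampling identical
        # across the separate train and test generator calls, keeping the two
        # splits disjoint and reproducible.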
is_train_split = split == "train"
rng = np.random.default_rng(1234)
for class_dir in img_dir.iterdir():
class_name = class_dir.name
index_codes = [
image_path.name.split("_")[1][: -len(".jpg")]
for image_path in class_dir.iterdir()
if image_path.name.endswith(".jpg")
]
# _TRAIN_POINTS_PER_CLASS datapoints are sampled for the train split,
# the others constitute the test split.
if _TRAIN_POINTS_PER_CLASS > len(index_codes):
raise ValueError(
f"Fewer than {_TRAIN_POINTS_PER_CLASS} ({len(index_codes)}) points in class {class_dir.name}"
)
train_indices = rng.choice(
index_codes, _TRAIN_POINTS_PER_CLASS, replace=False
)
test_indices = set(index_codes).difference(train_indices)
indices_to_emit = train_indices if is_train_split else test_indices
if (
class_name == "BACKGROUND_Google"
and config_name == self._BUILDER_CONFIG_WITHOUT_BACKGROUND.name
):
print("skip BACKGROUND_Google")
continue
for indice in indices_to_emit:
record = {
"image": str(class_dir / f"image_{indice}.jpg"),
"label": class_dir.name.lower(),
}
if config_name == self._BUILDER_CONFIG_WITHOUT_BACKGROUND.name:
                    annotations_class_name = _ANNOTATION_NAMES_MAP.get(
                        class_name, class_name
                    )
data = scipy.io.loadmat(
str(
annotation_dir
/ annotations_class_name
/ f"annotation_{indice}.mat"
)
)
record["annotation"] = {
"obj_contour": data["obj_contour"],
"box_coord": data["box_coord"],
}
yield f"{class_dir.name.lower()}/{f'image_{indice}.jpg'}", record
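

if __name__ == "__main__":
    # Minimal usage sketch, not part of the loading script itself. It assumes the
    # `caltech-101.zip` archive referenced by _DATA_URL can be resolved next to
    # this file (e.g. in a local clone of the dataset repository); the path below
    # is illustrative, not authoritative.
    dataset = datasets.load_dataset(
        str(Path(__file__).resolve()), "without_background_category"
    )
    example = dataset["train"][0]
    print(example["label"], example["annotation"]["box_coord"])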