# coding=utf-8
# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset class for Food-101 dataset."""
import json
from pathlib import Path
import datasets
from datasets.tasks import ImageClassification
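# Download URL for the original Food-101 archive hosted by ETH Zurich's Computer Vision Lab.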
_BASE_URL = "http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz"
_HOMEPAGE = "https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101/"
_DESCRIPTION = (
"This dataset consists of 101 food categories, with 101'000 images. For "
"each class, 250 manually reviewed test images are provided as well as 750"
" training images. On purpose, the training images were not cleaned, and "
"thus still contain some amount of noise. This comes mostly in the form of"
" intense colors and sometimes wrong labels. All images were rescaled to "
"have a maximum side length of 512 pixels."
)
_CITATION = """\
@inproceedings{bossard14,
title = {Food-101 -- Mining Discriminative Components with Random Forests},
author = {Bossard, Lukas and Guillaumin, Matthieu and Van Gool, Luc},
booktitle = {European Conference on Computer Vision},
year = {2014}
}
"""
_NAMES = [
"apple_pie",
"baby_back_ribs",
"baklava",
"beef_carpaccio",
"beef_tartare",
"beet_salad",
"beignets",
"bibimbap",
"bread_pudding",
"breakfast_burrito",
"bruschetta",
"caesar_salad",
"cannoli",
"caprese_salad",
"carrot_cake",
"ceviche",
"cheesecake",
"cheese_plate",
"chicken_curry",
"chicken_quesadilla",
"chicken_wings",
"chocolate_cake",
"chocolate_mousse",
"churros",
"clam_chowder",
"club_sandwich",
"crab_cakes",
"creme_brulee",
"croque_madame",
"cup_cakes",
"deviled_eggs",
"donuts",
"dumplings",
"edamame",
"eggs_benedict",
"escargots",
"falafel",
"filet_mignon",
"fish_and_chips",
"foie_gras",
"french_fries",
"french_onion_soup",
"french_toast",
"fried_calamari",
"fried_rice",
"frozen_yogurt",
"garlic_bread",
"gnocchi",
"greek_salad",
"grilled_cheese_sandwich",
"grilled_salmon",
"guacamole",
"gyoza",
"hamburger",
"hot_and_sour_soup",
"hot_dog",
"huevos_rancheros",
"hummus",
"ice_cream",
"lasagna",
"lobster_bisque",
"lobster_roll_sandwich",
"macaroni_and_cheese",
"macarons",
"miso_soup",
"mussels",
"nachos",
"omelette",
"onion_rings",
"oysters",
"pad_thai",
"paella",
"pancakes",
"panna_cotta",
"peking_duck",
"pho",
"pizza",
"pork_chop",
"poutine",
"prime_rib",
"pulled_pork_sandwich",
"ramen",
"ravioli",
"red_velvet_cake",
"risotto",
"samosa",
"sashimi",
"scallops",
"seaweed_salad",
"shrimp_and_grits",
"spaghetti_bolognese",
"spaghetti_carbonara",
"spring_rolls",
"steak",
"strawberry_shortcake",
"sushi",
"tacos",
"takoyaki",
"tiramisu",
"tuna_tartare",
"waffles",
]
class Food101(datasets.GeneratorBasedBuilder):
"""Food-101 Images dataset."""
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
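                    # The image is exposed as a file path string; decoding the
                    # JPEG is left to downstream consumers.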
"image": datasets.Value("string"),
"label": datasets.features.ClassLabel(names=_NAMES),
}
),
supervised_keys=("image", "label"),
homepage=_HOMEPAGE,
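            # Task template declaring which columns hold the image file path and
            # the label, so generic image-classification tooling can use them.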
task_templates=[ImageClassification(image_file_path_column="image", label_column="label", labels=_NAMES)],
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_path = Path(dl_manager.download_and_extract(_BASE_URL))
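        # The extracted archive has a "food-101" root containing "meta" (train/test
        # split files) and "images" (one sub-directory per class).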
meta_path = dl_path / "food-101" / "meta"
image_dir_path = dl_path / "food-101" / "images"
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"json_file_path": meta_path / "train.json", "image_dir_path": image_dir_path},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"json_file_path": meta_path / "test.json", "image_dir_path": image_dir_path},
),
]
def _generate_examples(self, json_file_path, image_dir_path):
"""Generate images and labels for splits."""
labels = self.info.features["label"]
data = json.loads(json_file_path.read_text())
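        # Each split file maps a class name to a list of "<class_name>/<image_id>"
        # entries without the ".jpg" extension.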
for label, images in data.items():
for image_name in images:
image = image_dir_path / f"{image_name}.jpg"
features = {"image": str(image), "label": labels.encode_example(label)}
yield image_name, features
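# Minimal usage sketch, assuming this file is saved locally as "food101.py" and a
# `datasets` version that still supports script-based loading; the full archive is
# downloaded and extracted on first use.
if __name__ == "__main__":
    food = datasets.load_dataset("./food101.py", split="train")
    print(food)
    # Each example is a path/label pair, e.g.:
    # {"image": "/.../food-101/images/apple_pie/<image_id>.jpg", "label": 0}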