Datasets:
food101

Task Categories: other
Languages: en
Multilinguality: monolingual
Size Categories: 100K<n<1M
Licenses: unknown
Language Creators: crowdsourced
Annotations Creators: crowdsourced
food101 / food101.py
nateraw
:tada: init 5bd4c5d
# coding=utf-8
# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset class for Food-101 dataset."""

import json
from pathlib import Path

import datasets
from datasets.tasks import ImageClassification

_BASE_URL = "http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz"
25
26
_HOMEPAGE = "https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101/"
27
28
_DESCRIPTION = (
29
    "This dataset consists of 101 food categories, with 101'000 images. For "
30
    "each class, 250 manually reviewed test images are provided as well as 750"
31
    " training images. On purpose, the training images were not cleaned, and "
32
    "thus still contain some amount of noise. This comes mostly in the form of"
33
    " intense colors and sometimes wrong labels. All images were rescaled to "
34
    "have a maximum side length of 512 pixels."
35
)
36
37
_CITATION = """\
38
 @inproceedings{bossard14,
39
  title = {Food-101 -- Mining Discriminative Components with Random Forests},
40
  author = {Bossard, Lukas and Guillaumin, Matthieu and Van Gool, Luc},
41
  booktitle = {European Conference on Computer Vision},
42
  year = {2014}
43
}
44
"""
45
46
_NAMES = [
47
    "apple_pie",
48
    "baby_back_ribs",
49
    "baklava",
50
    "beef_carpaccio",
51
    "beef_tartare",
52
    "beet_salad",
53
    "beignets",
54
    "bibimbap",
55
    "bread_pudding",
56
    "breakfast_burrito",
57
    "bruschetta",
58
    "caesar_salad",
59
    "cannoli",
60
    "caprese_salad",
61
    "carrot_cake",
62
    "ceviche",
63
    "cheesecake",
64
    "cheese_plate",
65
    "chicken_curry",
66
    "chicken_quesadilla",
67
    "chicken_wings",
68
    "chocolate_cake",
69
    "chocolate_mousse",
70
    "churros",
71
    "clam_chowder",
72
    "club_sandwich",
73
    "crab_cakes",
74
    "creme_brulee",
75
    "croque_madame",
76
    "cup_cakes",
77
    "deviled_eggs",
78
    "donuts",
79
    "dumplings",
80
    "edamame",
81
    "eggs_benedict",
82
    "escargots",
83
    "falafel",
84
    "filet_mignon",
85
    "fish_and_chips",
86
    "foie_gras",
87
    "french_fries",
88
    "french_onion_soup",
89
    "french_toast",
90
    "fried_calamari",
91
    "fried_rice",
92
    "frozen_yogurt",
93
    "garlic_bread",
94
    "gnocchi",
95
    "greek_salad",
96
    "grilled_cheese_sandwich",
97
    "grilled_salmon",
98
    "guacamole",
99
    "gyoza",
100
    "hamburger",
101
    "hot_and_sour_soup",
102
    "hot_dog",
103
    "huevos_rancheros",
104
    "hummus",
105
    "ice_cream",
106
    "lasagna",
107
    "lobster_bisque",
108
    "lobster_roll_sandwich",
109
    "macaroni_and_cheese",
110
    "macarons",
111
    "miso_soup",
112
    "mussels",
113
    "nachos",
114
    "omelette",
115
    "onion_rings",
116
    "oysters",
117
    "pad_thai",
118
    "paella",
119
    "pancakes",
120
    "panna_cotta",
121
    "peking_duck",
122
    "pho",
123
    "pizza",
124
    "pork_chop",
125
    "poutine",
126
    "prime_rib",
127
    "pulled_pork_sandwich",
128
    "ramen",
129
    "ravioli",
130
    "red_velvet_cake",
131
    "risotto",
132
    "samosa",
133
    "sashimi",
134
    "scallops",
135
    "seaweed_salad",
136
    "shrimp_and_grits",
137
    "spaghetti_bolognese",
138
    "spaghetti_carbonara",
139
    "spring_rolls",
140
    "steak",
141
    "strawberry_shortcake",
142
    "sushi",
143
    "tacos",
144
    "takoyaki",
145
    "tiramisu",
146
    "tuna_tartare",
147
    "waffles",
148
]
149
150
151
class Food101(datasets.GeneratorBasedBuilder):
152
    """Food-101 Images dataset."""
153
154
    def _info(self):
155
        return datasets.DatasetInfo(
156
            description=_DESCRIPTION,
157
            features=datasets.Features(
158
                {
159
                    "image": datasets.Value("string"),
160
                    "label": datasets.features.ClassLabel(names=_NAMES),
161
                }
162
            ),
163
            supervised_keys=("image", "label"),
164
            homepage=_HOMEPAGE,
165
            task_templates=[ImageClassification(image_file_path_column="image", label_column="label", labels=_NAMES)],
166
            citation=_CITATION,
167
        )
168
169
    def _split_generators(self, dl_manager):
170
        dl_path = Path(dl_manager.download_and_extract(_BASE_URL))
171
        meta_path = dl_path / "food-101" / "meta"
172
        image_dir_path = dl_path / "food-101" / "images"
173
        return [
174
            datasets.SplitGenerator(
175
                name=datasets.Split.TRAIN,
176
                gen_kwargs={"json_file_path": meta_path / "train.json", "image_dir_path": image_dir_path},
177
            ),
178
            datasets.SplitGenerator(
179
                name=datasets.Split.VALIDATION,
180
                gen_kwargs={"json_file_path": meta_path / "test.json", "image_dir_path": image_dir_path},
181
            ),
182
        ]
183
184
    def _generate_examples(self, json_file_path, image_dir_path):
185
        """Generate images and labels for splits."""
186
        labels = self.info.features["label"]
187
        data = json.loads(json_file_path.read_text())
188
        for label, images in data.items():
189
            for image_name in images:
190
                image = image_dir_path / f"{image_name}.jpg"
191
                features = {"image": str(image), "label": labels.encode_example(label)}
192
                yield image_name, features
193