Datasets:
Tasks:
Other
Size Categories:
10K<n<100K
Annotations Creators:
crowdsourced
Source Datasets:
extended|other-foodspotting
License:
Update food101.py
Browse files- food101.py +49 -54
food101.py
CHANGED
@@ -1,5 +1,5 @@
|
|
1 |
# coding=utf-8
|
2 |
-
# Copyright 2021 The
|
3 |
#
|
4 |
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
# you may not use this file except in compliance with the License.
|
@@ -12,23 +12,27 @@
|
|
12 |
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
# See the License for the specific language governing permissions and
|
14 |
# limitations under the License.
|
15 |
-
|
16 |
"""Dataset class for Food-101 dataset."""
|
|
|
|
|
|
|
|
|
17 |
import datasets
|
18 |
from datasets.tasks import ImageClassification
|
19 |
|
20 |
-
import json
|
21 |
-
import os
|
22 |
|
23 |
_BASE_URL = "http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz"
|
24 |
|
|
|
|
|
25 |
_DESCRIPTION = (
|
26 |
"This dataset consists of 101 food categories, with 101'000 images. For "
|
27 |
"each class, 250 manually reviewed test images are provided as well as 750"
|
28 |
" training images. On purpose, the training images were not cleaned, and "
|
29 |
"thus still contain some amount of noise. This comes mostly in the form of"
|
30 |
" intense colors and sometimes wrong labels. All images were rescaled to "
|
31 |
-
"have a maximum side length of 512 pixels."
|
|
|
32 |
|
33 |
_CITATION = """\
|
34 |
@inproceedings{bossard14,
|
@@ -38,6 +42,7 @@ _CITATION = """\
|
|
38 |
year = {2014}
|
39 |
}
|
40 |
"""
|
|
|
41 |
_NAMES = [
|
42 |
"apple_pie",
|
43 |
"baby_back_ribs",
|
@@ -142,56 +147,46 @@ _NAMES = [
|
|
142 |
"waffles",
|
143 |
]
|
144 |
|
145 |
-
class Food101(datasets.GeneratorBasedBuilder):
|
146 |
-
"""Food-101 Images dataset."""
|
147 |
-
|
148 |
-
def _info(self):
|
149 |
-
"""Define Dataset Info."""
|
150 |
|
151 |
-
|
152 |
-
|
153 |
-
"label": datasets.features.ClassLabel(names=sorted(tuple(_NAMES))),
|
154 |
-
}
|
155 |
-
|
156 |
-
return datasets.DatasetInfo(
|
157 |
-
description=_DESCRIPTION,
|
158 |
-
features=datasets.Features(features_dict),
|
159 |
-
homepage="https://www.vision.ee.ethz.ch/datasets_extra/food-101/",
|
160 |
-
task_templates=[ImageClassification(image_file_path_column="image", label_column="label", labels=sorted(tuple(_NAMES)))],
|
161 |
-
citation=_CITATION,
|
162 |
-
)
|
163 |
-
|
164 |
-
def _split_generators(self, dl_manager):
|
165 |
-
"""Define Splits."""
|
166 |
|
167 |
-
|
168 |
-
|
169 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
170 |
|
171 |
-
|
172 |
-
|
173 |
-
|
174 |
-
|
175 |
-
|
176 |
-
|
177 |
-
|
178 |
-
|
179 |
-
|
180 |
-
|
181 |
-
|
182 |
-
"json_file_path":
|
183 |
-
|
184 |
-
|
185 |
-
),
|
186 |
-
]
|
187 |
|
188 |
-
|
189 |
-
|
190 |
-
|
191 |
-
|
192 |
-
|
193 |
-
|
194 |
-
|
195 |
-
|
196 |
-
|
197 |
-
yield image_name, features
|
|
|
1 |
# coding=utf-8
|
2 |
+
# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
|
3 |
#
|
4 |
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
# you may not use this file except in compliance with the License.
|
|
|
12 |
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
# See the License for the specific language governing permissions and
|
14 |
# limitations under the License.
|
|
|
15 |
"""Dataset class for Food-101 dataset."""
|
16 |
+
|
17 |
+
import json
|
18 |
+
from pathlib import Path
|
19 |
+
|
20 |
import datasets
|
21 |
from datasets.tasks import ImageClassification
|
22 |
|
|
|
|
|
23 |
|
24 |
_BASE_URL = "http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz"
|
25 |
|
26 |
+
_HOMEPAGE = "https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101/"
|
27 |
+
|
28 |
_DESCRIPTION = (
|
29 |
"This dataset consists of 101 food categories, with 101'000 images. For "
|
30 |
"each class, 250 manually reviewed test images are provided as well as 750"
|
31 |
" training images. On purpose, the training images were not cleaned, and "
|
32 |
"thus still contain some amount of noise. This comes mostly in the form of"
|
33 |
" intense colors and sometimes wrong labels. All images were rescaled to "
|
34 |
+
"have a maximum side length of 512 pixels."
|
35 |
+
)
|
36 |
|
37 |
_CITATION = """\
|
38 |
@inproceedings{bossard14,
|
|
|
42 |
year = {2014}
|
43 |
}
|
44 |
"""
|
45 |
+
|
46 |
_NAMES = [
|
47 |
"apple_pie",
|
48 |
"baby_back_ribs",
|
|
|
147 |
"waffles",
|
148 |
]
|
149 |
|
|
|
|
|
|
|
|
|
|
|
150 |
|
151 |
+
class Food101(datasets.GeneratorBasedBuilder):
    """Builder for the Food-101 image-classification dataset."""

    def _info(self):
        """Return dataset metadata: features, supervised keys, homepage, citation."""
        class_label = datasets.features.ClassLabel(names=_NAMES)
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({"image": datasets.Value("string"), "label": class_label}),
            supervised_keys=("image", "label"),
            homepage=_HOMEPAGE,
            task_templates=[
                ImageClassification(
                    image_file_path_column="image",
                    label_column="label",
                    labels=_NAMES,
                )
            ],
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive once and map each split to its metadata JSON."""
        root = Path(dl_manager.download_and_extract(_BASE_URL)) / "food-101"
        meta_dir = root / "meta"
        images_dir = root / "images"
        # The upstream archive ships "test" as its held-out split; it is exposed
        # here as the VALIDATION split.
        split_files = [
            (datasets.Split.TRAIN, "train.json"),
            (datasets.Split.VALIDATION, "test.json"),
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"json_file_path": meta_dir / file_name, "image_dir_path": images_dir},
            )
            for split_name, file_name in split_files
        ]

    def _generate_examples(self, json_file_path, image_dir_path):
        """Yield (key, example) pairs; keys are the per-class relative image names."""
        class_label = self.info.features["label"]
        # The meta JSON maps each class name to the list of its image names.
        split_index = json.loads(json_file_path.read_text())
        for class_name, image_names in split_index.items():
            # Encode once per class rather than once per image — same result.
            encoded_label = class_label.encode_example(class_name)
            for image_name in image_names:
                image_path = image_dir_path / f"{image_name}.jpg"
                yield image_name, {"image": str(image_path), "label": encoded_label}
|