albertvillanova (HF staff) committed on
Commit d420fa6
1 Parent(s): 1ac27ab

Delete loading script
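With the script gone, `datasets` resolves this dataset from data files hosted on the Hub instead of executing repository code. As a minimal sketch of what loading looks like after this change, assuming the repo hosts converted data files (e.g. Parquet) under the `food101` id, which is the usual pattern for these script-removal commits:

```python
from datasets import load_dataset

# No loading script is run; the data files on the Hub are read directly.
ds = load_dataset("food101")
print(ds)              # expected: DatasetDict with "train" and "validation" splits
print(ds["train"][0])  # {"image": <PIL image>, "label": <class id>}
```

Note that the deleted script exposed the original archive's "test" manifest as the `validation` split (see `_split_generators` below), so downstream code should expect `train`/`validation` rather than `train`/`test`.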

Files changed (1)
  1. food101.py +0 -217
food101.py DELETED
@@ -1,217 +0,0 @@
- # coding=utf-8
- # Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Dataset class for Food-101 dataset."""
-
- import datasets
- from datasets.tasks import ImageClassification
-
-
- _BASE_URL = "http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz"
-
- _METADATA_URLS = {
-     "train": "https://s3.amazonaws.com/datasets.huggingface.co/food101/meta/train.txt",
-     "test": "https://s3.amazonaws.com/datasets.huggingface.co/food101/meta/test.txt",
- }
-
- _HOMEPAGE = "https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101/"
-
- _DESCRIPTION = (
-     "This dataset consists of 101 food categories, with 101'000 images. For "
-     "each class, 250 manually reviewed test images are provided as well as 750"
-     " training images. On purpose, the training images were not cleaned, and "
-     "thus still contain some amount of noise. This comes mostly in the form of"
-     " intense colors and sometimes wrong labels. All images were rescaled to "
-     "have a maximum side length of 512 pixels."
- )
-
- _CITATION = """\
- @inproceedings{bossard14,
-   title = {Food-101 -- Mining Discriminative Components with Random Forests},
-   author = {Bossard, Lukas and Guillaumin, Matthieu and Van Gool, Luc},
-   booktitle = {European Conference on Computer Vision},
-   year = {2014}
- }
- """
-
- _LICENSE = """\
- LICENSE AGREEMENT
- =================
- - The Food-101 data set consists of images from Foodspotting [1] which are not
-   property of the Federal Institute of Technology Zurich (ETHZ). Any use beyond
-   scientific fair use must be negociated with the respective picture owners
-   according to the Foodspotting terms of use [2].
-
- [1] http://www.foodspotting.com/
- [2] http://www.foodspotting.com/terms/
- """
-
- _NAMES = [
-     "apple_pie",
-     "baby_back_ribs",
-     "baklava",
-     "beef_carpaccio",
-     "beef_tartare",
-     "beet_salad",
-     "beignets",
-     "bibimbap",
-     "bread_pudding",
-     "breakfast_burrito",
-     "bruschetta",
-     "caesar_salad",
-     "cannoli",
-     "caprese_salad",
-     "carrot_cake",
-     "ceviche",
-     "cheesecake",
-     "cheese_plate",
-     "chicken_curry",
-     "chicken_quesadilla",
-     "chicken_wings",
-     "chocolate_cake",
-     "chocolate_mousse",
-     "churros",
-     "clam_chowder",
-     "club_sandwich",
-     "crab_cakes",
-     "creme_brulee",
-     "croque_madame",
-     "cup_cakes",
-     "deviled_eggs",
-     "donuts",
-     "dumplings",
-     "edamame",
-     "eggs_benedict",
-     "escargots",
-     "falafel",
-     "filet_mignon",
-     "fish_and_chips",
-     "foie_gras",
-     "french_fries",
-     "french_onion_soup",
-     "french_toast",
-     "fried_calamari",
-     "fried_rice",
-     "frozen_yogurt",
-     "garlic_bread",
-     "gnocchi",
-     "greek_salad",
-     "grilled_cheese_sandwich",
-     "grilled_salmon",
-     "guacamole",
-     "gyoza",
-     "hamburger",
-     "hot_and_sour_soup",
-     "hot_dog",
-     "huevos_rancheros",
-     "hummus",
-     "ice_cream",
-     "lasagna",
-     "lobster_bisque",
-     "lobster_roll_sandwich",
-     "macaroni_and_cheese",
-     "macarons",
-     "miso_soup",
-     "mussels",
-     "nachos",
-     "omelette",
-     "onion_rings",
-     "oysters",
-     "pad_thai",
-     "paella",
-     "pancakes",
-     "panna_cotta",
-     "peking_duck",
-     "pho",
-     "pizza",
-     "pork_chop",
-     "poutine",
-     "prime_rib",
-     "pulled_pork_sandwich",
-     "ramen",
-     "ravioli",
-     "red_velvet_cake",
-     "risotto",
-     "samosa",
-     "sashimi",
-     "scallops",
-     "seaweed_salad",
-     "shrimp_and_grits",
-     "spaghetti_bolognese",
-     "spaghetti_carbonara",
-     "spring_rolls",
-     "steak",
-     "strawberry_shortcake",
-     "sushi",
-     "tacos",
-     "takoyaki",
-     "tiramisu",
-     "tuna_tartare",
-     "waffles",
- ]
-
- _IMAGES_DIR = "food-101/images/"
-
-
- class Food101(datasets.GeneratorBasedBuilder):
-     """Food-101 Images dataset."""
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "image": datasets.Image(),
-                     "label": datasets.ClassLabel(names=_NAMES),
-                 }
-             ),
-             supervised_keys=("image", "label"),
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-             license=_LICENSE,
-             task_templates=[ImageClassification(image_column="image", label_column="label")],
-         )
-
-     def _split_generators(self, dl_manager):
-         archive_path = dl_manager.download(_BASE_URL)
-         split_metadata_paths = dl_manager.download(_METADATA_URLS)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "images": dl_manager.iter_archive(archive_path),
-                     "metadata_path": split_metadata_paths["train"],
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "images": dl_manager.iter_archive(archive_path),
-                     "metadata_path": split_metadata_paths["test"],
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, images, metadata_path):
-         """Generate images and labels for splits."""
-         with open(metadata_path, encoding="utf-8") as f:
-             files_to_keep = set(f.read().split("\n"))
-         for file_path, file_obj in images:
-             if file_path.startswith(_IMAGES_DIR):
-                 if file_path[len(_IMAGES_DIR) : -len(".jpg")] in files_to_keep:
-                     label = file_path.split("/")[2]
-                     yield file_path, {
-                         "image": {"path": file_path, "bytes": file_obj.read()},
-                         "label": label,
-                     }
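For reference, the split logic that disappears with this file can be reproduced standalone. The sketch below mirrors the deleted `_generate_examples`: it keeps only archive members listed in a split manifest and derives the label from the class folder. Only the standard library is used; the archive and manifest filenames in the usage comment are assumptions based on the URLs in the deleted script:

```python
import tarfile

_IMAGES_DIR = "food-101/images/"

def iter_split(archive_path, metadata_path):
    """Yield (file_path, image_bytes, label) for images listed in the split manifest."""
    # The manifest holds one "class_name/image_id" entry per line.
    with open(metadata_path, encoding="utf-8") as f:
        files_to_keep = set(f.read().split("\n"))
    with tarfile.open(archive_path, "r:gz") as tar:
        for member in tar:
            if not member.isfile() or not member.name.startswith(_IMAGES_DIR):
                continue
            # "food-101/images/apple_pie/12345.jpg" -> "apple_pie/12345"
            if member.name[len(_IMAGES_DIR) : -len(".jpg")] in files_to_keep:
                label = member.name.split("/")[2]  # class folder name
                yield member.name, tar.extractfile(member).read(), label

# Example usage (paths are assumptions): stream the training images.
# for path, data, label in iter_split("food-101.tar.gz", "train.txt"):
#     ...
```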