SaulLu commited on
Commit
3f302b2
1 Parent(s): d6fc057
Files changed (1) hide show
  1. Caltech-101.py +27 -19
Caltech-101.py CHANGED
@@ -1,4 +1,4 @@
1
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
  #
3
  # Licensed under the Apache License, Version 2.0 (the "License");
4
  # you may not use this file except in compliance with the License.
@@ -11,18 +11,14 @@
11
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
  # See the License for the specific language governing permissions and
13
  # limitations under the License.
14
- # TODO: Address all TODOs and remove all explanatory comments
15
- """TODO: Add a description here."""
16
 
17
 
18
- import csv
19
- import json
20
- import os
21
  from pathlib import Path
22
 
23
  import datasets
24
- from datasets.tasks import ImageClassification
25
  import numpy as np
 
26
 
27
  _CITATION = """\
28
  @article{FeiFei2004LearningGV,
@@ -180,31 +176,35 @@ class Caltech101(datasets.GeneratorBasedBuilder):
180
 
181
  def _split_generators(self, dl_manager):
182
  data_root_dir = dl_manager.download_and_extract(_DATA_URL)
183
- compress_folder_path = [file for file in dl_manager.iter_files(data_root_dir) if Path(file).name == "101_ObjectCategories.tar.gz"][0]
 
 
 
 
184
  data_dir = dl_manager.extract(compress_folder_path)
185
  return [
186
  datasets.SplitGenerator(
187
  name=datasets.Split.TRAIN,
188
- # These kwargs will be passed to _generate_examples
189
  gen_kwargs={
190
- "filepath": data_dir, # TODO: change accordingly
191
  "split": "train",
192
  },
193
  ),
194
  datasets.SplitGenerator(
195
  name=datasets.Split.TEST,
196
- # These kwargs will be passed to _generate_examples
197
  gen_kwargs={
198
- "filepath": data_dir, # TODO: change accordingly
199
  "split": "test",
200
  },
201
  ),
202
  ]
203
 
204
- # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
205
  def _generate_examples(self, filepath, split):
206
- # Same strategy as the one proposed in TF datasets
207
- is_train_split = (split == "train")
 
 
 
208
  data_dir = Path(filepath) / "101_ObjectCategories"
209
  # Sets random seed so the random partitioning of files is the same when
210
  # called for the train and test splits.
@@ -212,14 +212,22 @@ class Caltech101(datasets.GeneratorBasedBuilder):
212
  np.random.seed(1234)
213
 
214
  for class_dir in data_dir.iterdir():
215
- fnames = [image_path for image_path in class_dir.iterdir() if image_path.name.endswith(".jpg")]
 
 
 
 
216
  # _TRAIN_POINTS_PER_CLASS datapoints are sampled for the train split,
217
  # the others constitute the test split.
218
  if _TRAIN_POINTS_PER_CLASS > len(fnames):
219
- raise ValueError("Fewer than {} ({}) points in class {}".format(
220
- _TRAIN_POINTS_PER_CLASS, len(fnames), class_dir.name))
 
 
 
221
  train_fnames = np.random.choice(
222
- fnames, _TRAIN_POINTS_PER_CLASS, replace=False)
 
223
  test_fnames = set(fnames).difference(train_fnames)
224
  fnames_to_emit = train_fnames if is_train_split else test_fnames
225
 
 
1
+ # Copyright 2022 The HuggingFace Datasets Authors.
2
  #
3
  # Licensed under the Apache License, Version 2.0 (the "License");
4
  # you may not use this file except in compliance with the License.
 
11
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
  # See the License for the specific language governing permissions and
13
  # limitations under the License.
14
+ """Caltech 101 loading script"""
 
15
 
16
 
 
 
 
17
  from pathlib import Path
18
 
19
  import datasets
 
20
  import numpy as np
21
+ from datasets.tasks import ImageClassification
22
 
23
  _CITATION = """\
24
  @article{FeiFei2004LearningGV,
 
176
 
177
  def _split_generators(self, dl_manager):
178
  data_root_dir = dl_manager.download_and_extract(_DATA_URL)
179
+ compress_folder_path = [
180
+ file
181
+ for file in dl_manager.iter_files(data_root_dir)
182
+ if Path(file).name == "101_ObjectCategories.tar.gz"
183
+ ][0]
184
  data_dir = dl_manager.extract(compress_folder_path)
185
  return [
186
  datasets.SplitGenerator(
187
  name=datasets.Split.TRAIN,
 
188
  gen_kwargs={
189
+ "filepath": data_dir,
190
  "split": "train",
191
  },
192
  ),
193
  datasets.SplitGenerator(
194
  name=datasets.Split.TEST,
 
195
  gen_kwargs={
196
+ "filepath": data_dir,
197
  "split": "test",
198
  },
199
  ),
200
  ]
201
 
 
202
  def _generate_examples(self, filepath, split):
203
+ # Same strategy as the one proposed in TF datasets: 30 random examples from each class are added to the train
204
+ # split, and the remainder are added to the test split.
205
+ # Source: https://github.com/tensorflow/datasets/blob/1106d587f97c4fca68c5b593dc7dc48c790ffa8c/tensorflow_datasets/image_classification/caltech.py#L88-L140
206
+
207
+ is_train_split = split == "train"
208
  data_dir = Path(filepath) / "101_ObjectCategories"
209
  # Sets random seed so the random partitioning of files is the same when
210
  # called for the train and test splits.
 
212
  np.random.seed(1234)
213
 
214
  for class_dir in data_dir.iterdir():
215
+ fnames = [
216
+ image_path
217
+ for image_path in class_dir.iterdir()
218
+ if image_path.name.endswith(".jpg")
219
+ ]
220
  # _TRAIN_POINTS_PER_CLASS datapoints are sampled for the train split,
221
  # the others constitute the test split.
222
  if _TRAIN_POINTS_PER_CLASS > len(fnames):
223
+ raise ValueError(
224
+ "Fewer than {} ({}) points in class {}".format(
225
+ _TRAIN_POINTS_PER_CLASS, len(fnames), class_dir.name
226
+ )
227
+ )
228
  train_fnames = np.random.choice(
229
+ fnames, _TRAIN_POINTS_PER_CLASS, replace=False
230
+ )
231
  test_fnames = set(fnames).difference(train_fnames)
232
  fnames_to_emit = train_fnames if is_train_split else test_fnames
233