DiGyt committed
Commit
f3983dd
1 Parent(s): b4e89fe

Upload dataset loading script

Files changed (1)
  1. ecoset.py +197 -0
ecoset.py ADDED
@@ -0,0 +1,197 @@
+ import os
+ import os.path as op
+ import zipfile
+ from getpass import getpass
+ from tqdm import tqdm
+ import platform
+ import subprocess
+
+ from urllib.parse import urlparse
+ import datasets
+ from datasets.filesystems import S3FileSystem
+ import boto3
+ from botocore import UNSIGNED
+ from botocore.client import Config
+
+
+ _CITATION = """\
+ @article{mehrer2021ecologically,
+   title={An ecologically motivated image dataset for deep learning yields better models of human vision},
+   author={Mehrer, Johannes and Spoerer, Courtney J and Jones, Emer C and Kriegeskorte, Nikolaus and Kietzmann, Tim C},
+   journal={Proceedings of the National Academy of Sciences},
+   volume={118},
+   number={8},
+   year={2021},
+   publisher={National Acad Sciences}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Tired of all the dogs in ImageNet (ILSVRC)? Then ecoset is here for you: 1.5 million images
+ from 565 basic-level categories, chosen to be both (i) frequent in linguistic usage
+ and (ii) rated by human observers as concrete (e.g. ‘table’ is concrete, ‘romance’
+ is not). The ecoset website collects further resources: trained deep neural network
+ models, code to interact with them, and published papers using the dataset.
+ """
+
+ _HOMEPAGE = "https://www.kietzmannlab.org/ecoset/"
+
+ _LICENSE = "CC BY NC SA 2.0"
+
+ # The HuggingFace Datasets library doesn't host the dataset; it only points to the original files.
+ _URLS = {
+     # "codeocean": "https://files.codeocean.com/datasets/verified/0ab003f4-ff2d-4de3-b4f8-b6e349c0e5e5/ecoset.zip?download",
+     "codeocean": "s3://codeocean-datasets/0ab003f4-ff2d-4de3-b4f8-b6e349c0e5e5/ecoset.zip",
+ }
+
+ labels = ['cymbals', 'bison', 'lemonade', 'crib', 'chestnut', 'mosquito', 'aloe', 'extinguisher', 'onion', 'starfish', 'basket', 'jar', 'snail', 'mushroom', 'coffin', 'joystick', 'raspberry', 'gearshift', 'tyrannosaurus', 'stadium', 'telescope', 'blueberry', 'hippo', 'cannabis', 'hairbrush', 'river', 'artichoke', 'wallet', 'city', 'bee', 'rifle', 'boar', 'bib', 'envelope', 'silverfish', 'shower', 'curtain', 'pinwheel', 'guillotine', 'snowplow', 'hut', 'jukebox', 'gecko', 'marshmallow', 'lobster', 'flashlight', 'breadfruit', 'cow', 'spoon', 'blender', 'croissant', 'greenhouse', 'church', 'antenna', 'monkey', 'zucchini', 'snake', 'manatee', 'child', 'table', 'winterberry', 'sloth', 'cannon', 'baguette', 'persimmon', 'candelabra', 'necklace', 'flag', 'geyser', 'thermos', 'tweezers', 'chandelier', 'kebab', 'mailbox', 'steamroller', 'crayon', 'lawnmower', 'pomegranate', 'fire', 'violin', 'matchstick', 'train', 'hamster', 'bobsleigh', 'boat', 'bullet', 'forklift', 'clock', 'saltshaker', 'anteater', 'crowbar', 'lightbulb', 'pier', 'muffin', 'paintbrush', 'crawfish', 'bench', 'nectarine', 'eyedropper', 'backpack', 'goat', 'hotplate', 'fishnet', 'robot', 'rice', 'shovel', 'candle', 'blimp', 'bridge', 'mountain', 'coleslaw', 'stagecoach', 'waterfall', 'ladle', 'radiator', 'drain', 'tray', 'house', 'key', 'skunk', 'lake', 'earpiece', 'gazebo', 'blackberry', 'groundhog', 'paperclip', 'cookie', 'milk', 'rug', 'thermostat', 'milkshake', 'scoreboard', 'bean', 'giraffe', 'antelope', 'newsstand', 'camcorder', 'sawmill', 'balloon', 'ladder', 'videotape', 'microphone', 'coin', 'hay', 'moth', 'octopus', 'honeycomb', 'wrench', 'cane', 'bobcat', 'banner', 'newspaper', 'reef', 'worm', 'cucumber', 'beach', 'couch', 'streetlamp', 'rhino', 'ceiling', 'cupcake', 'hourglass', 'caterpillar', 'tamale', 'asparagus', 'flower', 'frog', 'dog', 'knife', 'lamp', 'walnut', 'grape', 'scone', 'peanut', 'ferret', 'kettle', 'elephant', 'oscilloscope', 'weasel', 'guava', 'gramophone', 'stove', 'bamboo', 'chicken', 'guacamole', 'toolbox', 'tractor', 'tiger', 'butterfly', 'coffeepot', 'bus', 'meteorite', 'fish', 'graveyard', 'blowtorch', 'grapefruit', 'cat', 'jellyfish', 'carousel', 'wheat', 'tadpole', 'kazoo', 'raccoon', 'typewriter', 'scissors', 'pothole', 'earring', 'drawers', 'cup', 'warthog', 'wall', 'lighthouse', 'burrito', 'cassette', 'nacho', 'sink', 'seashell', 'bed', 'noodles', 'woman', 'rabbit', 'fence', 'pistachio', 'pencil', 'hotdog', 'ball', 'ship', 'strawberry', 'pan', 'custard', 'dolphin', 'tent', 'bun', 'tortilla', 'tumbleweed', 'playground', 'scallion', 'anchor', 'hare', 'waterspout', 'dough', 'burner', 'kale', 'razor', 'chocolate', 'doughnut', 'squeegee', 'bandage', 'beaver', 'refrigerator', 'cork', 'anvil', 'microchip', 'banana', 'thumbtack', 'chair', 'sharpener', 'bird', 'castle', 'wand', 'doormat', 'celery', 'steak', 'ant', 'apple', 'cave', 'scaffolding', 'bell', 'towel', 'mantis', 'thimble', 'bowl', 'chess', 'pickle', 'lollypop', 'leek', 'barrel', 'dollhouse', 'tapioca', 'spareribs', 'fig', 'apricot', 'strongbox', 'brownie', 'beaker', 'manhole', 'piano', 'whale', 'hammer', 'dishrag', 'pecan', 'highlighter', 'pretzel', 'earwig', 'cogwheel', 'trashcan', 'syringe', 'turnip', 'pear', 'lettuce', 'hedgehog', 'guardrail', 'bubble', 'pineapple', 'burlap', 'moon', 'spider', 'fern', 'binoculars', 'gravel', 'plum', 'scorpion', 'cube', 'squirrel', 'book', 'crouton', 'bag', 'lantern', 'parsley', 'jaguar', 'thyme', 'oyster', 'kumquat', 'chinchilla', 'cherry', 'umbrella', 'bicycle', 'eggbeater', 'pig', 'kitchen', 'fondue', 
'treadmill', 'casket', 'papaya', 'beetle', 'shredder', 'grasshopper', 'anthill', 'chili', 'bottle', 'calculator', 'gondola', 'pizza', 'compass', 'mop', 'hamburger', 'chipmunk', 'bagel', 'outhouse', 'pliers', 'wolf', 'matchbook', 'corn', 'salamander', 'lasagna', 'stethoscope', 'eggroll', 'avocado', 'eggplant', 'mouse', 'walrus', 'sprinkler', 'glass', 'cauldron', 'parsnip', 'canoe', 'pancake', 'koala', 'deer', 'chalk', 'urinal', 'toilet', 'cabbage', 'platypus', 'lizard', 'leopard', 'cake', 'hammock', 'defibrillator', 'sundial', 'beet', 'popcorn', 'spinach', 'cauliflower', 'canyon', 'spacecraft', 'teapot', 'tunnel', 'porcupine', 'jail', 'spearmint', 'dustpan', 'calipers', 'toast', 'drum', 'phone', 'wire', 'alligator', 'vase', 'motorcycle', 'toothpick', 'coconut', 'lion', 'turtle', 'cheetah', 'bugle', 'casino', 'fountain', 'pie', 'bread', 'meatball', 'windmill', 'gun', 'projector', 'chameleon', 'tomato', 'nutmeg', 'plate', 'bulldozer', 'camel', 'sphinx', 'mall', 'hanger', 'ukulele', 'wheelbarrow', 'ring', 'dildo', 'loudspeaker', 'odometer', 'ruler', 'mousetrap', 'breadbox', 'parachute', 'bolt', 'bracelet', 'library', 'otter', 'airplane', 'pea', 'tongs', 'cactus', 'knot', 'shrimp', 'computer', 'sheep', 'television', 'melon', 'kangaroo', 'helicopter', 'birdcage', 'pumpkin', 'dishwasher', 'crocodile', 'stairs', 'garlic', 'barnacle', 'crate', 'lime', 'axe', 'hairpin', 'egg', 'emerald', 'candy', 'stegosaurus', 'broom', 'mistletoe', 'submarine', 'fireworks', 'peach', 'ape', 'chalkboard', 'bumblebee', 'potato', 'battery', 'guitar', 'opossum', 'volcano', 'llama', 'ashtray', 'sieve', 'coliseum', 'cinnamon', 'moose', 'tree', 'donkey', 'wasp', 'corkscrew', 'gargoyle', 'taco', 'macadamia', 'camera', 'mandolin', 'kite', 'cranberry', 'thermometer', 'tofu', 'closet', 'hovercraft', 'escalator', 'horseshoe', 'wristwatch', 'lemon', 'sushi', 'rat', 'rainbow', 'pillow', 'radish', 'granola', 'okra', 'pastry', 'mango', 'dragonfly', 'flashbulb', 'chalice', 'acorn', 'birdhouse', 'gooseberry', 'locker', 'padlock', 'missile', 'clarinet', 'panda', 'iceberg', 'road', 'flea', 'hazelnut', 'cockroach', 'needle', 'omelet', 'desert', 'condom', 'graffiti', 'iguana', 'bucket', 'photocopier', 'blanket', 'microscope', 'horse', 'nest', 'screwdriver', 'toaster', 'car', 'doll', 'salsa', 'man', 'zebra', 'stapler', 'grate', 'truck', 'bear', 'carrot', 'auditorium', 'cashew', 'shield', 'crown', 'altar', 'pudding', 'cheese', 'rhubarb', 'broccoli', 'tower', 'cumin', 'elevator', 'wheelchair', 'flyswatter']
+
+ # The class name usually matches the script name in CamelCase instead of snake_case.
+ class Ecoset(datasets.GeneratorBasedBuilder):
+     """Ecoset is a large, clean and ecologically valid image dataset."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     # Only a single configuration is defined; further sub-sets of the dataset
+     # could be exposed here as additional BuilderConfigs.
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="Full", version=VERSION, description="We could do different splits of the dataset here. But we don't"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "Full"
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "image": datasets.Image(),
+                 "label": datasets.ClassLabel(names=labels),
+             }
+         )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types.
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+             task_templates=[datasets.tasks.ImageClassification(image_column="image", label_column="label")],
+         )
+
+     def _split_generators(self, dl_manager):
+         # Download/extract the data and define the splits.
+         password = getpass("\nIn order to use ecoset, please read the README and License "
+                            "agreement found under:\nhttps://codeocean.com/capsule/9570390"
+                            "\nand enter the mentioned password.\n\nPlease enter password:\n")
+
+         def abslist(path):
+             return [op.join(path, p) for p in os.listdir(path)]
+
+         def s3_zipfile_download(source_url, target_dir):
+             # Stream the archive from S3 and extract it member by member.
+             s3 = S3FileSystem(anon=True, use_ssl=False, default_block_size=int(15 * 2**20))
+             with s3.open(source_url, "rb") as raw_file:
+                 with zipfile.ZipFile(raw_file, compression=zipfile.ZIP_DEFLATED, allowZip64=True) as zip_file:
+                     member_list = zip_file.namelist()
+                     for member in tqdm(member_list, total=len(member_list), desc="Extracting ecoset to disk"):
+                         zip_file.extract(member, target_dir, pwd=password.encode("ascii"))
+
+         def subprocess_download(source_url, target_dir):
+             # download the archive via anonymous (unsigned) S3 access
+             urlinfo = urlparse(source_url, allow_fragments=False)
+             if not op.exists(target_dir):
+                 os.makedirs(target_dir)
+             zip_path = op.join(target_dir, "ecoset.zip")
+             s3 = boto3.client(urlinfo.scheme, config=Config(signature_version=UNSIGNED))
+             s3.download_file(urlinfo.netloc, urlinfo.path[1:], zip_path)
+
+             # unzip with the system tool, which is considerably faster than Python's zipfile
+             subprocess.call(["unzip", "-P", password, "-o", zip_path, "-d", target_dir], shell=False)
+
+         if platform.system() in ("Linux", "Darwin"):
+             print('Using "fast" Linux/Mac download and unzipping. This will take about 15 hours on a typical computer.')
+             archives = dl_manager.download_custom(_URLS["codeocean"], subprocess_download)
+         else:
+             print('Using slow Windows download and unzipping. This can take up to 70 hours on a typical computer. Sorry.')
+             archives = dl_manager.download_custom(_URLS["codeocean"], s3_zipfile_download)
+
+         # create a dict mapping each split to the absolute file paths inside its class folders
+         split_dict = {split: [] for split in ("train", "val", "test")}
+         for split in split_dict.keys():
+             fnames = abslist(op.join(archives, split))
+             for f in fnames:
+                 split_dict[split].extend(abslist(f))
+
+         # return data splits
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"archives": split_dict["train"], "split": "train"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"archives": split_dict["val"], "split": "validation"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"archives": split_dict["test"], "split": "test"},
+             ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, archives, split):
+         """Yields examples."""
+         idx = 0
+         for archive in archives:
+             if any(archive.endswith(i) for i in (".JPEG", ".JPG", ".jpeg", ".jpg")):
+                 # class folders are named "<synset_id>_<label>", so the label is the second part
+                 synset_id, label = archive.split("/")[-2].split("_")
+                 with open(archive, "rb") as file:
+                     ex = {"image": {"path": archive, "bytes": file.read()}, "label": label}
+                 yield idx, ex
+                 idx += 1
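
For reference, once this script is hosted on the Hub, the dataset can be loaded through the standard datasets API. Below is a minimal usage sketch: the repo id "DiGyt/ecoset" is an assumption based on the committer name, and newer versions of datasets may additionally require trust_remote_code=True.

    import datasets

    # The loading script prompts for the CodeOcean password before downloading.
    ds = datasets.load_dataset("DiGyt/ecoset", "Full", split="train")  # repo id assumed

    example = ds[0]
    print(example["image"].size)                           # decoded PIL image
    print(ds.features["label"].int2str(example["label"]))  # map integer label back to class name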