manuel-delverme committed
Commit c765e64
1 Parent(s): bb8db92

Upload folder using huggingface_hub

.idea/inspectionProfiles/Project_Default.xml ADDED
The diff for this file is too large to render. See raw diff
 
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
+ <component name="InspectionProjectProfileManager">
+   <settings>
+     <option name="USE_PROJECT_PROFILE" value="false" />
+     <version value="1.0" />
+   </settings>
+ </component>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="ProjectModuleManager">
+     <modules>
+       <module fileurl="file://$PROJECT_DIR$/.idea/test_repo.iml" filepath="$PROJECT_DIR$/.idea/test_repo.iml" />
+     </modules>
+   </component>
+ </project>
.idea/test_repo.iml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <module type="PYTHON_MODULE" version="4">
+   <component name="NewModuleRootManager">
+     <content url="file://$MODULE_DIR$" />
+     <orderEntry type="inheritedJdk" />
+     <orderEntry type="sourceFolder" forTests="false" />
+   </component>
+ </module>
.idea/workspace.xml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="ProjectViewState">
+     <option name="hideEmptyMiddlePackages" value="true" />
+     <option name="showExcludedFiles" value="false" />
+     <option name="showLibraryContents" value="true" />
+   </component>
+ </project>
annotations/test_annotations/mask.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2692a5f89d12e62ef160b7c14f26a030213b1f4338d2efe9dc739a741955c388
+ size 192406
annotations/train_annotations/mask.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:31f903069f512aa44b22924b5ddf2ed4e47c8de6e4a64a1b3e92c669e5b46bd9
+ size 1884937
annotations/val_annotations/mask.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65eaee2355191385bc5a57dfe09f83a8144a1789dc600f82ac08c587d95d629f
+ size 200268
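
These three mask.tar.gz entries are Git LFS pointer files: the repository itself stores only the object's SHA-256 and byte size, while the actual archive lives in LFS storage and is fetched on download. A minimal sketch of resolving one of them by hand with huggingface_hub (the repo_id and filename match this commit; the extraction directory is an assumption for illustration):

import tarfile

from huggingface_hub import hf_hub_download

# hf_hub_download resolves the LFS pointer and returns a local path
# to the real archive in the Hugging Face cache.
archive_path = hf_hub_download(
    repo_id="manuel-delverme/test_repo",
    repo_type="dataset",
    filename="annotations/val_annotations/mask.tar.gz",
)

# The loading script below expects a top-level "masks/" folder of .npy files.
with tarfile.open(archive_path, "r:gz") as tar:
    tar.extractall("val_annotations")  # hypothetical output directory

In normal use this is not needed: the dl_manager in test_repo.py below downloads and extracts these archives automatically.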
test.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
test_repo.py ADDED
@@ -0,0 +1,144 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # TODO: Address all TODOs and remove all explanatory comments
+ """TODO: Add a description here."""
+
+ import json
+ import os
+
+ import PIL.Image
+ import datasets
+ import numpy as np
+
+ for _ in range(10):
+     print("LOADING SCRIPT")
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {A great new dataset},
+ author={huggingface, Inc.
+ },
+ year={2020}
+ }
+ """
+
+ # TODO: Add description of the dataset here
+ # You can copy an official description
+ _DESCRIPTION = """\
+ This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = ""
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # TODO: Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ _URLS = {
+     "8x8": [
+         "https://huggingface.co/datasets/Prisma-Multimodal/segmented-imagenet1k-subset/resolve/main/images.tar.gz?download=true",
+         "https://huggingface.co/datasets/manuel-delverme/test_repo/resolve/main/annotations/{split}_annotations/mask.tar.gz?download=true",
+         "https://huggingface.co/datasets/manuel-delverme/test_repo/resolve/main/{split}.jsonl?download=true"
+     ]
+ }
+
+
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class PatchyImagenet(datasets.GeneratorBasedBuilder):
+     """TODO: Short description of my dataset."""
+
+     VERSION = datasets.Version("0.0.1")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     BUILDER_CONFIGS = [
+         # datasets.BuilderConfig(name="1x1", version=VERSION, description="Patchy Imagenet with 1x1 resolution (this is the original resolution)"),
+         datasets.BuilderConfig(name="8x8", version=VERSION, description="Patchy Imagenet with 8x8 resolution"),
+         # datasets.BuilderConfig(name="16x16", version=VERSION, description="Patchy Imagenet with 16x16 resolution"),
+         # datasets.BuilderConfig(name="32x32", version=VERSION, description="Patchy Imagenet with 32x32 resolution"),
+         # datasets.BuilderConfig(name="64x64", version=VERSION, description="Patchy Imagenet with 64x64 resolution"),
+     ]
+     DEFAULT_CONFIG_NAME = "8x8"
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "image": datasets.Image(),
+                 "patches": datasets.Features(
+                     {
+                         # "categories": datasets.Sequence(datasets.ClassLabel(names=_IMAGENET_CLASSES)),
+                         "categories": datasets.Value("string"),
+                         "scores": datasets.Sequence(datasets.Value("float32")),
+                         "mask": datasets.Array2D(shape=(224, 224), dtype="bool"),
+                         # "mask": datasets.Sequence(datasets.Image()),
+                     }
+                 ),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
+         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
+         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+         url_templates = _URLS[self.config.name]
+
+         split_kwargs = {}
+         for split in ["train", "test", "val"]:
+             urls = [url.format(split=split) for url in url_templates]
+             image_dir, mask_dir, metadata_file = dl_manager.download_and_extract(urls)
+             split_kwargs[split] = {
+                 "meta_path": metadata_file,
+                 "image_dir": image_dir, "mask_dir": mask_dir,
+                 "split": split
+             }
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=split_kwargs["train"]),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs=split_kwargs["val"]),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=split_kwargs["test"]),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, meta_path, image_dir, mask_dir, split):
+         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+         with open(meta_path, encoding="utf-8") as f:
+             for key, row in enumerate(f):
+                 data = json.loads(row)
+                 image_path = os.path.join(image_dir, "images", f"{split}_images", data["file_name"])
+                 sample_name, _extension = os.path.splitext(data["file_name"])
+                 mask_file = os.path.join(mask_dir, "masks", sample_name + ".npy")
+                 mask = np.load(mask_file).astype(bool)
+                 # with open(image_path, "rb") as f:
+                 #     breakpoint()
+                 pil_image = PIL.Image.open(image_path)
+                 yield key, {
+                     "image": pil_image,
+                     "patches": {
+                         "categories": data["patches"]["categories"],
+                         "scores": data["patches"]["scores"],
+                         "mask": list(mask),
+                     }
+                 }
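
For reference, a builder script like this is exercised through datasets.load_dataset. A minimal sketch, assuming a recent datasets version (which requires trust_remote_code=True for repositories with custom loading scripts):

import numpy as np
import datasets

# "8x8" is the only active config in BUILDER_CONFIGS above.
ds = datasets.load_dataset(
    "manuel-delverme/test_repo",
    "8x8",
    split="train",
    trust_remote_code=True,
)

example = ds[0]
print(example["patches"]["categories"])              # category string for the patches
print(len(example["patches"]["scores"]))             # per-patch scores
print(np.asarray(example["patches"]["mask"]).shape)  # (224, 224), per the Array2D feature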
train.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
val.jsonl ADDED
The diff for this file is too large to render. See raw diff
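
The train/test/val .jsonl files are too large to render here, but _generate_examples in test_repo.py above implies their per-line schema: each row is a JSON object with a "file_name" plus a "patches" object carrying "categories" and "scores". A hypothetical row (all values invented for illustration):

{"file_name": "n01440764_18.JPEG", "patches": {"categories": "tench", "scores": [0.91, 0.84]}}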