manuel-delverme committed
Commit 82c87d6
1 Parent(s): 865ea05

Upload folder using huggingface_hub

Files changed (1): test_repo.py (+9 -35)
test_repo.py CHANGED
@@ -1,7 +1,6 @@
 # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
-# TODO: Address all TODOs and remove all explanatory comments
 """TODO: Add a description here."""
 
 import json
@@ -11,9 +10,6 @@ import PIL.Image
 import datasets
 import numpy as np
 
-for _ in range(10):
-    print("LOADING SCRIPT")
-
 # TODO: Add BibTeX citation
 # Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
@@ -37,28 +33,20 @@ _HOMEPAGE = ""
37
  # TODO: Add the licence for the dataset here if you can find it
38
  _LICENSE = ""
39
 
40
- # TODO: Add link to the official dataset URLs here
41
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
42
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
43
  _URLS = {
44
  "8x8": [
 
45
  "https://huggingface.co/datasets/Prisma-Multimodal/segmented-imagenet1k-subset/resolve/main/images.tar.gz?download=true",
 
46
  "https://huggingface.co/datasets/manuel-delverme/test_repo/resolve/main/annotations/{split}_annotations/mask.tar.gz?download=true",
47
  "https://huggingface.co/datasets/manuel-delverme/test_repo/resolve/main/{split}.jsonl?download=true"
48
  ]
49
  }
50
 
51
 
52
- # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
53
  class PatchyImagenet(datasets.GeneratorBasedBuilder):
54
- """TODO: Short description of my dataset."""
55
-
56
  VERSION = datasets.Version("0.0.1")
57
 
58
- # This is an example of a dataset with multiple configurations.
59
- # If you don't want/need to define several sub-sets in your dataset,
60
- # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
61
-
62
  BUILDER_CONFIGS = [
63
  # datasets.BuilderConfig(name="1x1", version=VERSION, description="Patchy Imagenet with 1x1 resolution (this is the original resolution)"),
64
  datasets.BuilderConfig(name="8x8", version=VERSION, description="Patchy Imagenet with 8x8 resolution"),
@@ -74,12 +62,14 @@ class PatchyImagenet(datasets.GeneratorBasedBuilder):
                 "image": datasets.Image(),
                 "patches": datasets.Features(
                     {
+                        # This would be best but there are too many classes
                         # "categories": datasets.Sequence(datasets.ClassLabel(names=_IMAGENET_CLASSES)),
-                        "categories": datasets.Value("string"),
+                        "categories": datasets.Sequence(datasets.Value("string")),
                         "scores": datasets.Sequence(datasets.Value("float32")),
                         "mask": datasets.Sequence(
                             datasets.Array2D(shape=(224 // 8, 224 // 8), dtype="bool")
                         ),
+                        # Array2D is a bit annoying to use, otherwise use this
                         # "mask": datasets.Sequence(datasets.Image()),
                     }
                 ),
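The `categories` change above is the substantive schema fix of this commit: each image carries one label per patch, so the column must be a `Sequence` of strings rather than a single `Value("string")`. A minimal sketch (not part of the commit; the labels and scores are made up) of how the fixed sub-schema encodes a two-patch example:

import datasets
import numpy as np

# Same patch sub-schema as in the diff above; 224 // 8 == 28.
patch_features = datasets.Features(
    {
        "categories": datasets.Sequence(datasets.Value("string")),
        "scores": datasets.Sequence(datasets.Value("float32")),
        "mask": datasets.Sequence(
            datasets.Array2D(shape=(28, 28), dtype="bool")
        ),
    }
)

# Two patches for one image; hypothetical labels and scores for illustration.
example = {
    "categories": ["tabby cat", "tennis ball"],
    "scores": [0.91, 0.73],
    "mask": [np.zeros((28, 28), dtype=bool), np.ones((28, 28), dtype=bool)],
}

# With the old Value("string") schema this list of labels would fail to encode.
encoded = patch_features.encode_example(example)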
@@ -87,30 +77,19 @@ class PatchyImagenet(datasets.GeneratorBasedBuilder):
         )
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
             features=features,
-            # Homepage of the dataset for documentation
             homepage=_HOMEPAGE,
-            # License for the dataset if available
             license=_LICENSE,
-            # Citation for the dataset
             citation=_CITATION,
         )
 
     def _split_generators(self, dl_manager):
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         url_templates = _URLS[self.config.name]
 
         split_kwargs = {}
         for split in ["train", "test", "val"]:
             urls = [url.format(split=split) for url in url_templates]
             image_dir, mask_dir, metadata_file = dl_manager.download_and_extract(urls)
-            # breakpoint()
             split_kwargs[split] = {
                 "meta_path": metadata_file,
                 "image_dir": image_dir, "mask_dir": mask_dir,
@@ -123,25 +102,20 @@ class PatchyImagenet(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=split_kwargs["test"]),
         ]
 
-    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, meta_path, image_dir, mask_dir, split):
-        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
         with open(meta_path, encoding="utf-8") as f:
             for key, row in enumerate(f):
                 data = json.loads(row)
                 image_path = os.path.join(image_dir, "images", f"{split}_images", data["file_name"])
                 sample_name, _extension = os.path.splitext(data["file_name"])
                 mask_file = os.path.join(mask_dir, "masks", sample_name + ".npy")
-                # mask = np.load(mask_file).astype(bool)
-                mask = np.load(mask_file).astype(np.uint8)
-                # breakpoint()
-                pil_image = PIL.Image.open(image_path)
+                mask = np.load(mask_file).astype(bool)
+                # mask = np.load(mask_file).astype(np.uint8)
                 yield key, {
-                    "image": pil_image,
+                    "image": PIL.Image.open(image_path),
                     "patches": {
                         "categories": data["patches"]["categories"],
                         "scores": data["patches"]["scores"],
-                        "mask": list(mask),
+                        "mask": mask,
                    }
                }
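The `_generate_examples` fix flips the mask dtype back to bool, which is what the `Array2D(dtype="bool")` column declared earlier expects, and drops the `list(mask)` wrapper. A sketch of the round trip, assuming each .npy file stores a (num_patches, 28, 28) stack (the on-disk layout is an assumption, not shown in the diff):

import numpy as np

# Hypothetical stand-in for one saved mask file: two 28x28 patch masks.
np.save("/tmp/mask_example.npy",
        np.random.randint(0, 2, size=(2, 224 // 8, 224 // 8)).astype(np.uint8))

# As in the fixed loader: cast to bool to match Array2D(dtype="bool").
mask = np.load("/tmp/mask_example.npy").astype(bool)
assert mask.shape[1:] == (28, 28)

# A Sequence(Array2D(...)) column iterates the first axis on its own,
# so yielding the (N, 28, 28) array directly replaces the old list(mask).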
 
1
  # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
  #
3
  # Licensed under the Apache License, Version 2.0 (the "License");
 
4
  """TODO: Add a description here."""
5
 
6
  import json
 
10
  import datasets
11
  import numpy as np
12
 
 
 
 
13
  # TODO: Add BibTeX citation
14
  # Find for instance the citation on arxiv or on the dataset repo/website
15
  _CITATION = """\
 
33
  # TODO: Add the licence for the dataset here if you can find it
34
  _LICENSE = ""
35
 
 
 
 
36
  _URLS = {
37
  "8x8": [
38
+ # Download the original images from the original repo
39
  "https://huggingface.co/datasets/Prisma-Multimodal/segmented-imagenet1k-subset/resolve/main/images.tar.gz?download=true",
40
+ # Maks and metadata from the current
41
  "https://huggingface.co/datasets/manuel-delverme/test_repo/resolve/main/annotations/{split}_annotations/mask.tar.gz?download=true",
42
  "https://huggingface.co/datasets/manuel-delverme/test_repo/resolve/main/{split}.jsonl?download=true"
43
  ]
44
  }
45
 
46
 
 
47
  class PatchyImagenet(datasets.GeneratorBasedBuilder):
 
 
48
  VERSION = datasets.Version("0.0.1")
49
 
 
 
 
 
50
  BUILDER_CONFIGS = [
51
  # datasets.BuilderConfig(name="1x1", version=VERSION, description="Patchy Imagenet with 1x1 resolution (this is the original resolution)"),
52
  datasets.BuilderConfig(name="8x8", version=VERSION, description="Patchy Imagenet with 8x8 resolution"),
 
62
  "image": datasets.Image(),
63
  "patches": datasets.Features(
64
  {
65
+ # This would be best but there are too many classes
66
  # "categories": datasets.Sequence(datasets.ClassLabel(names=_IMAGENET_CLASSES)),
67
+ "categories": datasets.Sequence(datasets.Value("string")),
68
  "scores": datasets.Sequence(datasets.Value("float32")),
69
  "mask": datasets.Sequence(
70
  datasets.Array2D(shape=(224 // 8, 224 // 8), dtype="bool")
71
  ),
72
+ # Array2D is a bit annoying to use, otherwise use this
73
  # "mask": datasets.Sequence(datasets.Image()),
74
  }
75
  ),
 
77
  )
78
  return datasets.DatasetInfo(
79
  description=_DESCRIPTION,
 
80
  features=features,
 
81
  homepage=_HOMEPAGE,
 
82
  license=_LICENSE,
 
83
  citation=_CITATION,
84
  )
85
 
86
  def _split_generators(self, dl_manager):
 
 
 
 
 
 
87
  url_templates = _URLS[self.config.name]
88
 
89
  split_kwargs = {}
90
  for split in ["train", "test", "val"]:
91
  urls = [url.format(split=split) for url in url_templates]
92
  image_dir, mask_dir, metadata_file = dl_manager.download_and_extract(urls)
 
93
  split_kwargs[split] = {
94
  "meta_path": metadata_file,
95
  "image_dir": image_dir, "mask_dir": mask_dir,
 
102
  datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=split_kwargs["test"]),
103
  ]
104
 
 
105
  def _generate_examples(self, meta_path, image_dir, mask_dir, split):
 
 
106
  with open(meta_path, encoding="utf-8") as f:
107
  for key, row in enumerate(f):
108
  data = json.loads(row)
109
  image_path = os.path.join(image_dir, "images", f"{split}_images", data["file_name"])
110
  sample_name, _extension = os.path.splitext(data["file_name"])
111
  mask_file = os.path.join(mask_dir, "masks", sample_name + ".npy")
112
+ mask = np.load(mask_file).astype(bool)
113
+ # mask = np.load(mask_file).astype(np.uint8)
 
 
114
  yield key, {
115
+ "image": PIL.Image.open(image_path),
116
  "patches": {
117
  "categories": data["patches"]["categories"],
118
  "scores": data["patches"]["scores"],
119
+ "mask": mask,
120
  }
121
  }
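For reference, a hedged sketch of consuming the fixed script (this assumes the repo stays public and the loader script keeps its current name; script-based datasets need trust_remote_code in recent datasets releases):

from datasets import load_dataset

# "8x8" is the single config defined in BUILDER_CONFIGS above.
ds = load_dataset(
    "manuel-delverme/test_repo",
    "8x8",
    split="train",
    trust_remote_code=True,  # required for dataset scripts in datasets >= 2.16
)

sample = ds[0]
print(sample["patches"]["categories"])  # one label string per patch
print(len(sample["patches"]["mask"]))   # number of 28x28 boolean masks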