XintongHe committed on
Commit
431e811
1 Parent(s): 3ddf48a

Update Populus_Stomatal_Images_Datasets.py

Browse files
Files changed (1) hide show
  1. Populus_Stomatal_Images_Datasets.py +13 -115
Populus_Stomatal_Images_Datasets.py CHANGED
@@ -1,20 +1,3 @@
1
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- # TODO: Address all TODOs and remove all explanatory comments
15
- """TODO: Add a description here."""
16
-
17
-
18
  import csv
19
  import json
20
  import os
@@ -24,62 +7,31 @@ import pandas as pd
24
  import datasets
25
 
26
 
27
- # TODO: Add BibTeX citation
28
- # Find for instance the citation on arxiv or on the dataset repo/website
29
  _CITATION = """\
30
- @InProceedings{huggingface:dataset,
31
- title = {A great new dataset},
32
- author={huggingface, Inc.
33
- },
34
- year={2020}
35
- }
36
  """
37
 
38
- # TODO: Add description of the dataset here
39
- # You can copy an official description
40
  _DESCRIPTION = """\
41
- This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
42
  """
43
 
44
- # TODO: Add a link to an official homepage for the dataset here
45
- _HOMEPAGE = ""
46
 
47
- # TODO: Add the licence for the dataset here if you can find it
48
- _LICENSE = ""
49
 
50
- # TODO: Add link to the official dataset URLs here
51
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
52
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
53
- # _URLS = {
54
- # "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
55
- # "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
56
- # }
57
 
58
 
59
- # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
60
  class NewDataset(datasets.GeneratorBasedBuilder):
61
  """TODO: Short description of my dataset."""
62
 
63
  VERSION = datasets.Version("1.1.0")
64
 
65
- # This is an example of a dataset with multiple configurations.
66
- # If you don't want/need to define several sub-sets in your dataset,
67
- # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
68
-
69
- # If you need to make complex sub-parts in the datasets with configurable options
70
- # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
71
- # BUILDER_CONFIG_CLASS = MyBuilderConfig
72
-
73
- # You will be able to load one or the other configurations in the following list with
74
- # data = datasets.load_dataset('my_dataset', 'first_domain')
75
- # data = datasets.load_dataset('my_dataset', 'second_domain')
76
- # BUILDER_CONFIGS = [
77
- # datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
78
- # datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
79
- # ]
80
-
81
- # DEFAULT_CONFIG_NAME = "first_domain" # It's not mandatory to have a default configuration. Just use one if it make sense.
82
-
83
  def _info(self):
84
  features = datasets.Features({
85
  "image_id": datasets.Value("string"),
@@ -125,7 +77,6 @@ class NewDataset(datasets.GeneratorBasedBuilder):
125
  # Get all image filenames
126
  all_image_filenames = species_info['FileName'].apply(lambda x: x + '.jpg').tolist()
127
 
128
- # No longer need to randomize and split the dataset
129
  return [datasets.SplitGenerator(
130
  name=datasets.Split.TRAIN,
131
  gen_kwargs={
@@ -136,8 +87,6 @@ class NewDataset(datasets.GeneratorBasedBuilder):
136
  )]
137
 
138
 
139
-
140
-
141
  def _parse_yolo_labels(self, label_path, width, height):
142
  annotations = []
143
  with open(label_path, 'r') as file:
@@ -178,13 +127,8 @@ class NewDataset(datasets.GeneratorBasedBuilder):
178
  # Default values if not found
179
  species = None
180
  scientific_name = None
181
- width = 1024 # Default value
182
- height = 768 # Default value
183
-
184
- # pics_array = None
185
- # with Image.open(image_path) as img:
186
- # pics_array = np.array(img)# Convert the PIL image to a numpy array and then to a list
187
- # # print(pics_array.shape)
188
 
189
  annotations = self._parse_yolo_labels(label_path, width, height)
190
 
@@ -198,50 +142,4 @@ class NewDataset(datasets.GeneratorBasedBuilder):
198
  "image": img,
199
  "image_resolution": {"width": width, "height": height},
200
  "annotations": annotations
201
- }
202
-
203
-
204
-
205
-
206
- # def _generate_examples(self, filepaths, species_info, data_dir, annotations_file):
207
- # """Yields examples as (key, example) tuples."""
208
- # # Load annotations from JSON file
209
- # with open(annotations_file, 'r') as file:
210
- # annotations_dict = json.load(file)
211
-
212
- # for file_name in filepaths:
213
- # image_id = os.path.splitext(file_name)[0] # Extract the base name without the file extension
214
- # image_path = os.path.join(data_dir, f"{image_id}.jpg")
215
-
216
- # # Find the corresponding row in the CSV for the current image
217
- # species_row = species_info.loc[species_info['FileName'] == image_id]
218
- # if not species_row.empty:
219
- # species = species_row['Species'].values[0]
220
- # scientific_name = species_row['ScientificName'].values[0]
221
- # width = species_row['Witdh'].values[0] # Corrected field name from 'Witdh'
222
- # height = species_row['Heigth'].values[0] # Corrected field name from 'Heigth'
223
- # else:
224
- # # Default values if not found
225
- # species = None
226
- # scientific_name = None
227
- # width = 1024 # Default value
228
- # height = 768 # Default value
229
-
230
- # # pics_array = None
231
- # # with Image.open(image_path) as img:
232
- # # pics_array = np.array(img) # Convert the PIL image to a numpy array
233
-
234
- # # Retrieve annotations for the current image from the dictionary
235
- # annotations = annotations_dict.get(image_id, [])
236
-
237
- # # Yield the dataset example
238
- # yield image_id, {
239
- # "image_id": image_id,
240
- # "species": species,
241
- # "scientific_name": scientific_name,
242
- # #"pics_array": pics_array.tolist(), # Convert numpy array to list for JSON serializability
243
- # "image_path": image_path,
244
- # "image": img,
245
- # "image_resolution": {"width": width, "height": height},
246
- # "annotations": annotations
247
- # }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import csv
2
  import json
3
  import os
 
7
  import datasets
8
 
9
 
10
+
 
11
  _CITATION = """\
12
+ @article{nature},
13
+ title={Labeled temperate hardwood tree stomatal image datasets from seven taxa of Populus and 17 hardwood species},
14
+ author={Jiaxin Wang, Heidi J. Renninger and Qin Ma},
15
+ journal={Sci Data 11, 1 (2024)},
16
+ year={2024}
 
17
  """
18
 
 
 
19
  _DESCRIPTION = """\
20
+ This new dataset is designed to solve image classification and segmentation tasks and is crafted with a lot of care.
21
  """
22
 
23
+ _HOMEPAGE = "https://zenodo.org/records/8271253"
 
24
 
 
 
25
 
26
+ _LICENSE = "https://creativecommons.org/licenses/by/4.0/"
 
 
 
 
 
 
27
 
28
 
 
29
  class NewDataset(datasets.GeneratorBasedBuilder):
30
  """TODO: Short description of my dataset."""
31
 
32
  VERSION = datasets.Version("1.1.0")
33
 
34
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
  def _info(self):
36
  features = datasets.Features({
37
  "image_id": datasets.Value("string"),
 
77
  # Get all image filenames
78
  all_image_filenames = species_info['FileName'].apply(lambda x: x + '.jpg').tolist()
79
 
 
80
  return [datasets.SplitGenerator(
81
  name=datasets.Split.TRAIN,
82
  gen_kwargs={
 
87
  )]
88
 
89
 
 
 
90
  def _parse_yolo_labels(self, label_path, width, height):
91
  annotations = []
92
  with open(label_path, 'r') as file:
 
127
  # Default values if not found
128
  species = None
129
  scientific_name = None
130
+ width = 1024
131
+ height = 768
 
 
 
 
 
132
 
133
  annotations = self._parse_yolo_labels(label_path, width, height)
134
 
 
142
  "image": img,
143
  "image_resolution": {"width": width, "height": height},
144
  "annotations": annotations
145
+ }