XintongHe committed on
Commit
22b8b85
1 Parent(s): c67a7f2

Update new_dataset_script.py

Browse files
Files changed (1) hide show
  1. new_dataset_script.py +75 -99
new_dataset_script.py CHANGED
@@ -60,13 +60,6 @@ class NewDataset(datasets.GeneratorBasedBuilder):
60
 
61
  VERSION = datasets.Version("1.1.0")
62
 
63
- BUILDER_CONFIGS = [
64
- datasets.BuilderConfig(name="full", version=VERSION, description="The full dataset"),
65
- datasets.BuilderConfig(name="small", version=VERSION, description="A small sample of the dataset for quicker loading"),
66
- ]
67
-
68
- DEFAULT_CONFIG_NAME = "full"
69
-
70
  # This is an example of a dataset with multiple configurations.
71
  # If you don't want/need to define several sub-sets in your dataset,
72
  # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
@@ -115,104 +108,88 @@ class NewDataset(datasets.GeneratorBasedBuilder):
115
 
116
 
117
  def _split_generators(self, dl_manager):
118
- # Download and extract the dataset using Hugging Face's datasets library
119
- data_files = dl_manager.download_and_extract({
120
- "csv": "https://huggingface.co/datasets/XintongHe/Populus_Stomatal_Images_Datasets/resolve/main/Labeled Stomatal Images.csv",
121
- "zip": "https://huggingface.co/datasets/XintongHe/Populus_Stomatal_Images_Datasets/resolve/main/Labeled Stomatal Images.zip"
122
- })
123
-
124
- # Load the CSV file containing species and scientific names
125
- species_info = pd.read_csv(data_files["csv"])
126
-
127
- # The directory 'Labeled Stomatal Images' is where the images and labels are stored after extraction
128
- extracted_images_path = os.path.join(data_files["zip"], "Labeled Stomatal Images")
129
-
130
- # Get the list of image filenames from the CSV
131
- all_image_filenames = species_info['FileName'].apply(lambda x: x + '.jpg').tolist()
132
-
133
- # Shuffle the list for random split
134
- random.seed(42) # Set a random seed for reproducibility
135
- random.shuffle(all_image_filenames)
136
-
137
- num_files = len(all_image_filenames)
138
- train_split_end = int(num_files * 0.7)
139
- val_split_end = train_split_end + int(num_files * 0.15)
140
-
141
- train_files = all_image_filenames[:train_split_end]
142
- val_files = all_image_filenames[train_split_end:val_split_end]
143
- test_files = all_image_filenames[val_split_end:]
144
-
145
-
146
-
147
-
148
 
149
- return [
150
- datasets.SplitGenerator(
151
- name=datasets.Split.TRAIN,
152
- gen_kwargs={
153
- "filepaths": train_files,
154
- "species_info": species_info,
155
- "data_dir": extracted_images_path,
156
- "split": "train",
157
- },
158
- ),
159
- datasets.SplitGenerator(
160
- name=datasets.Split.VALIDATION,
161
- gen_kwargs={
162
- "filepaths": val_files,
163
- "species_info": species_info,
164
- "data_dir": extracted_images_path,
165
- "split": "train",
166
- },
167
- ),
168
- datasets.SplitGenerator(
169
- name=datasets.Split.TEST,
170
- gen_kwargs={
171
- "filepaths": test_files,
172
- "species_info": species_info,
173
- "data_dir": extracted_images_path,
174
- "split": "train",
175
- },
176
- ),
177
- ]
178
-
179
- def _split_files(self, file_list):
180
- num_files = len(file_list)
181
- train_split_end = int(num_files * 0.7)
182
- val_split_end = train_split_end + int(num_files * 0.15)
183
-
184
- train_files = file_list[:train_split_end]
185
- val_files = file_list[train_split_end:val_split_end]
186
- test_files = file_list[val_split_end:]
187
 
188
- return train_files, val_files, test_files
189
-
190
 
 
 
191
 
 
 
 
 
 
 
 
 
192
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
193
 
194
  # ... other necessary imports and class definitions
195
  def _parse_yolo_labels(self, label_path, width, height):
196
- annotations = []
197
- with open(label_path, 'r') as file:
198
- yolo_data = file.readlines()
199
-
200
- for line in yolo_data:
201
- class_id, x_center_rel, y_center_rel, width_rel, height_rel = map(float, line.split())
202
- x_min = (x_center_rel - width_rel / 2) * width
203
- y_min = (y_center_rel - height_rel / 2) * height
204
- x_max = (x_center_rel + width_rel / 2) * width
205
- y_max = (y_center_rel + height_rel / 2) * height
206
- annotations.append({
207
- "category_id": int(class_id),
208
- "bounding_box": {
209
- "x_min": x_min,
210
- "y_min": y_min,
211
- "x_max": x_max,
212
- "y_max": y_max
213
- }
214
- })
215
- return annotations
216
 
217
  def _generate_examples(self, filepaths, species_info, data_dir, split):
218
  """Yields examples as (key, example) tuples."""
@@ -238,5 +215,4 @@ class NewDataset(datasets.GeneratorBasedBuilder):
238
  "pics_array": pics_array,
239
  "image_resolution": {"width": width, "height": height},
240
  "annotations": annotations
241
- }
242
-
 
60
 
61
  VERSION = datasets.Version("1.1.0")
62
 
 
 
 
 
 
 
 
63
  # This is an example of a dataset with multiple configurations.
64
  # If you don't want/need to define several sub-sets in your dataset,
65
  # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
 
108
 
109
 
110
  def _split_generators(self, dl_manager):
111
+ # Download and extract the dataset using Hugging Face's datasets library
112
+ data_files = dl_manager.download_and_extract({
113
+ "csv": "https://huggingface.co/datasets/XintongHe/Populus_Stomatal_Images_Datasets/resolve/main/Labeled Stomatal Images.csv",
114
+ "zip": "https://huggingface.co/datasets/XintongHe/Populus_Stomatal_Images_Datasets/resolve/main/Labeled Stomatal Images.zip"
115
+ })
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
 
117
+ # Load the CSV file containing species and scientific names
118
+ species_info = pd.read_csv(data_files["csv"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
119
 
120
+ # The directory 'Labeled Stomatal Images' is where the images and labels are stored after extraction
121
+ extracted_images_path = os.path.join(data_files["zip"], "Labeled Stomatal Images")
122
 
123
+ # Get the list of image filenames from the CSV
124
+ all_image_filenames = species_info['FileName'].apply(lambda x: x + '.jpg').tolist()
125
 
126
+ # Shuffle the list for random split
127
+ random.seed(42) # Set a random seed for reproducibility
128
+ random.shuffle(all_image_filenames)
129
+
130
+ # Split the files into train/validation/test
131
+ num_files = len(all_image_filenames)
132
+ train_split_end = int(num_files * 0.7)
133
+ val_split_end = train_split_end + int(num_files * 0.15)
134
 
135
+ train_files = all_image_filenames[:train_split_end]
136
+ val_files = all_image_filenames[train_split_end:val_split_end]
137
+ test_files = all_image_filenames[val_split_end:]
138
+
139
+ return [
140
+ datasets.SplitGenerator(
141
+ name=datasets.Split.TRAIN,
142
+ gen_kwargs={
143
+ "filepaths": train_files,
144
+ "species_info": species_info,
145
+ "data_dir": extracted_images_path,
146
+ "split": "train",
147
+ },
148
+ ),
149
+ datasets.SplitGenerator(
150
+ name=datasets.Split.VALIDATION,
151
+ gen_kwargs={
152
+ "filepaths": val_files,
153
+ "species_info": species_info,
154
+ "data_dir": extracted_images_path,
155
+ "split": "validation",
156
+ },
157
+ ),
158
+ datasets.SplitGenerator(
159
+ name=datasets.Split.TEST,
160
+ gen_kwargs={
161
+ "filepaths": test_files,
162
+ "species_info": species_info,
163
+ "data_dir": extracted_images_path,
164
+ "split": "test",
165
+ },
166
+ ),
167
+ ]
168
+
169
+
170
 
171
  # ... other necessary imports and class definitions
172
  def _parse_yolo_labels(self, label_path, width, height):
173
+ annotations = []
174
+ with open(label_path, 'r') as file:
175
+ yolo_data = file.readlines()
176
+
177
+ for line in yolo_data:
178
+ class_id, x_center_rel, y_center_rel, width_rel, height_rel = map(float, line.split())
179
+ x_min = (x_center_rel - width_rel / 2) * width
180
+ y_min = (y_center_rel - height_rel / 2) * height
181
+ x_max = (x_center_rel + width_rel / 2) * width
182
+ y_max = (y_center_rel + height_rel / 2) * height
183
+ annotations.append({
184
+ "category_id": int(class_id),
185
+ "bounding_box": {
186
+ "x_min": x_min,
187
+ "y_min": y_min,
188
+ "x_max": x_max,
189
+ "y_max": y_max
190
+ }
191
+ })
192
+ return annotations
193
 
194
  def _generate_examples(self, filepaths, species_info, data_dir, split):
195
  """Yields examples as (key, example) tuples."""
 
215
  "pics_array": pics_array,
216
  "image_resolution": {"width": width, "height": height},
217
  "annotations": annotations
218
+ }