XintongHe committed
Commit 7a8b592 (1 parent: 217ab18)

Update new_dataset_script.py

Files changed (1): new_dataset_script.py (+96 −74)
new_dataset_script.py CHANGED
@@ -60,6 +60,13 @@ class NewDataset(datasets.GeneratorBasedBuilder):
 
     VERSION = datasets.Version("1.1.0")
 
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(name="full", version=VERSION, description="The full dataset"),
+        datasets.BuilderConfig(name="small", version=VERSION, description="A small sample of the dataset for quicker loading"),
+    ]
+
+    DEFAULT_CONFIG_NAME = "full"
+
     # This is an example of a dataset with multiple configurations.
     # If you don't want/need to define several sub-sets in your dataset,
     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
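
With `BUILDER_CONFIGS` and `DEFAULT_CONFIG_NAME` in place, a consumer selects a configuration by name at load time. A minimal usage sketch, assuming the script is consumed through `datasets.load_dataset` with the repository id that appears in the download URLs below:

    from datasets import load_dataset

    # Default configuration ("full"): all labeled images.
    full = load_dataset("XintongHe/Populus_Stomatal_Images_Datasets")

    # "small" configuration: a 1000-image sample for quicker iteration.
    small = load_dataset("XintongHe/Populus_Stomatal_Images_Datasets", name="small")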
@@ -108,88 +115,103 @@ class NewDataset(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         # Download and extract the dataset using Hugging Face's datasets library
         data_files = dl_manager.download_and_extract({
             "csv": "https://huggingface.co/datasets/XintongHe/Populus_Stomatal_Images_Datasets/resolve/main/Labeled Stomatal Images.csv",
             "zip": "https://huggingface.co/datasets/XintongHe/Populus_Stomatal_Images_Datasets/resolve/main/Labeled Stomatal Images.zip"
         })
 
         # Load the CSV file containing species and scientific names
         species_info = pd.read_csv(data_files["csv"])
 
         # The directory 'Labeled Stomatal Images' is where the images and labels are stored after extraction
         extracted_images_path = os.path.join(data_files["zip"], "Labeled Stomatal Images")
 
         # Get the list of image filenames from the CSV
         all_image_filenames = species_info['FileName'].apply(lambda x: x + '.jpg').tolist()
 
         # Shuffle the list for random split
         random.seed(42)  # Set a random seed for reproducibility
         random.shuffle(all_image_filenames)
 
-        # Split the files into train/validation/test
-        num_files = len(all_image_filenames)
-        train_split_end = int(num_files * 0.7)
-        val_split_end = train_split_end + int(num_files * 0.15)
-
-        train_files = all_image_filenames[:train_split_end]
-        val_files = all_image_filenames[train_split_end:val_split_end]
-        test_files = all_image_filenames[val_split_end:]
+        if self.config.name == "full":
+            # Use all the data for the full configuration
+            train_files, val_files, test_files = self._split_files(all_image_filenames)
+        elif self.config.name == "small":
+            # Use only a subset of the data for the small configuration
+            small_subset = all_image_filenames[:1000]  # Use only the first 1000 images
+            train_files, val_files, test_files = self._split_files(small_subset)
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
                     "filepaths": train_files,
                     "species_info": species_info,
                     "data_dir": extracted_images_path,
                     "split": "train",
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
                     "filepaths": val_files,
                     "species_info": species_info,
                     "data_dir": extracted_images_path,
                     "split": "validation",
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
                     "filepaths": test_files,
                     "species_info": species_info,
                     "data_dir": extracted_images_path,
                     "split": "test",
                 },
             ),
         ]
+
+    def _split_files(self, file_list):
+        num_files = len(file_list)
+        train_split_end = int(num_files * 0.7)
+        val_split_end = train_split_end + int(num_files * 0.15)
+
+        train_files = file_list[:train_split_end]
+        val_files = file_list[train_split_end:val_split_end]
+        test_files = file_list[val_split_end:]
+
+        return train_files, val_files, test_files
 
     # ... other necessary imports and class definitions
     def _parse_yolo_labels(self, label_path, width, height):
         annotations = []
         with open(label_path, 'r') as file:
             yolo_data = file.readlines()
 
         for line in yolo_data:
             class_id, x_center_rel, y_center_rel, width_rel, height_rel = map(float, line.split())
             x_min = (x_center_rel - width_rel / 2) * width
             y_min = (y_center_rel - height_rel / 2) * height
             x_max = (x_center_rel + width_rel / 2) * width
             y_max = (y_center_rel + height_rel / 2) * height
             annotations.append({
                 "category_id": int(class_id),
                 "bounding_box": {
                     "x_min": x_min,
                     "y_min": y_min,
                     "x_max": x_max,
                     "y_max": y_max
                 }
             })
         return annotations
 
     def _generate_examples(self, filepaths, species_info, data_dir, split):
         """Yields examples as (key, example) tuples."""
 