lisawen committed on
Commit
009b97c
1 Parent(s): a72578b

Update soybean_dataset.py

Files changed (1):
  1. soybean_dataset.py +153 -22
soybean_dataset.py CHANGED
@@ -117,43 +117,174 @@ class SoybeanDataset(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["valid"]}),
         ]
 
-    def process_image(self, image_path):
-        # Load the image from the local filesystem
-        img = Image.open(image_path)
-        return img
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# TODO: Address all TODOs and remove all explanatory comments
+"""TODO: Add a description here."""
+
+
+import csv
+import json
+import os
+from typing import List
+import datasets
+import logging
+import csv
+import numpy as np
+from PIL import Image
+import os
+import io
+import pandas as pd
+import matplotlib.pyplot as plt
+from numpy import asarray
+import requests
+from io import BytesIO
+from numpy import asarray
+
+
+# TODO: Add BibTeX citation
+# Find for instance the citation on arxiv or on the dataset repo/website
+_CITATION = """\
+@article{chen2023dataset,
+  title={A dataset of the quality of soybean harvested by mechanization for deep-learning-based monitoring and analysis},
+  author={Chen, M and Jin, C and Ni, Y and Yang, T and Xu, J},
+  journal={Data in Brief},
+  volume={52},
+  pages={109833},
+  year={2023},
+  publisher={Elsevier},
+  doi={10.1016/j.dib.2023.109833}
+}
+"""
+
+# TODO: Add description of the dataset here
+# You can copy an official description
+_DESCRIPTION = """\
+This dataset contains images captured during the mechanized harvesting of soybeans, aimed at facilitating the development of machine vision and deep learning models for quality analysis. It contains original soybean images in several forms, a label assigning each image to the training, validation, or test split, and the corresponding segmentation image.
+"""
+
+# TODO: Add a link to an official homepage for the dataset here
+_HOMEPAGE = "https://huggingface.co/datasets/lisawen/soybean_dataset"
+
+# TODO: Add the licence for the dataset here if you can find it
+_LICENSE = "Under a Creative Commons license"
+
+# TODO: Add link to the official dataset URLs here
+# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+_URL = "/content/drive/MyDrive/sta_663/soybean/dataset.csv"
+_URLs = {
+    "train": "https://raw.githubusercontent.com/lisawen0707/soybean/main/train_dataset.csv",
+    "test": "https://raw.githubusercontent.com/lisawen0707/soybean/main/test_dataset.csv",
+    "valid": "https://raw.githubusercontent.com/lisawen0707/soybean/main/valid_dataset.csv"
+}
+
+# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+class SoybeanDataset(datasets.GeneratorBasedBuilder):
+    """TODO: Short description of my dataset."""
+
+    _URLs = _URLs
+    VERSION = datasets.Version("1.1.0")
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "unique_id": datasets.Value("string"),
+                    "sets": datasets.Value("string"),
+                    "original_image": datasets.Image(),
+                    "segmentation_image": datasets.Image(),
+                }
+            ),
+            # Pair each original image with its segmentation image for supervised training.
+            supervised_keys=("original_image", "segmentation_image"),
+            homepage="https://github.com/lisawen0707/soybean/tree/main",
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+        # dl_manager downloads and caches the per-split CSV files listed in _URLs.
+        urls_to_download = self._URLs
+        downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["valid"]}),
+        ]
+
+    def process_image(self, image_url):
+        response = requests.get(image_url)
+        response.raise_for_status()  # Raises an exception if the download failed
+
+        # Open the image from the downloaded bytes and return the PIL Image
+        img = Image.open(BytesIO(response.content))
+        return img
+
 
     def _generate_examples(self, filepath):
+        """Yields examples as (key, example) tuples."""
         logging.info("generating examples from = %s", filepath)
 
         with open(filepath, encoding="utf-8") as f:
             data = csv.DictReader(f)
 
             for row in data:
+                # The image columns hold the full URL of each image file.
                 unique_id = row['unique_id']
-                original_image_path = row['original_image']  # Adjust this path if necessary
-                segmentation_image_path = row['segmentation_image']  # Adjust this path if necessary
-
-                # Check if image exists locally before loading
-                if os.path.exists(original_image_path):
-                    original_image = self.process_image(original_image_path)
-                else:
-                    logging.error(f"Original image not found: {original_image_path}")
-                    continue  # or handle missing image appropriately
-
-                if os.path.exists(segmentation_image_path):
-                    segmentation_image = self.process_image(segmentation_image_path)
-                else:
-                    logging.error(f"Segmentation image not found: {segmentation_image_path}")
-                    continue  # or handle missing image appropriately
-
-                yield unique_id, {
+                original_image_path = row['original_image']
+                segmentation_image_path = row['segmentation_image']
+                sets = row['sets']
+
+                original_image = self.process_image(original_image_path)
+                segmentation_image = self.process_image(segmentation_image_path)
+
+                yield row['unique_id'], {
                     "unique_id": unique_id,
+                    "sets": sets,
                     "original_image": original_image,
                     "segmentation_image": segmentation_image,
                     # ... add other features if necessary
                 }
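Note that the committed process_image raises on the first failed download, which aborts generation for the entire split. A minimal hardened sketch is below, assuming a timeout and a few retries are acceptable; the fetch_image name and its defaults are illustrative, not part of this commit.

import logging
from io import BytesIO

import requests
from PIL import Image

def fetch_image(image_url, timeout=30, retries=3):
    """Download an image URL and return a PIL Image, retrying transient failures."""
    for attempt in range(1, retries + 1):
        try:
            response = requests.get(image_url, timeout=timeout)
            response.raise_for_status()
            return Image.open(BytesIO(response.content))
        except requests.RequestException as err:
            logging.warning("download %d/%d failed for %s: %s", attempt, retries, image_url, err)
    return None  # callers can then skip rows whose images never arrived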
 
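_generate_examples assumes each split CSV exposes at least the unique_id, sets, original_image, and segmentation_image columns. The fragment below sketches that expectation with made-up values; the URLs are placeholders, not rows from the published CSVs.

import csv
import io

# Placeholder CSV fragment with the columns _generate_examples reads.
sample = io.StringIO(
    "unique_id,sets,original_image,segmentation_image\n"
    "0001,train,https://example.com/0001_original.jpg,https://example.com/0001_segmentation.png\n"
)
for row in csv.DictReader(sample):
    # Every column the generator indexes must be present in the header.
    assert {"unique_id", "sets", "original_image", "segmentation_image"} <= row.keys()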
 
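Once the script is on the Hub, consumers would load it roughly as follows; depending on the installed datasets version, loading a script-backed dataset may also require trust_remote_code=True.

from datasets import load_dataset

# Split names follow the SplitGenerators above: train, test, validation.
ds = load_dataset("lisawen/soybean_dataset", split="train")
example = ds[0]
print(example["unique_id"], example["sets"])
example["original_image"].show()  # decoded to a PIL image by datasets.Image()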