lisawen committed
Commit 92e2a57
1 Parent(s): 0d700de

Update soybean_dataset.py

Files changed (1):
  1. soybean_dataset.py +82 -31
soybean_dataset.py CHANGED
@@ -26,6 +26,12 @@ import numpy as np
 from PIL import Image
 import os
 import io
+import pandas as pd
+import matplotlib.pyplot as plt
+from numpy import asarray
+import requests
+from io import BytesIO
+from numpy import asarray

 # TODO: Add BibTeX citation
@@ -60,13 +66,17 @@ _LICENSE = "Under a Creative Commons license"
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URL = "/content/drive/MyDrive/sta_663/soybean/dataset.csv"
-
+_URLs = {
+    "train": "https://drive.google.com/file/d/1-5Tdr_OTUUfkjf_UCa5EZOjGdlW683S-/view?usp=sharing",
+    "test": "https://drive.google.com/file/d/1-2wUyuBTeesGxLuDCvxRcUPdftL-Zen9/view?usp=sharing",
+    "valid": "https://drive.google.com/file/d/1-1DeSjBY9YlfGCl7CvoU97h7eX95R1eC/view?usp=sharing"
+}

 # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
 class SoybeanDataset(datasets.GeneratorBasedBuilder):
     """TODO: Short description of my dataset."""

-    _URLS = _URL
+    _URLS = _URLs
     VERSION = datasets.Version("1.1.0")

     def _info(self):
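A side note on the split URLs above: Google Drive "view?usp=sharing" links resolve to an HTML preview page rather than the raw file, so a downloader such as dl_manager.download_and_extract generally needs the direct-download form of the link. A minimal sketch (to_direct_download is a hypothetical helper, not part of this commit; it assumes the files are shared publicly):

def to_direct_download(share_url: str) -> str:
    # Pull the file ID out of a URL shaped like
    # https://drive.google.com/file/d/<FILE_ID>/view?usp=sharing
    file_id = share_url.split("/d/")[1].split("/")[0]
    # Direct-download endpoint for publicly shared files (assumes the
    # file is small enough to skip the virus-scan interstitial page)
    return f"https://drive.google.com/uc?export=download&id={file_id}"

# Usage: rewrite the _URLs mapping before handing it to the download manager
_DIRECT_URLS = {split: to_direct_download(url) for split, url in _URLs.items()}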
@@ -94,53 +104,56 @@ class SoybeanDataset(datasets.GeneratorBasedBuilder):
         # Since the dataset is on Google Drive, you need to implement a way to download it using the Google Drive API.

         # The path to the dataset file in Google Drive
-        dataset_path = "/content/drive/MyDrive/sta_663/soybean/dataset.csv"
-
-        # Check if the file exists (you may need to mount the drive and use the appropriate path)
-        if not os.path.exists(dataset_path):
-            raise FileNotFoundError(f"{dataset_path} does not exist. Have you mounted Google Drive?")
+        urls_to_download = self._URLs
+        downloaded_files = dl_manager.download_and_extract(urls_to_download)

         # Since we're using a local file, we don't need to download it, so we just return the path.
         return [
             datasets.SplitGenerator(
-                name=datasets.Split,
-                gen_kwargs={
-                    "filepath": dataset_path
-                }
-            ),
+                name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["valid"]}),
         ]

+    def process_image(self, image_url):
+        response = requests.get(image_url)
+        response.raise_for_status()  # This will raise an exception if there is a download error
+
+        # Open the image from the downloaded bytes
+        img = Image.open(BytesIO(response.content))
+
+        numpydata = asarray(img)
+
+        return numpydata
+
+
     def _generate_examples(self, filepath):
         #"""Yields examples as (key, example) tuples."""
-
-        # Check if the file exists (you may need to mount the drive and use the appropriate path)
-        if not os.path.exists(filepath):
-            raise FileNotFoundError(f"{filepath} does not exist. Have you mounted Google Drive?")
-
-        # Read the dataset.csv
+        logging.info("generating examples from = %s", filepath)
+
         with open(filepath, encoding="utf-8") as f:
-            reader = csv.DictReader(f)
-
-            for row in reader:
+            data = csv.DictReader(f)
+            for image_url in data["original_image"]:
+                numpydata = self.process_image(image_url)
+
+            for row in data:
                 # Assuming the 'original_image' column has the full path to the image file
+                unique_id = row['unique_id']
                 original_image_path = row['original_image']
                 segmentation_image_path = row['segmentation_image']
                 sets = row['sets']

-                # Open the image and convert to numpy array
-                with open(original_image_path, "rb") as image_file:
-                    original_image = Image.open(image_file)
-                    original_image_array = np.array(original_image)
-
-
-                # Open the image and convert to numpy array
-                with open(segmentation_image_path, "rb") as image_file:
-                    segmentation_image = Image.open(image_file)
-                    segmentation_image_array = np.array(segmentation_image)
+                original_image_array = self.process_image(original_image_path)
+                segmentation_image_array = self.process_image(segmentation_image_path)

+
                 # Here you need to replace 'initial_radius', 'final_radius', 'initial_angle', 'final_angle', 'target'
                 # with actual columns from your CSV or additional processing you need to do
                 yield row['unique_id'], {
+                    "unique_id": unique_id,
                     "sets": sets,
                     "original_image": original_image_array,
                     "segmentation_image": segmentation_image_array,
@@ -157,3 +170,41 @@ class SoybeanDataset(datasets.GeneratorBasedBuilder):



+
+
+
+    #### origin
+    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+        urls_to_download = self._URLS
+        downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+        ]
+
+    def _generate_examples(self, filepath):
+        """This function returns the examples in the raw (text) form."""
+        logging.info("generating examples from = %s", filepath)
+        with open(filepath) as f:
+            squad = json.load(f)
+            for article in squad["data"]:
+                title = article.get("title", "").strip()
+                for paragraph in article["paragraphs"]:
+                    context = paragraph["context"].strip()
+                    for qa in paragraph["qas"]:
+                        question = qa["question"].strip()
+                        id_ = qa["id"]
+
+                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]
+                        answers = [answer["text"].strip() for answer in qa["answers"]]
+
+                        # Features currently used are "context", "question", and "answers".
+                        # Others are extracted here for the ease of future expansions.
+                        yield id_, {
+                            "title": title,
+                            "context": context,
+                            "question": question,
+                            "id": id_,
+                            "answers": {"answer_start": answer_starts, "text": answers,},
+                        }
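One consequence of the appended "#### origin" block is easy to miss: it redefines _split_generators and _generate_examples inside the same class body, and in Python the later definition silently replaces the earlier one, so the loader would actually run this SQuAD-style template code (which also expects a downloaded_files["dev"] key and json/List imports that this script does not provide). A quick sketch of the shadowing rule:

class Demo:
    def f(self):
        return "first"

    def f(self):  # same name in the same class body: this replaces the one above
        return "second"

print(Demo().f())  # prints "second"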