Chris Oswald committed on
Commit
f7a1cfb
1 Parent(s): 61fd3fa

added patient id splits

Browse files
Files changed (1) hide show
  1. SPIDER.py +73 -12
SPIDER.py CHANGED
@@ -23,7 +23,7 @@ from typing import Dict, List, Optional, Set
23
  import numpy as np
24
 
25
  import datasets
26
-
27
 
28
  # TODO: Add BibTeX citation
29
  # Find for instance the citation on arxiv or on the dataset repo/website
@@ -160,12 +160,19 @@ class SPIDER(datasets.GeneratorBasedBuilder):
160
  # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
161
  # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
162
 
163
- # Generate train/validate/test partitions of patient IDs
 
164
  np.random.seed(9999)
165
- N_PATIENTS = 257 #TODO: make hardcoded values dynamic
166
  VALIDATE_SHARE = 0.3
167
  TEST_SHARE = 0.2
168
  TRAIN_SHARE = (1.0 - VALIDATE_SHARE - TEST_SHARE)
 
 
 
 
 
 
169
  partition = np.random.choice(
170
  ['train', 'dev', 'test'],
171
  p=[TRAIN_SHARE, VALIDATE_SHARE, TEST_SHARE],
@@ -176,9 +183,7 @@ class SPIDER(datasets.GeneratorBasedBuilder):
176
  validate_ids = set(patient_ids[partition == 'dev'])
177
  test_ids = set(patient_ids[partition == 'test'])
178
  assert len(train_ids.union(validate_ids, test_ids)) == N_PATIENTS
179
-
180
-
181
-
182
  # Import patient/scanner data and radiological gradings data
183
  overview_data = import_csv_data(paths_dict['overview'])
184
  grades_data = import_csv_data(paths_dict['gradings'])
@@ -196,17 +201,73 @@ class SPIDER(datasets.GeneratorBasedBuilder):
196
  ]
197
  assert len(mask_files) > 0, "No mask files found--check directory path."
198
 
199
- images = []
200
- masks = []
 
 
 
 
 
 
 
 
 
 
201
  if split == 'train':
202
- for patient_id in train_ids:
203
-
204
-
205
  elif split == 'validate':
206
-
207
  elif split == 'test':
 
 
 
 
 
208
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
209
 
 
 
 
 
 
 
 
 
 
 
 
210
  def import_csv_data(filepath: str) -> List[Dict[str, str]]:
211
  """Import all rows of CSV file."""
212
  results = []
 
23
  import numpy as np
24
 
25
  import datasets
26
+ import SimpleITK as sitk
27
 
28
  # TODO: Add BibTeX citation
29
  # Find for instance the citation on arxiv or on the dataset repo/website
 
160
  # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
161
  # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
162
 
163
+ # Config params
164
+ #TODO: make hardcoded values dynamic
165
  np.random.seed(9999)
166
+ N_PATIENTS = 257
167
  VALIDATE_SHARE = 0.3
168
  TEST_SHARE = 0.2
169
  TRAIN_SHARE = (1.0 - VALIDATE_SHARE - TEST_SHARE)
170
+
171
+ scan_types = ['t1', 't2', 't2_SPACE']
172
+
173
+
174
+
175
+ # Generate train/validate/test partitions of patient IDs
176
  partition = np.random.choice(
177
  ['train', 'dev', 'test'],
178
  p=[TRAIN_SHARE, VALIDATE_SHARE, TEST_SHARE],
 
183
  validate_ids = set(patient_ids[partition == 'dev'])
184
  test_ids = set(patient_ids[partition == 'test'])
185
  assert len(train_ids.union(validate_ids, test_ids)) == N_PATIENTS
186
+
 
 
187
  # Import patient/scanner data and radiological gradings data
188
  overview_data = import_csv_data(paths_dict['overview'])
189
  grades_data = import_csv_data(paths_dict['gradings'])
 
201
  ]
202
  assert len(mask_files) > 0, "No mask files found--check directory path."
203
 
204
+ # Filter image and mask data based on scan types
205
+ image_files = [
206
+ file for file in image_files
207
+ if any(scan_type in file for scan_type in scan_types)
208
+ ]
209
+
210
+ mask_files = [
211
+ file for file in mask_files
212
+ if any(scan_type in file for scan_type in scan_types)
213
+ ]
214
+
215
+ # Subset train/validation/test partition images and mask files
216
  if split == 'train':
217
+ subset_ids = train_ids
 
 
218
  elif split == 'validate':
219
+ subset_ids = validate_ids
220
  elif split == 'test':
221
+ subset_ids = test_ids
222
+ else:
223
+ subset_ids = None
224
+ raise ValueError(f'Split argument "{split}" is not recognized. \
225
+ Please enter one of ["train", "validate", "test"]')
226
 
227
+ image_files = [
228
+ file for file in image_files
229
+ if any(str(patient_id) in file.split('_')[0] for patient_id in subset_ids)
230
+ ]
231
+
232
+ mask_files = [
233
+ file for file in mask_files
234
+ if any(str(patient_id) in file.split('_')[0] for patient_id in subset_ids)
235
+ ]
236
+ assert len(image_files) == len(mask_files), "The number of image files\
237
+ does not match the number of mask files--verify subsetting operation."
238
+
239
+ # Shuffle order of patient scans
240
+ # (note that only images need to be shuffled since masks and metadata
241
+ # will be linked to the selected image)
242
+ np.random.shuffle(image_files)
243
+
244
+ ## Generate next example
245
+ # ----------------------
246
+ for example in image_files:
247
+
248
+ # Extract linking data
249
+ scan_id = example.replace('.mha', '')
250
+ patient_id = scan_id.split('_')[0]
251
+ scan_type = '_'.join(scan_id.split('_')[1:])
252
+
253
+ # Load .mha file
254
+ image_path = os.path.join(paths_dict['images'], 'images', example)
255
+ image = sitk.ReadImage(image_path)
256
+
257
+ # Convert .mha image to numeric array
258
+ image_array = sitk.GetArrayFromImage(image)
259
 
260
+ # Extract overview data corresponding to image
261
+
262
+ # Extract patient radiological gradings corresponding to image
263
+
264
+
265
+
266
+
267
+
268
+
269
+
270
+
271
  def import_csv_data(filepath: str) -> List[Dict[str, str]]:
272
  """Import all rows of CSV file."""
273
  results = []