Margerie committed on
Commit
5e2c32d
1 Parent(s): 9ca35f3

requirements, model weights, preprocessing and post processing

Browse files
Files changed (7) hide show
  1. best_metric_model.pth +3 -0
  2. dicom_to_nii.py +372 -0
  3. nii_to_dicom.py +501 -0
  4. postprocessing.py +347 -0
  5. predict.py +192 -0
  6. preprocessing.py +309 -0
  7. requirements.txt +1 -0
best_metric_model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f5be1fba4eaafed151c66bd8aa83f7bfeeffb7af378b6d4b546231c9b25641db
3
+ size 19305909
dicom_to_nii.py ADDED
@@ -0,0 +1,372 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pydicom
2
+ import sys
3
+ import os
4
+ import numpy as np
5
+ import nibabel as nib
6
+ import scipy
7
+
8
+ def convert_ct_dicom_to_nii(dir_dicom, dir_nii, outputname, newvoxelsize=None):
9
+ Patients = PatientList() # initialize list of patient data
10
+ # search dicom files in the patient data folder, stores all files in the attributes (all CT images, dose file, struct file)
11
+ Patients.list_dicom_files(dir_dicom, 1)
12
+ patient = Patients.list[0]
13
+ patient_name = patient.PatientInfo.PatientName
14
+ patient.import_patient_data(newvoxelsize)
15
+ CT = patient.CTimages[0]
16
+ image_position_patient = CT.ImagePositionPatient
17
+ voxelsize = np.array(CT.PixelSpacing)
18
+ save_images(dst_dir=os.path.join(dir_nii), voxelsize=voxelsize,
19
+ image_position_patient=image_position_patient, image=CT.Image, outputname=outputname)
20
+ return CT
21
+
22
+
23
+ def save_images(dst_dir, voxelsize, image_position_patient, image, outputname):
24
+
25
+ # encode in nii and save at dst_dir
26
+ # IMPORTANT I NEED TO CONFIRM THE SIGNS OF THE ENTRIES IN THE AFFINE,
27
+ # ALTHOUGH MAYBE AT THE END THE IMPORTANCE IS HOW WE WILL USE THIS DATA ....
28
+ # also instead of changing field by field, the pixdim and affine can be encoded
29
+ # using the set_sform method --> info here: https://nipy.org/nibabel/nifti_images.html
30
+
31
+ # IMAGE (CT, MR ...)
32
+ image_shape = image.shape
33
+ # Separate Conversion from preprocessing
34
+ # image = overwrite_ct_threshold(image)
35
+ # for Nifti1 header, change for a Nifti2 type of header
36
+ image_nii = nib.Nifti1Image(image, affine=np.eye(4))
37
+ # Update header fields
38
+ image_nii = set_header_info(image_nii, voxelsize, image_position_patient)
39
+
40
+ # Save nii
41
+ nib.save(image_nii, os.path.join(dst_dir, outputname))
42
+
43
+ # nib.save(image_nii, os.path.join(dst_dir, 'ct.nii.gz'))
44
+
45
+
46
+ # def overwrite_ct_threshold(ct_image, body, artefact=None, contrast=None):
47
+ # # Change the HU out of the body to air: -1000
48
+ # ct_image[body == 0] = -1000
49
+ # if artefact is not None:
50
+ # # Change the HU to muscle: 14
51
+ # ct_image[artefact == 1] = 14
52
+ # if contrast is not None:
53
+ # # Change the HU to water: 0 Houndsfield Unit: CT unit
54
+ # ct_image[contrast == 1] = 0
55
+ # # Threshold above 1560HU
56
+ # ct_image[ct_image > 1560] = 1560
57
+ # return ct_image
58
+
59
+
60
+ def set_header_info(nii_file, voxelsize, image_position_patient, contours_exist=None):
61
+ nii_file.header['pixdim'][1] = voxelsize[0]
62
+ nii_file.header['pixdim'][2] = voxelsize[1]
63
+ nii_file.header['pixdim'][3] = voxelsize[2]
64
+
65
+ # affine - voxelsize
66
+ nii_file.affine[0][0] = voxelsize[0]
67
+ nii_file.affine[1][1] = voxelsize[1]
68
+ nii_file.affine[2][2] = voxelsize[2]
69
+ # affine - imagecorner
70
+ nii_file.affine[0][3] = image_position_patient[0]
71
+ nii_file.affine[1][3] = image_position_patient[1]
72
+ nii_file.affine[2][3] = image_position_patient[2]
73
+ if contours_exist:
74
+ nii_file.header.extensions.append(
75
+ nib.nifti1.Nifti1Extension(0, bytearray(contours_exist)))
76
+ return nii_file
77
+
78
+
79
+ class PatientList:
80
+
81
+ def __init__(self):
82
+ self.list = []
83
+
84
+ def find_CT_image(self, display_id):
85
+ count = -1
86
+ for patient_id in range(len(self.list)):
87
+ for ct_id in range(len(self.list[patient_id].CTimages)):
88
+ if (self.list[patient_id].CTimages[ct_id].isLoaded == 1):
89
+ count += 1
90
+ if (count == display_id):
91
+ break
92
+ if (count == display_id):
93
+ break
94
+
95
+ return patient_id, ct_id
96
+
97
+ def find_dose_image(self, display_id):
98
+ count = -1
99
+ for patient_id in range(len(self.list)):
100
+ for dose_id in range(len(self.list[patient_id].RTdoses)):
101
+ if (self.list[patient_id].RTdoses[dose_id].isLoaded == 1):
102
+ count += 1
103
+ if (count == display_id):
104
+ break
105
+ if (count == display_id):
106
+ break
107
+
108
+ return patient_id, dose_id
109
+
110
+ def find_contour(self, ROIName):
111
+ for patient_id in range(len(self.list)):
112
+ for struct_id in range(len(self.list[patient_id].RTstructs)):
113
+ if (self.list[patient_id].RTstructs[struct_id].isLoaded == 1):
114
+ for contour_id in range(len(self.list[patient_id].RTstructs[struct_id].Contours)):
115
+ if (self.list[patient_id].RTstructs[struct_id].Contours[contour_id].ROIName == ROIName):
116
+ return patient_id, struct_id, contour_id
117
+
118
+ def list_dicom_files(self, folder_path, recursive):
119
+ file_list = os.listdir(folder_path)
120
+ # print("len file_list", len(file_list), "folderpath",folder_path)
121
+ for file_name in file_list:
122
+ file_path = os.path.join(folder_path, file_name)
123
+
124
+ # folders
125
+ if os.path.isdir(file_path):
126
+ if recursive == True:
127
+ subfolder_list = self.list_dicom_files(file_path, True)
128
+ # join_patient_lists(Patients, subfolder_list)
129
+
130
+ # files
131
+ elif os.path.isfile(file_path):
132
+
133
+ try:
134
+ dcm = pydicom.dcmread(file_path)
135
+ except:
136
+ print("Invalid Dicom file: " + file_path)
137
+ continue
138
+
139
+ patient_id = next((x for x, val in enumerate(
140
+ self.list) if val.PatientInfo.PatientID == dcm.PatientID), -1)
141
+
142
+ if patient_id == -1:
143
+ Patient = PatientData()
144
+ Patient.PatientInfo.PatientID = dcm.PatientID
145
+ Patient.PatientInfo.PatientName = str(dcm.PatientName)
146
+ Patient.PatientInfo.PatientBirthDate = dcm.PatientBirthDate
147
+ Patient.PatientInfo.PatientSex = dcm.PatientSex
148
+ self.list.append(Patient)
149
+ patient_id = len(self.list) - 1
150
+
151
+ # Dicom CT
152
+ if dcm.SOPClassUID == "1.2.840.10008.5.1.4.1.1.2":
153
+ ct_id = next((x for x, val in enumerate(
154
+ self.list[patient_id].CTimages) if val.SeriesInstanceUID == dcm.SeriesInstanceUID), -1)
155
+ if ct_id == -1:
156
+ CT = CTimage()
157
+ CT.SeriesInstanceUID = dcm.SeriesInstanceUID
158
+ CT.SOPClassUID == "1.2.840.10008.5.1.4.1.1.2"
159
+ CT.PatientInfo = self.list[patient_id].PatientInfo
160
+ CT.StudyInfo = StudyInfo()
161
+ CT.StudyInfo.StudyInstanceUID = dcm.StudyInstanceUID
162
+ CT.StudyInfo.StudyID = dcm.StudyID
163
+ CT.StudyInfo.StudyDate = dcm.StudyDate
164
+ CT.StudyInfo.StudyTime = dcm.StudyTime
165
+ if (hasattr(dcm, 'SeriesDescription') and dcm.SeriesDescription != ""):
166
+ CT.ImgName = dcm.SeriesDescription
167
+ else:
168
+ CT.ImgName = dcm.SeriesInstanceUID
169
+ self.list[patient_id].CTimages.append(CT)
170
+ ct_id = len(self.list[patient_id].CTimages) - 1
171
+
172
+ self.list[patient_id].CTimages[ct_id].DcmFiles.append(
173
+ file_path)
174
+
175
+ else:
176
+ print("Unknown SOPClassUID " +
177
+ dcm.SOPClassUID + " for file " + file_path)
178
+
179
+ # other
180
+ else:
181
+ print("Unknown file type " + file_path)
182
+
183
+ def print_patient_list(self):
184
+ print("")
185
+ for patient in self.list:
186
+ patient.print_patient_info()
187
+
188
+ print("")
189
+
190
+
191
+ class PatientData:
192
+
193
+ def __init__(self):
194
+ self.PatientInfo = PatientInfo()
195
+ self.CTimages = []
196
+
197
+ def print_patient_info(self, prefix=""):
198
+ print("")
199
+ print(prefix + "PatientName: " + self.PatientInfo.PatientName)
200
+ print(prefix + "PatientID: " + self.PatientInfo.PatientID)
201
+
202
+ for ct in self.CTimages:
203
+ print("")
204
+ ct.print_CT_info(prefix + " ")
205
+
206
+ def import_patient_data(self, newvoxelsize=None):
207
+ # import CT images
208
+ for i, ct in enumerate(self.CTimages):
209
+ if (ct.isLoaded == 1):
210
+ continue
211
+ ct.import_Dicom_CT()
212
+ # Resample CT images
213
+ for i, ct in enumerate(self.CTimages):
214
+ ct.resample_CT(newvoxelsize)
215
+
216
+
217
+ class PatientInfo:
218
+
219
+ def __init__(self):
220
+ self.PatientID = ''
221
+ self.PatientName = ''
222
+ self.PatientBirthDate = ''
223
+ self.PatientSex = ''
224
+
225
+
226
+ class StudyInfo:
227
+
228
+ def __init__(self):
229
+ self.StudyInstanceUID = ''
230
+ self.StudyID = ''
231
+ self.StudyDate = ''
232
+ self.StudyTime = ''
233
+
234
+
235
+ class CTimage:
236
+
237
+ def __init__(self):
238
+ self.SeriesInstanceUID = ""
239
+ self.PatientInfo = {}
240
+ self.StudyInfo = {}
241
+ self.FrameOfReferenceUID = ""
242
+ self.ImgName = ""
243
+ self.SOPClassUID = ""
244
+ self.DcmFiles = []
245
+ self.isLoaded = 0
246
+
247
+ def print_CT_info(self, prefix=""):
248
+ print(prefix + "CT series: " + self.SeriesInstanceUID)
249
+ for ct_slice in self.DcmFiles:
250
+ print(prefix + " " + ct_slice)
251
+
252
+ def resample_CT(self, newvoxelsize):
253
+ ct = self.Image
254
+ # Rescaling to the newvoxelsize if given in parameter
255
+ if newvoxelsize is not None:
256
+ source_shape = self.GridSize
257
+ voxelsize = self.PixelSpacing
258
+ # print("self.ImagePositionPatient",self.ImagePositionPatient, "source_shape",source_shape,"voxelsize",voxelsize)
259
+ VoxelX_source = self.ImagePositionPatient[0] + \
260
+ np.arange(source_shape[0])*voxelsize[0]
261
+ VoxelY_source = self.ImagePositionPatient[1] + \
262
+ np.arange(source_shape[1])*voxelsize[1]
263
+ VoxelZ_source = self.ImagePositionPatient[2] + \
264
+ np.arange(source_shape[2])*voxelsize[2]
265
+
266
+ target_shape = np.ceil(np.array(source_shape).astype(
267
+ float)*np.array(voxelsize).astype(float)/newvoxelsize).astype(int)
268
+ VoxelX_target = self.ImagePositionPatient[0] + \
269
+ np.arange(target_shape[0])*newvoxelsize[0]
270
+ VoxelY_target = self.ImagePositionPatient[1] + \
271
+ np.arange(target_shape[1])*newvoxelsize[1]
272
+ VoxelZ_target = self.ImagePositionPatient[2] + \
273
+ np.arange(target_shape[2])*newvoxelsize[2]
274
+ # print("source_shape",source_shape,"target_shape",target_shape)
275
+ if (all(source_shape == target_shape) and np.linalg.norm(np.subtract(voxelsize, newvoxelsize) < 0.001)):
276
+ print("Image does not need filtering")
277
+ else:
278
+ # anti-aliasing filter
279
+ sigma = [0, 0, 0]
280
+ if (newvoxelsize[0] > voxelsize[0]):
281
+ sigma[0] = 0.4 * (newvoxelsize[0]/voxelsize[0])
282
+ if (newvoxelsize[1] > voxelsize[1]):
283
+ sigma[1] = 0.4 * (newvoxelsize[1]/voxelsize[1])
284
+ if (newvoxelsize[2] > voxelsize[2]):
285
+ sigma[2] = 0.4 * (newvoxelsize[2]/voxelsize[2])
286
+
287
+ if (sigma != [0, 0, 0]):
288
+ print("Image is filtered before downsampling")
289
+ ct = scipy.ndimage.gaussian_filter(ct, sigma)
290
+
291
+ xi = np.array(np.meshgrid(
292
+ VoxelX_target, VoxelY_target, VoxelZ_target))
293
+ xi = np.rollaxis(xi, 0, 4)
294
+ xi = xi.reshape((xi.size // 3, 3))
295
+
296
+ # get resized ct
297
+ ct = scipy.interpolate.interpn((VoxelX_source, VoxelY_source, VoxelZ_source), ct, xi, method='linear',
298
+ fill_value=-1000, bounds_error=False).reshape(target_shape).transpose(1, 0, 2)
299
+
300
+ self.PixelSpacing = newvoxelsize
301
+ self.GridSize = list(ct.shape)
302
+ self.NumVoxels = self.GridSize[0] * self.GridSize[1] * self.GridSize[2]
303
+ self.Image = ct
304
+ # print("self.ImagePositionPatient",self.ImagePositionPatient, "self.GridSize[0]",self.GridSize[0],"self.PixelSpacing",self.PixelSpacing)
305
+
306
+ self.VoxelX = self.ImagePositionPatient[0] + \
307
+ np.arange(self.GridSize[0])*self.PixelSpacing[0]
308
+ self.VoxelY = self.ImagePositionPatient[1] + \
309
+ np.arange(self.GridSize[1])*self.PixelSpacing[1]
310
+ self.VoxelZ = self.ImagePositionPatient[2] + \
311
+ np.arange(self.GridSize[2])*self.PixelSpacing[2]
312
+ self.isLoaded = 1
313
+
314
+ def import_Dicom_CT(self):
315
+
316
+ if (self.isLoaded == 1):
317
+ print("Warning: CT serries " +
318
+ self.SeriesInstanceUID + " is already loaded")
319
+ return
320
+
321
+ images = []
322
+ SOPInstanceUIDs = []
323
+ SliceLocation = np.zeros(len(self.DcmFiles), dtype='float')
324
+
325
+ for i in range(len(self.DcmFiles)):
326
+ file_path = self.DcmFiles[i]
327
+ dcm = pydicom.dcmread(file_path)
328
+
329
+ if (hasattr(dcm, 'SliceLocation') and abs(dcm.SliceLocation - dcm.ImagePositionPatient[2]) > 0.001):
330
+ print("WARNING: SliceLocation (" + str(dcm.SliceLocation) +
331
+ ") is different than ImagePositionPatient[2] (" + str(dcm.ImagePositionPatient[2]) + ") for " + file_path)
332
+
333
+ SliceLocation[i] = float(dcm.ImagePositionPatient[2])
334
+ images.append(dcm.pixel_array * dcm.RescaleSlope +
335
+ dcm.RescaleIntercept)
336
+ SOPInstanceUIDs.append(dcm.SOPInstanceUID)
337
+
338
+ # sort slices according to their location in order to reconstruct the 3d image
339
+ sort_index = np.argsort(SliceLocation)
340
+ SliceLocation = SliceLocation[sort_index]
341
+ SOPInstanceUIDs = [SOPInstanceUIDs[n] for n in sort_index]
342
+ images = [images[n] for n in sort_index]
343
+ ct = np.dstack(images).astype("float32")
344
+
345
+ if ct.shape[0:2] != (dcm.Rows, dcm.Columns):
346
+ print("WARNING: GridSize " + str(ct.shape[0:2]) + " different from Dicom Rows (" + str(
347
+ dcm.Rows) + ") and Columns (" + str(dcm.Columns) + ")")
348
+
349
+ MeanSliceDistance = (
350
+ SliceLocation[-1] - SliceLocation[0]) / (len(images)-1)
351
+ if (abs(MeanSliceDistance - dcm.SliceThickness) > 0.001):
352
+ print("WARNING: MeanSliceDistance (" + str(MeanSliceDistance) +
353
+ ") is different from SliceThickness (" + str(dcm.SliceThickness) + ")")
354
+
355
+ self.FrameOfReferenceUID = dcm.FrameOfReferenceUID
356
+ self.ImagePositionPatient = [float(dcm.ImagePositionPatient[0]), float(
357
+ dcm.ImagePositionPatient[1]), SliceLocation[0]]
358
+ self.PixelSpacing = [float(dcm.PixelSpacing[0]), float(
359
+ dcm.PixelSpacing[1]), MeanSliceDistance]
360
+ self.GridSize = list(ct.shape)
361
+ self.NumVoxels = self.GridSize[0] * self.GridSize[1] * self.GridSize[2]
362
+ self.Image = ct
363
+ self.SOPInstanceUIDs = SOPInstanceUIDs
364
+ self.VoxelX = self.ImagePositionPatient[0] + \
365
+ np.arange(self.GridSize[0])*self.PixelSpacing[0]
366
+ self.VoxelY = self.ImagePositionPatient[1] + \
367
+ np.arange(self.GridSize[1])*self.PixelSpacing[1]
368
+ self.VoxelZ = self.ImagePositionPatient[2] + \
369
+ np.arange(self.GridSize[2])*self.PixelSpacing[2]
370
+ self.isLoaded = 1
371
+
372
+ print("Convert CT dicom to nii done")
nii_to_dicom.py ADDED
@@ -0,0 +1,501 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import nibabel as nib
2
+ import pydicom
3
+ import os
4
+ import glob
5
+ import numpy as np
6
+ from copy import deepcopy
7
+ from matplotlib.patches import Polygon
8
+ import warnings
9
+ from scipy.ndimage import find_objects
10
+ from scipy.ndimage.morphology import binary_fill_holes
11
+ from skimage import measure
12
+ from PIL import Image, ImageDraw
13
+ import scipy
14
+ import datetime
15
+
16
+ def convert_nii_to_dicom(dicomctdir, predictedNiiFile, predictedDicomFile, predicted_structures=[], rtstruct_colors=[], refCT = None):
17
+ # img = nib.load(os.path.join(predniidir, patient_id, 'RTStruct.nii.gz'))
18
+ # data = img.get_fdata()[:,:,:,1]
19
+ # patient_list = PatientList() # initialize list of patient data
20
+ # patient_list.list_dicom_files(os.path.join(ct_ref_path,patient,inner_ct_ref_path), 1) # search dicom files in the patient data folder, stores all files in the attributes (all CT images, dose file, struct file)
21
+ # refCT = patient_list.list[0].CTimages[0]
22
+ # refCT.import_Dicom_CT()
23
+
24
+ struct = RTstruct()
25
+ struct.load_from_nii(predictedNiiFile, predicted_structures, rtstruct_colors) #TODO add already the refCT info in here because there are fields to do that
26
+ if not struct.Contours[0].Mask_PixelSpacing == refCT.PixelSpacing:
27
+ struct.resample_struct(refCT.PixelSpacing)
28
+ struct.export_Dicom(refCT, predictedDicomFile)
29
+
30
+ # create_RT_struct(dicomctdir, data.transpose([1,0,2]).astype(int), dicomdir, predicted_structures)
31
+
32
+ class RTstruct:
33
+
34
+ def __init__(self):
35
+ self.SeriesInstanceUID = ""
36
+ self.PatientInfo = {}
37
+ self.StudyInfo = {}
38
+ self.CT_SeriesInstanceUID = ""
39
+ self.DcmFile = ""
40
+ self.isLoaded = 0
41
+ self.Contours = []
42
+ self.NumContours = 0
43
+
44
+
45
+ def print_struct_info(self, prefix=""):
46
+ print(prefix + "Struct: " + self.SeriesInstanceUID)
47
+ print(prefix + " " + self.DcmFile)
48
+
49
+
50
+ def print_ROINames(self):
51
+ print("RT Struct UID: " + self.SeriesInstanceUID)
52
+ count = -1
53
+ for contour in self.Contours:
54
+ count += 1
55
+ print(' [' + str(count) + '] ' + contour.ROIName)
56
+
57
+ def resample_struct(self, newvoxelsize):
58
+ # Rescaling to the newvoxelsize if given in parameter
59
+ if newvoxelsize is not None:
60
+ for i, Contour in enumerate(self.Contours):
61
+ source_shape = Contour.Mask_GridSize
62
+ voxelsize = Contour.Mask_PixelSpacing
63
+ VoxelX_source = Contour.Mask_Offset[0] + np.arange(source_shape[0])*voxelsize[0]
64
+ VoxelY_source = Contour.Mask_Offset[1] + np.arange(source_shape[1])*voxelsize[1]
65
+ VoxelZ_source = Contour.Mask_Offset[2] + np.arange(source_shape[2])*voxelsize[2]
66
+
67
+ target_shape = np.ceil(np.array(source_shape).astype(float)*np.array(voxelsize).astype(float)/newvoxelsize).astype(int)
68
+ VoxelX_target = Contour.Mask_Offset[0] + np.arange(target_shape[0])*newvoxelsize[0]
69
+ VoxelY_target = Contour.Mask_Offset[1] + np.arange(target_shape[1])*newvoxelsize[1]
70
+ VoxelZ_target = Contour.Mask_Offset[2] + np.arange(target_shape[2])*newvoxelsize[2]
71
+
72
+ contour = Contour.Mask
73
+
74
+ if(all(source_shape == target_shape) and np.linalg.norm(np.subtract(voxelsize, newvoxelsize) < 0.001)):
75
+ print("! Image does not need filtering")
76
+ else:
77
+ # anti-aliasing filter
78
+ sigma = [0, 0, 0]
79
+ if(newvoxelsize[0] > voxelsize[0]): sigma[0] = 0.4 * (newvoxelsize[0]/voxelsize[0])
80
+ if(newvoxelsize[1] > voxelsize[1]): sigma[1] = 0.4 * (newvoxelsize[1]/voxelsize[1])
81
+ if(newvoxelsize[2] > voxelsize[2]): sigma[2] = 0.4 * (newvoxelsize[2]/voxelsize[2])
82
+
83
+ if(sigma != [0, 0, 0]):
84
+ contour = scipy.ndimage.gaussian_filter(contour.astype(float), sigma)
85
+ #come back to binary
86
+ contour[np.where(contour>=0.5)] = 1
87
+ contour[np.where(contour<0.5)] = 0
88
+
89
+ xi = np.array(np.meshgrid(VoxelX_target, VoxelY_target, VoxelZ_target))
90
+ xi = np.rollaxis(xi, 0, 4)
91
+ xi = xi.reshape((xi.size // 3, 3))
92
+
93
+ # get resized ct
94
+ contour = scipy.interpolate.interpn((VoxelX_source,VoxelY_source,VoxelZ_source), contour, xi, method='nearest', fill_value=0, bounds_error=False).astype(bool).reshape(target_shape).transpose(1,0,2)
95
+ Contour.Mask_PixelSpacing = newvoxelsize
96
+ Contour.Mask_GridSize = list(contour.shape)
97
+ Contour.NumVoxels = Contour.Mask_GridSize[0] * Contour.Mask_GridSize[1] * Contour.Mask_GridSize[2]
98
+ Contour.Mask = contour
99
+ self.Contours[i]=Contour
100
+
101
+
102
+ def import_Dicom_struct(self, CT):
103
+ if(self.isLoaded == 1):
104
+ print("Warning: RTstruct " + self.SeriesInstanceUID + " is already loaded")
105
+ return
106
+ dcm = pydicom.dcmread(self.DcmFile)
107
+
108
+ self.CT_SeriesInstanceUID = CT.SeriesInstanceUID
109
+
110
+ for dcm_struct in dcm.StructureSetROISequence:
111
+ ReferencedROI_id = next((x for x, val in enumerate(dcm.ROIContourSequence) if val.ReferencedROINumber == dcm_struct.ROINumber), -1)
112
+ dcm_contour = dcm.ROIContourSequence[ReferencedROI_id]
113
+
114
+ Contour = ROIcontour()
115
+ Contour.SeriesInstanceUID = self.SeriesInstanceUID
116
+ Contour.ROIName = dcm_struct.ROIName
117
+ Contour.ROIDisplayColor = dcm_contour.ROIDisplayColor
118
+
119
+ #print("Import contour " + str(len(self.Contours)) + ": " + Contour.ROIName)
120
+
121
+ Contour.Mask = np.zeros((CT.GridSize[0], CT.GridSize[1], CT.GridSize[2]), dtype=np.bool)
122
+ Contour.Mask_GridSize = CT.GridSize
123
+ Contour.Mask_PixelSpacing = CT.PixelSpacing
124
+ Contour.Mask_Offset = CT.ImagePositionPatient
125
+ Contour.Mask_NumVoxels = CT.NumVoxels
126
+ Contour.ContourMask = np.zeros((CT.GridSize[0], CT.GridSize[1], CT.GridSize[2]), dtype=np.bool)
127
+
128
+ SOPInstanceUID_match = 1
129
+
130
+ if not hasattr(dcm_contour, 'ContourSequence'):
131
+ print("This structure has no attribute ContourSequence. Skipping ...")
132
+ continue
133
+
134
+ for dcm_slice in dcm_contour.ContourSequence:
135
+ Slice = {}
136
+
137
+ # list of Dicom coordinates
138
+ Slice["XY_dcm"] = list(zip( np.array(dcm_slice.ContourData[0::3]), np.array(dcm_slice.ContourData[1::3]) ))
139
+ Slice["Z_dcm"] = float(dcm_slice.ContourData[2])
140
+
141
+ # list of coordinates in the image frame
142
+ Slice["XY_img"] = list(zip( ((np.array(dcm_slice.ContourData[0::3]) - CT.ImagePositionPatient[0]) / CT.PixelSpacing[0]), ((np.array(dcm_slice.ContourData[1::3]) - CT.ImagePositionPatient[1]) / CT.PixelSpacing[1]) ))
143
+ Slice["Z_img"] = (Slice["Z_dcm"] - CT.ImagePositionPatient[2]) / CT.PixelSpacing[2]
144
+ Slice["Slice_id"] = int(round(Slice["Z_img"]))
145
+
146
+ # convert polygon to mask (based on matplotlib - slow)
147
+ #x, y = np.meshgrid(np.arange(CT.GridSize[0]), np.arange(CT.GridSize[1]))
148
+ #points = np.transpose((x.ravel(), y.ravel()))
149
+ #path = Path(Slice["XY_img"])
150
+ #mask = path.contains_points(points)
151
+ #mask = mask.reshape((CT.GridSize[0], CT.GridSize[1]))
152
+
153
+ # convert polygon to mask (based on PIL - fast)
154
+ img = Image.new('L', (CT.GridSize[0], CT.GridSize[1]), 0)
155
+ if(len(Slice["XY_img"]) > 1): ImageDraw.Draw(img).polygon(Slice["XY_img"], outline=1, fill=1)
156
+ mask = np.array(img)
157
+ Contour.Mask[:,:,Slice["Slice_id"]] = np.logical_or(Contour.Mask[:,:,Slice["Slice_id"]], mask)
158
+
159
+ # do the same, but only keep contour in the mask
160
+ img = Image.new('L', (CT.GridSize[0], CT.GridSize[1]), 0)
161
+ if(len(Slice["XY_img"]) > 1): ImageDraw.Draw(img).polygon(Slice["XY_img"], outline=1, fill=0)
162
+ mask = np.array(img)
163
+ Contour.ContourMask[:,:,Slice["Slice_id"]] = np.logical_or(Contour.ContourMask[:,:,Slice["Slice_id"]], mask)
164
+
165
+ Contour.ContourSequence.append(Slice)
166
+
167
+ # check if the contour sequence is imported on the correct CT slice:
168
+ if(hasattr(dcm_slice, 'ContourImageSequence') and CT.SOPInstanceUIDs[Slice["Slice_id"]] != dcm_slice.ContourImageSequence[0].ReferencedSOPInstanceUID):
169
+ SOPInstanceUID_match = 0
170
+
171
+ if SOPInstanceUID_match != 1:
172
+ print("WARNING: some SOPInstanceUIDs don't match during importation of " + Contour.ROIName + " contour on CT image")
173
+
174
+ self.Contours.append(Contour)
175
+ self.NumContours += 1
176
+ #print("self.NumContours",self.NumContours, len(self.Contours))
177
+ self.isLoaded = 1
178
+
179
+ def load_from_nii(self, struct_nii_path, rtstruct_labels, rtstruct_colors):
180
+
181
+ # load the nii image
182
+ struct_nib = nib.load(struct_nii_path)
183
+ struct_data = struct_nib.get_fdata()
184
+
185
+ # get contourexists from header
186
+ if len(struct_nib.header.extensions)==0:
187
+ contoursexist = []
188
+ else:
189
+ contoursexist = list(struct_nib.header.extensions[0].get_content())
190
+
191
+ # get number of rois in struct_data
192
+ # for nii with consecutive integers
193
+ #roinumbers = np.unique(struct_data)
194
+ # for nii with power of 2 format
195
+ roinumbers = list(np.arange(np.floor(np.log2(np.max(struct_data))).astype(int)+1)) # CAREFUL WITH THIS LINE, MIGHT NOT WORK ALWAYS IF WE HAVE OVERLAP OF
196
+ nb_rois_in_struct = len(roinumbers)
197
+
198
+ # check that they match
199
+ if len(contoursexist)!=0 and (not len(rtstruct_labels) == len(contoursexist) == nb_rois_in_struct):
200
+ #raise TypeError("The number or struct labels, contoursexist, and masks in struct.nii.gz is not the same")
201
+ raise Warning("The number or struct labels, contoursexist, and estimated masks in struct.nii.gz is not the same. Taking len(contoursexist) as number of rois")
202
+ self.NumContours = len(contoursexist)
203
+ else:
204
+ self.NumContours = nb_rois_in_struct
205
+
206
+ # fill in contours
207
+ #TODO fill in ContourSequence and ContourData to be faster later in writeDicomRTstruct
208
+ for c in range(self.NumContours):
209
+
210
+ Contour = ROIcontour()
211
+ Contour.SeriesInstanceUID = self.SeriesInstanceUID
212
+ Contour.ROIName = rtstruct_labels[c]
213
+ if rtstruct_colors[c] == None:
214
+ Contour.ROIDisplayColor = [0, 0, 255] # default color is blue
215
+ else:
216
+ Contour.ROIDisplayColor = rtstruct_colors[c]
217
+ if len(contoursexist)!=0 and contoursexist[c] == 0:
218
+ Contour.Mask = np.zeros((struct_nib.header['dim'][1], struct_nib.header['dim'][2], struct_nib.header['dim'][3]), dtype=np.bool_)
219
+ else:
220
+ Contour.Mask = np.bitwise_and(struct_data.astype(int), 2 ** c).astype(bool)
221
+ #TODO enable option for consecutive integers masks?
222
+ Contour.Mask_GridSize = [struct_nib.header['dim'][1], struct_nib.header['dim'][2], struct_nib.header['dim'][3]]
223
+ Contour.Mask_PixelSpacing = [struct_nib.header['pixdim'][1], struct_nib.header['pixdim'][2], struct_nib.header['pixdim'][3]]
224
+ Contour.Mask_Offset = [struct_nib.header['qoffset_x'], struct_nib.header['qoffset_y'], struct_nib.header['qoffset_z']]
225
+ Contour.Mask_NumVoxels = struct_nib.header['dim'][1].astype(int) * struct_nib.header['dim'][2].astype(int) * struct_nib.header['dim'][3].astype(int)
226
+ # Contour.ContourMask --> this should be only the contour, so far we don't need it so I'll skip it
227
+
228
+ # apend to self
229
+ self.Contours.append(Contour)
230
+
231
+
232
+ def export_Dicom(self, refCT, outputFile):
233
+
234
+ # meta data
235
+
236
+ # generate UID
237
+ #uid_base = '' #TODO define one for us if we want? Siri is using: uid_base='1.2.826.0.1.3680043.10.230.',
238
+ # personal UID, applied for via https://www.medicalconnections.co.uk/FreeUID/
239
+
240
+ SOPInstanceUID = pydicom.uid.generate_uid() #TODO verify this! Siri was using a uid_base, this line is taken from OpenTPS writeRTPlan
241
+ #SOPInstanceUID = pydicom.uid.generate_uid('1.2.840.10008.5.1.4.1.1.481.3.') # siri's version
242
+
243
+ meta = pydicom.dataset.FileMetaDataset()
244
+ meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.481.3' # UID class for RTSTRUCT
245
+ meta.MediaStorageSOPInstanceUID = SOPInstanceUID
246
+ # meta.ImplementationClassUID = uid_base + '1.1.1' # Siri's
247
+ meta.ImplementationClassUID = '1.2.250.1.59.3.0.3.5.0' # from OpenREGGUI
248
+ meta.TransferSyntaxUID = '1.2.840.10008.1.2' # Siri's and OpenREGGUI
249
+ meta.FileMetaInformationGroupLength = 188 # from Siri
250
+ # meta.ImplementationVersionName = 'DCIE 2.2' # from Siri
251
+
252
+
253
+ # Main data elements - only required fields, optional fields like StudyDescription are not included for simplicity
254
+ ds = pydicom.dataset.FileDataset(outputFile, {}, file_meta=meta, preamble=b"\0" * 128) # preamble is taken from this example https://pydicom.github.io/pydicom/dev/auto_examples/input_output/plot_write_dicom.html#sphx-glr-auto-examples-input-output-plot-write-dicom-py
255
+
256
+ # Patient info - will take it from the referenced CT image
257
+ ds.PatientName = refCT.PatientInfo.PatientName
258
+ ds.PatientID = refCT.PatientInfo.PatientID
259
+ ds.PatientBirthDate = refCT.PatientInfo.PatientBirthDate
260
+ ds.PatientSex = refCT.PatientInfo.PatientSex
261
+
262
+ # General Study
263
+ dt = datetime.datetime.now()
264
+ ds.StudyDate = dt.strftime('%Y%m%d')
265
+ ds.StudyTime = dt.strftime('%H%M%S.%f')
266
+ ds.AccessionNumber = '1' # A RIS/PACS (Radiology Information System/picture archiving and communication system) generated number that identifies the order for the Study.
267
+ ds.ReferringPhysicianName = 'NA'
268
+ ds.StudyInstanceUID = refCT.StudyInfo.StudyInstanceUID # get from reference CT to indicate that they belong to the same study
269
+ ds.StudyID = refCT.StudyInfo.StudyID # get from reference CT to indicate that they belong to the same study
270
+
271
+ # RT Series
272
+ #ds.SeriesDate # optional
273
+ #ds.SeriesTime # optional
274
+ ds.Modality = 'RTSTRUCT'
275
+ ds.SeriesDescription = 'AI-predicted' + dt.strftime('%Y%m%d') + dt.strftime('%H%M%S.%f')
276
+ ds.OperatorsName = 'MIRO AI team'
277
+ ds.SeriesInstanceUID = pydicom.uid.generate_uid() # if we have a uid_base --> pydicom.uid.generate_uid(uid_base)
278
+ ds.SeriesNumber = '1'
279
+
280
+ # General Equipment
281
+ ds.Manufacturer = 'MIRO lab'
282
+ #ds.InstitutionName = 'MIRO lab' # optional
283
+ #ds.ManufacturerModelName = 'nnUNet' # optional, but can be a good tag to insert the model information or label
284
+ #ds.SoftwareVersions # optional, but can be used to insert the version of the code in PARROT or the version of the model
285
+
286
+ # Frame of Reference
287
+ ds.FrameOfReferenceUID = refCT.FrameOfReferenceUID
288
+ ds.PositionReferenceIndicator = '' # empty if unknown - info here https://dicom.innolitics.com/ciods/rt-structure-set/frame-of-reference/00201040
289
+
290
+ # Structure Set
291
+ ds.StructureSetLabel = 'AI predicted' # do not use - or spetial characters or the Dicom Validation in Raystation will give a warning
292
+ #ds.StructureSetName # optional
293
+ #ds.StructureSetDescription # optional
294
+ ds.StructureSetDate = dt.strftime('%Y%m%d')
295
+ ds.StructureSetTime = dt.strftime('%H%M%S.%f')
296
+ ds.ReferencedFrameOfReferenceSequence = pydicom.Sequence()# optional
297
+ # we assume there is only one, the CT
298
+ dssr = pydicom.Dataset()
299
+ dssr.FrameOfReferenceUID = refCT.FrameOfReferenceUID
300
+ dssr.RTReferencedStudySequence = pydicom.Sequence()
301
+ # fill in sequence
302
+ dssr_refStudy = pydicom.Dataset()
303
+ dssr_refStudy.ReferencedSOPClassUID = '1.2.840.10008.3.1.2.3.1' # Study Management Detached
304
+ dssr_refStudy.ReferencedSOPInstanceUID = refCT.StudyInfo.StudyInstanceUID
305
+ dssr_refStudy.RTReferencedSeriesSequence = pydicom.Sequence()
306
+ #initialize
307
+ dssr_refStudy_series = pydicom.Dataset()
308
+ dssr_refStudy_series.SeriesInstanceUID = refCT.SeriesInstanceUID
309
+ dssr_refStudy_series.ContourImageSequence = pydicom.Sequence()
310
+ # loop over slices of CT
311
+ for slc in range(len(refCT.SOPInstanceUIDs)):
312
+ dssr_refStudy_series_slc = pydicom.Dataset()
313
+ dssr_refStudy_series_slc.ReferencedSOPClassUID = refCT.SOPClassUID
314
+ dssr_refStudy_series_slc.ReferencedSOPInstanceUID = refCT.SOPInstanceUIDs[slc]
315
+ # append
316
+ dssr_refStudy_series.ContourImageSequence.append(dssr_refStudy_series_slc)
317
+
318
+ # append
319
+ dssr_refStudy.RTReferencedSeriesSequence.append(dssr_refStudy_series)
320
+ # append
321
+ dssr.RTReferencedStudySequence.append(dssr_refStudy)
322
+ #append
323
+ ds.ReferencedFrameOfReferenceSequence.append(dssr)
324
+ #
325
+ ds.StructureSetROISequence = pydicom.Sequence()
326
+ # loop over the ROIs to fill in the fields
327
+ for iroi in range(self.NumContours):
328
+ # initialize the Dataset
329
+ dssr = pydicom.Dataset()
330
+ dssr.ROINumber = iroi + 1 # because iroi starts at zero and ROINumber cannot be zero
331
+ dssr.ReferencedFrameOfReferenceUID = ds.FrameOfReferenceUID # coming from refCT
332
+ dssr.ROIName = self.Contours[iroi].ROIName
333
+ #dssr.ROIDescription # optional
334
+ dssr.ROIGenerationAlgorithm = 'AUTOMATIC' # can also be 'SEMIAUTOMATIC' OR 'MANUAL', info here https://dicom.innolitics.com/ciods/rt-structure-set/structure-set/30060020/30060036
335
+ #TODO enable a function to tell us which type of GenerationAlgorithm we have
336
+ ds.StructureSetROISequence.append(dssr)
337
+
338
+ # delete to remove space
339
+ del dssr
340
+
341
+ #TODO merge all loops into one to be faster, although like this the code is easier to follow I find
342
+
343
+ # ROI Contour
344
+ ds.ROIContourSequence = pydicom.Sequence()
345
+ # loop over the ROIs to fill in the fields
346
+ for iroi in range(self.NumContours):
347
+ # initialize the Dataset
348
+ dssr = pydicom.Dataset()
349
+ dssr.ROIDisplayColor = self.Contours[iroi].ROIDisplayColor
350
+ dssr.ReferencedROINumber = iroi + 1 # because iroi starts at zero and ReferencedROINumber cannot be zero
351
+ dssr.ContourSequence = pydicom.Sequence()
352
+ # mask to polygon
353
+ polygonMeshList = self.Contours[iroi].getROIContour()
354
+ # get z vector
355
+ z_coords = list(np.arange(self.Contours[iroi].Mask_Offset[2],self.Contours[iroi].Mask_Offset[2]+self.Contours[iroi].Mask_GridSize[2]*self.Contours[iroi].Mask_PixelSpacing[2], self.Contours[iroi].Mask_PixelSpacing[2]))
356
+ # loop over the polygonMeshList to fill in ContourSequence
357
+ for polygon in polygonMeshList:
358
+
359
+ # initialize the Dataset
360
+ dssr_slc = pydicom.Dataset()
361
+ dssr_slc.ContourGeometricType = 'CLOSED_PLANAR' # can also be 'POINT', 'OPEN_PLANAR', 'OPEN_NONPLANAR', info here https://dicom.innolitics.com/ciods/rt-structure-set/roi-contour/30060039/30060040/30060042
362
+ #TODO enable the proper selection of the ContourGeometricType
363
+
364
+ # fill in contour points and data
365
+ dssr_slc.NumberOfContourPoints = len(polygon[0::3])
366
+ #dssr_slc.ContourNumber # optional
367
+ dssr_slc.ContourData = polygon
368
+
369
+ #get slice
370
+ polygon_z = polygon[2]
371
+ slc = z_coords.index(polygon_z)
372
+ # fill in ContourImageSequence
373
+ dssr_slc.ContourImageSequence = pydicom.Sequence() # Sequence of images containing the contour
374
+ # in our case, we assume we only have one, the reference CT (refCT)
375
+ dssr_slc_ref = pydicom.Dataset()
376
+ dssr_slc_ref.ReferencedSOPClassUID = refCT.SOPClassUID
377
+ dssr_slc_ref.ReferencedSOPInstanceUID = refCT.SOPInstanceUIDs[slc]
378
+ dssr_slc.ContourImageSequence.append(dssr_slc_ref)
379
+
380
+ # append Dataset to Sequence
381
+ dssr.ContourSequence.append(dssr_slc)
382
+
383
+ # append Dataset
384
+ ds.ROIContourSequence.append(dssr)
385
+
386
+ # RT ROI Observations
387
+ ds.RTROIObservationsSequence = pydicom.Sequence()
388
+ # loop over the ROIs to fill in the fields
389
+ for iroi in range(self.NumContours):
390
+ # initialize the Dataset
391
+ dssr = pydicom.Dataset()
392
+ dssr.ObservationNumber = iroi + 1 # because iroi starts at zero and ReferencedROINumber cannot be zero
393
+ dssr.ReferencedROINumber = iroi + 1 ## because iroi starts at zero and ReferencedROINumber cannot be zero
394
+ dssr.ROIObservationLabel = self.Contours[iroi].ROIName #optional
395
+ dssr.RTROIInterpretedType = 'ORGAN' # we can have many types, see here https://dicom.innolitics.com/ciods/rt-structure-set/rt-roi-observations/30060080/300600a4
396
+ # TODO enable a better fill in of the RTROIInterpretedType
397
+ dssr.ROIInterpreter = '' # empty if unknown
398
+ # append Dataset
399
+ ds.RTROIObservationsSequence.append(dssr)
400
+
401
+ # Approval
402
+ ds.ApprovalStatus = 'UNAPPROVED'#'APPROVED'
403
+ # if ds.ApprovalStatus = 'APPROVED', then we need to fill in the reviewer information
404
+ #ds.ReviewDate = dt.strftime('%Y%m%d')
405
+ #ds.ReviewTime = dt.strftime('%H%M%S.%f')
406
+ #ds.ReviewerName = 'MIRO AI team'
407
+
408
+ # SOP common
409
+ ds.SpecificCharacterSet = 'ISO_IR 100' # conditionally required - see info here https://dicom.innolitics.com/ciods/rt-structure-set/sop-common/00080005
410
+ #ds.InstanceCreationDate # optional
411
+ #ds.InstanceCreationTime # optional
412
+ ds.SOPClassUID = '1.2.840.10008.5.1.4.1.1.481.3' #RTSTRUCT file
413
+ ds.SOPInstanceUID = SOPInstanceUID# Siri's --> pydicom.uid.generate_uid(uid_base)
414
+ #ds.InstanceNumber # optional
415
+
416
+ # save dicom file
417
+ print("Export dicom RTSTRUCT: " + outputFile)
418
+ ds.save_as(outputFile)
419
+
420
+
421
+
422
+
423
class ROIcontour:
    """Container for a single RT structure contour, with conversion from a
    binary mask to DICOM-style polygon contour data.

    Attributes set externally before calling :meth:`getROIContour`:
    ``Mask`` (3-D binary array), ``Mask_PixelSpacing`` (x, y, z spacing),
    ``Mask_Offset`` (patient-space origin) and ``ROIDisplayColor``.
    """

    def __init__(self):
        # SeriesInstanceUID of the image series this contour refers to.
        self.SeriesInstanceUID = ""
        # Human-readable ROI name (e.g. 'BODY').
        self.ROIName = ""
        # DICOM ContourSequence items (filled in externally).
        self.ContourSequence = []

    def _points_to_polygon(self, points, zSlice):
        """Convert skimage contour points (row, col) on slice ``zSlice`` into a
        flat ``[x1, y1, z1, x2, y2, z2, ...]`` list in patient coordinates.

        NOTE: the column index maps to x and the row index to y — axes are
        swapped with respect to the original OpenTPS implementation (the 'AB'
        adaptation kept in the original comments).
        """
        polygonMesh = []
        for point in points:
            xCoord = np.round(point[1]) * self.Mask_PixelSpacing[0] + self.Mask_Offset[0]
            yCoord = np.round(point[0]) * self.Mask_PixelSpacing[1] + self.Mask_Offset[1]
            zCoord = zSlice * self.Mask_PixelSpacing[2] + self.Mask_Offset[2]
            polygonMesh.append(xCoord)
            polygonMesh.append(yCoord)
            polygonMesh.append(zCoord)
        return polygonMesh

    def getROIContour(self):
        """Convert the binary mask ``self.Mask`` into closed planar polygons.

        Iterates slice by slice, labels connected components and traces their
        boundaries with ``skimage.measure.find_contours``.

        Returns:
            list[list[float]]: one flat [x, y, z, ...] polygon per traced
            contour, or ``0`` if scikit-image is not installed (kept for
            backward compatibility with existing callers).
        """
        try:
            # Fixed: catch only ImportError (was a bare except that would also
            # swallow e.g. KeyboardInterrupt); unused find_boundaries removed.
            from skimage.measure import label, find_contours
        except ImportError:
            print('Module skimage (scikit-image) not installed, ROIMask cannot be converted to ROIContour')
            return 0

        polygonMeshList = []
        for zSlice in range(self.Mask.shape[2]):

            labeledImg, numberOfLabel = label(self.Mask[:, :, zSlice], return_num=True)

            for i in range(1, numberOfLabel + 1):

                singleLabelImg = labeledImg == i
                contours = find_contours(singleLabelImg.astype(np.uint8), level=0.6)

                if len(contours) > 0:

                    if len(contours) == 2:
                        # Interior (hole) contour: re-extract with a lower
                        # threshold so the hole boundary is traced correctly.
                        contours2 = find_contours(singleLabelImg.astype(np.uint8), level=0.4)
                        polygonMeshList.append(self._points_to_polygon(contours2[1], zSlice))

                    # Exterior contour of this connected component.
                    polygonMeshList.append(self._points_to_polygon(contours[0], zSlice))

        return polygonMeshList
postprocessing.py ADDED
@@ -0,0 +1,347 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from monai.transforms import MapTransform, Transform
2
+ from monai.config import KeysCollection
3
+ from typing import Dict, Hashable, Mapping, Optional, Type, Union, Sequence
4
+ import torch, sys
5
+ from pathlib import Path
6
+ from monai.config import DtypeLike, KeysCollection, PathLike
7
+ from monai.data import image_writer
8
+ from monai.transforms.transform import MapTransform
9
+ from monai.utils import GridSamplePadMode, ensure_tuple, ensure_tuple_rep, optional_import
10
+ from monai.data.meta_tensor import MetaTensor
11
+ from monai.data.folder_layout import FolderLayout
12
+ from pydoc import locate
13
+ import numpy as np
14
+ import nibabel as nib, os
15
+ from monai.utils.enums import PostFix
16
+
17
+ DEFAULT_POST_FIX = PostFix.meta()
18
+
19
def set_header_info(nii_file, voxelsize, image_position_patient, contours_exist=None):
    """Stamp voxel size and patient-space origin into a NIfTI file's header/affine.

    Args:
        nii_file: nibabel NIfTI image whose ``header`` and ``affine`` are updated in place.
        voxelsize: (x, y, z) voxel spacing in mm.
        image_position_patient: (x, y, z) patient-space position of the image corner.
        contours_exist: optional list of 0/1 presence flags; when given (and
            non-empty), stored as a raw NIfTI header extension (code 0).

    Returns:
        The same ``nii_file`` object, for chaining.
    """
    for axis in range(3):
        # pixdim indices are 1-based for the spatial axes.
        nii_file.header['pixdim'][axis + 1] = voxelsize[axis]
        # Diagonal of the affine carries the voxel spacing...
        nii_file.affine[axis][axis] = voxelsize[axis]
        # ...and the last column carries the image corner in patient coordinates.
        nii_file.affine[axis][3] = image_position_patient[axis]
    if contours_exist:
        nii_file.header.extensions.append(nib.nifti1.Nifti1Extension(0, bytearray(contours_exist)))
    return nii_file
35
+
36
+
37
def add_contours_exist(preddir, refCT):
    """Collapse the multi-channel one-hot prediction into one bit-encoded volume.

    Loads ``RTStruct.nii.gz`` from *preddir* (one-hot, channel 0 = background),
    encodes structure channel ``i+1`` as bit ``2**i`` in a single 3-D volume,
    records a per-structure presence flag list in the NIfTI header (via
    ``set_header_info``) and overwrites the file in place. Voxel spacing and
    origin are copied from *refCT*.
    """
    pred_path = os.path.join(preddir, 'RTStruct.nii.gz')
    volume = nib.load(pred_path).get_fdata().astype(int)

    encoded = np.zeros(volume.shape[:3])
    presence_flags = []
    # Channel 0 is background, so structure ``bit`` lives in channel ``bit + 1``.
    for bit, channel in enumerate(range(1, volume.shape[-1])):
        structure = volume[:, :, :, channel]
        if np.count_nonzero(structure) > 0:
            presence_flags.append(1)
            encoded += np.where(structure == 1, 2 ** bit, 0)
        else:
            presence_flags.append(0)

    encoded_nii = nib.Nifti1Image(encoded, affine=np.eye(4))
    encoded_nii = set_header_info(
        encoded_nii,
        voxelsize=np.array(refCT.PixelSpacing),
        image_position_patient=refCT.ImagePositionPatient,
        contours_exist=presence_flags,
    )
    nib.save(encoded_nii, pred_path)
53
+
54
class SaveImaged(MapTransform):
    """
    Dictionary-based wrapper of the ``SaveImage`` transform defined below in
    this module (which hard-codes the output subject name to ``RTStruct``).

    For each entry of ``keys`` the image (channel-first, shape ``[C,H,W,[D]]``)
    is saved to ``output_dir`` as ``{subject}{output_postfix}{output_ext}``.
    Metadata is taken from ``meta_keys`` or, when that is ``None``, from
    ``f"{key}_{meta_key_postfix}"``.

    Args (mirroring :py:class:`monai.transforms.SaveImaged`):
        keys: keys of the corresponding items to be saved.
        meta_keys: explicit metadata-dict keys, one per entry of ``keys``.
        meta_key_postfix: used to build ``key_{postfix}`` when ``meta_keys`` is ``None``.
        output_dir / output_postfix / output_ext: output location and naming.
        resample: whether to resample (if needed) to the ``spatial_shape`` /
            ``original_affine`` found in the metadata before saving.
        mode / padding_mode / scale / dtype / output_dtype: writer and
            resampling options, passed through to ``SaveImage``.
        allow_missing_keys: don't raise an exception if a key is missing.
        squeeze_end_dims: drop trailing singleton dims after moving the channel last.
        data_root_dir / separate_folder / print_log / output_format / writer:
            folder layout and writer selection, passed through to ``SaveImage``.
    """

    def __init__(
        self,
        keys: KeysCollection,
        meta_keys: Optional[KeysCollection] = None,
        meta_key_postfix: str = DEFAULT_POST_FIX,
        output_dir: Union[Path, str] = "./",
        output_postfix: str = "trans",
        output_ext: str = ".nii.gz",
        resample: bool = True,
        mode: str = "nearest",
        padding_mode: str = GridSamplePadMode.BORDER,
        scale: Optional[int] = None,
        dtype: DtypeLike = np.float64,
        output_dtype: DtypeLike = np.float32,
        allow_missing_keys: bool = False,
        squeeze_end_dims: bool = True,
        data_root_dir: str = "",
        separate_folder: bool = True,
        print_log: bool = True,
        output_format: str = "",
        writer: Union[Type[image_writer.ImageWriter], str, None] = None,
    ) -> None:
        super().__init__(keys, allow_missing_keys)
        # One meta key (or postfix) per data key.
        self.meta_keys = ensure_tuple_rep(meta_keys, len(self.keys))
        self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
        # Delegate the actual writing to the array-based SaveImage below.
        self.saver = SaveImage(
            output_dir=output_dir,
            output_postfix=output_postfix,
            output_ext=output_ext,
            resample=resample,
            mode=mode,
            padding_mode=padding_mode,
            scale=scale,
            dtype=dtype,
            output_dtype=output_dtype,
            squeeze_end_dims=squeeze_end_dims,
            data_root_dir=data_root_dir,
            separate_folder=separate_folder,
            print_log=print_log,
            output_format=output_format,
            writer=writer,
        )

    def set_options(self, init_kwargs=None, data_kwargs=None, meta_kwargs=None, write_kwargs=None):
        # Forward writer-option overrides to the wrapped SaveImage instance.
        self.saver.set_options(init_kwargs, data_kwargs, meta_kwargs, write_kwargs)

    def __call__(self, data):
        d = dict(data)
        for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix):
            # Derive the metadata key from the data key when not given explicitly.
            if meta_key is None and meta_key_postfix is not None:
                meta_key = f"{key}_{meta_key_postfix}"
            meta_data = d.get(meta_key) if meta_key is not None else None
            self.saver(img=d[key], meta_data=meta_data)
        return d
179
+
180
+
181
class SaveImage(Transform):
    """
    Save an image (torch tensor or numpy ndarray) and its metadata to file.

    Local adaptation of :py:class:`monai.transforms.SaveImage`: the output
    subject name is hard-coded to ``"RTStruct"`` (instead of being derived from
    the input filename), so the written file is
    ``{output_dir}/RTStruct{output_postfix}{output_ext}``.

    Args (as in MONAI):
        output_dir / output_postfix / output_ext: output location and naming.
        output_dtype: data type for saving; ``.png``/``.dcm`` fall back to
            uint8 unless uint8/uint16 was requested.
        resample: resample (if needed) to ``spatial_shape``/``original_affine``
            from the metadata before saving.
        mode / padding_mode: interpolation options used when ``resample=True``.
        scale: clip to [0, 1] then scale to [0, 255] (uint8) or [0, 65535] (uint16).
        dtype: data type used during resampling computation.
        squeeze_end_dims: drop trailing singleton dims after moving channel last.
        data_root_dir: root used to preserve relative folder structure.
        separate_folder: save every file in its own sub-folder when True.
        print_log: print a log line for each saved file.
        output_format: optional extension string overriding writer selection.
        writer: an ``ImageWriter`` subclass, a built-in writer class name, or a
            dotted path; ``None`` selects the default writer for ``output_ext``.
        channel_dim: index of the channel dimension; ``None`` = no channel dim.
    """

    def __init__(
        self,
        output_dir: PathLike = "./",
        output_postfix: str = "trans",
        output_ext: str = ".nii.gz",
        output_dtype: DtypeLike = np.float32,
        resample: bool = True,
        mode: str = "nearest",
        padding_mode: str = GridSamplePadMode.BORDER,
        scale: Optional[int] = None,
        dtype: DtypeLike = np.float64,
        squeeze_end_dims: bool = True,
        data_root_dir: PathLike = "",
        separate_folder: bool = True,
        print_log: bool = True,
        output_format: str = "",
        writer: Union[Type[image_writer.ImageWriter], str, None] = None,
        channel_dim: Optional[int] = 0,
    ) -> None:
        self.folder_layout = FolderLayout(
            output_dir=output_dir,
            postfix=output_postfix,
            extension=output_ext,
            parent=separate_folder,
            makedirs=True,
            data_root_dir=data_root_dir,
        )

        self.output_ext = output_ext.lower() or output_format.lower()
        if isinstance(writer, str):
            writer_, has_built_in = optional_import("monai.data", name=f"{writer}")  # search built-in
            if not has_built_in:
                writer_ = locate(f"{writer}")  # search dotted path
            if writer_ is None:
                raise ValueError(f"writer {writer} not found")
            writer = writer_
        self.writers = image_writer.resolve_writer(self.output_ext) if writer is None else (writer,)
        self.writer_obj = None

        _output_dtype = output_dtype
        # PNG and DICOM writers only handle integer pixel types.
        if self.output_ext == ".png" and _output_dtype not in (np.uint8, np.uint16):
            _output_dtype = np.uint8
        if self.output_ext == ".dcm" and _output_dtype not in (np.uint8, np.uint16):
            _output_dtype = np.uint8
        self.init_kwargs = {"output_dtype": _output_dtype, "scale": scale}
        self.data_kwargs = {"squeeze_end_dims": squeeze_end_dims, "channel_dim": channel_dim}
        self.meta_kwargs = {"resample": resample, "mode": mode, "padding_mode": padding_mode, "dtype": dtype}
        self.write_kwargs = {"verbose": print_log}
        self._data_index = 0

    def set_options(self, init_kwargs=None, data_kwargs=None, meta_kwargs=None, write_kwargs=None):
        """
        Set the options for the underlying writer by updating the `self.*_kwargs` dictionaries.

        The arguments correspond to the following usage:

            - `writer = ImageWriter(**init_kwargs)`
            - `writer.set_data_array(array, **data_kwargs)`
            - `writer.set_metadata(meta_data, **meta_kwargs)`
            - `writer.write(filename, **write_kwargs)`

        """
        if init_kwargs is not None:
            self.init_kwargs.update(init_kwargs)
        if data_kwargs is not None:
            self.data_kwargs.update(data_kwargs)
        if meta_kwargs is not None:
            self.meta_kwargs.update(meta_kwargs)
        if write_kwargs is not None:
            self.write_kwargs.update(write_kwargs)

    def __call__(self, img: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None):
        """
        Args:
            img: data to save into file; channel-first, shape `[C,H,W,[D]]`.
            meta_data: key-value pairs of metadata for the data (taken from
                ``img.meta`` when ``img`` is a ``MetaTensor``).
        """
        meta_data = img.meta if isinstance(img, MetaTensor) else meta_data
        # Subject name is fixed: this pipeline always writes 'RTStruct<ext>'.
        subject = "RTStruct"  # meta_data["patient_name"] if meta_data else str(self._data_index)
        patch_index = None  # meta_data.get(Key.PATCH_INDEX, None) if meta_data else None
        filename = self.folder_layout.filename(subject=f"{subject}", idx=patch_index)
        if meta_data and len(ensure_tuple(meta_data.get("spatial_shape", ()))) == len(img.shape):
            self.data_kwargs["channel_dim"] = None

        err = []
        for writer_cls in self.writers:
            try:
                writer_obj = writer_cls(**self.init_kwargs)
                writer_obj.set_data_array(data_array=img, **self.data_kwargs)
                writer_obj.set_metadata(meta_dict=meta_data, **self.meta_kwargs)
                writer_obj.write(filename, **self.write_kwargs)
                self.writer_obj = writer_obj
            except Exception as e:
                # BUGFIX: record the failure so the final RuntimeError carries
                # the per-writer errors (previously `err` was never appended to
                # and the message was always empty).
                err.append(f"{writer_cls}: {e}")
                print('err', e)
            else:
                self._data_index += 1
                return img
        msg = "\n".join(err)
        raise RuntimeError(
            # BUGFIX: report the actual target filename instead of "(unknown)".
            f"{self.__class__.__name__} cannot find a suitable writer for {filename}.\n"
            " Please install the writer libraries, see also the installation instructions:\n"
            " https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies.\n"
            f" The current registered writers for {self.output_ext}: {self.writers}.\n{msg}"
        )
347
+
predict.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import os.path
3
+ import sys
4
+ import json
5
+ import torch
6
+
7
+ # +++++++++++++ Conversion imports +++++++++++++++++
8
+ sys.path.append(os.path.dirname(os.path.abspath(__file__)))
9
+ sys.path.append(os.path.abspath(".."))
10
+ # +++++++++++++ Conversion imports +++++++++++++++++
11
+
12
+ from dicom_to_nii import convert_ct_dicom_to_nii
13
+ from nii_to_dicom import convert_nii_to_dicom
14
+
15
+
16
+ # AI MONAI libraries
17
+ from monai.networks.nets import UNet
18
+ from monai.networks.layers import Norm
19
+ from monai.inferers import sliding_window_inference
20
+ from monai.data import CacheDataset, DataLoader, Dataset, decollate_batch, NibabelReader
21
+ from monai.utils import first
22
+ from monai.transforms import (
23
+ EnsureChannelFirstd,
24
+ Compose,
25
+ CropForegroundd,
26
+ ScaleIntensityRanged,
27
+ Invertd,
28
+ AsDiscreted,
29
+ ThresholdIntensityd,
30
+ RemoveSmallObjectsd,
31
+ KeepLargestConnectedComponentd,
32
+ Activationsd
33
+ )
34
+ # Preprocessing
35
+ from preprocessing import LoadImaged
36
+ # Postprocessing
37
+ from postprocessing import SaveImaged, add_contours_exist
38
+ import matplotlib.pyplot as plt
39
+ import numpy as np
40
+ from utils import *
41
+
42
+
43
def predict(tempPath, patient_id, ctSeriesInstanceUID, runInterpreter):
    """Run the full BODY auto-segmentation pipeline for one patient.

    Downloads the CT series from Orthanc, converts it to NIfTI, runs the
    trained 3D UNet with sliding-window inference, converts the predicted mask
    back to a DICOM RTSTRUCT and uploads the result to Orthanc.

    Args:
        tempPath: working directory under which per-patient folders are created.
        patient_id: patient identifier, also used as the sub-folder name.
        ctSeriesInstanceUID: SeriesInstanceUID of the CT series to segment.
        runInterpreter: description of the python interpreter (logging only).

    Side effects: creates folders under ``tempPath/patient_id``; reads
    ``best_metric_model.pth`` from the current working directory; writes
    NIfTI and DICOM files; uploads to Orthanc. Exits via ``sys.exit`` when the
    inputs or the model file are missing.
    """

    # Important: Check the input parameters #################
    if not patient_id or patient_id == "":
        sys.exit("No Patient dataset loaded: Load the patient dataset in Study Management.")

    if not ctSeriesInstanceUID or ctSeriesInstanceUID == "":
        sys.exit("No CT series instance UID to load the CT images. Check for CT data in your study")

    print("+++ tempath: ", tempPath)
    print("+++ patient_id: ", patient_id)
    print("+++ CT SeriesInstanceUID: ", ctSeriesInstanceUID)
    print("+++ runInterpreter", runInterpreter)

    # Important: Configure path ###########################
    dir_base = os.path.join(tempPath, patient_id)
    createdir(dir_base)
    dir_ct_dicom = os.path.join(dir_base, 'ct_dicom')
    createdir(dir_ct_dicom)
    dir_ct_nii = os.path.join(dir_base, "ct_nii")
    createdir(dir_ct_nii)
    dir_prediction_nii = os.path.join(dir_base, 'prediction_nii')
    createdir(dir_prediction_nii)
    dir_prediction_dicom = os.path.join(dir_base, 'prediction_dicom')
    createdir(dir_prediction_dicom)

    # predicted files
    predictedNiiFile = os.path.join(dir_prediction_nii, 'RTStruct.nii.gz')
    predictedDicomFile = os.path.join(dir_prediction_dicom, 'predicted_rtstruct.dcm')

    # NOTE(review): relative path — the model is resolved against the current
    # working directory, not this script's directory; confirm the runner's cwd.
    model_path = r'best_metric_model.pth'
    if not os.path.exists(model_path):
        sys.exit("Not found the trained model")

    # Important: Configure path ###########################
    print('** Use python interpreter: ', runInterpreter)
    print('** Patient name: ', patient_id)
    print('** CT Serial instance UID: ', ctSeriesInstanceUID)

    # Fetch the CT series from the Orthanc PACS into dir_ct_dicom.
    downloadSeriesInstanceByModality(ctSeriesInstanceUID, dir_ct_dicom, "CT")
    print("Loading CT from Orthanc done")

    # Conversion DICOM to nii
    # if not os.path.exists(os.path.join(".", dir_ct_nii, patient_id)):
    #     os.makedirs(os.path.join(dir_ct_nii, patient_id))

    # refCT carries the CT geometry (spacing, origin, UIDs) needed later for
    # the nii -> DICOM RTSTRUCT conversion.
    refCT = convert_ct_dicom_to_nii(dir_dicom = dir_ct_dicom, dir_nii = dir_ct_nii, outputname='ct.nii.gz', newvoxelsize = None)

    print("Conversion DICOM to nii done")

    # Dictionary with patient to predict
    test_Data = [{'image':os.path.join(dir_ct_nii,'ct.nii.gz')}]

    # Transformations: clip HU to [-1000, 1560], rescale to [0, 1], crop to
    # the foreground before inference.
    test_pretransforms = Compose(
        [
            LoadImaged(keys=["image"], reader = NibabelReader(), patientname=patient_id),
            EnsureChannelFirstd(keys=["image"]),
            ThresholdIntensityd(keys=["image"], threshold=1560, above=False, cval=1560),
            ThresholdIntensityd(keys=["image"], threshold=-50, above=True, cval=-1000),
            # MaskIntensityd(keys=['image'], mask_key="body"),
            ScaleIntensityRanged(
                keys=["image"], a_min=-1000, a_max=1560,
                b_min=0.0, b_max=1.0, clip=True,
            ),
            CropForegroundd(keys=["image"], source_key="image")
        ]
    )
    # Post-transforms: softmax, invert the pre-transforms on the prediction,
    # binarize, keep the largest component, and save RTStruct.nii.gz.
    test_posttransforms = Compose(
        [
            Activationsd(keys="pred", softmax=True),
            Invertd(
                keys="pred",  # invert the `pred` data field, also support multiple fields
                transform=test_pretransforms,
                orig_keys="image",  # get the previously applied pre_transforms information on the `img` data field,
                # then invert `pred` based on this information. we can use same info
                # for multiple fields, also support different orig_keys for different fields
                nearest_interp=False,  # don't change the interpolation mode to "nearest" when inverting transforms
                # to ensure a smooth output, then execute `AsDiscreted` transform
                to_tensor=True,  # convert to PyTorch Tensor after inverting
            ),
            AsDiscreted(keys="pred", argmax=True, to_onehot=2, threshold=0.5),
            KeepLargestConnectedComponentd(keys="pred",is_onehot=True),
            SaveImaged(keys="pred", output_postfix='', separate_folder=False, output_dir=dir_prediction_nii, resample=False)
        ]
    )

    # Define DataLoader using MONAI, CacheDataset needs to be used
    test_ds = CacheDataset(data=test_Data, transform=test_pretransforms)
    # NOTE(review): shuffle=True is harmless with a single case but is
    # unnecessary for inference.
    test_loader = DataLoader(test_ds, batch_size=1, shuffle=True, num_workers=1)

    # check_ds = Dataset(data=test_Data, transform=test_pretransforms)
    # check_loader = DataLoader(check_ds, batch_size=1)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # UNet hyper-parameters must match those used for training the checkpoint.
    model_param = dict(
        spatial_dims=3,
        in_channels=1,
        out_channels=2,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
        norm=Norm.BATCH
    )
    model = UNet(**model_param)
    # trained_model_dict = torch.load(model_path, map_location=torch.device('cpu'))
    trained_model_dict = torch.load(model_path, map_location=torch.device('cuda:0' if torch.cuda.is_available() else "cpu"))
    model.load_state_dict(trained_model_dict)#['state_dict'])

    # model.load_state_dict(torch.load(model_path))
    model = model.to(device)
    # print("MODEL",model)

    model.eval()
    # Single-case pipeline: only the first (and only) batch is processed.
    d = first(test_loader)
    images = d["image"].to(device)
    d['pred'] = sliding_window_inference(inputs=images, roi_size=(96,96,64),sw_batch_size=1, predictor = model)
    # Post-transforms also write RTStruct.nii.gz via SaveImaged.
    d['pred'] = [test_posttransforms(i) for i in decollate_batch(d)]

    # model.cpu()

    # Re-encode the saved one-hot prediction and record structure presence flags.
    add_contours_exist(preddir = dir_prediction_nii, refCT = refCT)
    # Conversion nii to DICOM
    convert_nii_to_dicom(dicomctdir = dir_ct_dicom, predictedNiiFile = predictedNiiFile, predictedDicomFile = predictedDicomFile, predicted_structures=['BODY'], rtstruct_colors=[[255,0,0]], refCT = refCT)

    print("Conversion nii to DICOM done")

    # Transfer predicted DICOM to Orthanc
    uploadDicomToOrthanc(predictedDicomFile)

    print("Upload predicted result to Orthanc done")

    print("Body Segmentation prediction done")
176
+
177
'''
Prediction parameters provided by the server. Select the parameters to be used for prediction:
[1] tempPath: The path where the predict.py is stored,
[2] patientname: the identifier of the loaded patient dataset,
[3] ctSeriesInstanceUID: Series instance UID for data set with modality = CT. To predict 'MR' modality data, retrieve the CT UID by the code (see Precision Code)
[4] rtStructSeriesInstanceUID: Series instance UID for modality = RTSTRUCT
[5] regSeriesInstanceUID: Series instance UID for modality = REG,
[6] runInterpreter: The python version for the python environment
[7] oarList: only for dose prediction. For contour prediction oarList = []
[8] tvList: only for dose prediction. For contour prediction tvList = []
'''
188
+
189
if __name__ == '__main__':
    # Invoked by the server runner; argv layout is described in the module
    # docstring above: [1] tempPath, [2] patient_id, [3] ctSeriesInstanceUID,
    # [6] runInterpreter ([4] RTSTRUCT UID and [5] REG UID are unused here).
    predict(tempPath=sys.argv[1], patient_id=sys.argv[2], ctSeriesInstanceUID=sys.argv[3], runInterpreter=sys.argv[6])
191
+
192
+
preprocessing.py ADDED
@@ -0,0 +1,309 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from monai.transforms import MapTransform, Transform
2
+ import numpy as np
3
+ from monai.config import KeysCollection
4
+ from typing import Dict, Hashable, Mapping, Optional, Type, Union, Sequence
5
+ import sys
6
+ from pathlib import Path
7
+ from monai.config import DtypeLike, KeysCollection
8
+ from monai.data.image_reader import ImageReader
9
+ from monai.transforms.transform import MapTransform
10
+ from monai.utils import ensure_tuple, ensure_tuple_rep
11
+ from monai.utils.enums import PostFix
12
+ from monai.data.meta_tensor import MetaTensor
13
+ from monai.transforms.utility.array import EnsureChannelFirst
14
+ from monai.utils import ImageMetaKey as Key
15
+ from monai.utils import ensure_tuple, ensure_tuple_rep, convert_to_dst_type
16
+ from monai.data import NibabelReader
17
+ from monai.config import DtypeLike, KeysCollection, PathLike, NdarrayOrTensor
18
+
19
+ DEFAULT_POST_FIX = PostFix.meta()
20
+
21
class LoadImaged(MapTransform):
    """
    Dictionary-based wrapper of this module's :py:class:`LoadImage`.

    Loads both image data and metadata for each key. When a key maps to a list
    of files, the arrays are stacked along a new first dimension and the
    metadata of the first image represents the stacked result (all stacked
    images are expected to share the same affine).

    The metadata for each key is stored under ``meta_keys`` or, when that is
    None, under ``key_{meta_key_postfix}``.
    """

    def __init__(
        self,
        keys: KeysCollection,
        reader: Optional[Union[ImageReader, str]] = None,
        patientname: str = '',
        dtype: DtypeLike = np.float32,
        meta_keys: Optional[KeysCollection] = None,
        meta_key_postfix: str = DEFAULT_POST_FIX,
        overwriting: bool = False,
        image_only: bool = False,
        ensure_channel_first: bool = False,
        simple_keys: bool = False,
        prune_meta_pattern: Optional[str] = None,
        prune_meta_sep: str = ".",
        allow_missing_keys: bool = False,
        *args,
        **kwargs,
    ) -> None:
        """
        Args:
            keys: keys of the corresponding items to be transformed.
            reader: reader to load image file and metadata; forwarded to
                ``LoadImage`` (which currently always reads with
                ``NibabelReader`` regardless of this value).
            patientname: the patient name; must not be empty.
            dtype: if not None, convert the loaded image data to this data type.
            meta_keys: explicit key(s) used to store the metadata dictionary;
                if None, ``key_{meta_key_postfix}`` is used per key.
            meta_key_postfix: postfix used to build the metadata key when
                ``meta_keys`` is None, default ``meta_dict``.
            overwriting: whether to allow overwriting existing metadata of the
                same key; if False, an existing key raises ``KeyError``.
            image_only: if True, store only the image volume per key;
                otherwise also store the metadata dictionary.
            ensure_channel_first: if True, convert the loaded image array to
                `channel first` shape.
            simple_keys: whether to remove redundant metadata keys.
            prune_meta_pattern: combined with ``prune_meta_sep``, a regular
                expression used to match and prune keys in the (nested)
                metadata dictionary; None disables pruning.
            prune_meta_sep: separator used with ``prune_meta_pattern``.
            allow_missing_keys: don't raise an exception if a key is missing.
            args: additional parameters for the reader.
            kwargs: additional parameters for the reader.

        Raises:
            ValueError: when ``patientname`` is empty, or ``meta_keys`` length
                does not match ``keys``.
            TypeError: when ``meta_key_postfix`` is not a str.
        """
        super().__init__(keys, allow_missing_keys)

        # Fail fast on invalid arguments BEFORE constructing the loader, so a
        # bad call does no wasted work.
        if len(patientname) == 0:
            raise ValueError("Patient name should not be empty.")
        if not isinstance(meta_key_postfix, str):
            raise TypeError(f"meta_key_postfix must be a str but is {type(meta_key_postfix).__name__}.")

        self._loader = LoadImage(
            reader,
            patientname,
            image_only,
            dtype,
            ensure_channel_first,
            simple_keys,
            prune_meta_pattern,
            prune_meta_sep,
            *args,
            **kwargs,
        )

        self.meta_keys = ensure_tuple_rep(None, len(self.keys)) if meta_keys is None else ensure_tuple(meta_keys)
        if len(self.keys) != len(self.meta_keys):
            raise ValueError("meta_keys should have the same length as keys.")
        self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
        self.overwriting = overwriting

    def register(self, reader: ImageReader):
        # NOTE(review): this module's LoadImage does not define register(), so
        # calling this raises AttributeError — confirm whether it is still
        # needed or should delegate differently.
        self._loader.register(reader)

    def __call__(self, data, reader: Optional[ImageReader] = None):
        """
        Load image (and metadata) for every key in ``data``.

        Raises:
            KeyError: when not ``self.overwriting`` and the metadata key
                already exists in ``data``.
        """
        d = dict(data)
        for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix):
            # Distinct name so the `data` input mapping is not shadowed.
            loaded = self._loader(d[key], reader)
            if self._loader.image_only:
                d[key] = loaded
            else:
                if not isinstance(loaded, (tuple, list)):
                    raise ValueError("loader must return a tuple or list (because image_only=False was used).")
                d[key] = loaded[0]
                if not isinstance(loaded[1], dict):
                    raise ValueError("metadata must be a dict.")
                meta_key = meta_key or f"{key}_{meta_key_postfix}"
                if meta_key in d and not self.overwriting:
                    raise KeyError(f"Metadata with key {meta_key} already exists and overwriting=False.")
                d[meta_key] = loaded[1]

        return d
164
+
165
def switch_endianness(data, new="<"):
    """
    Recursively convert the byte order of ``data`` to ``new``.

    Args:
        data: input to be converted. Containers (tuple, list, dict) are
            traversed recursively; numpy arrays are converted; scalar
            primitives (bool, str, float, int, None) pass through unchanged.
        new: the target endianness, currently support "<" or ">".

    Returns:
        The converted input (same structure, target byte order).

    Raises:
        NotImplementedError: if ``new`` is not "<" or ">".
        RuntimeError: if ``data`` is of an unsupported type.
    """
    if isinstance(data, np.ndarray):
        # default to system endian
        sys_native = "<" if (sys.byteorder == "little") else ">"
        current_ = sys_native if data.dtype.byteorder not in ("<", ">") else data.dtype.byteorder
        if new not in ("<", ">"):
            raise NotImplementedError(f"Not implemented option new={new}.")
        if current_ != new:
            # `ndarray.newbyteorder()` was removed in NumPy 2.0; astype() with
            # a re-ordered dtype performs the same value-preserving conversion
            # that byteswap().newbyteorder() did.
            data = data.astype(data.dtype.newbyteorder(new))
    elif isinstance(data, tuple):
        data = tuple(switch_endianness(x, new) for x in data)
    elif isinstance(data, list):
        data = [switch_endianness(x, new) for x in data]
    elif isinstance(data, dict):
        data = {k: switch_endianness(v, new) for k, v in data.items()}
    elif not isinstance(data, (bool, str, float, int, type(None))):
        raise RuntimeError(f"Unknown type: {type(data).__name__}")
    return data
190
+
191
class LoadImage(Transform):
    """
    Load an image file (or list of files) from a provided path.

    This project-specific variant always reads with
    :py:class:`monai.data.NibabelReader` (NIfTI input) and records the patient
    name in the output metadata under ``"patient_name"``. It returns a
    MetaTensor, plus the metadata dict and the nibabel header when
    ``image_only`` is False.
    """

    def __init__(
        self,
        reader="NibabelReader",
        patientname: str = "",
        image_only: bool = False,
        dtype: DtypeLike = np.float32,
        ensure_channel_first: bool = False,
        simple_keys: bool = False,
        prune_meta_pattern: Optional[str] = None,
        prune_meta_sep: str = ".",
        *args,
        **kwargs,
    ) -> None:
        """
        Args:
            reader: kept for API compatibility; loading always uses
                ``NibabelReader`` (see ``__call__``).
            patientname: patient name stored in the output metadata.
            image_only: if True return only the image MetaTensor, otherwise
                return image, metadata and nibabel header.
            dtype: if not None convert the loaded image to this data type.
            ensure_channel_first: if True, convert the loaded image array to
                `channel first` shape.
            simple_keys: whether to remove redundant metadata keys.
            prune_meta_pattern: combined with ``prune_meta_sep``, a regular
                expression used to match and prune metadata keys; None
                disables pruning.
            prune_meta_sep: separator used with ``prune_meta_pattern``.
            args: accepted for API compatibility; unused.
            kwargs: accepted for API compatibility; unused.

        Note:
            The transform returns a MetaTensor unless ``set_track_meta(False)``
            has been used, in which case a ``torch.Tensor`` is returned.
        """
        self.auto_select = reader is None
        self.image_only = image_only
        self.dtype = dtype
        self.ensure_channel_first = ensure_channel_first
        self.simple_keys = simple_keys
        self.pattern = prune_meta_pattern
        self.sep = prune_meta_sep
        self.patientname = patientname

    def __call__(self, filename: Union[Sequence[PathLike], PathLike], reader: Optional[ImageReader] = NibabelReader):
        """
        Load image file and metadata from the given filename(s).

        Args:
            filename: path, file-like object, or list of files. A list is
                stacked together as multi-channel data and the first filename
                is stored in the metadata.
            reader: kept for API compatibility; a fresh ``NibabelReader`` is
                always used regardless of this value.

        Returns:
            The image MetaTensor when ``image_only`` is True; otherwise a
            3-tuple ``(image, image metadata, nibabel header)``.

        Raises:
            ValueError: when the reader's metadata is not a dict.
        """
        filename = tuple(f"{Path(s).expanduser()}" for s in ensure_tuple(filename))  # allow Path objects
        # The runtime `reader` argument is intentionally ignored: this loader
        # only supports NIfTI input via NibabelReader.
        nib_reader = NibabelReader()
        img = nib_reader.read(filename)
        header = img.header  # keep the raw nibabel header for the caller
        # get_data() loads the volume once; the previous implementation also
        # called img.get_fdata() first and discarded the result, reading the
        # whole volume from disk twice.
        img_array: NdarrayOrTensor
        img_array, meta_data = nib_reader.get_data(img)
        img_array = convert_to_dst_type(img_array, dst=img_array, dtype=self.dtype)[0]
        if not isinstance(meta_data, dict):
            raise ValueError("`meta_data` must be a dict.")
        # make sure all elements in metadata are little endian
        meta_data = switch_endianness(meta_data, "<")
        meta_data["patient_name"] = self.patientname
        meta_data[Key.FILENAME_OR_OBJ] = f"{ensure_tuple(filename)[0]}"  # Path obj should be strings for data loader
        img = MetaTensor.ensure_torch_and_prune_meta(
            img_array, meta_data, self.simple_keys, pattern=self.pattern, sep=self.sep
        )
        if self.ensure_channel_first:
            img = EnsureChannelFirst()(img)
        if self.image_only:
            return img
        # NOTE(review): due to operator precedence this is always a 3-tuple
        # (img, img.meta, header-or-meta_data); kept as-is for backward
        # compatibility since LoadImaged only consumes elements 0 and 1.
        return img, img.meta, header if isinstance(img, MetaTensor) else meta_data
requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ monai==1.3.0