andreped committed
Commit: 3120fa9
Parent: 47dd310

refactored to PEP8

.github/workflows/build.yml CHANGED
@@ -88,10 +88,6 @@ jobs:
 
       - name: Install wheel
        run: |
-          python -V
-          python3 -V
-          pip -V
-          pip3 -V
          pip3 install --find-links=${{github.workspace}} livermask
 
      - name: Download test sample
livermask/livermask.py CHANGED
@@ -1,5 +1,6 @@
 import numpy as np
-import os, sys
+import os
+import sys
 from tqdm import tqdm
 import nibabel as nib
 from nibabel.processing import resample_to_output, resample_from_to
livermask/utils/process.py CHANGED
@@ -1,6 +1,6 @@
 import numpy as np
 import os, sys
-from tqdm import tqdm
+from tqdm import tqdm
 import nibabel as nib
 from nibabel.processing import resample_to_output, resample_from_to
 from scipy.ndimage import zoom
@@ -41,7 +41,7 @@ def liver_segmenter_wrapper(curr, output, cpu, verbose, multiple_flag, name):
     # run inference in a different process
     mp.set_start_method('spawn', force=True)
     with mp.Pool(processes=1, maxtasksperchild=1) as p:  # , initializer=initializer)
-        result = p.map_async(liver_segmenter, ((curr, output, cpu, verbose, multiple_flag, name), ))
+        result = p.map_async(liver_segmenter, ((curr, output, cpu, verbose, multiple_flag, name),))
         log.info("getting result from process...")
         ret = result.get()[0]
         return ret
@@ -67,7 +67,7 @@ def liver_segmenter(params):
         data = zoom(data, [img_size / data.shape[0], img_size / data.shape[1], 1.0], order=1)
 
         # intensity normalization
-        intensity_clipping_range = [-150, 250] # HU clipping limits (Pravdaray's configs)
+        intensity_clipping_range = [-150, 250]  # HU clipping limits (Pravdaray's configs)
         data = intensity_normalization(volume=data, intensity_clipping_range=intensity_clipping_range)
 
         # fix orientation
@@ -78,8 +78,10 @@ def liver_segmenter(params):
         # predict on data
         pred = np.zeros_like(data).astype(np.float32)
         for i in tqdm(range(data.shape[-1]), "pred: ", disable=not verbose):
-            pred[..., i] = model.predict(np.expand_dims(np.expand_dims(np.expand_dims(data[..., i], axis=0), axis=-1), axis=0))[0, ..., 1]
-        del data
+            pred[..., i] = \
+                model.predict(np.expand_dims(np.expand_dims(np.expand_dims(data[..., i], axis=0), axis=-1), axis=0))[
+                    0, ..., 1]
+        del data
 
         # threshold
         pred = (pred >= 0.4).astype(int)
@@ -123,7 +125,7 @@ def liver_segmenter(params):
         if multiple_flag:
             nib.save(resampled_lab, output + "/" + curr.split("/")[-1].split(".")[0] + "-livermask.nii")
         else:
-            nib.save(resampled_lab, output + "-livermask.nii")
+            nib.save(resampled_lab, output + "-livermask.nii")
 
         return pred
     except KeyboardInterrupt:
@@ -131,7 +133,6 @@ def liver_segmenter(params):
 
 
 def vessel_segmenter(curr, output, cpu, verbose, multiple_flag, liver_mask, name_vessel):
-
     # check if cupy is available, if not, set cpu=True
     try:
         import cupy
@@ -150,7 +151,7 @@ def vessel_segmenter(curr, output, cpu, verbose, multiple_flag, liver_mask, name
     nib_volume = nib.load(curr)
     new_spacing = [1., 1., 1.]
     resampled_volume = resample_to_output(nib_volume, new_spacing, order=1)
-    #resampled_volume = nib_volume
+    # resampled_volume = nib_volume
     org = resampled_volume.get_data().astype('float32')
 
     # HU clipping
@@ -159,10 +160,10 @@ def vessel_segmenter(curr, output, cpu, verbose, multiple_flag, liver_mask, name
     org[org > intensity_clipping_range[1]] = intensity_clipping_range[1]
 
     # Calculate maximum of number of patch at each side
-    ze,ye,xe = org.shape
-    xm = int(math.ceil((float(xe)/float(config.patch['patchside']))))
-    ym = int(math.ceil((float(ye)/float(config.patch['patchside']))))
-    zm = int(math.ceil((float(ze)/float(config.patch['patchside']))))
+    ze, ye, xe = org.shape
+    xm = int(math.ceil((float(xe) / float(config.patch['patchside']))))
+    ym = int(math.ceil((float(ye) / float(config.patch['patchside']))))
+    zm = int(math.ceil((float(ze) / float(config.patch['patchside']))))
 
     margin = ((0, config.patch['patchside']),
               (0, config.patch['patchside']),
@@ -171,8 +172,10 @@ def vessel_segmenter(curr, output, cpu, verbose, multiple_flag, liver_mask, name
     org = chainer.Variable(xp.array(org[np.newaxis, np.newaxis, :], dtype=xp.float32))
 
     # init prediction array
-    prediction_map = np.zeros((ze + config.patch['patchside'], ye + config.patch['patchside'], xe + config.patch['patchside']))
-    probability_map = np.zeros((config.unet['number_of_label'], ze+config.patch['patchside'], ye + config.patch['patchside'], xe + config.patch['patchside']))
+    prediction_map = np.zeros(
+        (ze + config.patch['patchside'], ye + config.patch['patchside'], xe + config.patch['patchside']))
+    probability_map = np.zeros((config.unet['number_of_label'], ze + config.patch['patchside'],
+                                ye + config.patch['patchside'], xe + config.patch['patchside']))
 
     log.info("predicting...")
     # Patch loop
@@ -182,34 +185,38 @@ def vessel_segmenter(curr, output, cpu, verbose, multiple_flag, liver_mask, name
         zi = int(s / (ym * xm)) * config.patch['patchside']
 
         # check if current region contains any liver mask, if not, skip
-        parenchyma_patch = liver_mask[zi:zi + config.patch['patchside'], yi:yi + config.patch['patchside'], xi:xi + config.patch['patchside']]
-        #if np.count_nonzero(parenchyma_patch) == 0:
+        parenchyma_patch = liver_mask[zi:zi + config.patch['patchside'], yi:yi + config.patch['patchside'],
+                                      xi:xi + config.patch['patchside']]
+        # if np.count_nonzero(parenchyma_patch) == 0:
         if np.mean(parenchyma_patch) < 0.25:
             continue
 
         # Extract patch from original image
-        patch = org[:, :, zi:zi + config.patch['patchside'], yi:yi + config.patch['patchside'], xi:xi + config.patch['patchside']]
+        patch = org[:, :, zi:zi + config.patch['patchside'], yi:yi + config.patch['patchside'],
+                    xi:xi + config.patch['patchside']]
         with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
             probability_patch = unet(patch)
 
         # Generate probability map
         probability_patch = probability_patch.data
-        #if args.gpu >= 0:
+        # if args.gpu >= 0:
         if not cpu:
             probability_patch = chainer.cuda.to_cpu(probability_patch)
         for ch in range(probability_patch.shape[1]):
-            probability_map[ch, zi:zi + config.patch['patchside'],yi:yi + config.patch['patchside'], xi:xi + config.patch['patchside']] = probability_patch[0, ch, :, :, :]
+            probability_map[ch, zi:zi + config.patch['patchside'], yi:yi + config.patch['patchside'],
+                            xi:xi + config.patch['patchside']] = probability_patch[0, ch, :, :, :]
 
         prediction_patch = np.argmax(probability_patch, axis=1)
 
-        prediction_map[zi:zi + config.patch['patchside'], yi:yi + config.patch['patchside'], xi:xi + config.patch['patchside']] = prediction_patch[0, :, :, :]
+        prediction_map[zi:zi + config.patch['patchside'], yi:yi + config.patch['patchside'],
+                       xi:xi + config.patch['patchside']] = prediction_patch[0, :, :, :]
 
-    probability_map = probability_map[:, :ze, :ye, :xe]
+    # probability_map = probability_map[:, :ze, :ye, :xe]
    prediction_map = prediction_map[:ze, :ye, :xe]
 
     # post-process prediction
-    #prediction_map = prediction_map + liver_mask
-    #prediction_map[prediction_map > 0] = 1
+    # prediction_map = prediction_map + liver_mask
+    # prediction_map[prediction_map > 0] = 1
 
     # filter segmented vessels outside the predicted liver parenchyma
     pred = prediction_map.astype(np.uint8)
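
Note on the `liver_segmenter_wrapper` hunk above: the commit only re-wraps the `map_async` call, but the pattern it touches is worth spelling out — inference runs in a single-use spawned child process so that all framework and GPU state is torn down (and its memory returned to the OS) once the result comes back. A minimal, self-contained sketch of that pattern; `_infer` and `run_isolated` are hypothetical stand-ins, not functions from this repo:

import multiprocessing as mp


def _infer(params):
    # stand-in for liver_segmenter: unpack the argument tuple and
    # return the prediction; the real function loads its model here,
    # so all heavy state lives only in the child process
    curr, output = params
    return "segmented %s -> %s" % (curr, output)


def run_isolated(curr, output):
    # 'spawn' gives the worker a fresh interpreter (no inherited
    # CUDA/TensorFlow state); maxtasksperchild=1 retires the worker
    # after one task, releasing everything it allocated
    mp.set_start_method('spawn', force=True)
    with mp.Pool(processes=1, maxtasksperchild=1) as p:
        result = p.map_async(_infer, ((curr, output),))
        return result.get()[0]


if __name__ == '__main__':
    print(run_isolated("ct.nii", "ct-livermask.nii"))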
livermask/utils/unet3d.py CHANGED
@@ -1,23 +1,17 @@
-#coding:utf-8
-'''
-* @auther mygw
-* @date 2018-6-15
-'''
-
 import chainer
 import chainer.functions as F
 import chainer.links as L
 
-class UNet3D(chainer.Chain):
 
+class UNet3D(chainer.Chain):
     def __init__(self, num_of_label):
         w = chainer.initializers.HeNormal()
         super(UNet3D, self).__init__()
         with self.init_scope():
             # encoder pass
-            self.ce0 = L.ConvolutionND(ndim=3, in_channels=1, out_channels=16, ksize=3, pad=1,initialW=w)
+            self.ce0 = L.ConvolutionND(ndim=3, in_channels=1, out_channels=16, ksize=3, pad=1, initialW=w)
             self.bne0 = L.BatchNormalization(16)
-            self.ce1 = L.ConvolutionND(ndim=3, in_channels=16, out_channels=32, ksize=3, pad=1,initialW=w)
+            self.ce1 = L.ConvolutionND(ndim=3, in_channels=16, out_channels=32, ksize=3, pad=1, initialW=w)
             self.bne1 = L.BatchNormalization(32)
 
             self.ce2 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=32, ksize=3, pad=1, initialW=w)
@@ -31,22 +25,23 @@ class UNet3D(chainer.Chain):
             # decoder pass
             self.cd4 = L.ConvolutionND(ndim=3, in_channels=64, out_channels=128, ksize=3, pad=1, initialW=w)
             self.bnd4 = L.BatchNormalization(128)
-            self.deconv2 = L.DeconvolutionND(ndim=3, in_channels=128, out_channels=128, ksize=2, stride=2, initialW=w, nobias=True)
+            self.deconv2 = L.DeconvolutionND(ndim=3, in_channels=128, out_channels=128, ksize=2, stride=2, initialW=w,
+                                             nobias=True)
 
-            self.cd3 = L.ConvolutionND(ndim=3, in_channels=64+128, out_channels=64, ksize=3, pad=1, initialW=w)
+            self.cd3 = L.ConvolutionND(ndim=3, in_channels=64 + 128, out_channels=64, ksize=3, pad=1, initialW=w)
             self.bnd3 = L.BatchNormalization(64)
             self.cd2 = L.ConvolutionND(ndim=3, in_channels=64, out_channels=64, ksize=3, pad=1, initialW=w)
             self.bnd2 = L.BatchNormalization(64)
-            self.deconv1 = L.DeconvolutionND(ndim=3, in_channels=64, out_channels=64, ksize=2, stride=2, initialW=w,nobias=True)
+            self.deconv1 = L.DeconvolutionND(ndim=3, in_channels=64, out_channels=64, ksize=2, stride=2, initialW=w,
+                                             nobias=True)
 
-            self.cd1 = L.ConvolutionND(ndim=3, in_channels=32+64, out_channels=32, ksize=3, pad=1, initialW=w)
+            self.cd1 = L.ConvolutionND(ndim=3, in_channels=32 + 64, out_channels=32, ksize=3, pad=1, initialW=w)
             self.bnd1 = L.BatchNormalization(32)
             self.cd0 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=32, ksize=3, pad=1, initialW=w)
             self.bnd0 = L.BatchNormalization(32)
             self.lcl = L.ConvolutionND(ndim=3, in_channels=32, out_channels=num_of_label, ksize=1, pad=0, initialW=w)
 
     def __call__(self, x):
-
         # encoder pass
         e0 = F.relu(self.bne0(self.ce0(x)))
         e1 = F.relu(self.bne1(self.ce1(e0)))
@@ -69,25 +64,24 @@ class UNet3D(chainer.Chain):
         del d1
         lcl = F.softmax(self.lcl(d0), axis=1)
 
-        return lcl #(batchsize, ch, z, y, x)
-
+        return lcl  # (batchsize, ch, z, y, x)
 
     def cropping(self, input, ref):
         '''
         * @param input encoder feature map
         * @param ref decoder feature map
         '''
-        edgez = (input.shape[2] - ref.shape[2])/2
-        edgey = (input.shape[3] - ref.shape[3])/2
-        edgex = (input.shape[4] - ref.shape[4])/2
+        edgez = (input.shape[2] - ref.shape[2]) / 2
+        edgey = (input.shape[3] - ref.shape[3]) / 2
+        edgex = (input.shape[4] - ref.shape[4]) / 2
         edgez = int(edgex)
         edgey = int(edgey)
         edgex = int(edgez)
 
-        X = F.split_axis(input,(edgex,int(input.shape[4]-edgex)),axis=4)
+        X = F.split_axis(input, (edgex, int(input.shape[4] - edgex)), axis=4)
         X = X[1]
-        X = F.split_axis(X,(edgey,int(X.shape[3]-edgey)),axis=3)
+        X = F.split_axis(X, (edgey, int(X.shape[3] - edgey)), axis=3)
         X = X[1]
-        X = F.split_axis(X,(edgez,int (X.shape[2]-edgez)),axis=2)
+        X = F.split_axis(X, (edgez, int(X.shape[2] - edgez)), axis=2)
         X = X[1]
         return X
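
For context on the `cropping` method reformatted above: it center-crops an encoder feature map of shape `(N, C, z, y, x)` to the spatial size of a decoder feature map before skip-connection concatenation, trimming an equal margin from both ends of each axis. A plain-NumPy sketch of the intended center-crop arithmetic; `center_crop_3d` is a hypothetical helper assuming even size differences, not code from this repo:

import numpy as np


def center_crop_3d(enc, ref):
    # trim an equal margin per side so the encoder map's spatial
    # dims (z, y, x) match the decoder map's
    ez = (enc.shape[2] - ref.shape[2]) // 2
    ey = (enc.shape[3] - ref.shape[3]) // 2
    ex = (enc.shape[4] - ref.shape[4]) // 2
    return enc[:, :,
               ez:enc.shape[2] - ez,
               ey:enc.shape[3] - ey,
               ex:enc.shape[4] - ex]


enc = np.zeros((1, 16, 12, 12, 12))
ref = np.zeros((1, 16, 8, 8, 8))
assert center_crop_3d(enc, ref).shape == (1, 16, 8, 8, 8)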
livermask/utils/utils.py CHANGED
@@ -6,14 +6,12 @@ from .unet3d import UNet3D
 
 def get_model(output):
     url = "https://drive.google.com/uc?id=12or5Q79at2BtLgQ7IaglNGPFGRlEgEHc"
-    md5 = "ef5a6dfb794b39bea03f5496a9a49d4d"
-    gdown.cached_download(url, output) #, md5=md5) #, postprocess=gdown.extractall)
+    gdown.cached_download(url, output)
 
 
 def get_vessel_model(output):
     url = "https://drive.google.com/uc?id=1-8VNoRmIeiF1uIuWBqmZXz_6dIQFSAxN"
-    #md5 = "ef5a6dfb794b39bea03f5496a9a49d4d"
-    gdown.cached_download(url, output) #, md5=md5)
+    gdown.cached_download(url, output)
 
 
 def load_vessel_model(path, cpu):
@@ -22,7 +20,6 @@ def load_vessel_model(path, cpu):
     if not cpu:
         chainer.cuda.get_device_from_id(0).use()
         unet.to_gpu()
-
     xp = unet.xp
     return unet, xp
 
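A note on the `gdown` call kept in both functions: `gdown.cached_download` only downloads when the target file is missing, so dropping the commented-out `md5` argument removes checksum validation of the cached file but not the caching itself. A small usage sketch; the local filename is illustrative:

import gdown

# re-running this is a no-op once the file already exists at the
# given path; pass md5=... to also validate the cached copy
url = "https://drive.google.com/uc?id=12or5Q79at2BtLgQ7IaglNGPFGRlEgEHc"
gdown.cached_download(url, "liver-model.h5")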
 
livermask/utils/yaml_utils.py CHANGED
@@ -1,13 +1,8 @@
-import shutil
-import sys, os, time
 import yaml
 
 
-# Copy from tgans repo.
+# https://github.com/pfnet-research/sngan_projection/blob/master/source/yaml_utils.py
 class Config(object):
-    '''
-    'https://github.com/pfnet-research/sngan_projection/blob/master/source/yaml_utils.py'
-    '''
     def __init__(self, config_dict):
         self.config = config_dict
 
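
For reference, `Config` is consumed elsewhere in the package (process.py reads values such as `config.patch['patchside']` and `config.unet['number_of_label']`), so the wrapper is typically built from a parsed YAML file. A minimal sketch, assuming the class forwards attribute lookups to the stored dict as the linked sngan_projection source does; the file path is illustrative:

import yaml

from livermask.utils.yaml_utils import Config

# parse a YAML config and wrap it; downstream code then reads
# entries like config.patch['patchside']
with open("vessel_config.yml") as f:
    config = Config(yaml.safe_load(f))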