andreped committed on
Commit 362a9f8
1 Parent(s): 106be34

Added an option to segment the portal and hepatic veins (as a single class). Liver segmentation now runs in a separate Process, since TensorFlow cannot release GPU memory on demand - it is only freed when the session dies, which in practice means when the owning process finishes or is killed.
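
The process-isolation pattern the commit adopts is sketched below, mirroring liver_segmenter_wrapper in livermask/utils/process.py; the worker body and the names _worker and run_in_fresh_process are illustrative, not part of the commit.

import multiprocessing as mp

def _worker(params):
    # All TensorFlow work happens inside the worker; the CUDA context it
    # creates is torn down when the worker process exits, which is the only
    # reliable way to hand the GPU memory back to the system.
    import tensorflow as tf  # imported here so the parent never holds a context
    (path,) = params
    return float(tf.reduce_sum(tf.ones([2, 2])))

def run_in_fresh_process(path):
    # maxtasksperchild=1 retires the worker after a single task, so every
    # call gets a fresh process and an empty GPU.
    with mp.Pool(processes=1, maxtasksperchild=1) as pool:
        return pool.map_async(_worker, ((path,),)).get()[0]

if __name__ == "__main__":
    print(run_in_fresh_process("dummy.nii"))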

.gitignore CHANGED
@@ -4,3 +4,4 @@ dist/
  livermask.egg-info/
  *.h5
  *.nii
+ *__pycache__/
livermask/configs/base.yml ADDED
@@ -0,0 +1,22 @@
+ batchsize: 10
+ iteration: 55000
+ iteration_decay_start: 50000
+ seed: 0
+ display_interval: 100
+ snapshot_interval: 5000
+ evaluation_interval: 5000
+
+ patch:
+   patchside: 64
+
+ unet:
+   fn: model.py
+   number_of_label: 2
+
+ updater:
+   fn: updater.py
+
+ adam:
+   alpha: 0.0001
+   beta1: 0.9
+   beta2: 0.999
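
These hyperparameters are consumed at inference time in livermask/utils/process.py, via lookups such as config.patch['patchside'] and config.unet['number_of_label']. A quick sanity check of how the file parses, assuming PyYAML and a path relative to the repo root:

import yaml

with open("livermask/configs/base.yml") as f:
    cfg = yaml.safe_load(f)

# Nested mappings come back as plain dicts, which is what the Config
# wrapper in utils/yaml_utils.py indexes into.
assert cfg["patch"]["patchside"] == 64      # cubic patch side for the 3D U-Net
assert cfg["unet"]["number_of_label"] == 2  # background vs. vessel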
livermask/livermask.py CHANGED
@@ -1,11 +1,10 @@
- import numpy as np
+ import numpy as np
  import os, sys
  from tqdm import tqdm
  import nibabel as nib
  from nibabel.processing import resample_to_output, resample_from_to
  from scipy.ndimage import zoom
  from tensorflow.python.keras.models import load_model
- import gdown
  from skimage.morphology import remove_small_holes, binary_dilation, binary_erosion, ball
  from skimage.measure import label, regionprops
  import warnings
@@ -13,51 +12,35 @@ import argparse
  import pkg_resources
  import tensorflow as tf
  import logging as log
+ import chainer
+ import math
+ from unet3d import UNet3D
+ import yaml
+ from tensorflow.keras import backend as K
+ from numba import cuda
+ from utils.process import liver_segmenter_wrapper, vessel_segmenter, intensity_normalization
+ from utils.utils import verboseHandler
+ import logging as log
+ from utils.utils import get_model, get_vessel_model


  os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'  # due to this: https://github.com/tensorflow/tensorflow/issues/35029
  warnings.filterwarnings('ignore', '.*output shape of zoom.*')  # mute some warnings


- def intensity_normalization(volume, intensity_clipping_range):
-     result = np.copy(volume)
-
-     result[volume < intensity_clipping_range[0]] = intensity_clipping_range[0]
-     result[volume > intensity_clipping_range[1]] = intensity_clipping_range[1]
-
-     min_val = np.amin(result)
-     max_val = np.amax(result)
-     if (max_val - min_val) != 0:
-         result = (result - min_val) / (max_val - min_val)
-     return result
-
- def post_process(pred):
-     return pred
-
- def get_model(output):
-     url = "https://drive.google.com/uc?id=12or5Q79at2BtLgQ7IaglNGPFGRlEgEHc"
-     md5 = "ef5a6dfb794b39bea03f5496a9a49d4d"
-     gdown.cached_download(url, output, md5=md5)  #, postprocess=gdown.extractall)
-
- def verboseHandler(verbose):
-     if verbose:
-         log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG)
-         log.info("Verbose output.")
-     else:
-         log.basicConfig(format="%(levelname)s: %(message)s")
-
- def func(path, output, cpu, verbose):
+ def func(path, output, cpu, verbose, vessels):
      # enable verbose or not
-     verboseHandler(verbose)
+     log = verboseHandler(verbose)

      cwd = "/".join(os.path.realpath(__file__).replace("\\", "/").split("/")[:-1]) + "/"
      name = cwd + "model.h5"
+     name_vessel = cwd + "model-hepatic_vessel.npz"

-     # get model
+     # get models
      get_model(name)

-     # load model
-     model = load_model(name, compile=False)
+     if vessels:
+         get_vessel_model(name_vessel)

      if not os.path.isdir(path):
          paths = [path]
@@ -73,76 +56,13 @@ def func(path, output, cpu, verbose):
          if not curr.endswith(".nii"):
              continue

-         log.info("preprocessing...")
-         nib_volume = nib.load(curr)
-         new_spacing = [1., 1., 1.]
-         resampled_volume = resample_to_output(nib_volume, new_spacing, order=1)
-         data = resampled_volume.get_data().astype('float32')
-
-         curr_shape = data.shape
-
-         # resize to get (512, 512) output images
-         img_size = 512
-         data = zoom(data, [img_size / data.shape[0], img_size / data.shape[1], 1.0], order=1)
-
-         # intensity normalization
-         intensity_clipping_range = [-150, 250]  # HU clipping limits (Pravdaray's configs)
-         data = intensity_normalization(volume=data, intensity_clipping_range=intensity_clipping_range)
-
-         # fix orientation
-         data = np.rot90(data, k=1, axes=(0, 1))
-         data = np.flip(data, axis=0)
-
-         log.info("predicting...")
-         # predict on data
-         pred = np.zeros_like(data).astype(np.float32)
-         for i in tqdm(range(data.shape[-1]), "pred: ", disable=not verbose):
-             pred[..., i] = model.predict(np.expand_dims(np.expand_dims(np.expand_dims(data[..., i], axis=0), axis=-1), axis=0))[0, ..., 1]
-         del data
-
-         # threshold
-         pred = (pred >= 0.4).astype(int)
-
-         # fix orientation back
-         pred = np.flip(pred, axis=0)
-         pred = np.rot90(pred, k=-1, axes=(0, 1))
-
-         log.info("resize back...")
-         # resize back from 512x512
-         pred = zoom(pred, [curr_shape[0] / img_size, curr_shape[1] / img_size, 1.0], order=1)
-         pred = (pred >= 0.5).astype(bool)
-
-         log.info("morphological post-processing...")
-         # morphological post-processing
-         # 1) first erode
-         pred = binary_erosion(pred, ball(3)).astype(np.int32)
-
-         # 2) keep only largest connected component
-         labels = label(pred)
-         regions = regionprops(labels)
-         area_sizes = []
-         for region in regions:
-             area_sizes.append([region.label, region.area])
-         area_sizes = np.array(area_sizes)
-         tmp = np.zeros_like(pred)
-         tmp[labels == area_sizes[np.argmax(area_sizes[:, 1]), 0]] = 1
-         pred = tmp.copy()
-         del tmp, labels, regions, area_sizes
-
-         # 3) dilate
-         pred = binary_dilation(pred.astype(bool), ball(3))
-
-         # 4) remove small holes
-         pred = remove_small_holes(pred.astype(bool), area_threshold=0.001 * np.prod(pred.shape))
-
-         log.info("saving...")
-         pred = pred.astype(np.uint8)
-         img = nib.Nifti1Image(pred, affine=resampled_volume.affine)
-         resampled_lab = resample_from_to(img, nib_volume, order=0)
-         if multiple_flag:
-             nib.save(resampled_lab, output + "/" + curr.split("/")[-1].split(".")[0] + "-livermask" + ".nii")
-         else:
-             nib.save(resampled_lab, output + ".nii")
+         # perform liver parenchyma segmentation; launched in a separate process to properly clear GPU memory
+         pred = liver_segmenter_wrapper(curr, output, cpu, verbose, multiple_flag, name)
+
+         if vessels:
+             # perform liver vessel segmentation
+             vessel_segmenter(curr, output, cpu, verbose, multiple_flag, pred, name_vessel)
+

  def main():
      parser = argparse.ArgumentParser()
@@ -154,6 +74,8 @@ def main():
                          help="force using the CPU even if a GPU is available.")
      parser.add_argument('--verbose', action='store_true',
                          help="enable verbose.")
+     parser.add_argument('--vessels', action='store_true',
+                         help="segment vessels.")
      ret = parser.parse_args(sys.argv[1:]); print(ret)

      if ret.cpu:
livermask/unet3d.py ADDED
@@ -0,0 +1,93 @@
+ #coding:utf-8
+ '''
+ * @author mygw
+ * @date 2018-6-15
+ '''
+
+ import chainer
+ import chainer.functions as F
+ import chainer.links as L
+
+ class UNet3D(chainer.Chain):
+
+     def __init__(self, num_of_label):
+         w = chainer.initializers.HeNormal()
+         super(UNet3D, self).__init__()
+         with self.init_scope():
+             # encoder pass
+             self.ce0 = L.ConvolutionND(ndim=3, in_channels=1, out_channels=16, ksize=3, pad=1, initialW=w)
+             self.bne0 = L.BatchNormalization(16)
+             self.ce1 = L.ConvolutionND(ndim=3, in_channels=16, out_channels=32, ksize=3, pad=1, initialW=w)
+             self.bne1 = L.BatchNormalization(32)
+
+             self.ce2 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=32, ksize=3, pad=1, initialW=w)
+             self.bne2 = L.BatchNormalization(32)
+             self.ce3 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=64, ksize=3, pad=1, initialW=w)
+             self.bne3 = L.BatchNormalization(64)
+
+             self.ce4 = L.ConvolutionND(ndim=3, in_channels=64, out_channels=64, ksize=3, pad=1, initialW=w)
+             self.bne4 = L.BatchNormalization(64)
+
+             # decoder pass
+             self.cd4 = L.ConvolutionND(ndim=3, in_channels=64, out_channels=128, ksize=3, pad=1, initialW=w)
+             self.bnd4 = L.BatchNormalization(128)
+             self.deconv2 = L.DeconvolutionND(ndim=3, in_channels=128, out_channels=128, ksize=2, stride=2, initialW=w, nobias=True)
+
+             self.cd3 = L.ConvolutionND(ndim=3, in_channels=64+128, out_channels=64, ksize=3, pad=1, initialW=w)
+             self.bnd3 = L.BatchNormalization(64)
+             self.cd2 = L.ConvolutionND(ndim=3, in_channels=64, out_channels=64, ksize=3, pad=1, initialW=w)
+             self.bnd2 = L.BatchNormalization(64)
+             self.deconv1 = L.DeconvolutionND(ndim=3, in_channels=64, out_channels=64, ksize=2, stride=2, initialW=w, nobias=True)
+
+             self.cd1 = L.ConvolutionND(ndim=3, in_channels=32+64, out_channels=32, ksize=3, pad=1, initialW=w)
+             self.bnd1 = L.BatchNormalization(32)
+             self.cd0 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=32, ksize=3, pad=1, initialW=w)
+             self.bnd0 = L.BatchNormalization(32)
+             self.lcl = L.ConvolutionND(ndim=3, in_channels=32, out_channels=num_of_label, ksize=1, pad=0, initialW=w)
+
+     def __call__(self, x):
+
+         # encoder pass
+         e0 = F.relu(self.bne0(self.ce0(x)))
+         e1 = F.relu(self.bne1(self.ce1(e0)))
+         del e0
+         e2 = F.relu(self.bne2(self.ce2(F.max_pooling_nd(e1, ksize=2, stride=2))))
+         e3 = F.relu(self.bne3(self.ce3(e2)))
+         del e2
+         e4 = F.relu(self.bne4(self.ce4(F.max_pooling_nd(e3, ksize=2, stride=2))))
+
+         # decoder pass
+         d4 = F.relu(self.bnd4(self.cd4(e4)))
+         del e4
+         d3 = F.relu(self.bnd3(self.cd3(F.concat([self.deconv2(d4), e3]))))
+         del d4, e3
+         d2 = F.relu(self.bnd2(self.cd2(d3)))
+         del d3
+         d1 = F.relu(self.bnd1(self.cd1(F.concat([self.deconv1(d2), e1]))))
+         del d2, e1
+         d0 = F.relu(self.bnd0(self.cd0(d1)))
+         del d1
+         lcl = F.softmax(self.lcl(d0), axis=1)
+
+         return lcl  # (batchsize, ch, z, y, x)
+
+
+     def cropping(self, input, ref):
+         '''
+         * @param input encoder feature map
+         * @param ref decoder feature map
+         '''
+         edgez = (input.shape[2] - ref.shape[2]) / 2
+         edgey = (input.shape[3] - ref.shape[3]) / 2
+         edgex = (input.shape[4] - ref.shape[4]) / 2
+         edgez = int(edgez)
+         edgey = int(edgey)
+         edgex = int(edgex)
+
+         X = F.split_axis(input, (edgex, int(input.shape[4] - edgex)), axis=4)
+         X = X[1]
+         X = F.split_axis(X, (edgey, int(X.shape[3] - edgey)), axis=3)
+         X = X[1]
+         X = F.split_axis(X, (edgez, int(X.shape[2] - edgez)), axis=2)
+         X = X[1]
+         return X
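
To make the expected tensor layout concrete, here is a minimal forward-pass sketch through this network; the 64 matches patch.patchside in configs/base.yml, and any spatial size divisible by 4 (two stride-2 poolings, two stride-2 deconvolutions) would work:

import numpy as np
import chainer
from unet3d import UNet3D

model = UNet3D(num_of_label=2)
# One single-channel 64^3 patch, laid out as (batch, channel, z, y, x).
x = np.zeros((1, 1, 64, 64, 64), dtype=np.float32)
with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
    y = model(x)
print(y.shape)  # (1, 2, 64, 64, 64): per-voxel softmax over the two labels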
livermask/utils/process.py ADDED
@@ -0,0 +1,210 @@
+ import numpy as np
+ import os, sys
+ from tqdm import tqdm
+ import nibabel as nib
+ from nibabel.processing import resample_to_output, resample_from_to
+ from scipy.ndimage import zoom
+ from tensorflow.python.keras.models import load_model
+ import gdown
+ from skimage.morphology import remove_small_holes, binary_dilation, binary_erosion, ball
+ from skimage.measure import label, regionprops
+ import warnings
+ import argparse
+ import pkg_resources
+ import tensorflow as tf
+ import logging as log
+ import chainer
+ import utils.yaml_utils as yaml_utils
+ import math
+ from unet3d import UNet3D
+ import yaml
+ from tensorflow.keras import backend as K
+ from numba import cuda
+ from utils.utils import load_vessel_model
+ import multiprocessing as mp
+
+
+ def intensity_normalization(volume, intensity_clipping_range):
+     result = np.copy(volume)
+
+     result[volume < intensity_clipping_range[0]] = intensity_clipping_range[0]
+     result[volume > intensity_clipping_range[1]] = intensity_clipping_range[1]
+
+     min_val = np.amin(result)
+     max_val = np.amax(result)
+     if (max_val - min_val) != 0:
+         result = (result - min_val) / (max_val - min_val)
+     return result
+
+
+ def liver_segmenter_wrapper(curr, output, cpu, verbose, multiple_flag, name):
+     # run inference in a separate process; TF frees GPU memory only when the process exits
+     p = mp.Pool(processes=1, maxtasksperchild=1)  # , initializer=initializer)
+     result = p.map_async(liver_segmenter, ((curr, output, cpu, verbose, multiple_flag, name), ))
+     return result.get()[0]
+
+
+ def liver_segmenter(params):
+     try:
+         curr, output, cpu, verbose, multiple_flag, name = params
+
+         # load model
+         model = load_model(name, compile=False)
+
+         log.info("preprocessing...")
+         nib_volume = nib.load(curr)
+         new_spacing = [1., 1., 1.]
+         resampled_volume = resample_to_output(nib_volume, new_spacing, order=1)
+         data = resampled_volume.get_data().astype('float32')
+
+         curr_shape = data.shape
+
+         # resize to get (512, 512) output images
+         img_size = 512
+         data = zoom(data, [img_size / data.shape[0], img_size / data.shape[1], 1.0], order=1)
+
+         # intensity normalization
+         intensity_clipping_range = [-150, 250]  # HU clipping limits (Pravdaray's configs)
+         data = intensity_normalization(volume=data, intensity_clipping_range=intensity_clipping_range)
+
+         # fix orientation
+         data = np.rot90(data, k=1, axes=(0, 1))
+         data = np.flip(data, axis=0)
+
+         log.info("predicting...")
+         # predict on data
+         pred = np.zeros_like(data).astype(np.float32)
+         for i in tqdm(range(data.shape[-1]), "pred: ", disable=not verbose):
+             pred[..., i] = model.predict(np.expand_dims(np.expand_dims(np.expand_dims(data[..., i], axis=0), axis=-1), axis=0))[0, ..., 1]
+         del data
+
+         # threshold
+         pred = (pred >= 0.4).astype(int)
+
+         # fix orientation back
+         pred = np.flip(pred, axis=0)
+         pred = np.rot90(pred, k=-1, axes=(0, 1))
+
+         log.info("resize back...")
+         # resize back from 512x512
+         pred = zoom(pred, [curr_shape[0] / img_size, curr_shape[1] / img_size, 1.0], order=1)
+         pred = (pred >= 0.5).astype(bool)
+
+         log.info("morphological post-processing...")
+         # morphological post-processing
+         # 1) first erode
+         pred = binary_erosion(pred, ball(3)).astype(np.int32)
+
+         # 2) keep only largest connected component
+         labels = label(pred)
+         regions = regionprops(labels)
+         area_sizes = []
+         for region in regions:
+             area_sizes.append([region.label, region.area])
+         area_sizes = np.array(area_sizes)
+         tmp = np.zeros_like(pred)
+         tmp[labels == area_sizes[np.argmax(area_sizes[:, 1]), 0]] = 1
+         pred = tmp.copy()
+         del tmp, labels, regions, area_sizes
+
+         # 3) dilate
+         pred = binary_dilation(pred.astype(bool), ball(3))
+
+         # 4) remove small holes
+         pred = remove_small_holes(pred.astype(bool), area_threshold=0.001 * np.prod(pred.shape))
+
+         log.info("saving...")
+         pred = pred.astype(np.uint8)
+         img = nib.Nifti1Image(pred, affine=resampled_volume.affine)
+         resampled_lab = resample_from_to(img, nib_volume, order=0)
+         if multiple_flag:
+             nib.save(resampled_lab, output + "/" + curr.split("/")[-1].split(".")[0] + "-livermask.nii")
+         else:
+             nib.save(resampled_lab, output + "-livermask.nii")
+
+         return pred
+     except KeyboardInterrupt:
+         raise RuntimeError("Caught KeyboardInterrupt, terminating worker")
+
+
+ def vessel_segmenter(curr, output, cpu, verbose, multiple_flag, liver_mask, name_vessel):
+
+     # load model
+     unet, xp = load_vessel_model(name_vessel, cpu)
+
+     # read config
+     config = yaml_utils.Config(yaml.safe_load(open("./configs/base.yml")))
+
+     # read data
+     nib_volume = nib.load(curr)
+     new_spacing = [1., 1., 1.]
+     resampled_volume = resample_to_output(nib_volume, new_spacing, order=1)
+     #resampled_volume = nib_volume
+     org = resampled_volume.get_data().astype('float32')
+
+     # HU clipping
+     intensity_clipping_range = [80, 220]
+     org[org < intensity_clipping_range[0]] = intensity_clipping_range[0]
+     org[org > intensity_clipping_range[1]] = intensity_clipping_range[1]
+
+     # calculate the maximum number of patches along each axis
+     ze, ye, xe = org.shape
+     xm = int(math.ceil(float(xe) / float(config.patch['patchside'])))
+     ym = int(math.ceil(float(ye) / float(config.patch['patchside'])))
+     zm = int(math.ceil(float(ze) / float(config.patch['patchside'])))
+
+     margin = ((0, config.patch['patchside']),
+               (0, config.patch['patchside']),
+               (0, config.patch['patchside']))
+     org = np.pad(org, margin, 'edge')
+     org = chainer.Variable(xp.array(org[np.newaxis, np.newaxis, :], dtype=xp.float32))
+
+     # init prediction arrays
+     prediction_map = np.zeros((ze + config.patch['patchside'], ye + config.patch['patchside'], xe + config.patch['patchside']))
+     probability_map = np.zeros((config.unet['number_of_label'], ze + config.patch['patchside'], ye + config.patch['patchside'], xe + config.patch['patchside']))
+
+     # patch loop
+     for s in tqdm(range(xm * ym * zm), ascii=True, desc='Patch loop'):
+         xi = int(s % xm) * config.patch['patchside']
+         yi = int((s % (ym * xm)) / xm) * config.patch['patchside']
+         zi = int(s / (ym * xm)) * config.patch['patchside']
+
+         # check if the current region contains any liver mask; if not, skip it
+         parenchyma_patch = liver_mask[zi:zi + config.patch['patchside'], yi:yi + config.patch['patchside'], xi:xi + config.patch['patchside']]
+         #if np.count_nonzero(parenchyma_patch) == 0:
+         if np.mean(parenchyma_patch) < 0.25:
+             continue
+
+         # extract patch from original image
+         patch = org[:, :, zi:zi + config.patch['patchside'], yi:yi + config.patch['patchside'], xi:xi + config.patch['patchside']]
+         with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
+             probability_patch = unet(patch)
+
+         # generate probability map
+         probability_patch = probability_patch.data
+         #if args.gpu >= 0:
+         if not cpu:
+             probability_patch = chainer.cuda.to_cpu(probability_patch)
+         for ch in range(probability_patch.shape[1]):
+             probability_map[ch, zi:zi + config.patch['patchside'], yi:yi + config.patch['patchside'], xi:xi + config.patch['patchside']] = probability_patch[0, ch, :, :, :]
+
+         prediction_patch = np.argmax(probability_patch, axis=1)
+
+         prediction_map[zi:zi + config.patch['patchside'], yi:yi + config.patch['patchside'], xi:xi + config.patch['patchside']] = prediction_patch[0, :, :, :]
+
+     print('Save image')
+     probability_map = probability_map[:, :ze, :ye, :xe]
+     prediction_map = prediction_map[:ze, :ye, :xe]
+
+     # post-process prediction
+     #prediction_map = prediction_map + liver_mask
+     #prediction_map[prediction_map > 0] = 1
+
+     log.info("saving...")
+     pred = prediction_map.astype(np.uint8)
+     img = nib.Nifti1Image(pred, affine=resampled_volume.affine)
+     resampled_lab = resample_from_to(img, nib_volume, order=0)
+     if multiple_flag:
+         nib.save(resampled_lab, output + "/" + curr.split("/")[-1].split(".")[0] + "-vessels.nii")
+     else:
+         nib.save(resampled_lab, output + "-vessels.nii")
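
The flat-index arithmetic in the patch loop above is easy to misread, so here is the same mapping in isolation (patch_origins is an illustrative helper, not part of the commit): x varies fastest, then y, then z, matching the order vessel_segmenter walks the volume.

import math

def patch_origins(shape_zyx, side):
    ze, ye, xe = shape_zyx
    xm = math.ceil(xe / side)
    ym = math.ceil(ye / side)
    zm = math.ceil(ze / side)
    for s in range(xm * ym * zm):
        xi = (s % xm) * side                 # fastest axis
        yi = ((s % (ym * xm)) // xm) * side
        zi = (s // (ym * xm)) * side         # slowest axis
        yield zi, yi, xi

# A 100x80x70 volume with 64^3 patches needs 2*2*2 = 8 patch origins.
print(list(patch_origins((100, 80, 70), 64)))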
livermask/utils/utils.py ADDED
@@ -0,0 +1,38 @@
+ import gdown
+ import logging as log
+ import chainer
+ from unet3d import UNet3D
+
+
+ def get_model(output):
+     url = "https://drive.google.com/uc?id=12or5Q79at2BtLgQ7IaglNGPFGRlEgEHc"
+     md5 = "ef5a6dfb794b39bea03f5496a9a49d4d"
+     gdown.cached_download(url, output, md5=md5)  #, postprocess=gdown.extractall)
+
+
+ def get_vessel_model(output):
+     url = "https://drive.google.com/uc?id=1-8VNoRmIeiF1uIuWBqmZXz_6dIQFSAxN"
+     #md5 = "ef5a6dfb794b39bea03f5496a9a49d4d"
+     gdown.cached_download(url, output)  #, md5=md5)
+
+
+ def load_vessel_model(path, cpu):
+     unet = UNet3D(num_of_label=2)
+     chainer.serializers.load_npz(path, unet)
+
+     if not cpu:
+         chainer.cuda.get_device_from_id(0).use()
+         unet.to_gpu()
+
+     xp = unet.xp
+
+     return unet, xp
+
+
+ def verboseHandler(verbose):
+     if verbose:
+         log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG)
+         log.info("Verbose output.")
+     else:
+         log.basicConfig(format="%(levelname)s: %(message)s")
+     return log
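
Putting these helpers together on CPU might look as follows; this is a sketch with an illustrative weights path, and the download URL is the one hard-coded in get_vessel_model above.

import chainer
from utils.utils import get_vessel_model, load_vessel_model

weights = "model-hepatic_vessel.npz"
get_vessel_model(weights)                # cached gdown download; skipped if the file exists
unet, xp = load_vessel_model(weights, cpu=True)
# On CPU, xp is numpy; with cpu=False it would be cupy and the model would sit on GPU 0.
patch = xp.zeros((1, 1, 64, 64, 64), dtype=xp.float32)
with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
    prob = unet(patch)                   # (1, 2, 64, 64, 64) softmax probabilities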
livermask/utils/yaml_utils.py ADDED
@@ -0,0 +1,28 @@
+ #coding:utf-8
+ import shutil
+ import sys, os, time
+ import yaml
+ from utils import yaml_utils
+
+
+ #sys.path.append(os.path.dirname(__file__))
+
+ # Copied from the tgans repo.
+ class Config(object):
+     '''
+     'https://github.com/pfnet-research/sngan_projection/blob/master/source/yaml_utils.py'
+     '''
+     def __init__(self, config_dict):
+         self.config = config_dict
+
+     def __getattr__(self, key):
+         if key in self.config:
+             return self.config[key]
+         else:
+             raise AttributeError(key)
+
+     def __getitem__(self, key):
+         return self.config[key]
+
+     def __repr__(self):
+         return yaml.dump(self.config, default_flow_style=False)
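
Since both access styles appear in utils/process.py, a short note on how this wrapper behaves (the dict literal below is just an illustration):

cfg = Config({'patch': {'patchside': 64}, 'seed': 0})
cfg.seed                 # 0: attribute access falls through to the dict via __getattr__
cfg['patch']             # {'patchside': 64}: plain item access via __getitem__
cfg.patch['patchside']   # 64: the combination used throughout vessel_segmenter
# cfg.missing raises AttributeError('missing') rather than KeyError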
requirements.txt CHANGED
Binary files a/requirements.txt and b/requirements.txt differ