Upload train_data_prepare.py
train_data_prepare.py
ADDED
@@ -0,0 +1,186 @@
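# Prepares CT segmentation training data: each image/label pair is loaded,
# the integer label map is expanded into per-category binary masks, the CT
# volume is normalized, and the results are saved as image.npy plus a sparse
# mask archive; finally a dataset JSON with a train/test split is written.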
import os
import numpy as np
import multiprocessing
import argparse
from scipy import sparse
from sklearn.model_selection import train_test_split
import json
join = os.path.join

from monai.transforms import (
    AddChanneld,
    Compose,
    LoadImaged,
    Orientationd,
)

def set_parse():
    # set up parser
    parser = argparse.ArgumentParser()
    # nargs='+' accepts a space-separated list of category names
    parser.add_argument("-category", default=['liver', 'right kidney', 'spleen', 'pancreas', 'aorta', 'inferior vena cava', 'right adrenal gland', 'left adrenal gland', 'gallbladder', 'esophagus', 'stomach', 'duodenum', 'left kidney'], type=str, nargs='+')
    parser.add_argument("-image_dir", type=str, required=True)
    parser.add_argument("-label_dir", type=str, required=True)
    parser.add_argument("-dataset_code", type=str, required=True)
    parser.add_argument("-save_root", type=str, required=True)
    parser.add_argument("-test_ratio", type=float, required=True)

    args = parser.parse_args()
    return args

args = set_parse()

# gather the sorted image and label file lists
image_list_all = sorted(os.listdir(args.image_dir))
label_list_all = sorted(os.listdir(args.label_dir))
assert len(image_list_all) == len(label_list_all)
print('dataset size ', len(image_list_all))

# build dataset: one (idx, name, image path, label path) tuple per case
data_path_list_all = []
for idx in range(len(image_list_all)):
    img_path = join(args.image_dir, image_list_all[idx])
    label_path = join(args.label_dir, label_list_all[idx])
    name = image_list_all[idx].split('.')[0]
    info = (idx, name, img_path, label_path)
    data_path_list_all.append(info)

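# MONAI loading pipeline: LoadImaged reads each image/label pair from disk and
# AddChanneld prepends a channel dimension. AddChanneld is the older MONAI
# name; recent releases replace it with EnsureChannelFirstd.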
img_loader = Compose(
    [
        LoadImaged(keys=['image', 'label']),
        AddChanneld(keys=['image', 'label']),
        # Orientationd(keys=['image', 'label'], axcodes="RAS"),
    ]
)

# save
save_path = join(args.save_root, args.dataset_code)
os.makedirs(save_path, exist_ok=True)

# ct_save_path = join(save_path, 'ct')
# gt_save_path = join(save_path, 'gt')
# if not os.path.exists(ct_save_path):
#     os.makedirs(ct_save_path)
# if not os.path.exists(gt_save_path):
#     os.makedirs(gt_save_path)

# already-processed cases, used by run() to skip finished work
exist_file_list = os.listdir(save_path)
print('exist_file_list ', exist_file_list)

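# Foreground-aware normalization: statistics are computed only over voxels
# brighter than the volume mean, so background/air does not dominate. The
# volume is clipped to the foreground's 0.05/99.95 percentiles and then
# standardized with the foreground mean and std.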
def normalize(ct_narray):
    ct_voxel_ndarray = ct_narray.copy()
    ct_voxel_ndarray = ct_voxel_ndarray.flatten()
    # threshold at the global mean to isolate foreground voxels
    thred = np.mean(ct_voxel_ndarray)
    voxel_filtered = ct_voxel_ndarray[(ct_voxel_ndarray > thred)]
    # statistics over the foreground only
    upper_bound = np.percentile(voxel_filtered, 99.95)
    lower_bound = np.percentile(voxel_filtered, 0.05)
    mean = np.mean(voxel_filtered)
    std = np.std(voxel_filtered)
    ### transform ###
    ct_narray = np.clip(ct_narray, lower_bound, upper_bound)
    ct_narray = (ct_narray - mean) / max(std, 1e-8)
    return ct_narray

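# Per-case worker (run under multiprocessing below): skips cases already in
# save_path, loads one image/label pair, builds a (num_categories, ...) stack
# of binary masks, normalizes the image, and writes
# save_path/<case>/image.npy plus a sparse mask_<shape>.npz.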
def run(info):
    idx, file_name, case_path, label_path = info

    item = {}
    if file_name in exist_file_list:
        print(file_name + ' exist, skip')
        return
    print('process ', idx, '---', file_name)
    # load the image/label pair
    item_load = {
        'image': case_path,
        'label': label_path,
    }
    item_load = img_loader(item_load)
    ct_voxel_ndarray = item_load['image']
    gt_voxel_ndarray = item_load['label']

    ct_shape = ct_voxel_ndarray.shape
    item['image'] = ct_voxel_ndarray

    # expand the label map into one binary mask per category
    gt_voxel_ndarray = np.array(gt_voxel_ndarray).squeeze()
    present_categories = np.unique(gt_voxel_ndarray)
    gt_masks = []
    for cls_idx in range(len(args.category)):
        cls = cls_idx + 1
        if cls not in present_categories:
            # all-zero mask matching the squeezed label's spatial shape
            # (ct_shape still carries the channel dimension)
            gt_voxel_ndarray_category = np.zeros(gt_voxel_ndarray.shape)
            gt_masks.append(gt_voxel_ndarray_category)
            print('case {} ==> zero category '.format(idx) + args.category[cls_idx])
            print(gt_voxel_ndarray_category.shape)
        else:
            gt_voxel_ndarray_category = gt_voxel_ndarray.copy()
            gt_voxel_ndarray_category[gt_voxel_ndarray != cls] = 0
            gt_voxel_ndarray_category[gt_voxel_ndarray == cls] = 1
            gt_masks.append(gt_voxel_ndarray_category)
    gt_voxel_ndarray = np.stack(gt_masks, axis=0)

    assert gt_voxel_ndarray.shape[0] == len(args.category), str(gt_voxel_ndarray.shape[0])
    assert gt_voxel_ndarray.shape[1:] == ct_voxel_ndarray.shape[1:]
    item['label'] = gt_voxel_ndarray.astype(np.int32)
    print(idx, ' load done!')

    # normalize the CT volume
    item['image'] = normalize(item['image'])
    print(idx, ' transform done')

    # save: dense image plus a sparse (C, -1) mask; the original mask shape
    # is encoded in the file name
    print(file_name + ' ct gt <--> ', item['image'].shape, item['label'].shape)
    case_path = join(save_path, file_name)
    os.makedirs(case_path, exist_ok=True)

    np.save(join(case_path, 'image.npy'), item['image'])
    allmatrix_sp = sparse.csr_matrix(item['label'].reshape(item['label'].shape[0], -1))
    sparse.save_npz(join(case_path, 'mask_' + str(item['label'].shape)), allmatrix_sp)
    print(file_name + ' save done!')

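# A minimal sketch of reading a saved case back (a hypothetical helper, not
# part of this script); the mask's original shape is recovered from its
# file name:
#
#   import ast, glob
#   def load_case(case_dir):
#       image = np.load(join(case_dir, 'image.npy'))
#       mask_file = glob.glob(join(case_dir, 'mask_*.npz'))[0]
#       shape = ast.literal_eval(os.path.basename(mask_file)[len('mask_'):-len('.npz')])
#       gt = sparse.load_npz(mask_file).toarray().reshape(shape)
#       return image, gt
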
# Writes a Medical-Decathlon-style dataset JSON describing a random
# train/test split over the processed cases.
def generate_dataset_json(root_dir, output_file, test_ratio=0.2):
    # only case directories count; skips stray files such as the output JSON
    cases = [c for c in os.listdir(root_dir) if os.path.isdir(join(root_dir, c))]
    ct_paths, gt_paths = [], []
    for case_name in cases:
        case_files = sorted(os.listdir(join(root_dir, case_name)))
        ct_path = join(root_dir, case_name, case_files[0])  # image.npy sorts first
        gt_path = join(root_dir, case_name, case_files[1])  # mask_<shape>.npz
        ct_paths.append(ct_path)
        gt_paths.append(gt_path)

    data = list(zip(ct_paths, gt_paths))
    train_data, val_data = train_test_split(data, test_size=test_ratio)
    labels = {'0': 'background'}
    for idx in range(len(args.category)):
        label_name = args.category[idx]
        label_id = idx + 1
        labels[str(label_id)] = label_name
    dataset = {
        'name': f'{args.dataset_code} Dataset',
        'description': f'{args.dataset_code} Dataset',
        'tensorImageSize': '4D',
        'modality': {
            '0': 'CT',
        },
        'labels': labels,
        'numTrain': len(train_data),
        'numTest': len(val_data),
        'train': [{'image': ct_path, 'label': gt_path} for ct_path, gt_path in train_data],
        'test': [{'image': ct_path, 'label': gt_path} for ct_path, gt_path in val_data],
    }
    with open(output_file, 'w') as f:
        print(f'{output_file} dump')
        json.dump(dataset, f, indent=2)

if __name__ == "__main__":
    # process all cases in parallel
    with multiprocessing.Pool(processes=64) as pool:
        pool.map(run, data_path_list_all)
    print('Process Finished!')

    generate_dataset_json(root_dir=save_path,
                          output_file=join(save_path, f'{args.dataset_code}.json'),
                          test_ratio=args.test_ratio)
    print('Json Split Done!')
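# Example invocation (directory names and dataset code are hypothetical):
#   python train_data_prepare.py \
#       -image_dir ./data/imagesTr \
#       -label_dir ./data/labelsTr \
#       -dataset_code demo_ct \
#       -save_root ./processed \
#       -test_ratio 0.2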