andreped committed
Commit ddc22b2
1 Parent(s): 6c74cba

Added a simple Gradio web demo
.dockerfile ADDED
@@ -0,0 +1,8 @@
+ venv/
+ *.nii
+ *.nii.gz
+ *.pyc
+ *.egg-info
+ *.csv
+ *__pycache__/
+ *.DS_Store
.gitignore ADDED
@@ -0,0 +1,15 @@
+ venv/
+ *.nii
+ *.nii.gz
+ *.pyc
+ *.egg-info
+ *.csv
+ *.ini
+ *__pycache__/
+ *.DS_Store
+ *.json
+ *.onnx
+ *.xml
+ *.obj
+ *.zip
+ *.txt
Dockerfile ADDED
@@ -0,0 +1,71 @@
+ # read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
+ # you will also find guides on how best to write your Dockerfile
+ FROM python:3.8-slim
+
+ # set locale and encoding
+ ENV LANG=C.UTF-8 LC_ALL=C.UTF-8
+
+ WORKDIR /code
+
+ RUN apt-get update -y
+ #RUN apt-get install -y python3 python3-pip
+ RUN apt install git --fix-missing -y
+ RUN apt install wget -y
+
+ # install other system libraries
+ RUN apt-get install python3-pip -y && \
+     apt-get -y install sudo
+ RUN apt-get install curl -y
+ RUN apt-get install nano -y
+ RUN apt-get update && apt-get install -y git
+ RUN apt-get install libblas-dev -y && apt-get install liblapack-dev -y
+ RUN apt-get install gfortran -y
+ RUN apt-get install libpng-dev -y
+ RUN apt-get install python3-dev -y
+
+ WORKDIR /code
+
+ # install dependencies
+ COPY ./demo/requirements.txt /code/demo/requirements.txt
+ RUN pip install --no-cache-dir --upgrade -r /code/demo/requirements.txt
+
+ # resolve dependency collision between tf==2.4 and gradio
+ RUN pip install --force-reinstall typing_extensions==4.7.1
+
+ # lower pydantic version to work with typing_extensions deprecation
+ RUN pip install --force-reinstall "pydantic<2.0.0"
+
+ # Install wget and unzip
+ RUN apt install wget -y && \
+     apt install unzip -y
+
+ # Set up a new user named "user" with user ID 1000
+ RUN useradd -m -u 1000 user
+
+ # Switch to the "user" user
+ USER user
+
+ # Set home to the user's home directory
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:$PATH
+
+ # Set the working directory to the user's home directory
+ WORKDIR $HOME/app
+
+ # Copy the current directory contents into the container at $HOME/app, setting the owner to the user
+ COPY --chown=user . $HOME/app
+
+ # Download pretrained models
+ RUN wget "https://github.com/raidionics/Raidionics-models/releases/download/1.2.0/Raidionics-CT_LymphNodes-ONNX-v12.zip" && \
+     unzip "Raidionics-CT_LymphNodes-ONNX-v12.zip" && mkdir -p resources/models/ && mv CT_LymphNodes/ resources/models/CT_LymphNodes/
+ RUN wget "https://github.com/raidionics/Raidionics-models/releases/download/1.2.0/Raidionics-CT_Lungs-ONNX-v12.zip" && \
+     unzip "Raidionics-CT_Lungs-ONNX-v12.zip" && mv CT_Lungs/ resources/models/CT_Lungs/
+
+ RUN rm -r *.zip
+
+ # Download test sample
+ # @TODO: I have resampled the volume to 1mm isotropic for faster computation
+ RUN wget "https://github.com/andreped/neukit/releases/download/test-data/test_thorax_CT.nii.gz"
+
+ # CMD ["/bin/bash"]
+ CMD ["python3", "demo/app.py"]
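As a sketch of how to verify the build: `demo/src/inference.py` resolves each task's `pipeline.json` under the model folder, so the downloaded layout can be checked from inside the image. This snippet is hypothetical (it assumes it is run from `/home/user/app` in the container):

```
import os

# inference.py builds: os.path.join(model_path, task, "pipeline.json")
for task in ["CT_LymphNodes", "CT_Lungs"]:
    pipeline = os.path.join("resources/models", task, "pipeline.json")
    print(pipeline, "found" if os.path.exists(pipeline) else "MISSING")
```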
demo/README.md ADDED
@@ -0,0 +1,27 @@
+ # Hugging Face demo - through the Docker SDK
+
+ Deploying simple models in a Gradio-based web interface to Hugging Face Spaces is easy.
+ For any other custom pipeline, with various dependencies and challenging behaviour, it
+ might be necessary to use Docker containers instead.
+
+ For every new push to the main branch, continuous deployment to the Hugging Face
+ `LyNoS` space is performed through a GitHub Actions workflow.
+
+ When the space is updated, the Docker image is rebuilt/updated (using the cache when possible).
+ When the rebuild finishes, end users can test the app as they please.
+
+ Right now, the functionality of the app is extremely limited, only offering a widget
+ for uploading a NIfTI file (`.nii` or `.nii.gz`) and visualizing the produced surface
+ of the predicted lymph node volume when processing has finished.
+
+ The analysis process can be monitored from the `Logs` tab next to the `Running` button
+ in the Hugging Face `LyNoS` space.
+
+ It is also possible to build the app as a Docker image and deploy it yourself. To do so, follow these steps:
+
+ ```
+ docker build -t lynos:latest ..
+ docker run -it -p 7860:7860 lynos:latest
+ ```
+
+ Then open `http://localhost:7860` in your favourite browser to view the demo.
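As a quick sanity check that the container is actually serving, a minimal probe using only the Python standard library (assumes the default port mapping above; a healthy app answers with HTTP 200):

```
import urllib.request

# the Gradio app should respond on the mapped port
with urllib.request.urlopen("http://localhost:7860", timeout=10) as response:
    print(response.status)
```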
demo/app.py ADDED
@@ -0,0 +1,41 @@
+ import os
+ from argparse import ArgumentParser
+
+ from src.gui import WebUI
+
+
+ def main():
+     parser = ArgumentParser()
+     parser.add_argument(
+         "--cwd",
+         type=str,
+         default="/home/user/app/",
+         help="Set current working directory (path to app.py).",
+     )
+     parser.add_argument(
+         "--share",
+         type=int,
+         default=0,
+         help="Whether to make the app accessible online "
+         "-> sets up a public link (requires internet access).",
+     )
+     args = parser.parse_args()
+
+     print("Current working directory:", args.cwd)
+
+     if not os.path.exists(args.cwd):
+         raise ValueError("Chosen 'cwd' is not a valid path!")
+     if args.share not in [0, 1]:
+         raise ValueError(
+             "The 'share' argument can only be set to 0 or 1, but was:",
+             args.share,
+         )
+
+     # initialize and run app
+     print("Launching demo...")
+     app = WebUI(cwd=args.cwd, share=args.share)
+     app.run()
+
+
+ if __name__ == "__main__":
+     main()
demo/src/__init__.py ADDED
File without changes
demo/src/convert.py ADDED
@@ -0,0 +1,35 @@
+ import nibabel as nib
+ from nibabel.processing import resample_to_output
+ from skimage.measure import marching_cubes
+
+
+ def nifti_to_obj(path, output="prediction.obj"):
+     # load NIfTI and resample to 1 mm isotropic
+     image = nib.load(path)
+     resampled = resample_to_output(image, [1, 1, 1], order=1)
+     data = resampled.get_fdata().astype("uint8")
+
+     # create a material with a red diffuse color (RGB value)
+     red_material = "newmtl RedMaterial\nKd 1 0 0"
+
+     # extract surface
+     verts, faces, normals, values = marching_cubes(data, 0)
+     faces += 1  # OBJ indices are 1-based
+
+     with open(output, "w") as thefile:
+         # write the material definition to the OBJ file
+         thefile.write(red_material + "\n")
+
+         for item in verts:
+             # thefile.write('usemtl RedMaterial\n')
+             thefile.write("v {0} {1} {2}\n".format(item[0], item[1], item[2]))
+
+         for item in normals:
+             thefile.write("vn {0} {1} {2}\n".format(item[0], item[1], item[2]))
+
+         for item in faces:
+             thefile.write(
+                 "f {0}//{0} {1}//{1} {2}//{2}\n".format(
+                     item[0], item[1], item[2]
+                 )
+             )
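A minimal usage sketch for `nifti_to_obj`, using a synthetic binary mask so nothing needs downloading (hypothetical; assumes it is run from the `demo/` directory so that `src` is importable, mirroring `app.py`'s imports):

```
import nibabel as nib
import numpy as np

from src.convert import nifti_to_obj

# build a tiny synthetic binary mask and save it as NIfTI
mask = np.zeros((32, 32, 32), dtype="uint8")
mask[10:22, 10:22, 10:22] = 1
nib.save(nib.Nifti1Image(mask, affine=np.eye(4)), "mask.nii.gz")

# extract the surface with marching cubes and write a red-tinted OBJ
nifti_to_obj("mask.nii.gz", output="mask.obj")
```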
demo/src/css_style.py ADDED
@@ -0,0 +1,27 @@
+ css = """
+ #model-3d {
+     height: 512px;
+ }
+ #model-2d {
+     height: 512px;
+     margin: auto;
+ }
+ #upload {
+     height: 110px;
+ }
+ #run-button {
+     height: 110px;
+     width: 150px;
+ }
+ #toggle-button {
+     height: 47px;
+     width: 150px;
+ }
+ #logs-button {
+     height: 47px;
+     width: 150px;
+ }
+ #logs {
+     height: auto;
+ }
+ """
demo/src/gui.py ADDED
@@ -0,0 +1,231 @@
+ import os
+
+ import gradio as gr
+
+ from .convert import nifti_to_obj
+ from .css_style import css
+ from .inference import run_model
+ from .logger import flush_logs
+ from .logger import read_logs
+ from .logger import setup_logger
+ from .utils import load_ct_to_numpy
+ from .utils import load_pred_volume_to_numpy
+
+ # setup logging
+ LOGGER = setup_logger()
+
+
+ class WebUI:
+     def __init__(
+         self,
+         model_name: str = None,
+         cwd: str = "/home/user/app/",
+         share: int = 1,
+     ):
+         # global states
+         self.images = []
+         self.pred_images = []
+
+         # @TODO: This should be dynamically set based on chosen volume size
+         self.nb_slider_items = 820
+
+         self.model_name = model_name
+         self.cwd = cwd
+         self.share = share
+
+         self.class_name = "Lymph Nodes"  # default
+         self.class_names = {
+             "Lymph Nodes": "CT_LymphNodes",
+         }
+
+         self.result_names = {
+             "Lymph Nodes": "LymphNodes",
+         }
+
+         # define widgets not to be rendered immediately, but later on
+         self.slider = gr.Slider(
+             minimum=1,
+             maximum=self.nb_slider_items,
+             value=1,
+             step=1,
+             label="Which 2D slice to show",
+         )
+         self.volume_renderer = gr.Model3D(
+             clear_color=[0.0, 0.0, 0.0, 0.0],
+             label="3D Model",
+             show_label=True,
+             visible=True,
+             elem_id="model-3d",
+             camera_position=[90, 180, 768],
+         ).style(height=512)
+
+     def set_class_name(self, value):
+         LOGGER.info(f"Changed task to: {value}")
+         self.class_name = value
+
+     def combine_ct_and_seg(self, img, pred):
+         return (img, [(pred, self.class_name)])
+
+     def upload_file(self, file):
+         out = file.name
+         LOGGER.info(f"File uploaded: {out}")
+         return out
+
+     def process(self, mesh_file_name):
+         path = mesh_file_name.name
+         run_model(
+             path,
+             model_path=os.path.join(self.cwd, "resources/models/"),
+             task=self.class_names[self.class_name],
+             name=self.result_names[self.class_name],
+         )
+         LOGGER.info("Converting prediction NIfTI to OBJ...")
+         nifti_to_obj("prediction.nii.gz")
+
+         LOGGER.info("Loading CT to numpy...")
+         self.images = load_ct_to_numpy(path)
+
+         LOGGER.info("Loading prediction volume to numpy...")
+         self.pred_images = load_pred_volume_to_numpy("./prediction.nii.gz")
+
+         return "./prediction.obj"
+
+     def get_img_pred_pair(self, k):
+         k = int(k)
+         out = gr.AnnotatedImage(
+             self.combine_ct_and_seg(self.images[k], self.pred_images[k]),
+             visible=True,
+             elem_id="model-2d",
+         ).style(
+             color_map={self.class_name: "#ffae00"},
+             height=512,
+             width=512,
+         )
+         return out
+
+     def toggle_sidebar(self, state):
+         state = not state
+         return gr.update(visible=state), state
+
+     def run(self):
+         with gr.Blocks(css=css) as demo:
+             with gr.Row():
+                 with gr.Column(visible=True, scale=0.2) as sidebar_left:
+                     logs = gr.Textbox(
+                         placeholder="\n" * 16,
+                         label="Logs",
+                         info="Verbose output from inference will be displayed below.",
+                         lines=38,
+                         max_lines=38,
+                         autoscroll=True,
+                         elem_id="logs",
+                         show_copy_button=True,
+                         scroll_to_output=False,
+                         container=True,
+                         line_breaks=True,
+                     )
+                     demo.load(read_logs, None, logs, every=1)
+
+                 with gr.Column():
+                     with gr.Row():
+                         with gr.Column(scale=0.2, min_width=150):
+                             sidebar_state = gr.State(True)
+
+                             btn_toggle_sidebar = gr.Button(
+                                 "Toggle Sidebar",
+                                 elem_id="toggle-button",
+                             )
+                             btn_toggle_sidebar.click(
+                                 self.toggle_sidebar,
+                                 [sidebar_state],
+                                 [sidebar_left, sidebar_state],
+                             )
+
+                             btn_clear_logs = gr.Button(
+                                 "Clear logs", elem_id="logs-button"
+                             )
+                             btn_clear_logs.click(flush_logs, [], [])
+
+                         file_output = gr.File(
+                             file_count="single", elem_id="upload"
+                         )
+                         file_output.upload(
+                             self.upload_file, file_output, file_output
+                         )
+
+                         model_selector = gr.Dropdown(
+                             list(self.class_names.keys()),
+                             label="Task",
+                             info="Which structure to segment.",
+                             multiselect=False,
+                             size="sm",
+                         )
+                         model_selector.input(
+                             fn=lambda x: self.set_class_name(x),
+                             inputs=model_selector,
+                             outputs=None,
+                         )
+
+                         with gr.Column(scale=0.2, min_width=150):
+                             run_btn = gr.Button(
+                                 "Run analysis",
+                                 variant="primary",
+                                 elem_id="run-button",
+                             ).style(
+                                 full_width=False,
+                                 size="lg",
+                             )
+                             run_btn.click(
+                                 fn=lambda x: self.process(x),
+                                 inputs=file_output,
+                                 outputs=self.volume_renderer,
+                             )
+
+                     with gr.Row():
+                         gr.Examples(
+                             examples=[
+                                 os.path.join(self.cwd, "test_thorax_CT.nii.gz"),
+                             ],
+                             inputs=file_output,
+                             outputs=file_output,
+                             fn=self.upload_file,
+                             cache_examples=True,
+                         )
+
+                     gr.Markdown(
+                         """
+                         **NOTE:** Inference might take several minutes (Lymph nodes: ~8 minutes), see logs to the left. \\
+                         The segmentation will be available in the 2D and 3D viewers below when finished.
+                         """
+                     )
+
+             with gr.Row():
+                 with gr.Box():
+                     with gr.Column():
+                         # create dummy image to be replaced by loaded images
+                         t = gr.AnnotatedImage(
+                             visible=True, elem_id="model-2d"
+                         ).style(
+                             color_map={self.class_name: "#ffae00"},
+                             height=512,
+                             width=512,
+                         )
+
+                         self.slider.input(
+                             self.get_img_pred_pair,
+                             self.slider,
+                             t,
+                         )
+
+                         self.slider.render()
+
+                 with gr.Box():
+                     self.volume_renderer.render()
+
+         # sharing app publicly -> share=True:
+         # https://gradio.app/sharing-your-app/
+         # inference times > 60 seconds -> need queue():
+         # https://github.com/tloen/alpaca-lora/issues/60#issuecomment-1510006062
+         demo.queue().launch(
+             server_name="0.0.0.0", server_port=7860, share=self.share
+         )
demo/src/inference.py ADDED
@@ -0,0 +1,101 @@
+ import configparser
+ import logging
+ import os
+ import shutil
+ import traceback
+
+
+ def run_model(
+     input_path: str,
+     model_path: str,
+     verbose: str = "info",
+     task: str = "CT_LymphNodes",
+     name: str = "Lymph nodes",
+ ):
+     if verbose == "debug":
+         logging.getLogger().setLevel(logging.DEBUG)
+     elif verbose == "info":
+         logging.getLogger().setLevel(logging.INFO)
+     elif verbose == "error":
+         logging.getLogger().setLevel(logging.ERROR)
+     else:
+         raise ValueError("Unsupported verbose value provided:", verbose)
+
+     # delete patient/result folder if they exist
+     if os.path.exists("./patient/"):
+         shutil.rmtree("./patient/")
+     if os.path.exists("./result/"):
+         shutil.rmtree("./result/")
+
+     patient_directory = ""
+     output_path = ""
+     try:
+         # setup temporary patient directory
+         filename = input_path.split("/")[-1]
+         splits = filename.split(".")
+         extension = ".".join(splits[1:])
+         patient_directory = "./patient/"
+         os.makedirs(patient_directory + "T0/", exist_ok=True)
+         shutil.copy(
+             input_path,
+             patient_directory + "T0/" + splits[0] + "-t1gd." + extension,
+         )
+
+         # define output directory to save results
+         output_path = "./result/prediction-" + splits[0] + "/"
+         os.makedirs(output_path, exist_ok=True)
+
+         # Setting up the configuration file
+         rads_config = configparser.ConfigParser()
+         rads_config.add_section("Default")
+         rads_config.set("Default", "task", "mediastinum_diagnosis")
+         rads_config.set("Default", "caller", "")
+         rads_config.add_section("System")
+         rads_config.set("System", "gpu_id", "-1")
+         rads_config.set("System", "input_folder", patient_directory)
+         rads_config.set("System", "output_folder", output_path)
+         rads_config.set("System", "model_folder", model_path)
+         rads_config.set(
+             "System",
+             "pipeline_filename",
+             os.path.join(model_path, task, "pipeline.json"),
+         )
+         rads_config.add_section("Runtime")
+         rads_config.set(
+             "Runtime", "reconstruction_method", "thresholding"
+         )  # thresholding, probabilities
+         rads_config.set("Runtime", "reconstruction_order", "resample_first")
+         rads_config.set("Runtime", "use_preprocessed_data", "False")
+
+         with open("rads_config.ini", "w") as f:
+             rads_config.write(f)
+
+         # finally, run inference
+         from raidionicsrads.compute import run_rads
+
+         run_rads(config_filename="rads_config.ini")
+
+         # rename and move final result
+         os.rename(
+             "./result/prediction-"
+             + splits[0]
+             + "/T0/"
+             + splits[0]
+             + "-t1gd_annotation-"
+             + name
+             + ".nii.gz",
+             "./prediction.nii.gz",
+         )
+         # Clean-up
+         if os.path.exists(patient_directory):
+             shutil.rmtree(patient_directory)
+         if os.path.exists(output_path):
+             shutil.rmtree(output_path)
+
+     except Exception:
+         print(traceback.format_exc())
+         # Clean-up
+         if os.path.exists(patient_directory):
+             shutil.rmtree(patient_directory)
+         if os.path.exists(output_path):
+             shutil.rmtree(output_path)
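For a one-off segmentation outside the web UI, `run_model` can be called directly with the same arguments `gui.py` passes (a sketch; assumes `demo/src` is importable and the model archives are unpacked under `resources/models/` as in the Dockerfile):

```
from src.inference import run_model

# writes the binary segmentation to ./prediction.nii.gz on success
run_model(
    "test_thorax_CT.nii.gz",  # the test sample fetched in the Dockerfile
    model_path="resources/models/",
    task="CT_LymphNodes",
    name="LymphNodes",
)
```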
demo/src/logger.py ADDED
@@ -0,0 +1,37 @@
+ import logging
+ import sys
+
+
+ def get_logger():
+     return logging.getLogger(__name__)
+
+
+ def setup_logger():
+     # clear log
+     file_to_delete = open("log.txt", "w")
+     file_to_delete.close()
+
+     file_handler = logging.FileHandler(filename="log.txt")
+     stdout_handler = logging.StreamHandler(stream=sys.stdout)
+     handlers = [file_handler, stdout_handler]
+
+     logging.basicConfig(
+         level=logging.INFO,
+         format="[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s",
+         handlers=handlers,
+     )
+
+     return get_logger()
+
+
+ def read_logs():
+     sys.stdout.flush()
+     with open("log.txt", "r") as f:
+         return f.read()
+
+
+ def flush_logs():
+     sys.stdout.flush()
+     # clear log
+     file_to_delete = open("log.txt", "w")
+     file_to_delete.close()
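The logger writes to both stdout and `log.txt`; the `Logs` textbox in the GUI simply polls `read_logs` every second. A minimal sketch of the round trip:

```
from src.logger import flush_logs
from src.logger import read_logs
from src.logger import setup_logger

logger = setup_logger()  # truncates log.txt and attaches file + stdout handlers
logger.info("hello from the demo")
print(read_logs())  # returns the current contents of log.txt
flush_logs()  # empties log.txt again
```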
demo/src/utils.py ADDED
@@ -0,0 +1,40 @@
+ import nibabel as nib
+ import numpy as np
+
+
+ def load_ct_to_numpy(data_path):
+     if not isinstance(data_path, str):
+         data_path = data_path.name
+
+     image = nib.load(data_path)
+     data = image.get_fdata()
+
+     data = np.rot90(data, k=1, axes=(0, 1))
+     data = np.flip(data, axis=0)
+
+     data[data < -1024] = -1024  # clamp to the [-1024, 1024] HU window
+     data[data > 1024] = 1024
+
+     data = data - np.amin(data)
+     data = data / np.amax(data) * 255
+     data = data.astype("uint8")
+
+     print(data.shape)
+     return [data[..., i] for i in range(data.shape[-1])]
+
+
+ def load_pred_volume_to_numpy(data_path):
+     if not isinstance(data_path, str):
+         data_path = data_path.name
+
+     image = nib.load(data_path)
+     data = image.get_fdata()
+
+     data = np.rot90(data, k=1, axes=(0, 1))
+     data = np.flip(data, axis=0)
+
+     data[data > 0] = 1
+     data = data.astype("uint8")
+
+     print(data.shape)
+     return [data[..., i] for i in range(data.shape[-1])]
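Both loaders return the volume as a list of 2D `uint8` slices along the last axis, which is what the slider in the 2D viewer indexes into. A quick sketch (assumes the test sample from the Dockerfile is present in the working directory):

```
from src.utils import load_ct_to_numpy

slices = load_ct_to_numpy("test_thorax_CT.nii.gz")
print(len(slices), slices[0].shape, slices[0].dtype)  # slice count, per-slice shape, uint8
```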
setup.cfg ADDED
@@ -0,0 +1,14 @@
+ [metadata]
+ description-file = README.md
+
+ [isort]
+ force_single_line=True
+ known_first_party=lynos
+ line_length=160
+ profile=black
+
+ [flake8]
+ # imported but unused in __init__.py, that's ok.
+ per-file-ignores=*__init__.py:F401
+ ignore=E203,W503,W605,F632,E266,E731,E712,E741
+ max-line-length=160