andreped committed
Commit
46b1a17
1 Parent(s): 06664ab

Worked on 2D viewer [no ci]

Files changed (2)
  1. demo/README.md +9 -0
  2. demo/app.py +132 -7
demo/README.md CHANGED
@@ -40,6 +40,15 @@ of the predicted liver parenchyma 3D volume when finished processing.
 Analysis process can be monitored from the `Logs` tab next to the `Running` button
 in the Hugging Face `livermask` space.
 
+It is also possible to build the app as a Docker image and deploy it. To do so, follow these steps:
+
+```
+docker build -t livermask ..
+docker run -it -p 7860:7860 livermask
+```
+
+Then open `http://127.0.0.1:7860` in your favourite web browser to view the demo.
+
 Natural future TODOs include:
 - [ ] Add gallery widget to enable scrolling through 2D slices
 - [ ] Render segmentation for individual 2D slices as overlays
demo/app.py CHANGED
@@ -3,6 +3,8 @@ import subprocess as sp
 from skimage.measure import marching_cubes
 import nibabel as nib
 from nibabel.processing import resample_to_output
+import numpy as np
+import random
 
 
 def nifti_to_glb(path):
@@ -39,15 +41,138 @@ def load_mesh(mesh_file_name):
     return "./prediction.obj"
 
 
+def setup_gallery(data_path, pred_path):
+    image = nib.load(data_path)
+    resampled = resample_to_output(image, [1, 1, 1], order=1)
+    data = resampled.get_fdata().astype("uint8")
+
+    image = nib.load(pred_path)
+    resampled = resample_to_output(image, [1, 1, 1], order=0)
+    pred = resampled.get_fdata().astype("uint8")
+
+
+def load_ct_to_numpy(data_path):
+    if type(data_path) != str:
+        data_path = data_path.name
+
+    image = nib.load(data_path)
+    data = image.get_fdata()
+
+    data = np.rot90(data, k=1, axes=(0, 1))
+
+    data[data < -150] = -150
+    data[data > 250] = 250
+
+    data = data - np.amin(data)
+    data = data / np.amax(data) * 255
+    data = data.astype("uint8")
+
+    print(data.shape)
+    return [data[..., i] for i in range(data.shape[-1])]
+
+
+def upload_file(file):
+    return file.name
+
+# def select_section(evt: gr.SelectData):
+#     return section_labels[evt.index]
+
+
 if __name__ == "__main__":
     print("Launching demo...")
-    demo = gr.Interface(
-        fn=load_mesh,
-        inputs=gr.UploadButton(label="Click to Upload a File", file_types=[".nii", ".nii.nz"], file_count="single"),
-        outputs=gr.Model3D(clear_color=[0.0, 0.0, 0.0, 0.0], label="3D Model"),
-        title="livermask: Automatic Liver Parenchyma segmentation in CT",
-        description="Using pretrained deep learning model trained on the LiTS17 dataset",
-    )
+    with gr.Blocks() as demo:
+        """
+        with gr.Blocks() as demo:
+            with gr.Row():
+                text1 = gr.Textbox(label="t1")
+                slider2 = gr.Textbox(label="slide")
+                drop3 = gr.Dropdown(["a", "b", "c"], label="d3")
+            with gr.Row():
+                with gr.Column(scale=1, min_width=600):
+                    text1 = gr.Textbox(label="prompt 1")
+                    text2 = gr.Textbox(label="prompt 2")
+                    inbtw = gr.Button("Between")
+                    text4 = gr.Textbox(label="prompt 1")
+                    text5 = gr.Textbox(label="prompt 2")
+                with gr.Column(scale=2, min_width=600):
+                    img1 = gr.Image("images/cheetah.jpg")
+                    btn = gr.Button("Go").style(full_width=True)
+
+        greeter_1 = gr.Interface(lambda name: f"Hello {name}!", inputs="textbox", outputs=gr.Textbox(label="Greeter 1"))
+        greeter_2 = gr.Interface(lambda name: f"Greetings {name}!", inputs="textbox", outputs=gr.Textbox(label="Greeter 2"))
+        demo = gr.Parallel(greeter_1, greeter_2)
+
+        volume_renderer = gr.Interface(
+            fn=load_mesh,
+            inputs=gr.UploadButton(label="Click to Upload a File", file_types=[".nii", ".nii.nz"], file_count="single"),
+            outputs=gr.Model3D(clear_color=[0.0, 0.0, 0.0, 0.0], label="3D Model"),
+            title="livermask: Automatic Liver Parenchyma segmentation in CT",
+            description="Using pretrained deep learning model trained on the LiTS17 dataset",
+        )
+        """
+
+        with gr.Row():
+            # file_output = gr.File()
+            upload_button = gr.UploadButton(label="Click to Upload a File", file_types=[".nii", ".nii.gz"], file_count="single")
+            # upload_button.upload(upload_file, upload_button, file_output)
+
+        # select_btn = gr.Button("Run analysis")
+        # select_btn.click(fn=upload_file, inputs=upload_button, outputs=output, api_name="Analysis")
+
+        # upload_button.click(section, [img_input, num_boxes, num_segments], img_output)
+
+        # print("file output:", file_output)
+
+        images = load_ct_to_numpy("./test-volume.nii")
+
+        def variable_outputs(k):
+            k = int(k) - 1
+            out = [gr.AnnotatedImage.update(visible=False)] * len(images)
+            out[k] = gr.AnnotatedImage.update(visible=True)
+            return out
+
+        def section(img, num_segments):
+            sections = []
+            for b in range(num_segments):
+                x = random.randint(0, img.shape[1])
+                y = random.randint(0, img.shape[0])
+                r = random.randint(0, min(x, y, img.shape[1] - x, img.shape[0] - y))
+                mask = np.zeros(img.shape[:2])
+                for i in range(img.shape[0]):
+                    for j in range(img.shape[1]):
+                        dist_square = (i - y) ** 2 + (j - x) ** 2
+                        if dist_square < r**2:
+                            mask[i, j] = round((r**2 - dist_square) / r**2 * 4) / 4
+                sections.append((mask, "parenchyma"))
+            return (img, sections)
+
+        with gr.Row():
+            s = gr.Slider(1, len(images), value=1, step=1, label="Which 2D slice to show")
+
+        with gr.Row():
+            with gr.Box():
+                images_boxes = []
+                for i, image in enumerate(images):
+                    visibility = True if i == 0 else False  # only first slice visible; change slice through the slider
+                    t = gr.AnnotatedImage(value=section(image, 1), visible=visibility).style(color_map={"parenchyma": "#ffae00"}, width=image.shape[1])
+                    images_boxes.append(t)
+
+        s.change(variable_outputs, s, images_boxes)
+
+
+        # upload_button.upload(upload_file, upload_button, file_output)
+
+        # section_btn.click(section, [images[40], num_boxes, num_segments], img_output)
+        # ct_images.upload(section, [images[40], num_boxes, num_segments], img_output)
+
+        # demo = gr.Interface(
+        #     fn=load_ct_to_numpy,
+        #     inputs=gr.UploadButton(label="Click to Upload a File", file_types=[".nii", ".nii.nz"], file_count="single"),
+        #     outputs=gr.Gallery(label="CT slices").style(columns=[4], rows=[4], object_fit="contain", height="auto"),
+        #     title="livermask: Automatic Liver Parenchyma segmentation in CT",
+        #     description="Using pretrained deep learning model trained on the LiTS17 dataset",
+        # )
+
     # sharing app publicly -> share=True: https://gradio.app/sharing-your-app/
     # inference times > 60 seconds -> need queue(): https://github.com/tloen/alpaca-lora/issues/60#issuecomment-1510006062
     demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=True)
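
The slice viewer this commit introduces reduces to a single pattern: pre-render one `gr.AnnotatedImage` per slice and let a slider toggle which one is visible. Below is a minimal, self-contained sketch of that pattern, assuming Gradio 3.x (where `gr.AnnotatedImage.update()` and the `.change()` event exist); the random arrays are a hypothetical stand-in for the per-slice arrays that `load_ct_to_numpy` produces.

```python
import gradio as gr
import numpy as np

# Hypothetical stand-in for the CT slices returned by load_ct_to_numpy()
images = [np.random.randint(0, 255, size=(128, 128), dtype="uint8") for _ in range(10)]

def show_slice(k):
    # Hide all slices, then reveal the one the (1-indexed) slider selects
    k = int(k) - 1
    updates = [gr.AnnotatedImage.update(visible=False)] * len(images)
    updates[k] = gr.AnnotatedImage.update(visible=True)
    return updates

with gr.Blocks() as demo:
    slider = gr.Slider(1, len(images), value=1, step=1, label="Which 2D slice to show")
    # One AnnotatedImage per slice with an empty annotation list; only the first is visible at launch
    boxes = [gr.AnnotatedImage(value=(img, []), visible=(i == 0)) for i, img in enumerate(images)]
    slider.change(show_slice, slider, boxes)

demo.launch()
```

Pre-rendering every slice keeps slider scrubbing instant, at the cost of creating one component per slice up front, which is the trade-off behind the README's remaining gallery-widget TODO.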