Upload app.py
app.py
CHANGED
@@ -17,20 +17,28 @@ import llama_cpp.llama_tokenizer
 
 # global params
 THIS_DIR = os.path.dirname(os.path.abspath(__file__))
+SW_OVERLAP = 0.50
 examples_path = [
-    os.path.join(THIS_DIR, 'examples', 'HCC_003.nrrd'),
-    os.path.join(THIS_DIR, 'examples', '
-    os.path.join(THIS_DIR, 'examples', '
+    #os.path.join(THIS_DIR, 'examples', 'HCC_003.nrrd'),
+    #os.path.join(THIS_DIR, 'examples', 'HCC_006.nrrd'),
+    #os.path.join(THIS_DIR, 'examples', 'HCC_007.nrrd'),
+    #os.path.join(THIS_DIR, 'examples', 'HCC_018.nrrd'),
+    #os.path.join(THIS_DIR, 'examples', 'HCC_020.nrrd'), # bad
+    os.path.join(THIS_DIR, 'examples', 'HCC_036.nrrd'), #
+    os.path.join(THIS_DIR, 'examples', 'HCC_041.nrrd'), # good
+    os.path.join(THIS_DIR, 'examples', 'HCC_051.nrrd'), # ok, rerun with 0.3
+    #os.path.join(THIS_DIR, 'examples', 'HCC_066.nrrd'), # very bad
+    #os.path.join(THIS_DIR, 'examples', 'HCC_099.nrrd'), # bad
 ]
 models_path = {
     "liver": os.path.join(THIS_DIR, 'checkpoints', 'liver_3DSegResNetVAE.pth'),
-    "tumor": os.path.join(THIS_DIR, 'checkpoints', '
+    "tumor": os.path.join(THIS_DIR, 'checkpoints', 'tumor_3DSegResNetVAE_weak_morp.pth')
 }
 cache_path = {
     "liver mask": "liver_mask.npy",
     "tumor mask": "tumor_mask.npy"
 }
-device = "cpu"
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 mydict = {}
 
 
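Note on the config changes above: `device` now prefers CUDA when available instead of being hard-coded to "cpu", and the checkpoints referenced in `models_path` still need to be loaded onto that device. A minimal loader sketch, assuming the `.pth` files hold pickled `nn.Module` objects (the repo's actual loading code is not shown in this diff; `load_model` is a hypothetical helper):

    import torch

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def load_model(path: str) -> torch.nn.Module:
        # map_location keeps GPU-saved checkpoints loadable on CPU-only hosts,
        # which matters now that `device` is no longer always "cpu".
        model = torch.load(path, map_location=device)  # assumption: a full pickled module, not a state_dict
        model.to(device)
        model.eval()  # inference only; the app never trains
        return model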
@@ -149,22 +157,20 @@ def segment_tumor(image_name):
 
     postprocessing_tumor = Compose([
         Activations(sigmoid=True),
-        # Convert to binary predictions
         AsDiscrete(argmax=True, to_onehot=3),
-
-
-        # Fill holes in the binary mask for 1=liver and 2=tumor
-        FillHoles(applied_labels=[2]),
+        KeepLargestConnectedComponent(applied_labels=[1,2]),
+        FillHoles(applied_labels=[1,2]),
         ToTensor()
     ])
 
     # Preprocessing
     input = preprocessing_tumor(input)
-
+    # mask non-liver regions
+    input = torch.multiply(input, torch.from_numpy(mydict[image_name]['liver mask']))
 
     # Generate segmentation
     with torch.no_grad():
-        segmented_mask = sw_inference(tumor_model, input[None, None, :], (256,256,32), False, discard_second_output=True, overlap=
+        segmented_mask = sw_inference(tumor_model, input[None, None, :], (256,256,32), False, discard_second_output=True, overlap=SW_OVERLAP)[0] # input dimensions [B,C,H,W,Z]
 
     # Postprocess image
     segmented_mask = postprocessing_tumor(segmented_mask)[-1].numpy() # background, liver, tumor
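The tumor pass now zeroes out non-liver voxels before inference and runs the sliding window with `overlap=SW_OVERLAP`. `sw_inference` is the repo's own helper; a rough sketch of an equivalent built on MONAI's `sliding_window_inference`, assuming the network returns a `(logits, vae_loss)` tuple (which is presumably what `discard_second_output` drops) and that the argument names here are illustrative:

    import torch
    from monai.inferers import sliding_window_inference

    def sw_inference_sketch(model, volume, roi_size, overlap):
        # volume is channel-first with a batch dim, e.g. input[None, None, :] -> [B, C, H, W, D]
        def predictor(patches):
            out = model(patches)
            # keep only the segmentation logits if the network also returns a VAE loss
            return out[0] if isinstance(out, (tuple, list)) else out

        with torch.no_grad():
            return sliding_window_inference(
                inputs=volume,
                roi_size=roi_size,      # (256, 256, 32) for the tumor model above
                sw_batch_size=1,
                predictor=predictor,
                overlap=overlap,        # SW_OVERLAP = 0.50 after this change
            )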
@@ -198,13 +204,9 @@ def segment_liver(image_name):
     ])
 
     postprocessing_liver = Compose([
-        # Apply softmax activation to convert logits to probabilities
        Activations(sigmoid=True),
-        # Convert predicted probabilities to discrete values (0 or 1)
         AsDiscrete(argmax=True, to_onehot=None),
-        # Remove small connected components for 1=liver and 2=tumor
         KeepLargestConnectedComponent(applied_labels=[1]),
-        # Fill holes in the binary mask for 1=liver and 2=tumor
         FillHoles(applied_labels=[1]),
         ToTensor()
     ])
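The liver postprocessing chain loses its inline comments here but is otherwise unchanged: sigmoid activation, channel argmax, keep the largest connected component for label 1, fill holes, convert to tensor. A small standalone usage sketch with dummy 2-channel logits (shapes are illustrative only):

    import torch
    from monai.transforms import (
        Activations, AsDiscrete, Compose, FillHoles,
        KeepLargestConnectedComponent, ToTensor,
    )

    postprocessing_liver = Compose([
        Activations(sigmoid=True),
        AsDiscrete(argmax=True, to_onehot=None),
        KeepLargestConnectedComponent(applied_labels=[1]),
        FillHoles(applied_labels=[1]),
        ToTensor(),
    ])

    logits = torch.randn(2, 512, 512, 16)   # [C, H, W, D], channel-first
    mask = postprocessing_liver(logits)     # [1, 512, 512, 16] label map (0 = background, 1 = liver)
    liver_mask = mask[0].numpy()            # matches the "[0] # first channel" indexing in the app code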
@@ -214,7 +216,7 @@ def segment_liver(image_name):
 
     # Generate segmentation
     with torch.no_grad():
-        segmented_mask = sw_inference(liver_model, input[None, None, :], (512,512,16), False, discard_second_output=True, overlap=0.
+        segmented_mask = sw_inference(liver_model, input[None, None, :], (512,512,16), False, discard_second_output=True, overlap=0.25)[0] # input dimensions [B,C,H,W,Z]
 
     # Postprocess image
     segmented_mask = postprocessing_liver(segmented_mask)[0].numpy() # first channel
@@ -254,7 +256,7 @@ def generate_summary(image):
     image_name = image.name.split('/')[-1].replace(".nrrd","")
 
     if "liver mask" not in mydict[image_name] or "tumor mask" not in mydict[image_name]:
-        return "β You need to generate both liver and tumor masks before we can create a summary report.", "
+        return "β You need to generate both liver and tumor masks before we can create a summary report.", "You need to generate both liver and tumor masks before we can create a summary report."
 
     # extract tumor features from CT scan
     features = generate_features(mydict[image_name]["img"], mydict[image_name]["liver mask"], mydict[image_name]["tumor mask"])
@@ -270,14 +272,12 @@ def generate_summary(image):
 
     # openai.api_key = os.environ["OPENAI"]
     system_msg = """
-    You are a radiologist. You
-    The report should include recommendations for next steps, and a disclaimer that these results should be taken with a grain of salt.
+    You are a radiologist. You need to write a diagnosis summary (1-2 paragraphs) given tumor characteristics observed from CT scans.
+    The report should include your diagnosis, considering the possibility of liver cancer (hepatocellular carcinoma or metastatic liver lesions), recommendations for next steps, and a disclaimer that these results should be taken with a grain of salt.
     """
 
     user_msg = f"""
-    The tumor
-    {str(features)}
-    Please provide your interpretation of the findings and a differential diagnosis, considering the possibility of liver cancer (hepatocellular carcinoma or metastatic liver lesions).
+    The characteristics of this tumor are: {str(features)}. Please provide your diagnosis summary.
     """
     print(user_msg)
 
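The rewritten prompts above feed a chat-completion call a few lines below (outside this hunk). For context, a sketch of a call shape that matches the `response["choices"][0]["message"]["content"]` indexing used later, assuming the legacy `openai<1.0` client and a `gpt-3.5-turbo` model name (both assumptions; the actual model and parameters are not visible in this diff):

    import os
    import openai

    openai.api_key = os.environ["OPENAI"]

    # stand-ins for the real system_msg / user_msg built above
    system_msg = "You are a radiologist. Write a short diagnosis summary from tumor characteristics."
    user_msg = "The characteristics of this tumor are: <output of generate_features>. Please provide your diagnosis summary."

    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",   # assumed; not shown in this diff
        messages=[
            {"role": "system", "content": system_msg},
            {"role": "user", "content": user_msg},
        ],
    )
    report = response["choices"][0]["message"]["content"]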
@@ -294,7 +294,7 @@ def generate_summary(image):
         report = response["choices"][0]["message"]["content"]
         return "π Your AI diagnosis summary report is generated! Please review below. Thank you for trying this tool!", report
     except Exception as e:
-        return "Sorry. There was an error in report generation: " + e, "
+        return "Sorry. There was an error in report generation: " + e, "Sorry. There was an error in report generation: " + e
 
 
 with gr.Blocks() as app:
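One thing this hunk leaves as-is: `"..." + e` concatenates a string with the exception object, which itself raises a `TypeError` inside the handler. A possible follow-up (not part of this commit) would be to format via `str(e)`, roughly:

    def format_report_error(e: Exception) -> str:
        # str(e) avoids the TypeError raised by "str" + Exception
        return "Sorry. There was an error in report generation: " + str(e)

    try:
        raise RuntimeError("model checkpoint not found")   # stand-in failure
    except Exception as e:
        msg = format_report_error(e)
        print(msg)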
@@ -322,7 +322,7 @@ with gr.Blocks() as app:
             btn_upload = gr.Button("Upload")
 
         with gr.Column(scale=2):
-            selected_mask = gr.CheckboxGroup(label='Step 2: Select mask to produce', choices=['liver mask', 'tumor mask'], value = ['liver mask'])
+            selected_mask = gr.CheckboxGroup(label='Step 2: Select mask to produce', choices=['liver mask', 'tumor mask'], value = ['liver mask', 'tumor mask'])
             btn_segment = gr.Button("Generate Segmentation")
 
         with gr.Row():
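The only UI change here is the CheckboxGroup default, which now pre-selects both masks. A self-contained sketch of how that component typically wires into the segmentation button (the handler, layout, and status box are simplified stand-ins; the real click bindings are outside the lines shown in this diff):

    import gradio as gr

    def segment(selected):
        # stand-in for the real segment_liver / segment_tumor calls
        return "would generate: " + ", ".join(selected)

    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column(scale=2):
                selected_mask = gr.CheckboxGroup(
                    label='Step 2: Select mask to produce',
                    choices=['liver mask', 'tumor mask'],
                    value=['liver mask', 'tumor mask'],   # both pre-selected after this change
                )
                btn_segment = gr.Button("Generate Segmentation")
            status = gr.Textbox(label="Status")
        btn_segment.click(segment, inputs=selected_mask, outputs=status)

    if __name__ == "__main__":
        demo.launch()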