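# Gradio demo: predict in-hospital mortality within 24 hours of ICU admission
# with a pre-trained VariationalGNN (Zhu et al., 2021) on MIMIC-III features.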
from pathlib import Path
from glob import glob
from functools import partial
import numpy as np
import torch
import gradio as gr
import pandas as pd
import re
from model import VariationalGNN
examples_path = "examples"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
correct_preds, wrong_preds = {}, {}
condition_lst = pd.read_csv("data/feature.csv", header="infer", sep=",", encoding="utf-8", dtype=str)
D_LABITEMS = pd.read_csv("data/D_LABITEMS.csv", header="infer", sep=",", encoding="utf-8", dtype=str)
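
# Load the trained VariationalGNN from a checkpoint that stores the constructor
# kwargs together with the state_dict.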
def load_model():
    path = r"models/final_model.pt"
    kwargs, state = torch.load(path, weights_only=False, map_location=device)
    model = VariationalGNN(**kwargs).to(device)
    model.load_state_dict(state)
    return model


model = load_model()
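
# Validate an uploaded patient CSV: expected columns, expected condition list,
# and binary (0/1) values only.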
def _check_patient_csv_format(df: pd.DataFrame):
    if not (list(df.columns)[0:2] == ["condition", "value"]):
        raise gr.Error(f"Column set [{list(df.columns)}]: not expected.", duration=None)
    if condition_lst["condition"].to_list() != df["condition"].to_list():
        raise gr.Error("Condition set: not expected.", duration=None)
    vals = np.sort(df["value"].unique())
    if not (vals.ndim == 1 and len(vals) == 2 and all(vals == np.array([0.0, 1.0]))):
        raise gr.Error("Column 'value': contains invalid values.", duration=None)
def _extract_patient_data_from_name(csv_file_name: str):
    patient_file_pat = r"^Patient_(\d+)_\(Label-(alive|dead)\)_\(Predicted-(dead|alive)\)\.csv$"
    csv_name = Path(csv_file_name).name
    matches = re.search(patient_file_pat, csv_name)
    if matches is None:
        return None
    else:
        return (matches.group(1), matches.group(2), matches.group(3))
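
# Scan the examples folder and bucket the patients into correctly and wrongly
# predicted cases, keyed by patient id.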
def _find_example_csv_files() -> None:
    all_csv_files = glob(f'{examples_path}/*.csv', recursive=True)
    if len(all_csv_files) == 0:
        print("*** No csv files found.")
    else:
        for one_csv_file in all_csv_files:
            matches = _extract_patient_data_from_name(one_csv_file)
            if matches:
                pat_id, pat_label, pat_predicted = matches
                if pat_id in correct_preds or pat_id in wrong_preds:
                    print(f"*** File [{one_csv_file}]: already processed! How come?")
                else:
                    if pat_label == pat_predicted:
                        correct_preds[pat_id] = {"label": pat_label,
                                                 "predicted": pat_predicted,
                                                 "file_name": one_csv_file}
                    else:
                        wrong_preds[pat_id] = {"label": pat_label,
                                               "predicted": pat_predicted,
                                               "file_name": one_csv_file}
            else:
                print(f"*** File [{one_csv_file}]: wrong name.")


_find_example_csv_files()
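
# Read one patient CSV, validate it, and return the model's predicted
# probability of death.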
def _predict(file_path: str):
    df = pd.read_csv(file_path,
                     header="infer",
                     sep=",",
                     encoding="utf-8",
                     dtype={'condition': 'str', 'value': 'float32'},
                     keep_default_na=False)
    _check_patient_csv_format(df)
    patient_data = torch.from_numpy(df["value"].to_numpy()).unsqueeze(dim=0).to(device)
    model.eval()
    with torch.inference_mode():
        probability, _ = model(patient_data)
        probability = torch.sigmoid(probability.detach().cpu()[0]).item()
    return probability
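
# Click handler for the example buttons: look the patient up by id and predict.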
def example_csv_click(patient_id: int):
    print(f"*** Predict patient {patient_id} (Example CSV)")
    # The hidden gr.Number may deliver the id as a float, while the example
    # dictionaries are keyed by the string ids parsed from the file names.
    patient_id = str(int(patient_id))
    patient = correct_preds[patient_id] if patient_id in correct_preds else wrong_preds[patient_id]
    probability = _predict(patient['file_name'])
    return [{"dead": probability, "alive": 1 - probability},
            patient['label']]
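
# Upload handler for user-provided CSVs: predict, and report the true label
# only if it can be parsed from the file name.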
def user_csv_upload(temp_csv_file_path):
    print("*** Predict patient (User CSV Upload)")
    matches = _extract_patient_data_from_name(temp_csv_file_path)
    probability = _predict(temp_csv_file_path)
    return [{"dead": probability, "alive": 1 - probability},
            "(Not Available)" if matches is None else matches[1]]
def do_query(query_str, query_type):
    if query_type in ["Diagnosis", "Procedure"]:
        str_to_search = f"ICD-9 {query_type} Code " + query_str
        return gr.HTML(value=f'<a href="https://www.google.com/search?q={str_to_search}" target="_blank">Google</a>',
                       visible=True)
    else:  # Lab Value
        query_str = query_str.strip()
        if (index := query_str.rfind("_")) >= 0:
            query_str = query_str[0:index]
        res = D_LABITEMS[D_LABITEMS["ITEMID"] == query_str]
        if res.shape[0] == 0:
            answer = "(Something wrong. No definition found.)"
        elif res.shape[0] == 1:
            answer = f"{res['LABEL'].values[0]}-{res['FLUID'].values[0]}-{res['CATEGORY'].values[0]}"
        else:
            answer = f"(Something wrong. Too many definitions, given code [{query_str}].)"
        return gr.HTML(value=answer,
                       visible=True)
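
# Enable the Query button only when both a query string and a query type are set.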
def query_input_change_event(query_str, query_type):
    if (query_str is not None and len(query_str.strip()) > 0 and
            query_type is not None):
        return [gr.Button(interactive=True), gr.HTML(visible=False)]
    else:
        return [gr.Button(interactive=False), gr.HTML(visible=False)]
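
# Event wiring for the example buttons is deferred: the buttons are created in
# the left column before the result components exist, so partials are collected
# here and called with the outputs at the end of the Blocks definition.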
resDispPartFuncs = []

css = """
#selectFileToUpload {max-height: 180px}
.gradio-container {
    background: url(https://www.kindpng.com/picc/m/207-2075829_transparent-healthcare-clipart-medical-report-icon-hd-png.png);
    background-position: 80% 85%;
    background-repeat: no-repeat;
    background-size: 200px;
}
#label-label {
    height: 50px !important;
}
#label-label > .container {
    height: 50px !important;
}
#label-label > .container > h2 {
    /* height: 50px !important; */
    padding: 0 !important;
}
"""
with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
    with gr.Row():
        with gr.Column():
            gr.Markdown(
                """
                ## Input:
                (See examples for file structure)
                """
            )
            patient_upload_file = gr.File(label="Upload A Patient",
                                          file_types=['.csv'],
                                          file_count="single",
                                          elem_id="selectFileToUpload")
            # Defer wiring the upload event, like the example buttons, so it can
            # target the result components that are created further below.
            resDispPartFuncs.append(partial(patient_upload_file.upload,
                                            fn=user_csv_upload,
                                            inputs=patient_upload_file))
            gr.Markdown(
                """
                ## Examples - Correct Prediction:
                """
            )
            with gr.Row():
                for patient_id in correct_preds.keys():
                    with gr.Column(variant='panel',
                                   min_width=100):
                        patient_input_btn = gr.Button(f"Patient {patient_id}",
                                                      size="sm")
                        patient_download_btn = gr.DownloadButton(label="Download",
                                                                 value=f"{correct_preds[patient_id]['file_name']}",
                                                                 size="sm")
                        patient_id_num = gr.Number(value=patient_id,
                                                   visible=False)
                        partFunc = partial(patient_input_btn.click,
                                           fn=example_csv_click,
                                           inputs=patient_id_num,
                                           api_name="predict")
                        resDispPartFuncs.append(partFunc)
            gr.Markdown(
                """
                ## Examples - Wrong Prediction:
                """
            )
            with gr.Row():
                for patient_id in wrong_preds.keys():
                    with gr.Column(variant='panel',
                                   min_width=100):
                        patient_input_btn = gr.Button(f"Patient {patient_id}",
                                                      size="sm")
                        patient_download_btn = gr.DownloadButton(label="Download",
                                                                 value=f"{wrong_preds[patient_id]['file_name']}",
                                                                 size="sm")
                        patient_id_num = gr.Number(value=patient_id,
                                                   visible=False)
                        partFunc = partial(patient_input_btn.click,
                                           fn=example_csv_click,
                                           inputs=patient_id_num,
                                           api_name="predict")
                        resDispPartFuncs.append(partFunc)
        with gr.Column():
            gr.Markdown(
                """
                ## Mortality Prediction:
                Within 24 hours after ICU admission.
                """
            )
            result_pred = gr.Label(num_top_classes=2, label="Predicted")
            result_label = gr.Label(label="Label", elem_id="label-label")
            with gr.Accordion("More on Patient Conditions...", open=False):
                query_tbx = gr.Textbox(label="Enter one ICD-9 Diagnosis/Procedure Code or Lab Value:",
                                       lines=1,
                                       max_lines=1,
                                       placeholder="00869 for 'Other viral intes infec' (Diagnosis)")
                query_type = gr.Radio(["Diagnosis", "Procedure", "Lab Value"], show_label=False)
                query_btn = gr.Button(value="Query", size="sm", interactive=False)
                html = gr.HTML("", visible=False)
                query_tbx.change(fn=query_input_change_event, inputs=[query_tbx, query_type], outputs=[query_btn, html])
                query_type.change(fn=query_input_change_event, inputs=[query_tbx, query_type], outputs=[query_btn, html])
                query_btn.click(fn=do_query, inputs=[query_tbx, query_type], outputs=html)
with gr.Accordion("More on Technical Details...", open=False):
gr.Markdown(
"""
- Paper: [Variationally Regularized Graph-based Representation Learning for Electronic Health Records (Zhu et al, 2021)](https://arxiv.org/abs/1912.03761)
- Dataset: [MIMIC-III](https://physionet.org/content/mimiciii/1.4/)
- 50,314 records, 10,591 features
- 5,315 positive, 44,999 negative (11.8%)
- Split: 80% training, 10% validation, 10% testing
- Notable points:
- Result: AUPRC 0.7027 (Baseline: 0.118) on Val split
- Variational Regularization, inspired by [Kipf et al., 2016](https://arxiv.org/abs/1611.07308)
- Trained on NVIDIA A100 with PyTorch 2.4.0
- Code on GitHub: [pytorch-variational-gcn-ehr-public](https://github.com/ThachNgocTran/pytorch-variational-gcn-ehr-public)
"""
)
with gr.Accordion("More on Training...", open=False):
gr.HTML("""
<img src="/file=images/AUPRC_Training_Graph.png" alt="">
""")
    # Now that the result components exist, wire every deferred event to them
    # (event listeners must be registered inside the Blocks context).
    for partialFunc in resDispPartFuncs:
        partialFunc(outputs=[result_pred, result_label])

demo.launch(debug=True, allowed_paths=["images/."])