Spaces:
Sleeping
Sleeping
File size: 3,157 Bytes
c2b5c4f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 |
import gradio as gr
import spaces
import torch
from transformers import AutoModel
def crop_mammo(img, model, device):
    """Crop a mammogram to the region found by a cropping model.

    Args:
        img: 2-D grayscale image as a numpy array (H, W); a trailing channel
            dim is ignored by ``img.shape[:2]``.
        model: cropping model exposing ``preprocess(img) -> np.ndarray`` and
            ``model(x, img_shape) -> coords`` where coords[0] is (x, y, w, h).
        device: torch device string/object the model lives on.

    Returns:
        The ``img[y:y+h, x:x+w]`` sub-array selected by the model.
    """
    # Original (pre-resize) spatial size, batched as shape (1, 2).
    orig_size = torch.tensor([img.shape[:2]]).to(device)
    # Model-specific preprocessing, then add batch and channel dims: (1, 1, H, W).
    inp = model.preprocess(img)
    inp = torch.from_numpy(inp).expand(1, 1, -1, -1).float().to(device)
    with torch.inference_mode():
        box = model(inp, orig_size)
    # First (only) batch element; assumed integer-valued so slicing works
    # — TODO confirm against the crop model's output contract.
    x0, y0, box_w, box_h = box[0].cpu().numpy()
    return img[y0 : y0 + box_h, x0 : x0 + box_w]
@spaces.GPU
def predict(cc, mlo):
    """Run the screening model on the provided view(s).

    Args:
        cc: CC-view image array from Gradio, or None if not supplied.
        mlo: MLO-view image array from Gradio, or None if not supplied.

    Returns:
        A pair of dicts for the two ``gr.Label`` outputs:
        cancer score keyed "Cancer Score", and per-class density scores
        keyed "A".."D".

    NOTE(review): relies on module globals ``crop_model``, ``model`` and
    ``device`` being initialized before this is called.
    """
    # Crop each view that was actually provided; the model accepts a
    # dict with either or both of the "cc"/"mlo" keys.
    views = {}
    for key, image in (("cc", cc), ("mlo", mlo)):
        if image is not None:
            views[key] = crop_mammo(image, crop_model, device)
    with torch.inference_mode():
        preds = model(views, device=device)
    cancer_scores = {"Cancer Score": preds["cancer"][0].item()}
    density_scores = dict(zip(["A", "B", "C", "D"], preds["density"][0].cpu().numpy()))
    return cancer_scores, density_scores
# Grayscale ("L" mode) inputs for the two standard screening views.
cc_view = gr.Image(label="CC View", image_mode="L")
mlo_view = gr.Image(label="MLO View", image_mode="L")
# Label outputs render the score dicts returned by `predict`.
cancer_label = gr.Label(label="Cancer", show_label=True, show_heading=False)
density_label = gr.Label(label="Density", show_label=True, show_heading=True)
# Page layout: a markdown description followed by the inference interface.
with gr.Blocks() as demo:
    gr.Markdown(
        """
    # Deep Learning Model for Screening Mammography
    This model predicts the likelihood of breast cancer from a standard two-view 2D screening mammography study, as well as breast density.
    Read more about the model here:
    <https://huggingface.co/ianpan/mammoscreen>
    This model was trained on pathology results (cancer versus no cancer) and does not produce a BI-RADS score. Supplying both CC and MLO
    views will result in the best prediction. However, the model will still work if only 1 view is provided.
    The example mammogram is taken from:
    Mohammad Niknejad, <a href="https://radiopaedia.org/?lang=us">Radiopaedia.org</a>, from the case <a href="https://radiopaedia.org/cases/147729?lang=us">rID: 147729</a>.
    This model is for demonstration purposes only and has NOT been approved by any regulatory agency for clinical use. The user assumes
    any and all responsibility regarding their own use of this model and its outputs. Do NOT upload any images containing protected
    health information, as this demonstration is not compliant with patient privacy laws.
    Created by: Ian Pan, <https://ianpan.me>
    Last updated: January 20, 2025
    """
    )
    # Wire the pre-built components to `predict`; cache_examples=True
    # precomputes outputs for the bundled example study.
    gr.Interface(
        fn=predict,
        inputs=[cc_view, mlo_view],
        outputs=[cancer_label, density_label],
        examples=[["examples/cc.jpg", "examples/mlo.jpg"]],
        cache_examples=True,
    )
if __name__ == "__main__":
    # NOTE(review): `predict` reads these globals; if this module is imported
    # rather than run as a script, they are never defined — verify the
    # deployment always executes this file directly.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"Using device `{device}` ...")
    # Load both remote models, switch to eval mode, and move them onto `device`.
    crop_model = AutoModel.from_pretrained("ianpan/mammo-crop", trust_remote_code=True).eval().to(device)
    model = AutoModel.from_pretrained("ianpan/mammoscreen", trust_remote_code=True).eval().to(device)
    demo.launch(share=True)
|