"""Gradio demo: classify MedNIST images with a MONAI DenseNet121.

Loads a pre-trained 6-class DenseNet121 (single-channel 2D input) from
``weights/mednist_model.pth`` and serves a Gradio Blocks UI that reports
class probabilities for an uploaded grayscale image.
"""

import torch
import gradio as gr
from monai.networks.nets import DenseNet121
from monai.transforms import (
    Compose,
    EnsureChannelFirst,
    LoadImage,
    ScaleIntensity,
)

# Class order must match the order used when the model was trained.
class_names = [
    'AbdomenCT', 'BreastMRI', 'CXR', 'ChestCT', 'Hand', 'HeadCT'
]

model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=len(class_names))
# NOTE(review): torch.load on a pickle file executes arbitrary code if the
# file is untrusted; pass weights_only=True once the minimum torch version
# supports it.
model.load_state_dict(
    torch.load('weights/mednist_model.pth', map_location=torch.device('cpu'))
)
# Switch to inference mode once at startup instead of on every request.
model.eval()

# Load from file path -> ensure CHW layout -> scale intensities to [0, 1].
test_transforms = Compose(
    [LoadImage(image_only=True), EnsureChannelFirst(), ScaleIntensity()]
)


def classify_image(image_filepath):
    """Return per-class probabilities for the image at *image_filepath*.

    Args:
        image_filepath: Path to a grayscale image file readable by
            MONAI's ``LoadImage``.

    Returns:
        dict mapping each class name to its softmax probability (float),
        in the format Gradio's ``Label`` component expects.
    """
    image = test_transforms(image_filepath)  # avoid shadowing builtin `input`
    with torch.no_grad():
        pred = model(image.unsqueeze(dim=0))
    prob = torch.nn.functional.softmax(pred[0], dim=0)
    return {name: float(prob[i]) for i, name in enumerate(class_names)}


with gr.Blocks(title="Medical Image Classification- ClassCat",
               css=".gradio-container {background:mintcream;}") as demo:
    gr.HTML("""
Medical Image Classification with MONAI
""")
    with gr.Row():
        input_image = gr.Image(type="filepath", image_mode="L", shape=(64, 64))
        output_label = gr.Label(label="Probabilities", num_top_classes=3)
    send_btn = gr.Button("Infer")
    send_btn.click(fn=classify_image, inputs=input_image, outputs=output_label)
    with gr.Row():
        gr.Examples(['./samples/mednist_AbdomenCT00.png'],
                    label='Sample images : AbdomenCT', inputs=input_image)
        gr.Examples(['./samples/mednist_CXR02.png'],
                    label='CXR', inputs=input_image)
        gr.Examples(['./samples/mednist_ChestCT08.png'],
                    label='ChestCT', inputs=input_image)
        gr.Examples(['./samples/mednist_Hand01.png'],
                    label='Hand', inputs=input_image)
        gr.Examples(['./samples/mednist_HeadCT07.png'],
                    label='HeadCT', inputs=input_image)

if __name__ == "__main__":
    demo.launch(debug=True)