"""Gradio demo for MedViT: generates report captions for chest X-rays."""

import gradio as gr
from PIL import Image
import skimage.io as io
import difflib

import clipGPT
import vitGPT
import tester


# Thin wrappers around each captioning model so the UI can dispatch by name.

def generate_caption_clipgpt(image):
    caption = clipGPT.generate_caption_clipgpt(image)
    return caption


def generate_caption_vitgpt(image):
    caption = vitGPT.generate_caption(image)
    return caption


def generate_caption_vitCoAtt(image):
    caption = tester.main(image)
    return caption


with gr.Blocks() as demo:

    gr.HTML("<h1 style='text-align: center;'>MedViT: A Vision Transformer-Driven Method for Generating Medical Reports</h1>")
    gr.HTML("<p style='text-align: center;'>You can generate captions by uploading an X-ray and selecting a model of your choice below.</p>")

    with gr.Row():
        sample_images = [
            "CXR191_IM-0591-1001.png",
            "CXR192_IM-0598-1001.png",
            "CXR193_IM-0601-1001.png",
            "CXR194_IM-0609-1001.png",
            "CXR195_IM-0618-1001.png",
        ]

        image = gr.Image(label="Upload Chest X-ray")
        sample_images_gallery = gr.Gallery(value=sample_images, label="Sample Images")

    with gr.Row():
        model_choice = gr.Radio(["CLIP-GPT2", "ViT-GPT2", "ViT-CoAttention"], label="Select Model")
        generate_button = gr.Button("Generate Caption")

    caption = gr.Textbox(label="Generated Caption")

    def predict(img, model_name):
        # Dispatch to the captioning model selected in the radio group.
        if model_name == "CLIP-GPT2":
            return generate_caption_clipgpt(img)
        elif model_name == "ViT-GPT2":
            return generate_caption_vitgpt(img)
        elif model_name == "ViT-CoAttention":
            return generate_caption_vitCoAtt(img)
        else:
            return "Caption generation for this model is not yet implemented."

    generate_button.click(predict, [image, model_choice], caption)

    # Clicking a sample image also generates a caption for it. Gallery.select
    # with gr.SelectData (Gradio >= 3.44) reports which item was chosen; the
    # file is loaded with skimage so predict() receives an image array, as it
    # does from the gr.Image input.
    def predict_from_gallery(model_name, evt: gr.SelectData):
        img = io.imread(sample_images[evt.index])
        return predict(img, model_name)

    sample_images_gallery.select(predict_from_gallery, [model_choice], caption)


demo.launch()