|
import gradio as gr |
|
from PIL import Image |
|
import clipGPT |
|
|
|
|
|
def load_model_1():
    """Placeholder model loader.

    Returns:
        None — no model is loaded yet; kept as a stub so the app's
        loader interface is in place for future model wiring.
    """
    # Intentionally a no-op until a real loader is implemented.
    return None
|
|
|
|
|
|
|
|
|
def generate_caption_clipgpt(image):
    """Generate a caption for *image* using the CLIP-GPT2 pipeline.

    Thin pass-through: all model work is delegated to the
    ``clipGPT`` module's function of the same name.
    """
    return clipGPT.generate_caption_clipgpt(image)
|
|
|
|
|
|
|
|
|
# Filenames of bundled sample chest X-ray images shown in the gallery.
# NOTE(review): these are bare filenames — presumably resolved relative to
# the working directory; verify the files ship alongside the app.
sample_images = [
    "CXR191_IM-0591-1001.jpg",
    "CXR191_IM-0598-1001.jpg",
    "CXR191_IM-0601-1001.jpg",
    "CXR191_IM-0609-1001.jpg",
    "CXR191_IM-0618-1001.jpg",
]
|
|
|
|
|
# UI layout and event wiring for the captioning demo.
with gr.Blocks() as demo:

    with gr.Row():

        # Upload widget for the user's own chest X-ray.
        image = gr.Image(label="Upload Chest X-ray")

        # Clickable gallery of bundled example images.
        sample_images_gallery = gr.Gallery(sample_images, label="Sample Images")

    with gr.Row():

        # Model selector; only CLIP-GPT2 is currently implemented (see predict).
        model_choice = gr.Radio(["CLIP-GPT2", "ViT-GPT2", "ViT-CoAttention"], label="Select Model")

    with gr.Row():

        # Output textbox populated by the predict callback.
        caption = gr.Textbox(label="Generated Caption")



    def predict(img, model_name):
        """Route the input image to the captioner selected by *model_name*.

        Returns the generated caption string, or a fixed placeholder
        message for models that are not yet implemented.
        """
        if model_name == "CLIP-GPT2":

            return generate_caption_clipgpt(img)



        else:

            return "Caption generation for this model is not yet implemented."




    # Re-run prediction whenever the uploaded image changes.
    # NOTE(review): .change also fires when the image is cleared (img=None);
    # confirm predict / the model handles a None image, or use .upload.
    image.change(predict, [image, model_choice], caption)

    # NOTE(review): Gallery.change passes the gallery's full value (a list),
    # not the single clicked image, as `img` — this likely breaks captioning
    # for sample images. The .select event with gr.SelectData is the usual
    # fix; confirm against the installed Gradio version.
    sample_images_gallery.change(predict, [sample_images_gallery, model_choice], caption)

# Launch the Gradio server (blocking call) when the script is run.
demo.launch()
|
|