# ODD detection Gradio app (Hugging Face Spaces).
# (Removed "Spaces: Sleeping" lines — web-page scrape residue, not code.)
from pathlib import Path

from fastai.vision.all import *
import gradio as gr
import pandas as pd
def get_x(row):
    """Return the image path for a dataframe row.

    All files are assumed to be '.jpg', so the extension is appended directly.

    NOTE(review): `path_image_combined` is not defined anywhere in this file —
    presumably it existed in the training environment. This function likely
    only needs to be importable so the pickled learners can be unpickled;
    confirm it is never called at inference time.
    """
    return path_image_combined / f"{row['file_name']}.jpg"
def get_y(row):
    """Return the target label (the 'Buried ODD' column) for a dataframe row."""
    return row['Buried ODD']
# Load one pre-trained fastai learner per imaging modality.
# NOTE(review): the learners were presumably exported with get_x/get_y in their
# DataBlock, which is why those functions must be defined above before loading.
models = {
    'Ultrasound': load_learner('ODDUltrasound.pkl'),
    'OCT': load_learner('ODDOCT.pkl'),
    'Fundus': load_learner('ODDfundus.pkl'),
    'Fluorescence': load_learner('ODDfluorescence.pkl'),
}
# Fixed modality order — must match the parameter order of classify_images
# and the order of the Gradio input fields.
modality_keys = ['Ultrasound', 'OCT', 'Fundus', 'Fluorescence']
def classify_images(img_ultrasound, img_oct, img_fundus, img_fluorescence):
    """Classify each provided image with its modality-specific model.

    Any of the four inputs may be None (field left empty in the UI); at least
    one image is required. Returns a newline-joined report with one line per
    modality plus a majority-vote final decision, or an error message when no
    image was uploaded.
    """
    imgs = [img_ultrasound, img_oct, img_fundus, img_fluorescence]
    if all(img is None for img in imgs):
        return "Please upload at least one image for prediction."

    predictions = []           # class labels from the modalities that got an image
    detailed_predictions = []  # one human-readable result line per modality

    for img, key in zip(imgs, modality_keys):
        if img is None:
            detailed_predictions.append(f"{key}: No image provided")
            continue
        pil_img = PILImage.create(img)
        pred, _, probs = models[key].predict(pil_img)
        prob_pred = probs.max()  # confidence of the predicted class
        predictions.append(pred)
        detailed_predictions.append(f"{key}: {pred} ({prob_pred.item()*100:.2f}%)")

    # Majority vote over the modalities that produced a prediction.
    # NOTE(review): ties resolve arbitrarily (max over a set) — confirm this
    # is acceptable, or prefer a deterministic tie-break rule.
    if predictions:
        majority_label = max(set(predictions), key=predictions.count)
        final_decision = f"Final Decision: {majority_label} based on {len(predictions)} inputs."
    else:
        final_decision = "No final decision (Insufficient data)"
    detailed_predictions.append(final_decision)
    return "\n".join(detailed_predictions)
# One image input per modality, in the same order as modality_keys
# (and therefore the same order as classify_images' parameters).
inputs = [gr.Image(label=f"{modality} Image") for modality in modality_keys]
output = gr.Text(label="Predictions")

# Example rows for the UI: one image path per input field.
# Adjust the paths to where the example images are stored.
example_images = [
    ['Ultrasound.png', 'OCT.jpg', 'Fundus.jpg', 'FAF.jpg'],
]

intf = gr.Interface(fn=classify_images,
                    inputs=inputs,
                    outputs=output,
                    title="ODD Detection from Multiple Imaging Modalities",
                    description="Upload images for each modality (as available). It's not required to upload an image for every input field. At least one image is required for a prediction.",
                    examples=example_images)
intf.launch(share=True)