from fastai.vision.all import *
import numpy
import gradio as gr
import PIL
from PIL import Image, ImageEnhance
import torchvision.transforms as T
import matplotlib.pyplot as plt
import pathlib
import os
import platform

# On Linux, map WindowsPath to PosixPath so learners pickled on Windows can still be loaded.
plat = platform.system()
if plat == 'Linux':
    pathlib.WindowsPath = pathlib.PosixPath

# Helper functions from the training pipeline. load_learner needs them importable here
# to unpickle the learners below; `minutes` and `path` are training-time globals that
# are not defined (or needed) at inference time.
def name_to_hrs(r):
    return float(round(float(os.path.basename(r)[0:-4].split("_")[1][1:]) * (minutes / 60) + 5, 2))

def validation_split(r):
    return os.path.basename(r)[0:-4].split("_")[3] == "R0003" or os.path.basename(r)[0:-4].split("_")[3] == "R0006"

def get_label_filename(name):
    return path/'labels'/f'{name.stem}_annotationLabels.tif'

#zebrafish_age_predictor = vision_learner().load('zebrafish_age_20220726.pkl')
#zebrafish_classifier = unet_learner().load('fish_yolk_segmentation_20220726.pkl')
zebrafish_age_predictor = load_learner('zebrafish_age_20220726.pkl')
zebrafish_classifier = load_learner('fish_yolk_segmentation_20220726.pkl')

title = "Zebrafish segmenter and age predictor"
description = ("A grayscale (RGB-encoded) zebrafish fluorescence image segmenter and age predictor "
               "created with fastai. Created as a demo for Gradio and HuggingFace Spaces. "
               "Gradio does not display .tif files, so those will only appear in the output; "
               "the input preview stays blank unless the file is a .jpg or .png.")
examples = ["early1.png", "early2.png", "early3.png",
            "late1.png", "late2.png", "late3.png",
            "mid1.png", "mid2.png", "mid3.png"]

def process_zebrafish_image(img):
    # Regression learner: the first element of the decoded prediction is the age in hours.
    age, _, _ = zebrafish_age_predictor.predict(img)
    # Segmentation learner: pred is the predicted mask.
    pred, pred_idx, probs = zebrafish_classifier.predict(img)

    # Brighten the dim fluorescence image for display, then wrap it as a fastai PILImage.
    img = img * 5
    img = PILImage.create(img)
    #img = PILImage.create('24hr.tif')

    # Plot the image, the mask, and the mask superimposed on the image.
    _, axs = plt.subplots(1, 3, figsize=(16, 4))
    img.show(ctx=axs[0], title='image')
    pred.show(alpha=1, ctx=axs[1], vmin=0, vmax=3, title='mask')
    img.show(ctx=axs[2], title='superimposed')
    pred.show(ctx=axs[2], vmin=0, vmax=3)

    # Render the matplotlib figure to a PIL image for Gradio, then release the figure.
    fig = plt.gcf()
    fig.canvas.draw()
    image_out = PIL.Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb())
    plt.close(fig)

    text_out = "Age prediction: " + str(round(float(age[0]), 2)) + " hrs post fertilization"
    return (image_out, text_out)

css = ".output-image, .input-image {height: 40rem !important; width: 100% !important;}"

intf = gr.Interface(fn=process_zebrafish_image,
                    inputs=gr.inputs.Image(shape=(512, 512)),  # legacy Gradio input API (pre-4.x)
                    outputs=['image', 'text'],
                    title=title,
                    description=description,
                    examples=examples,
                    css=css).launch(debug=True, share=True)
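
# Optional local smoke test (a sketch, not part of the original app): assumes the bundled
# example images listed above (e.g. "early1.png") sit next to this script. Uncomment and
# run it before the blocking launch() call, or in a separate session, to exercise the
# prediction pipeline without the Gradio UI.
# test_img = numpy.array(Image.open("early1.png").convert("RGB"))
# test_fig, test_text = process_zebrafish_image(test_img)
# print(test_text)
# test_fig.save("smoke_test_output.png")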