# deepflash2 / app.py
# Hugging Face Space by matjesg — "Update app.py", commit 3d1ecfb (1.71 kB)
import numpy as np
import gradio as gr
import torch
from huggingface_hub import hf_hub_download
def inference(repo_id, model_name, img):
    """Run deepflash2 ensemble segmentation on a single image.

    Parameters
    ----------
    repo_id : str
        Hugging Face Hub repository id (e.g. 'matjesg/deepflash2_demo').
    model_name : str
        Filename of the TorchScript ensemble inside the repo.
    img : numpy.ndarray
        Input image, (H, W) grayscale or (H, W, C) multi-channel.

    Returns
    -------
    tuple of numpy.ndarray
        (segmentation mask, pixel-wise uncertainty map).
    """
    # hf_hub_download caches locally, so repeat calls do not re-download.
    model_path = hf_hub_download(repo_id=repo_id, filename=model_name)
    model = torch.jit.load(model_path, map_location='cpu')
    # Channel count the model was trained with, read from its norm stats.
    n_channels = len(model.norm.mean)
    # Grayscale images arrive as (H, W); add a channel axis so the slice
    # below drops channels, not image columns.
    if img.ndim == 2:
        img = img[..., None]
    # Remove redundant channels (e.g. alpha) beyond what the model expects.
    img = img[..., :n_channels]
    inp = torch.from_numpy(img).float()
    with torch.inference_mode():
        # TorchScript model returns (argmax, softmax, stdeviation) tensors.
        argmax, softmax, stdeviation = model(inp)
    pred = argmax.cpu().numpy()
    std = stdeviation.cpu().numpy()
    # Binary masks are {0, 1}; scale to {0, 255} so they render visibly.
    if model.num_classes == 2:
        pred *= 255
    return pred, std
# Copy shown on the Gradio page.
title = "deepflash2"
description = (
    'deepflash2 is a deep-learning pipeline for the segmentation of ambiguous microscopic images.'
    '\n deepflash2 uses deep model ensembles to achieve more accurate and reliable results.'
    ' Thus, inference time will be more than a minute in this space.'
)

# Clickable example rows: [repo_id, model_name, image filename].
_demo_repo = 'matjesg/deepflash2_demo'
examples = [
    [_demo_repo, 'cFOS_in_HC_ensemble_small.pt', 'cFOS_example.png'],
    [_demo_repo, 'YFP_in_CTX_ensemble_small.pt', 'YFP_example.png'],
]
# Build and launch the demo UI. Uses the legacy (pre-3.x) gr.inputs /
# gr.outputs component namespaces, matching the rest of this app.
demo_inputs = [
    gr.inputs.Textbox(placeholder='e.g., matjesg/cFOS_in_HC', label='repo_id'),
    gr.inputs.Textbox(placeholder='e.g., cFOS_in_HC_ensemble.pt', label='model_name'),
    gr.inputs.Image(type='numpy', label='Input image'),
]
demo_outputs = [
    gr.outputs.Image(type='numpy', label='Segmentation Mask'),
    gr.outputs.Image(type='numpy', label='Uncertainty Map'),
]
gr.Interface(
    inference,
    demo_inputs,
    demo_outputs,
    title=title,
    description=description,
    examples=examples,
).launch()