import numpy as np
import gradio as gr
import torch
from huggingface_hub import hf_hub_download
def inference(repo_id, model_name, img):
    # Download the TorchScript model from the Hugging Face Hub and load it on CPU
    model_path = hf_hub_download(repo_id=repo_id, filename=model_name)
    model = torch.jit.load(model_path, map_location='cpu')
    # Remove redundant channels: keep only as many as the model's normalization stats expect
    n_channels = len(model.norm.mean)
    img = img[..., :n_channels]
    inp = torch.from_numpy(img).float()
    with torch.inference_mode():
        argmax, softmax, stdeviation = model(inp)
    # Per-pixel class prediction and uncertainty (standard deviation) map
    pred = argmax.cpu().numpy()
    std = stdeviation.cpu().numpy()
    # Adjust mask for binary cases so it renders as a visible 0/255 image
    if model.num_classes == 2:
        pred *= 255
    return pred, std
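# A minimal usage sketch (assumed, not part of the original Space): calling `inference`
# directly with one of the demo checkpoints listed in `examples` below. Reading the
# example image with imageio is an assumption for illustration only.
# from imageio import imread
# pred, std = inference('matjesg/deepflash2_demo', 'cFOS_in_HC_ensemble_small.pt',
#                       imread('cFOS_example.png'))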
title="deepflash2"
description='deepflash2 is a deep-learning pipeline for the segmentation of ambiguous microscopic images.\ndeepflash2 uses deep model ensembles to achieve more accurate and reliable results, so inference in this Space can take more than a minute.'
examples = [['matjesg/deepflash2_demo', 'cFOS_in_HC_ensemble_small.pt', 'cFOS_example.png'],
            ['matjesg/deepflash2_demo', 'YFP_in_CTX_ensemble_small.pt', 'YFP_example.png']]
gr.Interface(inference,
             [gr.inputs.Textbox(placeholder='e.g., matjesg/cFOS_in_HC', label='repo_id'),
              gr.inputs.Textbox(placeholder='e.g., cFOS_in_HC_ensemble.pt', label='model_name'),
              gr.inputs.Image(type='numpy', label='Input image')],
             [gr.outputs.Image(type='numpy', label='Segmentation Mask'),
              gr.outputs.Image(type='numpy', label='Uncertainty Map')],
             title=title,
             description=description,
             examples=examples,
             ).launch()