import gradio as gr
from huggingface_hub import hf_hub_download
from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot
from mmseg.core.evaluation import get_palette
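
# NOTE: this script targets the mmseg 0.x API (init_segmentor / inference_segmentor)
# and Gradio 3.x (gr.Box, gr.Image.update, enable_queue); later major releases of
# both libraries rename or remove these entry points.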

device = "cpu"

# Download the pretrained UPerNet + UniFormer-S checkpoint from the Hugging Face Hub;
# the matching config ships with this repo.
checkpoint_file = hf_hub_download(repo_id="Andy1621/uniformer", filename="upernet_global_small.pth")
config_file = './exp/upernet_global_small/config.py'

# Build the segmentor once at startup so every request reuses the same model.
model = init_segmentor(config_file, checkpoint_file, device=device)


def set_example_image(example: list) -> dict:
    # Copy the clicked example into the input image component.
    return gr.Image.update(value=example[0])


def inference(img):
    # Run the segmentor and overlay the predicted ADE20K classes on the input image.
    result = inference_segmentor(model, img)
    res_img = show_result_pyplot(model, img, result, get_palette('ade'))
    return res_img


demo = gr.Blocks()

with demo:
    gr.Markdown(
        """
        # UniFormer-S
        Gradio demo for <a href='https://github.com/Sense-X/UniFormer' target='_blank'>UniFormer</a>: to use it, simply upload an image or click one of the examples to load it. Read more at the links below.
        """
    )

    with gr.Box():
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    input_image = gr.Image(label='Input Image', type='numpy')
                with gr.Row():
                    submit_button = gr.Button('Submit')
            with gr.Column():
                res_image = gr.Image(type='numpy', label='Segmentation Results')
        with gr.Row():
            example_images = gr.Dataset(components=[input_image], samples=[['demo1.jpg'], ['demo2.jpg'], ['demo3.jpg']])
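            # The sample images above are loaded from the app's working directory.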

    gr.Markdown(
        """
        <p style='text-align: center'><a href='https://arxiv.org/abs/2201.09450' target='_blank'>UniFormer: Unifying Convolution and Self-attention for Visual Recognition</a> | <a href='https://github.com/Sense-X/UniFormer' target='_blank'>Github Repo</a></p>
        """
    )

    # Wire the button to inference; clicking an example populates the input image.
    submit_button.click(fn=inference, inputs=input_image, outputs=res_image)
    example_images.click(fn=set_example_image, inputs=example_images, outputs=example_images.components)
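
# Queueing keeps long CPU inferences from hitting request timeouts; on later
# Gradio 3.x releases the equivalent is calling demo.queue() before launch().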

demo.launch(enable_queue=True)