|
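"""Gradio demo: semantic segmentation with MobileViT + DeepLabV3.

Runs apple/deeplabv3-mobilevit-small on an uploaded image and shows the
predicted Pascal VOC class map plus a foreground overlay.
"""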
import numpy as np
import gradio as gr
from PIL import Image

import torch
from transformers import MobileViTFeatureExtractor, MobileViTForSemanticSegmentation

model_checkpoint = "apple/deeplabv3-mobilevit-small"

# MobileViTFeatureExtractor is deprecated in recent transformers releases;
# MobileViTImageProcessor is the drop-in replacement.
feature_extractor = MobileViTFeatureExtractor.from_pretrained(model_checkpoint)
model = MobileViTForSemanticSegmentation.from_pretrained(model_checkpoint).eval()
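
# For a 512x512 input the model predicts 21 Pascal VOC classes on a
# 32x32 grid, i.e. logits of shape (1, 21, 32, 32).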

# Color palette: one RGB triple per Pascal VOC class, indexed by class id.
palette = np.array(
    [
        [  0,   0,   0], [192,   0,   0], [  0, 192,   0], [192, 192,   0],
        [  0,   0, 192], [192,   0, 192], [  0, 192, 192], [192, 192, 192],
        [128,   0,   0], [255,   0,   0], [128, 192,   0], [255, 192,   0],
        [128,   0, 192], [255,   0, 192], [128, 192, 192], [255, 192, 192],
        [  0, 128,   0], [192, 128,   0], [  0, 255,   0], [192, 255,   0],
        [  0, 128, 192],
    ],
    dtype=np.uint8,
)

# The 21 Pascal VOC classes, in the order the model predicts them.
labels = [
    "background",
    "aeroplane",
    "bicycle",
    "bird",
    "boat",
    "bottle",
    "bus",
    "car",
    "cat",
    "chair",
    "cow",
    "diningtable",
    "dog",
    "horse",
    "motorbike",
    "person",
    "pottedplant",
    "sheep",
    "sofa",
    "train",
    "tvmonitor",
]

# Build an HTML legend: each label rendered on its palette color, using
# white text on the darker backgrounds for readability.
inverted = [0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20]
labels_colored = []
for i in range(len(labels)):
    r, g, b = palette[i]
    label = labels[i]
    color = "white" if i in inverted else "black"
    text = "<span style='background-color: rgb(%d, %d, %d); color: %s; padding: 2px 4px;'>%s</span>" % (r, g, b, color, label)
    labels_colored.append(text)
labels_text = " ".join(labels_colored)
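
# Each entry renders its label on the class color, e.g. for class 1:
#   <span style='background-color: rgb(192, 0, 0); color: white;
#   padding: 2px 4px;'>aeroplane</span>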

title = "Semantic Segmentation with MobileViT and DeepLabV3"

description = """
The input image is resized and center-cropped to 512×512 pixels. The segmentation output is 32×32 pixels.<br>
This model was trained on <a href="http://host.robots.ox.ac.uk/pascal/VOC/">Pascal VOC</a>.
The classes are:
""" + labels_text

article = """
<div style='margin: 20px auto;'>

<p>Sources:</p>

<p><a href="https://arxiv.org/abs/2110.02178">MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer</a></p>

<p>Original pretrained weights from <a href="https://github.com/apple/ml-cvnets">this GitHub repo</a></p>

<p>Example images from <a href="https://huggingface.co/datasets/mishig/sample_images">this dataset</a></p>

</div>
"""

examples = [
    ["cat-3.jpg"],
    ["construction-site.jpg"],
    ["dog-cat.jpg"],
    ["football-match.jpg"],
]


def predict(image):
    # Preprocess the image and run the model; no gradients needed at inference.
    with torch.no_grad():
        inputs = feature_extractor(images=image, return_tensors="pt")
        outputs = model(**inputs)

    # Recover a displayable version of the preprocessed input: the feature
    # extractor flips the channel order (RGB -> BGR), so flip it back and
    # rescale from [0, 1] to [0, 255].
    resized = (inputs["pixel_values"].numpy().squeeze().transpose(1, 2, 0)[..., ::-1] * 255).astype(np.uint8)

    # Per-pixel class ids: argmax over the class dimension of the logits.
    classes = outputs.logits.argmax(1).squeeze().numpy().astype(np.uint8)

    # Map each class id to its palette color with a vectorized lookup
    # (equivalent to a per-pixel loop, but much faster).
    colored = palette[classes]

    # Upscale the low-resolution class map to the display size.
    colored = Image.fromarray(colored)
    colored = colored.resize((resized.shape[1], resized.shape[0]), resample=Image.Resampling.NEAREST)

    # Binary mask of everything that is not the background class.
    mask = (classes != 0) * 255
    mask = Image.fromarray(mask.astype(np.uint8)).convert("RGB")
    mask = mask.resize((resized.shape[1], resized.shape[0]), resample=Image.Resampling.NEAREST)

    # Blend the mask over the input image to highlight the detected objects.
    resized = Image.fromarray(resized)
    highlighted = Image.blend(resized, mask, 0.4)

    return colored, highlighted
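
# Hypothetical local sanity check, assuming one of the example images
# is present next to this script:
#   classes_img, overlay_img = predict(Image.open("cat-3.jpg"))
#   classes_img.save("classes.png"); overlay_img.save("overlay.png")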

# The old gr.inputs / gr.outputs namespaces are deprecated in modern Gradio;
# use the top-level components instead. type="pil" hands predict a PIL image.
gr.Interface(
    fn=predict,
    inputs=gr.Image(label="Upload image", type="pil"),
    outputs=[gr.Image(label="Classes"), gr.Image(label="Overlay")],
    title=title,
    description=description,
    article=article,
    examples=examples,
).launch()