import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from matplotlib import gridspec
from PIL import Image
from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation

# Load the SegFormer-B5 checkpoint fine-tuned on Cityscapes at 1024x1024.
feature_extractor = SegformerFeatureExtractor.from_pretrained(
    "nvidia/segformer-b5-finetuned-cityscapes-1024-1024"
)
model = TFSegformerForSemanticSegmentation.from_pretrained(
    "nvidia/segformer-b5-finetuned-cityscapes-1024-1024"
)


def palette():
    """RGB colors for the 19 Cityscapes classes, indexed by label id."""
    return [
        [204, 87, 92],    # road (reddish)
        [112, 185, 212],  # sidewalk (blue)
        [196, 160, 122],  # building (brown)
        [106, 135, 242],  # wall (light blue)
        [91, 192, 222],   # fence (turquoise)
        [255, 192, 203],  # pole (pink)
        [176, 224, 230],  # traffic light (light blue)
        [222, 49, 99],    # traffic sign (red)
        [139, 69, 19],    # vegetation (brown)
        [255, 0, 0],      # terrain (red)
        [0, 0, 255],      # sky (blue)
        [255, 228, 181],  # person (peach)
        [128, 0, 0],      # rider (maroon)
        [0, 128, 0],      # car (green)
        [255, 99, 71],    # truck (tomato)
        [0, 255, 0],      # bus (lime)
        [128, 0, 128],    # train (purple)
        [255, 255, 0],    # motorcycle (yellow)
        [128, 0, 128],    # bicycle (purple; note: same color as train, so the
                          # two classes are indistinguishable in the overlay)
    ]


labels_list = []
with open("labels.txt", "r") as fp:
    for line in fp:
        # rstrip("\n") is safer than line[:-1], which would clip the last
        # character if the file lacks a trailing newline.
        labels_list.append(line.rstrip("\n"))
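# labels.txt is expected to hold the 19 Cityscapes class names, one per line,
# in the same order as palette() above (road, sidewalk, ..., bicycle), since
# both are indexed by the same label ids.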

colormap = np.asarray(palette())


def label_to_color_image(label):
    """Map a 2-D array of class ids to an HxWx3 RGB image via the palette."""
    if label.ndim != 2:
        raise ValueError("Expect 2-D input label")
    if np.max(label) >= len(colormap):
        raise ValueError("label value too large.")
    return colormap[label]
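# Example: NumPy fancy indexing expands each id to its RGB triple, so a 2x2
# label map such as np.array([[0, 10], [13, 1]]) yields a (2, 2, 3) array
# colored as road, sky, car, and sidewalk.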


def draw_plot(pred_img, seg):
    """Show the blended prediction next to a legend of the classes present."""
    fig = plt.figure(figsize=(20, 15))
    grid_spec = gridspec.GridSpec(1, 2, width_ratios=[6, 1])

    # Left panel: the image blended with its segmentation overlay.
    plt.subplot(grid_spec[0])
    plt.imshow(pred_img)
    plt.axis("off")

    # Right panel: a color legend restricted to the labels in this image.
    LABEL_NAMES = np.asarray(labels_list)
    FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
    FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)

    unique_labels = np.unique(seg.numpy().astype("uint8"))
    ax = plt.subplot(grid_spec[1])
    plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation="nearest")
    ax.yaxis.tick_left()
    plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
    plt.xticks([], [])
    ax.tick_params(width=0.0, labelsize=27)
    return fig


def segment(input_img):
    """Run semantic segmentation and return a matplotlib figure of the result.

    Renamed from `sepia`, a leftover of the Gradio filter template, to reflect
    what the function actually does.
    """
    input_img = Image.fromarray(input_img)
    inputs = feature_extractor(images=input_img, return_tensors="tf")
    outputs = model(**inputs)
    logits = outputs.logits  # (batch, num_classes, h, w), at reduced resolution

    # Move channels last and upsample the logits back to the input resolution.
    logits = tf.transpose(logits, [0, 2, 3, 1])
    logits = tf.image.resize(
        logits, input_img.size[::-1]  # PIL's .size is (width, height), so reverse it.
    )
    seg = tf.math.argmax(logits, axis=-1)[0]

    # Paint each predicted class with its palette color.
    color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
    for label, color in enumerate(colormap):
        color_seg[seg.numpy() == label, :] = color

    # Blend the input image with the color mask at 50% opacity.
    pred_img = np.array(input_img) * 0.5 + color_seg * 0.5
    pred_img = pred_img.astype(np.uint8)

    return draw_plot(pred_img, seg)
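# Minimal standalone usage sketch (independent of the Gradio UI, assuming a
# local "city1.jpg" exists): the function takes an HxWx3 uint8 array and
# returns a matplotlib Figure.
#
#   fig = segment(np.array(Image.open("city1.jpg").convert("RGB")))
#   fig.savefig("segmentation.png")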

demo = gr.Interface(
    fn=segment,
    # Note: `shape=` and string theme names are Gradio 3.x-era arguments;
    # newer Gradio versions replace them with different parameters.
    inputs=gr.Image(shape=(564, 846)),
    outputs=["plot"],
    live=True,
    examples=["city1.jpg", "city2.jpg", "city3.jpg", "city4.jpg", "city5.jpg"],
    allow_flagging="never",
    title="City Image Segmentation Model",
    theme="huggingfacedark",
    description=(
        "A city-scene segmentation demo built on NVIDIA's SegFormer "
        "architecture. The underlying checkpoint, segformer-b5, is fine-tuned "
        "on the Cityscapes dataset at a resolution of 1024x1024 pixels and "
        "identifies urban elements such as roads, buildings, and pedestrians, "
        "producing visually rich segmentation results. This is a machine "
        "learning class project at Kyunggi University."
    ),
)
demo.launch()
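
# When run locally (python app.py), Gradio serves the demo at
# http://127.0.0.1:7860 by default; on Hugging Face Spaces, the launch()
# call above is executed automatically when the Space starts.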