Karin0616 committed
Commit e3cea39
Parent(s): 0960380
annotated test

app.py CHANGED
@@ -1,11 +1,10 @@
 import gradio as gr
 import random
-
-from matplotlib import gridspec
-import matplotlib.pyplot as plt
 import numpy as np
 from PIL import Image
 import tensorflow as tf
+from matplotlib import gridspec
+import matplotlib.pyplot as plt
 from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation
 
 feature_extractor = SegformerFeatureExtractor.from_pretrained(
@@ -15,8 +14,8 @@ model = TFSegformerForSemanticSegmentation.from_pretrained(
     "nvidia/segformer-b5-finetuned-cityscapes-1024-1024"
 )
 
-def ade_palette():
 
+def ade_palette():
     return [
         [204, 87, 92], # road (Reddish)
         [112, 185, 212], # sidewalk (Blue)
@@ -37,17 +36,18 @@ def ade_palette():
         [128, 0, 128], # train (Purple)
         [255, 255, 0], # motorcycle (Yellow)
         [128, 0, 128] # bicycle (Purple)
-
     ]
 
-labels_list = []
 
-
-
-
+labels_list = [
+    "road", "sidewalk", "building", "wall", "fence", "pole", "traffic light",
+    "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car",
+    "truck", "bus", "train", "motorcycle", "bicycle"
+]
 
 colormap = np.asarray(ade_palette())
 
+
 def label_to_color_image(label):
     if label.ndim != 2:
         raise ValueError("Expect 2-D input label")
@@ -56,14 +56,15 @@ def label_to_color_image(label):
         raise ValueError("label value too large.")
     return colormap[label]
 
+
 def draw_plot(pred_img, seg):
     fig = plt.figure(figsize=(20, 15))
-
     grid_spec = gridspec.GridSpec(1, 2, width_ratios=[6, 1])
 
     plt.subplot(grid_spec[0])
     plt.imshow(pred_img)
     plt.axis('off')
+
     LABEL_NAMES = np.asarray(labels_list)
     FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
     FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)
@@ -77,6 +78,7 @@ def draw_plot(pred_img, seg):
     ax.tick_params(width=0.0, labelsize=25)
     return fig
 
+
 def sepia(input_img):
     input_img = Image.fromarray(input_img)
 
@@ -85,9 +87,7 @@ def sepia(input_img):
     logits = outputs.logits
 
     logits = tf.transpose(logits, [0, 2, 3, 1])
-    logits = tf.image.resize(
-        logits, input_img.size[::-1]
-    ) # We reverse the shape of `image` because `image.size` returns width and height.
+    logits = tf.image.resize(logits, input_img.size[::-1])
     seg = tf.math.argmax(logits, axis=-1)[0]
 
     color_seg = np.zeros(
@@ -96,7 +96,6 @@ def sepia(input_img):
     for label, color in enumerate(colormap):
         color_seg[seg.numpy() == label, :] = color
 
-    # Show image + mask
     pred_img = np.array(input_img) * 0.5 + color_seg * 0.5
     pred_img = pred_img.astype(np.uint8)
 
@@ -104,25 +103,75 @@ def sepia(input_img):
     return fig
 
 
+with gr.Blocks() as demo:
+    with gr.Row():
+        num_boxes = gr.Slider(1, 1, 1, step=0, label="Number of boxes")
+        num_segments = gr.Slider(0, 19, 1, step=1, label="Number of segments")
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    with gr.Row():
+        img_input = gr.Image()
+        img_output = gr.AnnotatedImage(
+            color_map={
+                "road": "#CC575C",
+                "sidewalk": "#70B9D4",
+                "building": "#C4A07A",
+                "wall": "#6A87F2",
+                "fence": "#5BC0DE",
+                "pole": "#FFC0CB",
+                "traffic light": "#B0E0E6",
+                "traffic sign": "#DE3163",
+                "vegetation": "#8B4513",
+                "terrain": "#FF0000",
+                "sky": "#0000FF",
+                "person": "#FFE4B5",
+                "rider": "#800000",
+                "car": "#008000",
+                "truck": "#FF6347",
+                "bus": "#00FF00",
+                "train": "#800080",
+                "motorcycle": "#FFFF00",
+                "bicycle": "#800080"
+            }
+        )
 
-
+    section_btn = gr.Button("Identify Sections")
+    selected_section = gr.Textbox(label="Selected Section")
 
 
-
+    def section(img, num_boxes, num_segments):
+        sections = []
+
+        for a in range(num_boxes):
+            x = random.randint(0, img.shape[1])
+            y = random.randint(0, img.shape[0])
+            w = random.randint(0, img.shape[1] - x)
+            h = random.randint(0, img.shape[0] - y)
+            sections.append(((x, y, x + w, y + h), labels_list[a]))
+
+        for b in range(num_segments):
+            x = random.randint(0, img.shape[1])
+            y = random.randint(0, img.shape[0])
+            r = random.randint(0, min(x, y, img.shape[1] - x, img.shape[0] - y))
+            mask = np.zeros(img.shape[:2])
+
+            for i in range(img.shape[0]):
+                for j in range(img.shape[1]):
+                    dist_square = (i - y) ** 2 + (j - x) ** 2
+                    if dist_square < r ** 2:
+                        mask[i, j] = round((r ** 2 - dist_square) / r ** 2 * 4) / 4
+
+            sections.append((mask, labels_list[b + num_boxes]))
+
+        return (img, sections)
+
 
+    section_btn.click(section, [img_input, num_boxes, num_segments], img_output)
+
+
+    def select_section(evt: gr.SelectData):
+        return labels_list[evt.index]
+
+
+    img_output.select(select_section, None, selected_section)
+
+demo.launch()
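For reference, the (img, sections) tuple that section() returns is the value format gr.AnnotatedImage accepts: a base image plus a list of (annotation, label) pairs, where each annotation is either a bounding box (x1, y1, x2, y2) or a float mask with the image's height and width. A minimal standalone sketch of that contract; the array sizes and labels here are illustrative, not from the commit:

import numpy as np
import gradio as gr

# Synthetic base image; in app.py above it comes from the gr.Image input.
img = np.zeros((100, 100, 3), dtype=np.uint8)

# Box annotation: (x1, y1, x2, y2) corner coordinates plus a label.
box = ((10, 10, 60, 60), "road")

# Mask annotation: values in [0, 1], same height/width as the image.
mask = np.zeros((100, 100))
mask[20:40, 20:40] = 1.0

with gr.Blocks() as sketch:
    out = gr.AnnotatedImage()
    gr.Button("Show").click(lambda: (img, [box, (mask, "sidewalk")]), None, out)

sketch.launch()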