Karin0616 committed
Commit 82e8ec6 • Parent(s): c91d842

annotated test
app.py CHANGED
@@ -1,4 +1,7 @@
 import gradio as gr
+import random
+
+from matplotlib import gridspec
 import matplotlib.pyplot as plt
 import numpy as np
 from PIL import Image
@@ -13,33 +16,35 @@ model = TFSegformerForSemanticSegmentation.from_pretrained(
 )
 
 def ade_palette():
+
     return [
-        [204, 87, 92],
+        [204, 87, 92], # road (Reddish)
         [112, 185, 212], # sidewalk (Blue)
         [196, 160, 122], # building (Brown)
         [106, 135, 242], # wall (Light Blue)
-        [91, 192, 222],
+        [91, 192, 222], # fence (Turquoise)
         [255, 192, 203], # pole (Pink)
         [176, 224, 230], # traffic light (Light Blue)
-        [222, 49, 99],
-        [139, 69, 19],
-        [255, 0, 0],
-        [0, 0, 255],
+        [222, 49, 99], # traffic sign (Red)
+        [139, 69, 19], # vegetation (Brown)
+        [255, 0, 0], # terrain (Red)
+        [0, 0, 255], # sky (Blue)
         [255, 228, 181], # person (Peach)
-        [128, 0, 0],
-        [0, 128, 0],
-        [255, 99, 71],
-        [0, 255, 0],
-        [128, 0, 128],
-        [255, 255, 0],
-        [128, 0, 128]
+        [128, 0, 0], # rider (Maroon)
+        [0, 128, 0], # car (Green)
+        [255, 99, 71], # truck (Tomato)
+        [0, 255, 0], # bus (Lime)
+        [128, 0, 128], # train (Purple)
+        [255, 255, 0], # motorcycle (Yellow)
+        [128, 0, 128] # bicycle (Purple)
+
     ]
 
-labels_list = [
-
-
-
-]
+labels_list = []
+
+with open(r'labels.txt', 'r') as fp:
+    for line in fp:
+        labels_list.append(line[:-1])
 
 colormap = np.asarray(ade_palette())
 
@@ -51,7 +56,28 @@ def label_to_color_image(label):
         raise ValueError("label value too large.")
     return colormap[label]
 
-def apply_sepia(input_img, selected_labels):
+def draw_plot(pred_img, seg):
+    fig = plt.figure(figsize=(20, 15))
+
+    grid_spec = gridspec.GridSpec(1, 2, width_ratios=[6, 1])
+
+    plt.subplot(grid_spec[0])
+    plt.imshow(pred_img)
+    plt.axis('off')
+    LABEL_NAMES = np.asarray(labels_list)
+    FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
+    FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)
+
+    unique_labels = np.unique(seg.numpy().astype("uint8"))
+    ax = plt.subplot(grid_spec[1])
+    plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation="nearest")
+    ax.yaxis.tick_right()
+    plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
+    plt.xticks([], [])
+    ax.tick_params(width=0.0, labelsize=25)
+    return fig
+
+def sepia(input_img):
     input_img = Image.fromarray(input_img)
 
     inputs = feature_extractor(images=input_img, return_tensors="tf")
@@ -67,45 +93,56 @@ def apply_sepia(input_img, selected_labels):
     color_seg = np.zeros(
         (seg.shape[0], seg.shape[1], 3), dtype=np.uint8
     ) # height, width, 3
-
     for label, color in enumerate(colormap):
-
-        color_seg[seg.numpy() == label, :] = color
+        color_seg[seg.numpy() == label, :] = color
 
     # Show image + mask
     pred_img = np.array(input_img) * 0.5 + color_seg * 0.5
     pred_img = pred_img.astype(np.uint8)
 
-    fig, ax = plt.subplots()
-    ax.imshow(pred_img)
-    ax.axis('off')
+    fig = draw_plot(pred_img, seg)
     return fig
 
-# Gradio Interface
-iface = gr.Interface(
-    fn=apply_sepia,
-    inputs=[
-        gr.Image(shape=(564, 846)),
-        gr.CheckboxGroup(
-            choices=[(label, label) for label in labels_list],
-            default=[labels_list[0]],
-            label="Select Labels"
-        )
-    ],
-    outputs=gr.AnnotatedImage(type="plot", loc="center", scale=True),
-    live=True,
-    examples=["city1.jpg", "city2.jpg", "city3.jpg"],
-    allow_flagging='never',
-    title="Semantic Segmentation with Color Highlighting",
-    description="Apply sepia effect to the image with color highlighting for selected labels.",
-    theme="darkpeach",
-    css="""
-    body {
-        background-color: dark;
-        color: white;
-        font-family: Arial, sans-serif;
-    }
-    """
-)
 
-
+demo = gr.Interface(fn=sepia,
+                    inputs=gr.Image(shape=(564,846)),
+                    outputs=['plot',gr.AnnotatedImage(
+                        color_map={
+                            "road": "#CC575C",
+                            "sidewalk": "#70B9D4",
+                            "building": "#C4A07A",
+                            "wall": "#6A87F2",
+                            "fence": "#5BC0DE",
+                            "pole": "#FFC0CB",
+                            "traffic light": "#B0E0E6",
+                            "traffic sign": "#DE3163",
+                            "vegetation": "#8B4513",
+                            "terrain": "#FF0000",
+                            "sky": "#0000FF",
+                            "person": "#FFE4B5",
+                            "rider": "#800000",
+                            "car": "#008000",
+                            "truck": "#FF6347",
+                            "bus": "#00FF00",
+                            "train": "#800080",
+                            "motorcycle": "#FFFF00",
+                            "bicycle": "#800080"}
+                    )],
+                    live=True,
+                    examples=["city1.jpg","city2.jpg","city3.jpg"],
+                    allow_flagging='never',
+                    title="This is a machine learning activity project at Kyunggi University.",
+                    theme="darkpeach",
+                    css="""
+                    body {
+                        background-color: dark;
+                        color: white; /* change font color */
+                        font-family: Arial, sans-serif; /* change font family */
+                    }
+                    """
+
+)
+
+
+demo.launch()
+
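Note on the new interface: `demo` declares two outputs (`'plot'` and the `gr.AnnotatedImage`), but `sepia` returns only the Matplotlib figure, so the annotated-image slot never receives a value. Below is a minimal sketch of how that second value could be produced, assuming Gradio's documented `(base_image, [(mask, label), ...])` value format for `AnnotatedImage`; `to_annotated_value` is a hypothetical helper, not part of this commit.

import numpy as np

# Hypothetical helper (not in this commit): build the (image, sections) value
# gr.AnnotatedImage accepts, where each section is a (mask, label) pair and
# each mask is a 2-D array with the image's height and width.
def to_annotated_value(base_img, seg_map, labels):
    sections = []
    for label_id in np.unique(seg_map):
        mask = (seg_map == label_id).astype(np.uint8)  # 1 where this class is present
        sections.append((mask, labels[int(label_id)]))
    return (base_img, sections)

# sepia would then return one value per declared output, e.g.:
#     return fig, to_annotated_value(np.array(input_img), seg.numpy().astype("uint8"), labels_list)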