Spaces: Runtime error

DDingcheol committed · Commit 2b99c72 · 1 Parent(s): 0f6c099

Update app.py

app.py CHANGED
@@ -1,26 +1,80 @@
 import gradio as gr
-…
+
+from matplotlib import gridspec
+import matplotlib.pyplot as plt
 import numpy as np
+from PIL import Image
 import tensorflow as tf
-from transformers import …
+from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation
 
-…
-…
-…
-model = …
+feature_extractor = SegformerFeatureExtractor.from_pretrained(
+    "nvidia/segformer-b0-finetuned-cityscapes-1024-1024"
+)
+model = TFSegformerForSemanticSegmentation.from_pretrained(
+    "nvidia/segformer-b0-finetuned-cityscapes-1024-1024"
+)
 
-def …
-…
-…
-…
-…
-…
-…
+def ade_palette():
+    """ADE20K palette that maps each class to RGB values."""
+    return [
+        [255, 0, 0],
+        [255, 187, 0],
+        [255, 228, 0],
+        [29, 219, 22],
+        [178, 204, 255],
+        [1, 0, 255],
+        [165, 102, 255],
+        [217, 65, 197],
+        [116, 116, 116],
+        [204, 114, 61],
+        [206, 242, 121],
+        [61, 183, 204],
+        [94, 94, 94],
+        [196, 183, 59],
+        [246, 246, 246],
+        [209, 178, 255],
+        [0, 87, 102],
+        [153, 0, 76]
+    ]
+
+labels_list = []
+
+with open(r'labels.txt', 'r') as fp:
+    for line in fp:
+        labels_list.append(line[:-1])
+
+colormap = np.asarray(ade_palette())
 
-def …
-…
+def label_to_color_image(label):
+    if label.ndim != 2:
+        raise ValueError("Expect 2-D input label")
 
-…
+    if np.max(label) >= len(colormap):
+        raise ValueError("label value too large.")
+    return colormap[label]
+
+def draw_plot(pred_img, seg):
+    fig = plt.figure(figsize=(20, 15))
+
+    grid_spec = gridspec.GridSpec(1, 2, width_ratios=[6, 1])
+
+    plt.subplot(grid_spec[0])
+    plt.imshow(pred_img)
+    plt.axis('off')
+    LABEL_NAMES = np.asarray(labels_list)
+    FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
+    FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)
+
+    unique_labels = np.unique(seg.numpy().astype("uint8"))
+    ax = plt.subplot(grid_spec[1])
+    plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation="nearest")
+    ax.yaxis.tick_right()
+    plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
+    plt.xticks([], [])
+    ax.tick_params(width=0.0, labelsize=25)
+    return fig
+
+def sepia(input_img):
     input_img = Image.fromarray(input_img)
 
     inputs = feature_extractor(images=input_img, return_tensors="tf")
@@ -33,48 +87,24 @@ def huggingface_model(input_img):
     ) # We reverse the shape of `image` because `image.size` returns width and height.
     seg = tf.math.argmax(logits, axis=-1)[0]
 
-    …
-    …
-    …
-    …
-    [
-        [102, 102, 156],
-        [190, 153, 153],
-        [153, 153, 153],
-        [250, 170, 30],
-        [220, 220, 0],
-        [107, 142, 35],
-        [152, 251, 152],
-        [0, 130, 180],
-        [220, 20, 60],
-        [255, 0, 0],
-        [0, 0, 142],
-        [0, 0, 70],
-        [0, 60, 100],
-        [0, 80, 100],
-        [0, 0, 230],
-        [119, 11, 32],
-    ]
-    …
-    color_seg = label_to_color_image(seg, colormap)
+    color_seg = np.zeros(
+        (seg.shape[0], seg.shape[1], 3), dtype=np.uint8
+    )  # height, width, 3
+    for label, color in enumerate(colormap):
+        color_seg[seg.numpy() == label, :] = color
 
     # Show image + mask
     pred_img = np.array(input_img) * 0.5 + color_seg * 0.5
     pred_img = pred_img.astype(np.uint8)
 
-    …
-    fig = draw_plot(pred_img, seg, colormap, labels_list)
+    fig = draw_plot(pred_img, seg)
     return fig
 
-…
-…
+demo = gr.Interface(fn=sepia,
+                    inputs=gr.Image(shape=(400, 600)),
+                    outputs=['plot'],
+                    examples=["citiscpae-1.jpg", "citiscape-2.jpg"],
+                    allow_flagging='never')
 
-demo = gr.Interface(
-    fn=huggingface_model,
-    inputs=gr.Image(shape=(1024, 1024)),  # The input image size must be adjusted to match the model's input size.
-    outputs=["plot"],
-    examples=["person-1.jpg", "person-2.jpg", "person-3.jpg", "person-4.jpg", "person-5.jpg"],
-    allow_flagging='never'
-)
 
 demo.launch()
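A note for readers of the second hunk: the viewer collapses the unchanged lines 81-86 of the new file, so the hunk opens at the closing parenthesis of an upsampling call. Below is only a hedged sketch of that hidden context, modeled on the stock TFSegformerForSemanticSegmentation demo this app follows; it is not part of the commit shown above. `model`, `inputs`, and `input_img` are the names already in scope inside `sepia`, and the last two lines repeat the hunk's first visible context lines to close the expression.

# Hedged sketch of the collapsed context (new file lines 81-86); assumed from
# the standard TF SegFormer demo, not shown in this diff.
outputs = model(**inputs)      # forward pass on the preprocessed pixel values
logits = outputs.logits        # shape: (batch, num_labels, height/4, width/4)

logits = tf.transpose(logits, [0, 2, 3, 1])  # channels-last, as tf.image.resize expects
logits = tf.image.resize(
    logits, input_img.size[::-1]
) # We reverse the shape of `image` because `image.size` returns width and height.
seg = tf.math.argmax(logits, axis=-1)[0]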
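Separately, the one-liner `return colormap[label]` in the new `label_to_color_image` does all the colorizing work through NumPy fancy indexing: indexing an (N, 3) palette array with an (H, W) integer label map yields an (H, W, 3) RGB image. A minimal self-contained illustration with toy values (hypothetical, not from the commit):

import numpy as np

# Hypothetical two-color palette and a 2x2 label map, for illustration only.
colormap = np.asarray([[255, 0, 0],    # label 0 -> red
                       [0, 255, 0]])   # label 1 -> green
label = np.array([[0, 1],
                  [1, 0]])

color_image = colormap[label]          # fancy indexing: (2, 2) -> (2, 2, 3)
assert color_image.shape == (2, 2, 3)
assert (color_image[0, 1] == [0, 255, 0]).all()  # the pixel labeled 1 is green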