Hantr committed on
Commit 9c9cd08
1 parent: fb3d1dc
app.py ADDED
@@ -0,0 +1,111 @@
+ import gradio as gr
+
+ from matplotlib import gridspec
+ import matplotlib.pyplot as plt
+ import numpy as np
+ from PIL import Image
+ import tensorflow as tf
+ from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation
+
+ feature_extractor = SegformerFeatureExtractor.from_pretrained(
+     "nvidia/segformer-b3-finetuned-cityscapes-1024-1024"
+ )
+ model = TFSegformerForSemanticSegmentation.from_pretrained(
+     "nvidia/segformer-b3-finetuned-cityscapes-1024-1024"
+ )
+
+ def cityscapes_palette():
+     """Cityscapes palette that maps each of the 19 classes to an RGB color."""
+     return [
+         [255, 0, 0],
+         [255, 94, 0],
+         [255, 187, 0],
+         [255, 228, 0],
+         [171, 242, 0],
+         [29, 219, 22],
+         [0, 216, 255],
+         [0, 84, 255],
+         [1, 0, 255],
+         [95, 0, 255],
+         [255, 0, 221],
+         [255, 0, 127],
+         [255, 167, 167],
+         [242, 150, 97],
+         [204, 166, 61],
+         [153, 138, 0],
+         [71, 102, 0],
+         [47, 157, 39],
+         [116, 116, 116],
+     ]
+
+ labels_list = []
+
+ with open('label.txt', 'r') as fp:  # class names, one per line, in the same order as the palette (label.txt is added in this commit)
+     for line in fp:
+         labels_list.append(line.rstrip('\n'))
+
+ colormap = np.asarray(cityscapes_palette())
+
+ def label_to_color_image(label):
+     if label.ndim != 2:
+         raise ValueError("Expect 2-D input label")
+
+     if np.max(label) >= len(colormap):
+         raise ValueError("label value too large.")
+     return colormap[label]
+
+ def draw_plot(pred_img, seg):
+     fig = plt.figure(figsize=(20, 15))
+
+     grid_spec = gridspec.GridSpec(1, 2, width_ratios=[6, 1])
+
+     plt.subplot(grid_spec[0])
+     plt.imshow(pred_img)
+     plt.axis('off')
+     LABEL_NAMES = np.asarray(labels_list)
+     FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
+     FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)
+
+     unique_labels = np.unique(seg.numpy().astype("uint8"))
+     ax = plt.subplot(grid_spec[1])  # legend panel listing only the classes present
+     plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation="nearest")
+     ax.yaxis.tick_right()
+     plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
+     plt.xticks([], [])
+     ax.tick_params(width=0.0, labelsize=25)
+     return fig
+
+ def sepia(input_img):  # despite its name, this runs semantic segmentation and returns a matplotlib figure
+     input_img = Image.fromarray(input_img)
+
+     inputs = feature_extractor(images=input_img, return_tensors="tf")
+     outputs = model(**inputs)
+     logits = outputs.logits
+
+     logits = tf.transpose(logits, [0, 2, 3, 1])
+     logits = tf.image.resize(
+         logits, input_img.size[::-1]
+     )  # `image.size` is (width, height), so reverse it to get (height, width)
+     seg = tf.math.argmax(logits, axis=-1)[0]
+
+     color_seg = np.zeros(
+         (seg.shape[0], seg.shape[1], 3), dtype=np.uint8
+     )  # height, width, 3
+     for label, color in enumerate(colormap):
+         color_seg[seg.numpy() == label, :] = color
+
+     # Show image + mask
+     pred_img = np.array(input_img) * 0.5 + color_seg * 0.5
+     pred_img = pred_img.astype(np.uint8)
+
+     fig = draw_plot(pred_img, seg)
+     return fig
+
+ demo = gr.Interface(fn=sepia,
+                     inputs=gr.Image(shape=(400, 600)),
+                     outputs=['plot'],
+                     examples=["cityscapes-1.jpg", "cityscapes-2.jpg", "cityscapes-3.jpg", "person-4.jpg", "person-5.jpg"],
+                     allow_flagging='never')
+
+
+ demo.launch()
cityscapes-1.jpg ADDED
cityscapes-2.jpg ADDED
cityscapes-3.jpg ADDED
label.txt ADDED
@@ -0,0 +1,19 @@
+ road
+ sidewalk
+ building
+ wall
+ fence
+ pole
+ traffic light
+ traffic sign
+ vegetation
+ terrain
+ sky
+ person
+ rider
+ car
+ truck
+ bus
+ train
+ motorcycle
+ bicycle
person-4.jpg ADDED
person-5.jpg ADDED
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ torch
+ transformers
+ tensorflow
+ numpy
+ Pillow
+ matplotlib
segmentation2/.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
segmentation2/ADE_val_00000001.jpeg ADDED
segmentation2/ADE_val_00001159.jpg ADDED
segmentation2/ADE_val_00001248.jpg ADDED
segmentation2/ADE_val_00001472.jpg ADDED
segmentation2/README.md ADDED
@@ -0,0 +1,12 @@
+ ---
+ title: Segmentation
+ emoji: 👀
+ colorFrom: red
+ colorTo: blue
+ sdk: gradio
+ sdk_version: 3.44.4
+ app_file: app.py
+ pinned: false
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
segmentation2/app.py ADDED
@@ -0,0 +1,242 @@
+ import gradio as gr
+
+ from matplotlib import gridspec
+ import matplotlib.pyplot as plt
+ import numpy as np
+ from PIL import Image
+ import tensorflow as tf
+ from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation
+
+ feature_extractor = SegformerFeatureExtractor.from_pretrained(
+     "nvidia/segformer-b5-finetuned-ade-640-640"
+ )
+ model = TFSegformerForSemanticSegmentation.from_pretrained(
+     "nvidia/segformer-b5-finetuned-ade-640-640"
+ )
+
+ def ade_palette():
+     """ADE20K palette that maps each class to RGB values."""
+     return [
+         [204, 87, 92],
+         [112, 185, 212],
+         [45, 189, 106],
+         [234, 123, 67],
+         [78, 56, 123],
+         [210, 32, 89],
+         [90, 180, 56],
+         [155, 102, 200],
+         [33, 147, 176],
+         [255, 183, 76],
+         [67, 123, 89],
+         [190, 60, 45],
+         [134, 112, 200],
+         [56, 45, 189],
+         [200, 56, 123],
+         [87, 92, 204],
+         [120, 56, 123],
+         [45, 78, 123],
+         [156, 200, 56],
+         [32, 90, 210],
+         [56, 123, 67],
+         [180, 56, 123],
+         [123, 67, 45],
+         [45, 134, 200],
+         [67, 56, 123],
+         [78, 123, 67],
+         [32, 210, 90],
+         [45, 56, 189],
+         [123, 56, 123],
+         [56, 156, 200],
+         [189, 56, 45],
+         [112, 200, 56],
+         [56, 123, 45],
+         [200, 32, 90],
+         [123, 45, 78],
+         [200, 156, 56],
+         [45, 67, 123],
+         [56, 45, 78],
+         [45, 56, 123],
+         [123, 67, 56],
+         [56, 78, 123],
+         [210, 90, 32],
+         [123, 56, 189],
+         [45, 200, 134],
+         [67, 123, 56],
+         [123, 45, 67],
+         [90, 32, 210],
+         [200, 45, 78],
+         [32, 210, 90],
+         [45, 123, 67],
+         [165, 42, 87],
+         [72, 145, 167],
+         [15, 158, 75],
+         [209, 89, 40],
+         [32, 21, 121],
+         [184, 20, 100],
+         [56, 135, 15],
+         [128, 92, 176],
+         [1, 119, 140],
+         [220, 151, 43],
+         [41, 97, 72],
+         [148, 38, 27],
+         [107, 86, 176],
+         [21, 26, 136],
+         [174, 27, 90],
+         [91, 96, 204],
+         [108, 50, 107],
+         [27, 45, 136],
+         [168, 200, 52],
+         [7, 102, 27],
+         [42, 93, 56],
+         [140, 52, 112],
+         [92, 107, 168],
+         [17, 118, 176],
+         [59, 50, 174],
+         [206, 40, 143],
+         [44, 19, 142],
+         [23, 168, 75],
+         [54, 57, 189],
+         [144, 21, 15],
+         [15, 176, 35],
+         [107, 19, 79],
+         [204, 52, 114],
+         [48, 173, 83],
+         [11, 120, 53],
+         [206, 104, 28],
+         [20, 31, 153],
+         [27, 21, 93],
+         [11, 206, 138],
+         [112, 30, 83],
+         [68, 91, 152],
+         [153, 13, 43],
+         [25, 114, 54],
+         [92, 27, 150],
+         [108, 42, 59],
+         [194, 77, 5],
+         [145, 48, 83],
+         [7, 113, 19],
+         [25, 92, 113],
+         [60, 168, 79],
+         [78, 33, 120],
+         [89, 176, 205],
+         [27, 200, 94],
+         [210, 67, 23],
+         [123, 89, 189],
+         [225, 56, 112],
+         [75, 156, 45],
+         [172, 104, 200],
+         [15, 170, 197],
+         [240, 133, 65],
+         [89, 156, 112],
+         [214, 88, 57],
+         [156, 134, 200],
+         [78, 57, 189],
+         [200, 78, 123],
+         [106, 120, 210],
+         [145, 56, 112],
+         [89, 120, 189],
+         [185, 206, 56],
+         [47, 99, 28],
+         [112, 189, 78],
+         [200, 112, 89],
+         [89, 145, 112],
+         [78, 106, 189],
+         [112, 78, 189],
+         [156, 112, 78],
+         [28, 210, 99],
+         [78, 89, 189],
+         [189, 78, 57],
+         [112, 200, 78],
+         [189, 47, 78],
+         [205, 112, 57],
+         [78, 145, 57],
+         [200, 78, 112],
+         [99, 89, 145],
+         [200, 156, 78],
+         [57, 78, 145],
+         [78, 57, 99],
+         [57, 78, 145],
+         [145, 112, 78],
+         [78, 89, 145],
+         [210, 99, 28],
+         [145, 78, 189],
+         [57, 200, 136],
+         [89, 156, 78],
+         [145, 78, 99],
+         [99, 28, 210],
+         [189, 78, 47],
+         [28, 210, 99],
+         [78, 145, 57],
+     ]
172
+ labels_list = []
173
+
174
+ with open(r'labels.txt', 'r') as fp:
175
+ for line in fp:
176
+ labels_list.append(line[:-1])
177
+
178
+ colormap = np.asarray(ade_palette())
179
+
180
+ def label_to_color_image(label):
181
+ if label.ndim != 2:
182
+ raise ValueError("Expect 2-D input label")
183
+
184
+ if np.max(label) >= len(colormap):
185
+ raise ValueError("label value too large.")
186
+ return colormap[label]
187
+
188
+ def draw_plot(pred_img, seg):
189
+ fig = plt.figure(figsize=(20, 15))
190
+
191
+ grid_spec = gridspec.GridSpec(1, 2, width_ratios=[6, 1])
192
+
193
+ plt.subplot(grid_spec[0])
194
+ plt.imshow(pred_img)
195
+ plt.axis('off')
196
+ LABEL_NAMES = np.asarray(labels_list)
197
+ FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
198
+ FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)
199
+
200
+ unique_labels = np.unique(seg.numpy().astype("uint8"))
201
+ ax = plt.subplot(grid_spec[1])
202
+ plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation="nearest")
203
+ ax.yaxis.tick_right()
204
+ plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
205
+ plt.xticks([], [])
206
+ ax.tick_params(width=0.0, labelsize=25)
207
+ return fig
208
+
209
+ def sepia(input_img):
210
+ input_img = Image.fromarray(input_img)
211
+
212
+ inputs = feature_extractor(images=input_img, return_tensors="tf")
213
+ outputs = model(**inputs)
214
+ logits = outputs.logits
215
+
216
+ logits = tf.transpose(logits, [0, 2, 3, 1])
217
+ logits = tf.image.resize(
218
+ logits, input_img.size[::-1]
219
+ ) # We reverse the shape of `image` because `image.size` returns width and height.
220
+ seg = tf.math.argmax(logits, axis=-1)[0]
221
+
222
+ color_seg = np.zeros(
223
+ (seg.shape[0], seg.shape[1], 3), dtype=np.uint8
224
+ ) # height, width, 3
225
+ for label, color in enumerate(colormap):
226
+ color_seg[seg.numpy() == label, :] = color
227
+
228
+ # Show image + mask
229
+ pred_img = np.array(input_img) * 0.5 + color_seg * 0.5
230
+ pred_img = pred_img.astype(np.uint8)
231
+
232
+ fig = draw_plot(pred_img, seg)
233
+ return fig
234
+
235
+ demo = gr.Interface(fn=sepia,
236
+ inputs=gr.Image(shape=(400, 600)),
237
+ outputs=['plot'],
238
+ examples=["ADE_val_00000001.jpeg", "ADE_val_00001159.jpg", "ADE_val_00001248.jpg", "ADE_val_00001472.jpg"],
239
+ allow_flagging='never')
240
+
241
+
242
+ demo.launch()
segmentation2/labels.txt ADDED
@@ -0,0 +1,150 @@
+ wall
+ building
+ sky
+ floor
+ tree
+ ceiling
+ road
+ bed
+ windowpane
+ grass
+ cabinet
+ sidewalk
+ person
+ earth
+ door
+ table
+ mountain
+ plant
+ curtain
+ chair
+ car
+ water
+ painting
+ sofa
+ shelf
+ house
+ sea
+ mirror
+ rug
+ field
+ armchair
+ seat
+ fence
+ desk
+ rock
+ wardrobe
+ lamp
+ bathtub
+ railing
+ cushion
+ base
+ box
+ column
+ signboard
+ chest of drawers
+ counter
+ sand
+ sink
+ skyscraper
+ fireplace
+ refrigerator
+ grandstand
+ path
+ stairs
+ runway
+ case
+ pool table
+ pillow
+ screen door
+ stairway
+ river
+ bridge
+ bookcase
+ blind
+ coffee table
+ toilet
+ flower
+ book
+ hill
+ bench
+ countertop
+ stove
+ palm
+ kitchen island
+ computer
+ swivel chair
+ boat
+ bar
+ arcade machine
+ hovel
+ bus
+ towel
+ light
+ truck
+ tower
+ chandelier
+ awning
+ streetlight
+ booth
+ television receiver
+ airplane
+ dirt track
+ apparel
+ pole
+ land
+ bannister
+ escalator
+ ottoman
+ bottle
+ buffet
+ poster
+ stage
+ van
+ ship
+ fountain
+ conveyer belt
+ canopy
+ washer
+ plaything
+ swimming pool
+ stool
+ barrel
+ basket
+ waterfall
+ tent
+ bag
+ minibike
+ cradle
+ oven
+ ball
+ food
+ step
+ tank
+ trade name
+ microwave
+ pot
+ animal
+ bicycle
+ lake
+ dishwasher
+ screen
+ blanket
+ sculpture
+ hood
+ sconce
+ vase
+ traffic light
+ tray
+ ashcan
+ fan
+ pier
+ crt screen
+ plate
+ monitor
+ bulletin board
+ shower
+ radiator
+ glass
+ clock
+ flag
segmentation2/requirements.txt ADDED
@@ -0,0 +1,6 @@
+ torch
+ transformers
+ tensorflow
+ numpy
+ Pillow
+ matplotlib