DDingcheol committed
Commit b5a5cbc
1 Parent(s): 55de6f0

Upload app (2).txt

Files changed (1)
app (2).txt +110 -0
app (2).txt ADDED
@@ -0,0 +1,110 @@
+ import gradio as gr
+ import numpy as np
+ import tensorflow as tf
+ from PIL import Image
+ from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation
+ import matplotlib.pyplot as plt
+ from matplotlib import gridspec
+
+ feature_extractor = SegformerFeatureExtractor.from_pretrained(
+     "nvidia/segformer-b0-finetuned-cityscapes-1024-1024"
+ )
+ model = TFSegformerForSemanticSegmentation.from_pretrained(
+     "nvidia/segformer-b0-finetuned-cityscapes-1024-1024"
+ )
+
+ def ade_palette():
+     """ADE20K palette that maps each class to RGB values."""
+     return [
+         [255, 0, 0],
+         [255, 187, 0],
+         [255, 228, 0],
+         [29, 219, 22],
+         [178, 204, 255],
+         [1, 0, 255],
+         [165, 102, 255],
+         [217, 65, 197],
+         [116, 116, 116],
+         [204, 114, 61],
+         [206, 242, 121],
+         [61, 183, 204],
+         [94, 94, 94],
+         [196, 183, 59],
+         [246, 246, 246],
+         [209, 178, 255],
+         [0, 87, 102],
+         [153, 0, 76],
+         [47, 157, 39]
+     ]
+
+ labels_list = []
+
+ with open(r'labels.txt', 'r') as fp:
+     for line in fp:
+         labels_list.append(line[:-1])
+
+ colormap = np.asarray(ade_palette())
+
+ def label_to_color_image(label):
+     if label.ndim != 2:
+         raise ValueError("Expect 2-D input label")
+
+     if np.max(label) >= len(colormap):
+         raise ValueError("label value too large.")
+     return colormap[label]
+
+ def draw_plot(pred_img, seg):
+     fig = plt.figure(figsize=(20, 15))
+
+     grid_spec = gridspec.GridSpec(1, 2, width_ratios=[6, 1])
+
+     plt.subplot(grid_spec[0])
+     plt.imshow(pred_img)
+     plt.axis('off')
+     LABEL_NAMES = np.asarray(labels_list)
+     FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
+     FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)
+
+     unique_labels = np.unique(seg.numpy().astype("uint8"))
+     ax = plt.subplot(grid_spec[1])
+     plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation="nearest")
+     ax.yaxis.tick_right()
+     plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
+     plt.xticks([], [])
+     ax.tick_params(width=0.0, labelsize=25)
+     return fig
+
+ def sepia(input_img):
+     input_img = Image.fromarray(input_img)
+
+     inputs = feature_extractor(images=input_img, return_tensors="tf")
+     outputs = model(**inputs)
+     logits = outputs.logits
+
+     logits = tf.transpose(logits, [0, 2, 3, 1])
+     logits = tf.image.resize(
+         logits, input_img.size[::-1]
+     )
+     seg = tf.math.argmax(logits, axis=-1)[0]
+
+     color_seg = np.zeros(
+         (seg.shape[0], seg.shape[1], 3), dtype=np.uint8
+     )
+     for label, color in enumerate(colormap):
+         color_seg[seg.numpy() == label, :] = color
+
+     pred_img = np.array(input_img) * 0.5 + color_seg * 0.5
+     pred_img = pred_img.astype(np.uint8)
+
+     fig = draw_plot(pred_img, seg)
+
+     return fig
+
+ demo = gr.Interface(fn=sepia,
+                     inputs=gr.Image(shape=(400, 600)),
+                     outputs=['plot'],
+                     examples=["citiscapes-1.jpeg", "citiscapes-2.jpeg"],
+                     allow_flagging='never')
+
+
+ demo.launch()
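
Note: the app expects a labels.txt file with one class name per line (19 entries, matching the 19-color palette), plus the example images citiscapes-1.jpeg and citiscapes-2.jpeg; none of these files are part of this commit. The sketch below is one way to generate a compatible labels.txt, assuming the file should simply list the checkpoint's own class names in index order; the file name and ordering here are assumptions, not taken from this commit.

# Minimal sketch (not part of this commit): write labels.txt with the class
# names stored in the checkpoint's config, one name per line, in index order.
# The file name and ordering are assumptions about what app (2).txt expects.
from transformers import TFSegformerForSemanticSegmentation

model = TFSegformerForSemanticSegmentation.from_pretrained(
    "nvidia/segformer-b0-finetuned-cityscapes-1024-1024"
)

with open("labels.txt", "w") as fp:
    # id2label maps class index -> class name (the 19 Cityscapes training classes)
    for idx in sorted(model.config.id2label, key=int):
        fp.write(str(model.config.id2label[idx]) + "\n")

Also note that gr.Image(shape=(400, 600)) and allow_flagging='never' follow the Gradio 3.x API; the shape argument was removed in later major versions, so running the app unchanged assumes a Gradio 3.x environment.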