Karin0616 committed on
Commit
7492e4d
•
1 Parent(s): 6e15106

block test

Browse files
Files changed (1) hide show
  1. app.py +38 -18
app.py CHANGED
@@ -15,8 +15,8 @@ model = TFSegformerForSemanticSegmentation.from_pretrained(
15
  "nvidia/segformer-b5-finetuned-cityscapes-1024-1024"
16
  )
17
 
18
-
19
  def ade_palette():
 
20
  return [
21
  [204, 87, 92], # road (Reddish)
22
  [112, 185, 212], # sidewalk (Blue)
@@ -37,8 +37,8 @@ def ade_palette():
37
  [128, 0, 128], # train (Purple)
38
  [255, 255, 0], # motorcycle (Yellow)
39
  [128, 0, 128] # bicycle (Purple)
40
- ]
41
 
 
42
 
43
  labels_list = []
44
 
@@ -48,7 +48,6 @@ with open(r'labels.txt', 'r') as fp:
48
 
49
  colormap = np.asarray(ade_palette())
50
 
51
-
52
  def label_to_color_image(label):
53
  if label.ndim != 2:
54
  raise ValueError("Expect 2-D input label")
@@ -57,15 +56,14 @@ def label_to_color_image(label):
57
  raise ValueError("label value too large.")
58
  return colormap[label]
59
 
60
-
61
  def draw_plot(pred_img, seg):
62
  fig = plt.figure(figsize=(20, 15))
 
63
  grid_spec = gridspec.GridSpec(1, 2, width_ratios=[6, 1])
64
 
65
  plt.subplot(grid_spec[0])
66
  plt.imshow(pred_img)
67
  plt.axis('off')
68
-
69
  LABEL_NAMES = np.asarray(labels_list)
70
  FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
71
  FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)
@@ -79,7 +77,6 @@ def draw_plot(pred_img, seg):
79
  ax.tick_params(width=0.0, labelsize=25)
80
  return fig
81
 
82
-
83
  def sepia(input_img):
84
  input_img = Image.fromarray(input_img)
85
 
@@ -88,7 +85,9 @@ def sepia(input_img):
88
  logits = outputs.logits
89
 
90
  logits = tf.transpose(logits, [0, 2, 3, 1])
91
- logits = tf.image.resize(logits, input_img.size[::-1])
 
 
92
  seg = tf.math.argmax(logits, axis=-1)[0]
93
 
94
  color_seg = np.zeros(
@@ -97,23 +96,44 @@ def sepia(input_img):
97
  for label, color in enumerate(colormap):
98
  color_seg[seg.numpy() == label, :] = color
99
 
 
100
  pred_img = np.array(input_img) * 0.5 + color_seg * 0.5
101
  pred_img = pred_img.astype(np.uint8)
102
 
103
  fig = draw_plot(pred_img, seg)
104
  return fig
105
-
106
-
107
- # Gradio Blocks로 변환
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
108
  with gr.Blocks() as demo:
109
- img_input = gr.Image(shape=(564, 846))
110
- img_output = gr.Image()
111
 
112
- # ์‚ฌ์šฉ์ž ์ž…๋ ฅ์„ ๋ฐ›๋Š” ๋ถ€๋ถ„ ์ถ”๊ฐ€
113
- input_img = gr.Image(shape=(564, 846), source=img_input)
114
- input_img.click(sepia, img_input, img_output)
115
 
116
- # ์‚ฌ์šฉ์ž ์ž…๋ ฅ์— ๋Œ€ํ•œ ๊ฒฐ๊ณผ๋ฅผ ์ถœ๋ ฅ
117
- img_output.source(sepia, img_input)
118
 
119
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
15
  "nvidia/segformer-b5-finetuned-cityscapes-1024-1024"
16
  )
17
 
 
18
  def ade_palette():
19
+
20
  return [
21
  [204, 87, 92], # road (Reddish)
22
  [112, 185, 212], # sidewalk (Blue)
 
37
  [128, 0, 128], # train (Purple)
38
  [255, 255, 0], # motorcycle (Yellow)
39
  [128, 0, 128] # bicycle (Purple)
 
40
 
41
+ ]
42
 
43
  labels_list = []
44
 
 
48
 
49
  colormap = np.asarray(ade_palette())
50
 
 
51
  def label_to_color_image(label):
52
  if label.ndim != 2:
53
  raise ValueError("Expect 2-D input label")
 
56
  raise ValueError("label value too large.")
57
  return colormap[label]
58
 
 
59
  def draw_plot(pred_img, seg):
60
  fig = plt.figure(figsize=(20, 15))
61
+
62
  grid_spec = gridspec.GridSpec(1, 2, width_ratios=[6, 1])
63
 
64
  plt.subplot(grid_spec[0])
65
  plt.imshow(pred_img)
66
  plt.axis('off')
 
67
  LABEL_NAMES = np.asarray(labels_list)
68
  FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
69
  FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)
 
77
  ax.tick_params(width=0.0, labelsize=25)
78
  return fig
79
 
 
80
  def sepia(input_img):
81
  input_img = Image.fromarray(input_img)
82
 
 
85
  logits = outputs.logits
86
 
87
  logits = tf.transpose(logits, [0, 2, 3, 1])
88
+ logits = tf.image.resize(
89
+ logits, input_img.size[::-1]
90
+ ) # We reverse the shape of `image` because `image.size` returns width and height.
91
  seg = tf.math.argmax(logits, axis=-1)[0]
92
 
93
  color_seg = np.zeros(
 
96
  for label, color in enumerate(colormap):
97
  color_seg[seg.numpy() == label, :] = color
98
 
99
+ # Show image + mask
100
  pred_img = np.array(input_img) * 0.5 + color_seg * 0.5
101
  pred_img = pred_img.astype(np.uint8)
102
 
103
  fig = draw_plot(pred_img, seg)
104
  return fig
105
+ """
106
+ demo = gr.Interface(fn=sepia,
107
+ inputs=gr.Image(shape=(564,846)),
108
+ outputs=['plot'],
109
+ live=True,
110
+ examples=["city1.jpg","city2.jpg","city3.jpg"],
111
+ allow_flagging='never',
112
+ title="This is a machine learning activity project at Kyunggi University.",
113
+ theme="darkpeach",
114
+ css="""
115
+ body {
116
+ background-color: dark;
117
+ color: white; /* ํฐํŠธ ์ƒ‰์ƒ ์ˆ˜์ • */
118
+ font-family: Arial, sans-serif; /* ํฐํŠธ ํŒจ๋ฐ€๋ฆฌ ์ˆ˜์ • */
119
+ }
120
+ """
121
+
122
+ )
123
+ """
124
  with gr.Blocks() as demo:
 
 
125
 
 
 
 
126
 
 
 
127
 
128
+ gr.MarkDown("This is a machine learning activity project at Kyunggi University.")
129
+ with gr.Row():
130
+ with gr.Column():
131
+ inputs = gr.Image(shape=(564, 846))
132
+ with gr.Column():
133
+ outputs = ['plot']
134
+ btn=gr.Button("Activate")
135
+ btn.click(sepia,inputs=[inputs],outputs=[outputs])
136
+ gr.Examples(["city1.jpg", "city2.jpg", "city3.jpg"],inputs=[inputs])
137
+ if __name__ == "__main__":
138
+ demo.launch()
139
+