Karin0616 committed on
Commit
6e15106
·
1 Parent(s): e3cea39

block test

Browse files
Files changed (1) hide show
  1. app.py +20 -78
app.py CHANGED
@@ -1,10 +1,11 @@
1
  import gradio as gr
2
  import random
 
 
 
3
  import numpy as np
4
  from PIL import Image
5
  import tensorflow as tf
6
- from matplotlib import gridspec
7
- import matplotlib.pyplot as plt
8
  from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation
9
 
10
  feature_extractor = SegformerFeatureExtractor.from_pretrained(
@@ -39,11 +40,11 @@ def ade_palette():
39
  ]
40
 
41
 
42
- labels_list = [
43
- "road", "sidewalk", "building", "wall", "fence", "pole", "traffic light",
44
- "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car",
45
- "truck", "bus", "train", "motorcycle", "bicycle"
46
- ]
47
 
48
  colormap = np.asarray(ade_palette())
49
 
@@ -103,75 +104,16 @@ def sepia(input_img):
103
  return fig
104
 
105
 
 
106
  with gr.Blocks() as demo:
107
- with gr.Row():
108
- num_boxes = gr.Slider(1, 1, 1, step=0, label="Number of boxes")
109
- num_segments = gr.Slider(0, 19, 1, step=1, label="Number of segments")
110
-
111
- with gr.Row():
112
- img_input = gr.Image()
113
- img_output = gr.AnnotatedImage(
114
- color_map={
115
- "road": "#CC575C",
116
- "sidewalk": "#70B9D4",
117
- "building": "#C4A07A",
118
- "wall": "#6A87F2",
119
- "fence": "#5BC0DE",
120
- "pole": "#FFC0CB",
121
- "traffic light": "#B0E0E6",
122
- "traffic sign": "#DE3163",
123
- "vegetation": "#8B4513",
124
- "terrain": "#FF0000",
125
- "sky": "#0000FF",
126
- "person": "#FFE4B5",
127
- "rider": "#800000",
128
- "car": "#008000",
129
- "truck": "#FF6347",
130
- "bus": "#00FF00",
131
- "train": "#800080",
132
- "motorcycle": "#FFFF00",
133
- "bicycle": "#800080"
134
- }
135
- )
136
-
137
- section_btn = gr.Button("Identify Sections")
138
- selected_section = gr.Textbox(label="Selected Section")
139
-
140
-
141
- def section(img, num_boxes, num_segments):
142
- sections = []
143
-
144
- for a in range(num_boxes):
145
- x = random.randint(0, img.shape[1])
146
- y = random.randint(0, img.shape[0])
147
- w = random.randint(0, img.shape[1] - x)
148
- h = random.randint(0, img.shape[0] - y)
149
- sections.append(((x, y, x + w, y + h), labels_list[a]))
150
-
151
- for b in range(num_segments):
152
- x = random.randint(0, img.shape[1])
153
- y = random.randint(0, img.shape[0])
154
- r = random.randint(0, min(x, y, img.shape[1] - x, img.shape[0] - y))
155
- mask = np.zeros(img.shape[:2])
156
-
157
- for i in range(img.shape[0]):
158
- for j in range(img.shape[1]):
159
- dist_square = (i - y) ** 2 + (j - x) ** 2
160
- if dist_square < r ** 2:
161
- mask[i, j] = round((r ** 2 - dist_square) / r ** 2 * 4) / 4
162
-
163
- sections.append((mask, labels_list[b + num_boxes]))
164
-
165
- return (img, sections)
166
-
167
-
168
- section_btn.click(section, [img_input, num_boxes, num_segments], img_output)
169
-
170
-
171
- def select_section(evt: gr.SelectData):
172
- return labels_list[evt.index]
173
-
174
-
175
- img_output.select(select_section, None, selected_section)
176
-
177
- demo.launch()
 
1
  import gradio as gr
2
  import random
3
+
4
+ from matplotlib import gridspec
5
+ import matplotlib.pyplot as plt
6
  import numpy as np
7
  from PIL import Image
8
  import tensorflow as tf
 
 
9
  from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation
10
 
11
  feature_extractor = SegformerFeatureExtractor.from_pretrained(
 
40
  ]
41
 
42
 
43
+ labels_list = []
44
+
45
+ with open(r'labels.txt', 'r') as fp:
46
+ for line in fp:
47
+ labels_list.append(line[:-1])
48
 
49
  colormap = np.asarray(ade_palette())
50
 
 
104
  return fig
105
 
106
 
107
+ # Gradio Blocks로 변환 (convert to Gradio Blocks)
108
  with gr.Blocks() as demo:
109
+ img_input = gr.Image(shape=(564, 846))
110
+ img_output = gr.Image()
111
+
112
+ # 사용자 입력을 받는 부분 추가 (add section that receives user input)
113
+ input_img = gr.Image(shape=(564, 846), source=img_input)
114
+ input_img.click(sepia, img_input, img_output)
115
+
116
+ # 사용자 입력에 대한 결과를 출력 (output the result for the user input)
117
+ img_output.source(sepia, img_input)
118
+
119
+ demo.launch()