Karin0616 committed
Commit e5862fe • 1 Parent(s): a45a6f9

example radio

Files changed (1)
  1. app.py +56 -29
app.py CHANGED
@@ -1,39 +1,66 @@
  import gradio as gr
- import tensorflow as tf
+ import random
+
+ from matplotlib import gridspec
+ import matplotlib.pyplot as plt
+ import numpy as np
  from PIL import Image
- import requests
-
- # Load the model
- model = tf.saved_model.load("nvidia_segformer_b5_finetuned_cityscapes_1024")
-
- # Define the labels and colors
- label_colors = {
-     "road": [204, 87, 92],
-     "sidewalk": [112, 185, 212],
-     "building": [196, 160, 122],
-     "wall": [106, 135, 242],
-     "fence": [91, 192, 222],
-     "pole": [255, 192, 203],
-     "traffic_light": [176, 224, 230],
-     "traffic_sign": [222, 49, 99],
-     "vegetation": [139, 69, 19],
-     "terrain": [255, 0, 0],
-     "sky": [0, 0, 255],
-     "person": [255, 228, 181],
-     "rider": [128, 0, 0],
-     "car": [0, 128, 0],
-     "truck": [255, 99, 71],
-     "bus": [0, 255, 0],
-     "train": [128, 0, 128],
-     "motorcycle": [255, 255, 0],
-     "bicycle": [128, 0, 128]
- }
+ import tensorflow as tf
+ from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation
+
+ feature_extractor = SegformerFeatureExtractor.from_pretrained(
+     "nvidia/segformer-b5-finetuned-cityscapes-1024-1024"
+ )
+ model = TFSegformerForSemanticSegmentation.from_pretrained(
+     "nvidia/segformer-b5-finetuned-cityscapes-1024-1024"
+ )
+
+ def palette():
+
+     return [
+         [204, 87, 92],    # road (Reddish)
+         [112, 185, 212],  # sidewalk (Blue)
+         [196, 160, 122],  # building (Brown)
+         [106, 135, 242],  # wall (Light Blue)
+         [91, 192, 222],   # fence (Turquoise)
+         [255, 192, 203],  # pole (Pink)
+         [176, 224, 230],  # traffic light (Light Blue)
+         [222, 49, 99],    # traffic sign (Red)
+         [139, 69, 19],    # vegetation (Brown)
+         [255, 0, 0],      # terrain (Red)
+         [0, 0, 255],      # sky (Blue)
+         [255, 228, 181],  # person (Peach)
+         [128, 0, 0],      # rider (Maroon)
+         [0, 128, 0],      # car (Green)
+         [255, 99, 71],    # truck (Tomato)
+         [0, 255, 0],      # bus (Lime)
+         [128, 0, 128],    # train (Purple)
+         [255, 255, 0],    # motorcycle (Yellow)
+         [128, 0, 128]     # bicycle (Purple)
+
+     ]
+
+ labels_list = []
+
+ with open(r'labels.txt', 'r') as fp:
+     for line in fp:
+         labels_list.append(line[:-1])
+ colormap = np.asarray(palette())
+
+ def label_to_color_image(label):
+     if label.ndim != 2:
+         raise ValueError("Expect 2-D input label")
+
+     if np.max(label) >= len(colormap):
+         raise ValueError("label value too large.")
+     return colormap[label]

  # Define the Gradio interface
  iface = gr.Interface(
      fn=lambda image: predict_segmentation(image, model),
      inputs="image",
-     outputs="image"
+     outputs="image",
+     examples=["city1.jpg","city2.jpg","city3.jpg"],
  )
  iface.launch()
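
Note: both the old and new versions of app.py hand gr.Interface the callback fn=lambda image: predict_segmentation(image, model), but predict_segmentation is not defined anywhere in this file, so submitting an image would raise a NameError. The sketch below shows one way such a function could be written on top of the feature_extractor, model, and label_to_color_image() added in this commit; the function body is an assumption for illustration, not part of the commit, and it would live in app.py, which already imports numpy as np and tensorflow as tf.

def predict_segmentation(image, model):
    # Assumed helper, not part of this commit. Relies on the module-level
    # feature_extractor and label_to_color_image() defined in app.py above.
    # Gradio's "image" input passes the picture as a NumPy array of shape (H, W, 3).
    inputs = feature_extractor(images=image, return_tensors="tf")
    outputs = model(**inputs)
    # SegFormer logits are channels-first: (batch, num_labels, H/4, W/4).
    logits = tf.transpose(outputs.logits, [0, 2, 3, 1])
    # Upsample back to the input resolution and take the per-pixel argmax.
    logits = tf.image.resize(logits, image.shape[:2])
    seg = tf.math.argmax(logits, axis=-1)[0].numpy()
    # Colorize the class-index map with the Cityscapes palette.
    return label_to_color_image(seg).astype(np.uint8)

For the rest of the new code to work, labels.txt is expected to contain one class name per line in the same order as palette() (road, sidewalk, building, ..., bicycle), and the example images city1.jpg, city2.jpg, and city3.jpg must exist in the repository. As of this commit, labels_list and the random, gridspec, and plt imports are not yet used.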