swjin committed on
Commit
5c3dbfa
1 Parent(s): 8952370

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -3
app.py CHANGED
@@ -8,10 +8,10 @@ import tensorflow as tf
8
  from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation
9
 
10
  feature_extractor = SegformerFeatureExtractor.from_pretrained(
11
- "mattmdjaga/segformer_b2_clothes"
12
  )
13
  model = TFSegformerForSemanticSegmentation.from_pretrained(
14
- "mattmdjaga/segformer_b2_clothes"
15
  )
16
 
17
  def ade_palette():
@@ -35,6 +35,7 @@ def ade_palette():
35
  [78, 89, 189],
36
  [189, 78, 57],
37
  [112, 200, 78],
 
38
  ]
39
 
40
  labels_list = []
@@ -103,7 +104,7 @@ def sepia(input_img):
103
  demo = gr.Interface(fn=sepia,
104
  inputs=gr.Image(shape=(400, 600)),
105
  outputs=['plot'],
106
- examples=["person-1.jpg", "person-2.jpg", "person-3.jpg", "person-4.jpg", "person-5.jpg"],
107
  allow_flagging='never')
108
 
109
 
 
8
  from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation
9
 
10
  feature_extractor = SegformerFeatureExtractor.from_pretrained(
11
+ "nvidia/segformer-b0-finetuned-cityscapes-768-768"
12
  )
13
  model = TFSegformerForSemanticSegmentation.from_pretrained(
14
+ "nvidia/segformer-b0-finetuned-cityscapes-768-768"
15
  )
16
 
17
  def ade_palette():
 
35
  [78, 89, 189],
36
  [189, 78, 57],
37
  [112, 200, 78],
38
+ [0, 0, 0],
39
  ]
40
 
41
  labels_list = []
 
104
  demo = gr.Interface(fn=sepia,
105
  inputs=gr.Image(shape=(400, 600)),
106
  outputs=['plot'],
107
+ examples=["image1.jpg", "image2.jpg", "image3.jpg", "image4.jpg"],
108
  allow_flagging='never')
109
 
110