taewon99 committed
Commit bb9629e • 1 Parent(s): 27b2858

Upload 9 files

Files changed (9)
  1. README.md +5 -5
  2. app.py +124 -0
  3. labels.txt +18 -0
  4. person-1.jpg +0 -0
  5. person-2.jpg +0 -0
  6. person-3.jpg +0 -0
  7. person-4.jpg +0 -0
  8. person-5.jpg +0 -0
  9. requirements.txt +6 -0
README.md CHANGED
@@ -1,10 +1,10 @@
  ---
- title: Cloth Segmentation
- emoji: 💻
- colorFrom: purple
- colorTo: gray
+ title: Segmentation
+ emoji: 👀
+ colorFrom: red
+ colorTo: blue
  sdk: gradio
- sdk_version: 4.1.1
+ sdk_version: 3.44.4
  app_file: app.py
  pinned: false
  ---
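The sdk_version pin back to 3.44.4 appears deliberate: app.py below calls gr.Image(shape=(400, 600)), and the shape argument is not accepted by Gradio 4, so the Space needs a 3.x SDK for the interface to construct.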
app.py ADDED
@@ -0,0 +1,124 @@
+ import gradio as gr
+
+ from matplotlib import gridspec
+ import matplotlib.pyplot as plt
+ import numpy as np
+ from PIL import Image
+ import tensorflow as tf
+ from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation
+
+ feature_extractor = SegformerFeatureExtractor.from_pretrained(
+     "mattmdjaga/segformer_b2_clothes"
+ )
+ model = TFSegformerForSemanticSegmentation.from_pretrained(
+     "mattmdjaga/segformer_b2_clothes"
+ )
+
+
+ def ade_palette():
+     """ADE20K palette that maps each class to RGB values."""
+     return [
+         [230, 183, 173],
+         [232, 181, 201],
+         [234, 208, 179],
+         [217, 192, 198],
+         [215, 195, 203],
+         [206, 166, 191],
+         [215, 162, 205],
+         [205, 176, 194],
+         [234, 196, 184],
+         [210, 181, 186],
+         [220, 152, 174],
+         [219, 187, 206],
+         [224, 172, 181],
+         [220, 162, 203],
+         [219, 188, 160],
+         [208, 178, 169],
+         [222, 196, 203],
+         [208, 153, 190],
+     ]
+
+
+ labels_list = []
+
+ with open(r"labels.txt", "r") as fp:
+     for line in fp:
+         labels_list.append(line[:-1])
+
+ colormap = np.asarray(ade_palette())
+
+
+ def label_to_color_image(label):
+     if label.ndim != 2:
+         raise ValueError("Expect 2-D input label")
+
+     if np.max(label) >= len(colormap):
+         raise ValueError("label value too large.")
+     return colormap[label]
+
+
+ def draw_plot(pred_img, seg):
+     fig = plt.figure(figsize=(20, 15))
+
+     grid_spec = gridspec.GridSpec(1, 2, width_ratios=[6, 1])
+
+     plt.subplot(grid_spec[0])
+     plt.imshow(pred_img)
+     plt.axis("off")
+     LABEL_NAMES = np.asarray(labels_list)
+     FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
+     FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)
+
+     unique_labels = np.unique(seg.numpy().astype("uint8"))
+     ax = plt.subplot(grid_spec[1])
+     plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation="nearest")
+     ax.yaxis.tick_right()
+     plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
+     plt.xticks([], [])
+     ax.tick_params(width=0.0, labelsize=25)
+     return fig
+
+
+ def sepia(input_img):
+     input_img = Image.fromarray(input_img)
+
+     inputs = feature_extractor(images=input_img, return_tensors="tf")
+     outputs = model(**inputs)
+     logits = outputs.logits
+
+     logits = tf.transpose(logits, [0, 2, 3, 1])
+     logits = tf.image.resize(
+         logits, input_img.size[::-1]
+     )  # We reverse the shape of `image` because `image.size` returns width and height.
+     seg = tf.math.argmax(logits, axis=-1)[0]
+
+     color_seg = np.zeros(
+         (seg.shape[0], seg.shape[1], 3), dtype=np.uint8
+     )  # height, width, 3
+     for label, color in enumerate(colormap):
+         color_seg[seg.numpy() == label, :] = color
+
+     # Show image + mask
+     pred_img = np.array(input_img) * 0.5 + color_seg * 0.5
+     pred_img = pred_img.astype(np.uint8)
+
+     fig = draw_plot(pred_img, seg)
+     return fig
+
+
+ demo = gr.Interface(
+     fn=sepia,
+     inputs=gr.Image(shape=(400, 600)),
+     outputs=["plot"],
+     examples=[
+         "person-1.jpg",
+         "person-2.jpg",
+         "person-3.jpg",
+         "person-4.jpg",
+         "person-5.jpg",
+     ],
+     allow_flagging="never",
+ )
+
+
+ demo.launch()
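Note that the handler is named sepia, apparently carried over from the Gradio image-filter template, even though it performs clothes segmentation. For a quick check of the same inference path outside the Gradio UI, the sketch below is illustrative only and not part of the commit; it assumes the packages from requirements.txt are installed and that person-1.jpg is available in the working directory.

import tensorflow as tf
from PIL import Image
from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation

# Same checkpoint that app.py loads; weights are downloaded on first use.
extractor = SegformerFeatureExtractor.from_pretrained("mattmdjaga/segformer_b2_clothes")
model = TFSegformerForSemanticSegmentation.from_pretrained("mattmdjaga/segformer_b2_clothes")

image = Image.open("person-1.jpg").convert("RGB")
inputs = extractor(images=image, return_tensors="tf")
logits = model(**inputs).logits                     # (1, num_labels, height/4, width/4)
logits = tf.image.resize(tf.transpose(logits, [0, 2, 3, 1]), image.size[::-1])
seg = tf.math.argmax(logits, axis=-1)[0].numpy()    # (height, width) array of class ids
print(sorted(set(seg.flatten().tolist())))          # ids present; names are listed in labels.txt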
labels.txt ADDED
@@ -0,0 +1,18 @@
+ Background
+ Hat
+ Hair
+ Sunglasses
+ Upper-clothes
+ Skirt
+ Pants
+ Dress
+ Belt
+ Left-shoe
+ Right-shoe
+ Face
+ Left-leg
+ Right-leg
+ Left-arm
+ Right-arm
+ Bag
+ Scarf
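The order of these labels matters: line i+1 names the class with id i in the model's segmentation map, and the color palette in app.py is indexed the same way. A minimal lookup sketch (assuming labels.txt is in the working directory):

with open("labels.txt") as fp:
    labels = [line.strip() for line in fp]

print(labels[4])  # class id 4 -> "Upper-clothes"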
person-1.jpg ADDED
person-2.jpg ADDED
person-3.jpg ADDED
person-4.jpg ADDED
person-5.jpg ADDED
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ torch
+ transformers
+ tensorflow
+ numpy
+ Image
+ matplotlib