Commit 6842e3e (1 parent: 456e34d), committed by jjw
Contents up to p-31
Files changed:
- app.py +99 -0
- label.txt +18 -0
- person-1.jpg +0 -0
- person-2.jpg +0 -0
- person-3.jpg +0 -0
- person-4.jpg +0 -0
- person-5.jpg +0 -0
- requirements.txt +6 -0
app.py ADDED
@@ -0,0 +1,99 @@
+import gradio as gr
+from matplotlib import gridspec
+import matplotlib.pyplot as plt
+import numpy as np
+from PIL import Image
+import torch
+from transformers import SegformerFeatureExtractor, AutoModelForSemanticSegmentation
+
+feature_extractor = SegformerFeatureExtractor.from_pretrained(
+    "mattmdjaga/segformer_b2_clothes"
+)
+model = AutoModelForSemanticSegmentation.from_pretrained(
+    "mattmdjaga/segformer_b2_clothes"
+)
+
+def ade_palette():
+    """ADE20K palette that maps each class to RGB values."""
+    return [
+        [120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3], [120, 120, 80],
+        [140, 140, 140], [204, 5, 255], [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
+        [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 5, 153], [6, 51, 255], [255, 153, 5]
+    ]
+
+labels_list = []
+with open("label.txt", "r", encoding="utf-8") as fp:
+    for line in fp:
+        labels_list.append(line.rstrip("\n"))
+
+colormap = np.asarray(ade_palette(), dtype=np.uint8)
+
+def label_to_color_image(label):
+    if label.ndim != 2:
+        raise ValueError("Expect 2-D input label")
+    if np.max(label) >= len(colormap):
+        raise ValueError("label value too large.")
+    return colormap[label]
+
+def draw_plot(pred_img, seg_np):
+    fig = plt.figure(figsize=(20, 15))
+    grid_spec = gridspec.GridSpec(1, 2, width_ratios=[6, 1])
+
+    plt.subplot(grid_spec[0])
+    plt.imshow(pred_img)
+    plt.axis('off')
+
+    LABEL_NAMES = np.asarray(labels_list)
+    FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
+    FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)
+
+    unique_labels = np.unique(seg_np.astype("uint8"))
+    ax = plt.subplot(grid_spec[1])
+    plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation="nearest")
+    ax.yaxis.tick_right()
+    plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
+    plt.xticks([], [])
+    ax.tick_params(width=0.0, labelsize=25)
+    return fig
+
+def run_inference(input_img):
+    # input: numpy array from gradio -> PIL
+    img = Image.fromarray(input_img.astype(np.uint8)) if isinstance(input_img, np.ndarray) else input_img
+    if img.mode != "RGB":
+        img = img.convert("RGB")
+
+    inputs = feature_extractor(images=img, return_tensors="pt")
+    with torch.no_grad():
+        outputs = model(**inputs)
+        logits = outputs.logits  # (1, C, h/4, w/4)
+
+    # resize to original
+    upsampled = torch.nn.functional.interpolate(
+        logits, size=img.size[::-1], mode="bilinear", align_corners=False
+    )
+    seg = upsampled.argmax(dim=1)[0].cpu().numpy().astype(np.uint8)  # (H,W)
+
+    # colorize & overlay
+    color_seg = colormap[seg]  # (H,W,3)
+    pred_img = (np.array(img) * 0.5 + color_seg * 0.5).astype(np.uint8)
+
+    fig = draw_plot(pred_img, seg)
+    return fig
+
+demo = gr.Interface(
+    fn=run_inference,
+    inputs=gr.Image(type="numpy", label="Input Image"),
+    outputs=gr.Plot(label="Overlay + Legend"),
+    examples=[
+        "person-1.jpg",
+        "person-2.jpg",
+        "person-3.jpg",
+        "person-4.jpg",
+        "person-5.jpg"
+    ],
+    flagging_mode="never",
+    cache_examples=False,
+)
+
+if __name__ == "__main__":
+    demo.launch()
label.txt ADDED
@@ -0,0 +1,18 @@
+background
+hat
+hair
+sunglasses
+upper-clothes
+skirt
+pants
+dress
+belt
+left-shoe
+right-shoe
+face
+left-leg
+right-leg
+left-arm
+right-arm
+bag
+scarf
person-1.jpg ADDED
person-2.jpg ADDED
person-3.jpg ADDED
person-4.jpg ADDED
person-5.jpg ADDED
requirements.txt ADDED
@@ -0,0 +1,6 @@
+torch
+transformers>=4.41.0
+gradio>=4.0.0
+Pillow
+numpy
+matplotlib
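
To try this commit outside the hosted Space, install the pinned dependencies (pip install -r requirements.txt) and run python app.py to launch the same Gradio interface locally. A minimal non-Gradio smoke test could look like the sketch below; it is an assumption rather than part of the commit, and it presumes app.py, label.txt, and the example images sit in the working directory (importing app downloads the mattmdjaga/segformer_b2_clothes weights on first use):

import numpy as np
from PIL import Image

from app import run_inference  # builds feature_extractor and model at import time

# Push one committed example image through the pipeline and save the
# overlay-plus-legend figure returned by draw_plot().
fig = run_inference(np.array(Image.open("person-1.jpg")))
fig.savefig("overlay.png")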