fj11 committed
Commit 8b653cc · verified · 1 Parent(s): d0ee23b

Create app.py

Files changed (1)
  1. app.py +110 -0
app.py ADDED
@@ -0,0 +1,110 @@
+ # app.py
+ import os, json
+ import numpy as np
+ from PIL import Image
+ import torch
+ from transformers import (
+     AutoConfig,
+     AutoModelForSemanticSegmentation,
+     SegformerImageProcessor,
+ )
+ import gradio as gr
+
+ # ===== Config =====
+ MODEL_ID = "fj11/fashion"
+ PROCESSOR_ID = MODEL_ID
+
+ # ===== Load processor =====
+ try:
+     processor = SegformerImageProcessor.from_pretrained(PROCESSOR_ID)
+ except Exception:
+     # Fallback: build the processor manually when the repo has no preprocessor_config.json
+     processor = SegformerImageProcessor(
+         size={"height": 512, "width": 512},
+         do_resize=True,
+         do_normalize=True,
+     )
+
+ # ===== Load model =====
+ try:
+     cfg = AutoConfig.from_pretrained(MODEL_ID)
+     model = AutoModelForSemanticSegmentation.from_pretrained(MODEL_ID, config=cfg)
+ except Exception:
+     # Compatibility with older repos that stored only the weights
+     model = AutoModelForSemanticSegmentation.from_pretrained(MODEL_ID)
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ dtype = torch.float16 if (device == "cuda") else torch.float32
+ model.to(device=device, dtype=dtype)
+ model.eval()
+
+ # ===== id2label / palette =====
+ id2label = getattr(model.config, "id2label", None)
+ if isinstance(id2label, dict):
+     id2label = {int(k): v for k, v in id2label.items()}
+ else:
+     id2label = {i: str(i) for i in range(model.config.num_labels)}
+ NUM_CLASSES = len(id2label)
+
+ def make_palette(n: int) -> np.ndarray:
+     # Fixed random seed for a stable color scheme; class 0 (background) is black
+     rng = np.random.default_rng(0)
+     colors = rng.integers(0, 255, size=(n, 3), dtype=np.uint8)
+     colors[0] = np.array([0, 0, 0], dtype=np.uint8)
+     return colors
+
+ PALETTE = make_palette(NUM_CLASSES)
+
+ # ===== Inference =====
+ @torch.no_grad()
+ def predict(pil_img: Image.Image, alpha: float = 0.5, show_overlay: bool = True):
+     if pil_img is None:
+         return None, None
+
+     img = pil_img.convert("RGB")
+     W, H = img.size
+
+     inputs = processor(images=img, return_tensors="pt")
+     pixel_values = inputs["pixel_values"].to(device, dtype=dtype)
+
+     outputs = model(pixel_values=pixel_values)
+     logits = outputs.logits  # (1, C, h, w)
+
+     # Upsample the logits to the original image size, then argmax
+     up = torch.nn.functional.interpolate(
+         logits, size=(H, W), mode="bilinear", align_corners=False
+     )
+     pred = up.argmax(dim=1)[0].to(torch.uint8).cpu().numpy()  # (H, W)
+
+     # Build the colored mask & the overlay
+     color_mask = PALETTE[pred]  # (H, W, 3)
+     mask_pil = Image.fromarray(color_mask, mode="RGB")
+
+     if show_overlay:
+         overlay = Image.blend(img, mask_pil, float(alpha))
+         return overlay, mask_pil
+     else:
+         return None, mask_pil
+
+ # ===== Gradio UI =====
+ with gr.Blocks(title="Fashion Segmentation") as demo:
+     gr.Markdown("## Fashion Segmentation\nUpload an image and run SegFormer inference.")
+     with gr.Row():
+         inp = gr.Image(type="pil", label="Upload image")
+         with gr.Column():
+             alpha = gr.Slider(0.0, 1.0, value=0.5, step=0.05, label="Overlay alpha")
+             show_overlay = gr.Checkbox(value=True, label="Show overlay")
+             btn = gr.Button("Predict", variant="primary")
+     with gr.Row():
+         out_overlay = gr.Image(label="Overlay", interactive=False)
+         out_mask = gr.Image(label="Colored mask", interactive=False)
+
+     def _run(image, a, show):
+         return predict(image, alpha=a, show_overlay=show)
+
+     btn.click(_run, inputs=[inp, alpha, show_overlay], outputs=[out_overlay, out_mask])
+
+ if __name__ == "__main__":
+     # Run locally: python app.py
+     # On HF Spaces: just name this file app.py
+     demo.launch()
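
For a quick smoke test of the committed file without launching the UI, `predict` can be called directly; a minimal sketch, assuming `app.py` is on the import path and a local image named `test.jpg` exists (a hypothetical path, not part of the commit):

# smoke_test.py — minimal sketch; test.jpg is a hypothetical local image
from PIL import Image
from app import predict  # importing app loads the model and processor

img = Image.open("test.jpg")
overlay, mask = predict(img, alpha=0.5, show_overlay=True)
overlay.save("overlay.png")
mask.save("mask.png")
print("saved overlay.png and mask.png")

Note that importing `app` builds the Blocks layout as a side effect, but `demo.launch()` only runs under `__main__`, so the import is safe for testing. The commit does not pin dependencies; judging from the imports, torch, transformers, gradio, pillow, and numpy are required.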