# NOTE(review): the three lines that were here ("Spaces:", "Runtime error" x2)
# were Hugging Face Spaces build-status text pasted into the file, not code;
# converted to a comment so the module parses.
| #!/usr/bin/env python3 | |
| """ | |
| Gradio demo — 坐姿检测 / Sitting Posture Detection | |
| HF Spaces 入口:sdk: gradio,app_file: app.py | |
| """ | |
| import sys | |
| import types | |
# yolov5 internally imports huggingface_hub.utils._errors; newer huggingface_hub
# releases moved those classes to huggingface_hub.errors.  Install a
# forward-compatibility shim module so the old import path keeps working
# instead of raising ImportError.
try:
    import huggingface_hub.utils._errors  # noqa: F401
except (ModuleNotFoundError, ImportError):
    import huggingface_hub.errors as _hf_errors

    _compat_module = types.ModuleType("huggingface_hub.utils._errors")
    for _attr in dir(_hf_errors):
        setattr(_compat_module, _attr, getattr(_hf_errors, _attr))
    sys.modules["huggingface_hub.utils._errors"] = _compat_module
| import torch | |
| # PyTorch 2.6+ 将 weights_only 默认改为 True,旧版 yolov5 模型需要兼容处理 | |
| _orig_torch_load = torch.load | |
| def _patched_torch_load(*args, **kwargs): | |
| kwargs.setdefault("weights_only", False) | |
| return _orig_torch_load(*args, **kwargs) | |
| torch.load = _patched_torch_load | |
| import cv2 | |
| import gradio as gr | |
| from app_models.load_model import InferenceModel | |
# Load the model once at module import time so every Gradio request reuses the
# same weights instead of re-loading them per call.
MODEL = InferenceModel("small640.pt")
def draw_result(img_bgr, x1, y1, x2, y2, label, conf):
    """Overlay a yellow detection box and a "<label> <conf>" tag on the image.

    Args:
        img_bgr: BGR image (numpy array); modified in place and returned.
        x1, y1, x2, y2: bounding-box corners.  Detector output may arrive as
            floats, so they are cast to int before drawing.
        label: class name to render ("good" / "bad").
        conf: detection confidence, rendered with two decimals.

    Returns:
        The same image with the box and label drawn on it.
    """
    # cv2 drawing APIs require integer pixel coordinates; cast defensively in
    # case the detector returned float (or numpy scalar) coordinates.
    x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
    color = (0, 255, 255)  # yellow in BGR
    cv2.rectangle(img_bgr, (x1, y1), (x2, y2), color, 2)
    text = f"{label} {conf:.2f}"
    (tw, th), _ = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)
    # Clamp the label background so it never extends above the top edge when
    # the detection box starts near y == 0.
    bg_top = max(0, y1 - th - 10)
    cv2.rectangle(img_bgr, (x1, bg_top), (x1 + tw + 4, y1), color, -1)
    cv2.putText(img_bgr, text, (x1 + 2, max(th + 4, y1 - 6)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
    return img_bgr
def analyze(image):
    """Run posture detection on one uploaded image.

    Args:
        image: RGB numpy array as supplied by Gradio, or None when nothing
            was uploaded.

    Returns:
        Tuple of (annotated RGB image or None, human-readable result text).
    """
    if image is None:
        return None, "请上传图片"

    bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    detections = MODEL.predict(bgr)
    x1, y1, x2, y2, cls, conf = InferenceModel.get_results(detections)

    # No detection above the confidence threshold: return the input unchanged.
    if cls is None:
        return image, "⚠️ 未检测到人(置信度低于 0.5)\n\n建议:请使用侧面角度的坐姿图片"

    posture = "good" if cls == 0 else "bad"
    marker = "✅" if posture == "good" else "❌"
    summary = (
        f"{marker} 姿势:{posture}(置信度 {conf:.2f})\n"
        f"BBox:[x1={x1}, y1={y1}, x2={x2}, y2={y2}]"
    )
    annotated = draw_result(bgr.copy(), x1, y1, x2, y2, posture, conf)
    return cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB), summary
import inspect

# Gradio 5.x renamed Interface's `allow_flagging` parameter to `flagging_mode`
# and dropped the old name, which makes the Space crash at startup on newer
# runtimes.  Pick whichever keyword this gradio version actually accepts.
_iface_params = inspect.signature(gr.Interface.__init__).parameters
_flag_kwargs = (
    {"flagging_mode": "never"}
    if "flagging_mode" in _iface_params
    else {"allow_flagging": "never"}
)

demo = gr.Interface(
    fn=analyze,
    inputs=gr.Image(type="numpy", label="上传坐姿图片(建议侧面角度)"),
    outputs=[
        gr.Image(type="numpy", label="检测结果"),
        gr.Textbox(label="分析结果", lines=3),
    ],
    title="🪑 坐姿检测 / Sitting Posture Detection",
    description=(
        "上传一张**侧面坐姿图片**,自动识别好/坏坐姿。\n\n"
        "基于 YOLOv5s,训练数据为侧面标准座椅场景。"
    ),
    examples=[
        ["examples/bad_1.png"],
        ["examples/bad_2.png"],
        ["examples/good_1.png"],
    ],
    # Don't pre-run inference on the example images at startup; caching them
    # executes the model during import and can fail the Space build.
    cache_examples=False,
    **_flag_kwargs,
)

if __name__ == "__main__":
    demo.launch()