Duplicate from dbmdz/detectron2-model-demo
Co-authored-by: Stefan <stefan-it@users.noreply.huggingface.co>
- .gitattributes +31 -0
- README.md +14 -0
- app.py +74 -0
- configs/detectron2/Base-RCNN-FPN.yaml +42 -0
- configs/detectron2/faster_rcnn_R_50_FPN_3x.yaml +9 -0
- requirements.txt +4 -0
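For context, this commit was produced by the Hub's "Duplicate Space" action. The same result can be obtained from Python; a minimal sketch, assuming a recent huggingface_hub release that exposes duplicate_space and a write token configured locally (the target repo id is only an example):

from huggingface_hub import duplicate_space

# Copies dbmdz/detectron2-model-demo into a Space under your own namespace.
# "your-username/detectron2-model-demo" is a placeholder, not a real target.
duplicate_space(
    from_id="dbmdz/detectron2-model-demo",
    to_id="your-username/detectron2-model-demo",
)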
.gitattributes
ADDED
@@ -0,0 +1,31 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,14 @@
+---
+title: Detectron2 Model Demo
+emoji: 👁
+colorFrom: pink
+colorTo: purple
+sdk: gradio
+sdk_version: 3.1.4
+app_file: app.py
+pinned: false
+license: mit
+duplicated_from: dbmdz/detectron2-model-demo
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,74 @@
+try:
+    import detectron2
+except ImportError:
+    # Install Detectron2 at runtime if it is not already available in the Space.
+    import os
+    os.system('pip install git+https://github.com/facebookresearch/detectron2.git')
+
+import cv2
+import gradio as gr
+import numpy as np
+import requests
+import torch
+
+from detectron2.engine import DefaultPredictor
+from detectron2.config import get_cfg
+from detectron2.utils.visualizer import Visualizer
+from detectron2.data import MetadataCatalog
+
+
+model_path = "https://huggingface.co/dbmdz/detectron2-model/resolve/main/model_final.pth"
+
+cfg = get_cfg()
+cfg.merge_from_file("./configs/detectron2/faster_rcnn_R_50_FPN_3x.yaml")
+cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
+cfg.MODEL.WEIGHTS = model_path
+
+my_metadata = MetadataCatalog.get("dbmdz_coco_all")
+my_metadata.thing_classes = ["Illumination", "Illustration"]
+
+if not torch.cuda.is_available():
+    cfg.MODEL.DEVICE = "cpu"
+
+
+def inference(image_url, image, min_score):
+    # Prefer the URL input; fall back to the uploaded image if the URL is empty
+    # or cannot be fetched.
+    im = None
+    if image_url:
+        r = requests.get(image_url)
+        if r:
+            im = np.frombuffer(r.content, dtype="uint8")
+            im = cv2.imdecode(im, cv2.IMREAD_COLOR)
+    if im is None:
+        # The model expects BGR input, so reverse the RGB channels from Gradio.
+        im = image[:, :, ::-1]
+
+    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = min_score
+    predictor = DefaultPredictor(cfg)
+
+    outputs = predictor(im)
+
+    v = Visualizer(im, my_metadata, scale=1.2)
+    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
+
+    return out.get_image()
+
+
+title = "DBMDZ Detectron2 Model Demo"
+description = "This demo is an interactive playground for our trained Detectron2 model. <br>The model was trained on manually annotated segments from digitized books to detect Illustration or Illumination segments on a given page."
+article = '<p>The Detectron2 model is available from our repository <a href="">here</a> on the Hugging Face Model Hub.</p>'
+
+gr.Interface(
+    inference,
+    [gr.inputs.Textbox(label="Image URL", placeholder="https://api.digitale-sammlungen.de/iiif/image/v2/bsb10483966_00008/full/500,/0/default.jpg"),
+     gr.inputs.Image(type="numpy", label="Input Image"),
+     gr.Slider(minimum=0.0, maximum=1.0, value=0.5, label="Minimum score"),
+     ],
+    gr.outputs.Image(type="pil", label="Output"),
+    title=title,
+    description=description,
+    article=article,
+    examples=[]).launch()
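For reference, the prediction path used by inference() can also be exercised without the Gradio UI. A minimal sketch, assuming detectron2 is installed, the config files from this repository are on disk, and the weights URL above is reachable; the sample URL is the placeholder already shown in the Textbox input:

import cv2
import numpy as np
import requests
import torch
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor

# Same configuration as app.py, but with a fixed score threshold.
cfg = get_cfg()
cfg.merge_from_file("./configs/detectron2/faster_rcnn_R_50_FPN_3x.yaml")
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
cfg.MODEL.WEIGHTS = "https://huggingface.co/dbmdz/detectron2-model/resolve/main/model_final.pth"
if not torch.cuda.is_available():
    cfg.MODEL.DEVICE = "cpu"

predictor = DefaultPredictor(cfg)

# Fetch a sample page (the demo's placeholder URL) and decode it as BGR for the model.
url = "https://api.digitale-sammlungen.de/iiif/image/v2/bsb10483966_00008/full/500,/0/default.jpg"
buf = np.frombuffer(requests.get(url).content, dtype="uint8")
im = cv2.imdecode(buf, cv2.IMREAD_COLOR)

outputs = predictor(im)
instances = outputs["instances"].to("cpu")
# Class 0 = Illumination, 1 = Illustration (the order set on my_metadata in app.py).
print(len(instances), "segments detected")
print(instances.pred_classes, instances.scores)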
configs/detectron2/Base-RCNN-FPN.yaml
ADDED
@@ -0,0 +1,42 @@
+MODEL:
+  META_ARCHITECTURE: "GeneralizedRCNN"
+  BACKBONE:
+    NAME: "build_resnet_fpn_backbone"
+  RESNETS:
+    OUT_FEATURES: ["res2", "res3", "res4", "res5"]
+  FPN:
+    IN_FEATURES: ["res2", "res3", "res4", "res5"]
+  ANCHOR_GENERATOR:
+    SIZES: [[32], [64], [128], [256], [512]]  # One size for each in feature map
+    ASPECT_RATIOS: [[0.5, 1.0, 2.0]]  # Three aspect ratios (same for all in feature maps)
+  RPN:
+    IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"]
+    PRE_NMS_TOPK_TRAIN: 2000  # Per FPN level
+    PRE_NMS_TOPK_TEST: 1000  # Per FPN level
+    # Detectron1 uses 2000 proposals per-batch,
+    # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue)
+    # which is approximately 1000 proposals per-image since the default batch size for FPN is 2.
+    POST_NMS_TOPK_TRAIN: 1000
+    POST_NMS_TOPK_TEST: 1000
+  ROI_HEADS:
+    NAME: "StandardROIHeads"
+    IN_FEATURES: ["p2", "p3", "p4", "p5"]
+  ROI_BOX_HEAD:
+    NAME: "FastRCNNConvFCHead"
+    NUM_FC: 2
+    POOLER_RESOLUTION: 7
+  ROI_MASK_HEAD:
+    NAME: "MaskRCNNConvUpsampleHead"
+    NUM_CONV: 4
+    POOLER_RESOLUTION: 14
+DATASETS:
+  TRAIN: ("coco_2017_train",)
+  TEST: ("coco_2017_val",)
+SOLVER:
+  IMS_PER_BATCH: 16
+  BASE_LR: 0.02
+  STEPS: (60000, 80000)
+  MAX_ITER: 90000
+INPUT:
+  MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
+VERSION: 2
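The IN_FEATURES names above (res2-res5 feeding the FPN, p2-p6 feeding the RPN and ROI heads) correspond to the backbone's output feature maps. A minimal sketch for inspecting them, assuming detectron2 is installed and the config path matches this repository layout:

from detectron2.config import get_cfg
from detectron2.modeling import build_model

cfg = get_cfg()
cfg.merge_from_file("./configs/detectron2/Base-RCNN-FPN.yaml")
cfg.MODEL.DEVICE = "cpu"  # so the sketch also runs on machines without a GPU

model = build_model(cfg)
# Prints the FPN levels (p2..p6) with their channel counts and strides,
# matching the IN_FEATURES lists used by RPN and ROI_HEADS above.
print(model.backbone.output_shape())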
configs/detectron2/faster_rcnn_R_50_FPN_3x.yaml
ADDED
@@ -0,0 +1,9 @@
+_BASE_: "./Base-RCNN-FPN.yaml"
+MODEL:
+  WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+  MASK_ON: False
+  RESNETS:
+    DEPTH: 50
+SOLVER:
+  STEPS: (210000, 250000)
+  MAX_ITER: 270000
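This file only overrides a handful of keys; everything else is inherited from Base-RCNN-FPN.yaml via the _BASE_ mechanism, and app.py then layers its own runtime overrides (NUM_CLASSES, WEIGHTS, SCORE_THRESH_TEST) on top. A minimal sketch of how the merged config resolves, assuming the same relative paths as in this repository:

from detectron2.config import get_cfg

cfg = get_cfg()
# merge_from_file() first loads the _BASE_ file (Base-RCNN-FPN.yaml), then
# applies the overrides from faster_rcnn_R_50_FPN_3x.yaml on top of it.
cfg.merge_from_file("./configs/detectron2/faster_rcnn_R_50_FPN_3x.yaml")

print(cfg.MODEL.BACKBONE.NAME)          # "build_resnet_fpn_backbone" (from the base file)
print(cfg.MODEL.RESNETS.DEPTH)          # 50 (overridden here)
print(cfg.SOLVER.MAX_ITER)              # 270000 (overridden here)
print(cfg.MODEL.ROI_HEADS.NUM_CLASSES)  # still the default 80 until app.py sets it to 2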
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+opencv-python-headless
+pyyaml==5.1
+torch
+torchvision