# Install Detectron2 on the fly if it is not already available.
try:
    import detectron2
except ImportError:
    import os
    os.system('pip install git+https://github.com/facebookresearch/detectron2.git')

import cv2
import gradio as gr
import numpy as np
import requests
import torch

from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultPredictor
from detectron2.utils.visualizer import Visualizer

model_path = "https://huggingface.co/dbmdz/detectron2-model/resolve/main/model_final.pth"

cfg = get_cfg()
cfg.merge_from_file("./faster_rcnn_X_101_32x8d_FPN_3x.yaml")
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
cfg.MODEL.WEIGHTS = model_path

my_metadata = MetadataCatalog.get("dbmdz_coco_all")
my_metadata.thing_classes = ["Illumination", "Illustration"]

if not torch.cuda.is_available():
    cfg.MODEL.DEVICE = "cpu"


def inference(image_url, image, min_score):
    # Prefer the image behind the URL; fall back to the uploaded image if the
    # URL is empty or the download fails.
    im = None
    if image_url:
        r = requests.get(image_url)
        if r.ok:
            buf = np.frombuffer(r.content, dtype="uint8")
            im = cv2.imdecode(buf, cv2.IMREAD_COLOR)
    if im is None:
        # Gradio delivers RGB, but the model expects BGR.
        im = image[:, :, ::-1]

    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = min_score
    predictor = DefaultPredictor(cfg)
    outputs = predictor(im)

    # The Visualizer expects an RGB image, so convert back from BGR.
    v = Visualizer(im[:, :, ::-1], my_metadata, scale=1.2)
    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    return out.get_image()


title = "DBMDZ Detectron2 Model Demo"
description = (
    "This demo provides an interactive playground for our trained Detectron2 model. "
    "The model was trained on manually annotated segments from digitized books to detect "
    "Illustration or Illumination segments on a given page."
)

article = (
    "<p>The Detectron2 model is available from our repository "
    '<a href="https://huggingface.co/dbmdz/detectron2-model">here</a> '
    "on the Hugging Face Model Hub.</p>"
)

gr.Interface(
    inference,
    [
        gr.Textbox(
            label="Image URL",
            placeholder="https://api.digitale-sammlungen.de/iiif/image/v2/bsb10483966_00008/full/500,/0/default.jpg",
        ),
        gr.Image(type="numpy", label="Input Image"),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.5, label="Minimum score"),
    ],
    gr.Image(type="pil", label="Output"),
    title=title,
    description=description,
    article=article,
    examples=[],
).launch()
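
# A minimal sketch of exercising the inference function directly, without the
# Gradio UI. The URL is the placeholder example from the Textbox above and the
# output filename is arbitrary; this snippet is illustrative only and not part
# of the demo itself.
#
#     result = inference(
#         "https://api.digitale-sammlungen.de/iiif/image/v2/bsb10483966_00008/full/500,/0/default.jpg",
#         None,
#         0.5,
#     )
#     # The Visualizer returns an RGB array; OpenCV expects BGR when writing.
#     cv2.imwrite("prediction.png", result[:, :, ::-1])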