ezzattarek commited on
Commit
2ba54d0
1 Parent(s): 90efe96

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +113 -0
app.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Detectron2 is not distributed on PyPI; if it is missing, install it from
# GitHub on first run so the Space can bootstrap itself.
try:
    import detectron2
except ImportError:
    # Catch only ImportError: the original bare `except:` would also have
    # swallowed unrelated failures (KeyboardInterrupt, SyntaxError in the
    # package, etc.) and then attempted a pointless reinstall.
    import os

    os.system("pip install git+https://github.com/facebookresearch/detectron2.git")
7
+
8
+ import cv2
9
+
10
+ from matplotlib.pyplot import axis
11
+ import gradio as gr
12
+ import requests
13
+ import numpy as np
14
+ from torch import nn
15
+ import requests
16
+
17
+ import torch
18
+
19
+ from detectron2 import model_zoo
20
+ from detectron2.engine import DefaultPredictor
21
+ from detectron2.config import get_cfg
22
+ from detectron2.utils.visualizer import Visualizer
23
+ from detectron2.data import MetadataCatalog
24
+
25
+
26
# Registry of available Detectron2 checkpoints. "cfg" and "metadata" start as
# placeholders and are filled in by the startup loop below.
models = [
    {
        "name": "Version 1 (2-class)",
        "model_path": "https://huggingface.co/dbmdz/detectron2-model/resolve/main/model_final.pth",
        "classes": ["footer"],
        "cfg": None,
        "metadata": None,
    },
]

# Reverse lookup: human-readable model name -> index into ``models``.
model_name_to_id = {entry["name"]: idx for idx, entry in enumerate(models)}
37
+
38
# Build a Detectron2 config and a metadata entry for every registered model.
for entry in models:
    cfg = get_cfg()
    cfg.merge_from_file("./configs/detectron2/faster_rcnn_R_50_FPN_3x.yaml")
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(entry["classes"])
    cfg.MODEL.WEIGHTS = entry["model_path"]
    # Without a GPU, force CPU inference so DefaultPredictor does not fail.
    if not torch.cuda.is_available():
        cfg.MODEL.DEVICE = "cpu"
    entry["cfg"] = cfg

    # Register the class names under the model's name in the MetadataCatalog
    # so the Visualizer can label detections.
    metadata = MetadataCatalog.get(entry["name"])
    metadata.thing_classes = entry["classes"]
    entry["metadata"] = metadata
49
+
50
+
51
def inference(image_url, image, min_score, model_name):
    """Run the selected Detectron2 model on an image and return a visualization.

    Args:
        image_url: Optional URL to fetch the image from. Takes precedence over
            ``image`` when it yields a decodable image.
        image: Optional RGB numpy array from the Gradio image widget.
        min_score: Confidence threshold below which detections are discarded.
        model_name: Key into ``model_name_to_id`` selecting the model to run.

    Returns:
        A numpy array with the predicted instances drawn onto the image.

    Raises:
        ValueError: If neither input yields a usable image.
    """
    im = None
    if image_url:
        # Bound the wait so a dead URL cannot hang the request handler.
        r = requests.get(image_url, timeout=30)
        if r.ok:
            buf = np.frombuffer(r.content, dtype="uint8")
            # imdecode returns None for undecodable payloads.
            im = cv2.imdecode(buf, cv2.IMREAD_COLOR)

    if im is None:
        # Fall back to the uploaded image. The original code left ``im``
        # unbound when the URL fetch failed, raising UnboundLocalError.
        if image is None:
            raise ValueError("Please provide an image URL or upload an image.")
        # The model expects BGR; Gradio supplies RGB, so reverse the channels.
        im = image[:, :, ::-1]

    model_id = model_name_to_id[model_name]

    # Apply the user-selected score threshold before building the predictor.
    models[model_id]["cfg"].MODEL.ROI_HEADS.SCORE_THRESH_TEST = min_score
    predictor = DefaultPredictor(models[model_id]["cfg"])

    outputs = predictor(im)

    v = Visualizer(im, models[model_id]["metadata"], scale=1.2)
    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))

    return out.get_image()
72
+
73
+
74
# UI copy rendered as Markdown at the top and bottom of the Gradio page.
title = "# DBMDZ Detectron2 Model Demo"
description = """
This demo introduces an interactive playground for our trained Detectron2 model.
"""
footer = "Made in Egypt with ❤️."
79
+
80
# Gradio UI definition. Statement order inside the Blocks context defines the
# on-page layout, so components are created in display order.
with gr.Blocks() as demo:
    gr.Markdown(title)
    gr.Markdown(description)

    # Two input paths: fetch the image from a URL, or upload one directly.
    with gr.Tab("From URL"):
        url_input = gr.Textbox(
            label="Image URL",
            placeholder="https://api.digitale-sammlungen.de/iiif/image/v2/bsb10483966_00008/full/500,/0/default.jpg",
        )

    with gr.Tab("From Image"):
        image_input = gr.Image(type="numpy", label="Input Image")

    # Minimum confidence score a detection needs to be drawn.
    min_score = gr.Slider(minimum=0.0, maximum=1.0, value=0.5, label="Minimum score")

    # Model selector; defaults to the first registered model.
    model_name = gr.Radio(
        choices=[model["name"] for model in models],
        value=models[0]["name"],
        label="Select Detectron2 model",
    )

    output_image = gr.Image(type="pil", label="Output")

    inference_button = gr.Button("Submit")

    # Wire the button to the inference function defined above.
    inference_button.click(
        fn=inference,
        inputs=[url_input, image_input, min_score, model_name],
        outputs=output_image,
    )

    gr.Markdown(footer)

demo.launch()