mbar0075 committed on
Commit 4a66401
1 Parent(s): 7de0bf6

Initial Commit

Files changed (2)
  1. app.py +212 -0
  2. requirements.txt +5 -0
app.py ADDED
@@ -0,0 +1,212 @@
+ from typing import Optional, Tuple
+
+ import gradio as gr
+ import numpy as np
+ import supervision as sv
+ from inference import get_model
+
+ MARKDOWN = """
+ <h1 style='text-align: center'>Detect Something 📈</h1>
+ Welcome to Detect Something! Just a simple demo to showcase the detection capabilities of various YOLOv8 models. 🚀🔍👀
+
+ A simple project, just for fun, for on-the-go object detection. 🎉
+
+ Inspired by YOLO-ARENA by SkalskiP. 🙏
+
+ Powered by Roboflow [Inference](https://github.com/roboflow/inference) and
+ [Supervision](https://github.com/roboflow/supervision). 🔥
+ """
+
+ IMAGE_EXAMPLES = [  # [image URL, three confidence thresholds, IoU threshold]
+     ['https://media.roboflow.com/supervision/image-examples/people-walking.png', 0.3, 0.3, 0.3, 0.5],
+     ['https://media.roboflow.com/supervision/image-examples/vehicles.png', 0.3, 0.3, 0.3, 0.5],
+     ['https://media.roboflow.com/supervision/image-examples/basketball-1.png', 0.3, 0.3, 0.3, 0.5],
+ ]
+
+ YOLO_V8N_MODEL = get_model(model_id="yolov8n-640")
+ YOLO_V8S_MODEL = get_model(model_id="yolov8s-640")
+ YOLO_V8M_MODEL = get_model(model_id="yolov8m-640")
+
+ LABEL_ANNOTATOR = sv.LabelAnnotator(text_color=sv.Color.black())
+ BOUNDING_BOX_ANNOTATOR = sv.BoundingBoxAnnotator()
+
+
+ def detect_and_annotate(
+     model,
+     input_image: np.ndarray,
+     confidence_threshold: float,
+     iou_threshold: float,
+     class_id_mapping: Optional[dict] = None
+ ) -> np.ndarray:
+     result = model.infer(
+         input_image,
+         confidence=confidence_threshold,
+         iou_threshold=iou_threshold
+     )[0]
+     detections = sv.Detections.from_inference(result)
+
+     if class_id_mapping:
+         detections.class_id = np.array([
+             class_id_mapping[class_id]
+             for class_id
+             in detections.class_id
+         ])
+
+     labels = [
+         f"{class_name} ({confidence:.2f})"
+         for class_name, confidence
+         in zip(detections['class_name'], detections.confidence)
+     ]
+
+     annotated_image = input_image.copy()
+     annotated_image = BOUNDING_BOX_ANNOTATOR.annotate(
+         scene=annotated_image, detections=detections)
+     annotated_image = LABEL_ANNOTATOR.annotate(
+         scene=annotated_image, detections=detections, labels=labels)
+     return annotated_image
+
+
+ def process_image(
+     input_image: np.ndarray,
+     yolo_v8n_confidence_threshold: float,
+     yolo_v8s_confidence_threshold: float,
+     yolo_v8m_confidence_threshold: float,
+     iou_threshold: float
+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+     # Guard against a missing slider value before using it.
+     if iou_threshold is None or not isinstance(iou_threshold, float):
+         iou_threshold = 0.3  # Default value, adjust as necessary.
+
+     yolo_v8n_annotated_image = detect_and_annotate(
+         YOLO_V8N_MODEL, input_image, yolo_v8n_confidence_threshold, iou_threshold)
+     yolo_v8s_annotated_image = detect_and_annotate(
+         YOLO_V8S_MODEL, input_image, yolo_v8s_confidence_threshold, iou_threshold)
+     yolo_v8m_annotated_image = detect_and_annotate(
+         YOLO_V8M_MODEL, input_image, yolo_v8m_confidence_threshold, iou_threshold)
+
+     return (
+         yolo_v8n_annotated_image,
+         yolo_v8s_annotated_image,
+         yolo_v8m_annotated_image
+     )
+
+
+ yolo_v8n_confidence_threshold_component = gr.Slider(
+     minimum=0,
+     maximum=1.0,
+     value=0.3,
+     step=0.01,
+     label="YOLOv8n Confidence Threshold",
+     info=(
+         "The confidence threshold for the YOLO model. Lower the threshold to "
+         "reduce false negatives, enhancing the model's sensitivity to detect "
+         "sought-after objects. Conversely, increase the threshold to minimize false "
+         "positives, preventing the model from identifying objects it shouldn't."
+     ))
+
+ yolo_v8s_confidence_threshold_component = gr.Slider(
+     minimum=0,
+     maximum=1.0,
+     value=0.3,
+     step=0.01,
+     label="YOLOv8s Confidence Threshold",
+     info=(
+         "The confidence threshold for the YOLO model. Lower the threshold to "
+         "reduce false negatives, enhancing the model's sensitivity to detect "
+         "sought-after objects. Conversely, increase the threshold to minimize false "
+         "positives, preventing the model from identifying objects it shouldn't."
+     ))
+
+ yolo_v8m_confidence_threshold_component = gr.Slider(
+     minimum=0,
+     maximum=1.0,
+     value=0.3,
+     step=0.01,
+     label="YOLOv8m Confidence Threshold",
+     info=(
+         "The confidence threshold for the YOLO model. Lower the threshold to "
+         "reduce false negatives, enhancing the model's sensitivity to detect "
+         "sought-after objects. Conversely, increase the threshold to minimize false "
+         "positives, preventing the model from identifying objects it shouldn't."
+     ))
+
+ iou_threshold_component = gr.Slider(
+     minimum=0,
+     maximum=1.0,
+     value=0.5,
+     step=0.01,
+     label="IoU Threshold",
+     info=(
+         "The Intersection over Union (IoU) threshold for non-maximum suppression. "
+         "Decrease the value to lessen the occurrence of overlapping bounding boxes, "
+         "making the detection process stricter. On the other hand, increase the value "
+         "to allow more overlapping bounding boxes, accommodating a broader range of "
+         "detections."
+     ))
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown(MARKDOWN)
+     with gr.Accordion("Configuration", open=False):
+         with gr.Row():
+             yolo_v8n_confidence_threshold_component.render()
+             yolo_v8s_confidence_threshold_component.render()
+             yolo_v8m_confidence_threshold_component.render()
+             iou_threshold_component.render()
+     with gr.Row():
+         input_image_component = gr.Image(
+             type='numpy',
+             label='Input'
+         )
+         yolo_v8n_output_image_component = gr.Image(
+             type='numpy',
+             label='YOLOv8n'
+         )
+     with gr.Row():
+         yolo_v8s_output_image_component = gr.Image(
+             type='numpy',
+             label='YOLOv8s'
+         )
+         yolo_v8m_output_image_component = gr.Image(
+             type='numpy',
+             label='YOLOv8m'
+         )
+     submit_button_component = gr.Button(
+         value='Submit',
+         scale=1,
+         variant='primary'
+     )
+     gr.Examples(
+         fn=process_image,
+         examples=IMAGE_EXAMPLES,
+         inputs=[
+             input_image_component,
+             yolo_v8n_confidence_threshold_component,
+             yolo_v8s_confidence_threshold_component,
+             yolo_v8m_confidence_threshold_component,
+             iou_threshold_component
+         ],
+         outputs=[
+             yolo_v8n_output_image_component,
+             yolo_v8s_output_image_component,
+             yolo_v8m_output_image_component
+         ]
+     )
+
+     submit_button_component.click(
+         fn=process_image,
+         inputs=[
+             input_image_component,
+             yolo_v8n_confidence_threshold_component,
+             yolo_v8s_confidence_threshold_component,
+             yolo_v8m_confidence_threshold_component,
+             iou_threshold_component
+         ],
+         outputs=[
+             yolo_v8n_output_image_component,
+             yolo_v8s_output_image_component,
+             yolo_v8m_output_image_component
+         ]
+     )
+
+ demo.launch(debug=False, show_error=True, max_threads=1)
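
For reference, the detection helper above can also be exercised outside the Gradio UI. A minimal sketch, assuming the pinned dependencies below are installed and that detect_and_annotate and YOLO_V8N_MODEL are in scope (e.g. pasted into a notebook, since importing app.py directly would also launch the demo); example.png is a hypothetical test image path:

import numpy as np
from PIL import Image

# Load a test image as an RGB array (example.png is a placeholder path).
image = np.asarray(Image.open("example.png").convert("RGB"))

# One YOLOv8n pass with the same thresholds the sliders default to.
annotated = detect_and_annotate(
    YOLO_V8N_MODEL,
    image,
    confidence_threshold=0.3,
    iou_threshold=0.5,
)
Image.fromarray(annotated).save("annotated.png")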
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ setuptools<70.0.0
+ awscli==1.29.54
+ gradio==4.19.2
+ inference==0.13.0
+ supervision==0.20.0
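
To try the Space locally, install the pinned dependencies with `pip install -r requirements.txt` and start the app with `python app.py`; Gradio prints a local URL once the interface is up. Note that the model weights are fetched through Roboflow Inference on first use, so the first launch may take a moment, and depending on your setup a ROBOFLOW_API_KEY environment variable may be required.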