Spaces: Running on Zero
Update app.py
Browse files

app.py CHANGED
@@ -168,39 +168,10 @@ async def predict_single_dog(image):
     return top1_prob, topk_breeds, topk_probs_percent
 
 
-async def detect_multiple_dogs(image, conf_threshold=0.35, iou_threshold=0.55):
-    results = model_yolo(image, conf=conf_threshold, iou=iou_threshold)[0]
-    dogs = []
-    boxes = []
-    for box in results.boxes:
-        if box.cls == 16:  # COCO dataset class for dog is 16
-            xyxy = box.xyxy[0].tolist()
-            confidence = box.conf.item()
-            boxes.append((xyxy, confidence))
-
-    if not boxes:
-        dogs.append((image, 1.0, [0, 0, image.width, image.height]))
-    else:
-        nms_boxes = non_max_suppression(boxes, iou_threshold)
-
-        for box, confidence in nms_boxes:
-            x1, y1, x2, y2 = box
-            w, h = x2 - x1, y2 - y1
-            x1 = max(0, x1 - w * 0.05)
-            y1 = max(0, y1 - h * 0.05)
-            x2 = min(image.width, x2 + w * 0.05)
-            y2 = min(image.height, y2 + h * 0.05)
-            cropped_image = image.crop((x1, y1, x2, y2))
-            dogs.append((cropped_image, confidence, [x1, y1, x2, y2]))
-
-    return dogs
-
-
-# async def detect_multiple_dogs(image, conf_threshold=0.35, iou_threshold=0.5):
+# async def detect_multiple_dogs(image, conf_threshold=0.35, iou_threshold=0.55):
 #     results = model_yolo(image, conf=conf_threshold, iou=iou_threshold)[0]
 #     dogs = []
 #     boxes = []
-
 #     for box in results.boxes:
 #         if box.cls == 16:  # COCO dataset class for dog is 16
 #             xyxy = box.xyxy[0].tolist()
@@ -213,28 +184,57 @@ async def detect_multiple_dogs(image, conf_threshold=0.35, iou_threshold=0.55):
 #         nms_boxes = non_max_suppression(boxes, iou_threshold)
 
 #         for box, confidence in nms_boxes:
-#             x1, y1, x2, y2 =
+#             x1, y1, x2, y2 = box
+#             w, h = x2 - x1, y2 - y1
+#             x1 = max(0, x1 - w * 0.05)
+#             y1 = max(0, y1 - h * 0.05)
+#             x2 = min(image.width, x2 + w * 0.05)
+#             y2 = min(image.height, y2 + h * 0.05)
 #             cropped_image = image.crop((x1, y1, x2, y2))
 #             dogs.append((cropped_image, confidence, [x1, y1, x2, y2]))
 
-#     # Apply a filter to remove likely false detections
-#     dogs = filter_detections(dogs, (image.width, image.height))
-
 #     return dogs
 
-
-
-
+
+async def detect_multiple_dogs(image, conf_threshold=0.35, iou_threshold=0.5):
+    results = model_yolo(image, conf=conf_threshold, iou=iou_threshold)[0]
+    dogs = []
+    boxes = []
+
+    for box in results.boxes:
+        if box.cls == 16:  # COCO dataset class for dog is 16
+            xyxy = box.xyxy[0].tolist()
+            confidence = box.conf.item()
+            boxes.append((xyxy, confidence))
+
+    if not boxes:
+        dogs.append((image, 1.0, [0, 0, image.width, image.height]))
+    else:
+        nms_boxes = non_max_suppression(boxes, iou_threshold)
+
+        for box, confidence in nms_boxes:
+            x1, y1, x2, y2 = [int(coord) for coord in box]
+            cropped_image = image.crop((x1, y1, x2, y2))
+            dogs.append((cropped_image, confidence, [x1, y1, x2, y2]))
+
+    # Apply a filter to remove likely false detections
+    dogs = filter_detections(dogs, (image.width, image.height))
+
+    return dogs
+
+def filter_detections(dogs, image_size):
+    filtered_dogs = []
+    image_area = image_size[0] * image_size[1]
 
+    for dog in dogs:
+        _, _, box = dog
+        dog_area = (box[2] - box[0]) * (box[3] - box[1])
+        area_ratio = dog_area / image_area
 
+        if 0.01 < area_ratio < 0.9:  # drop boxes that are too small or too large
+            filtered_dogs.append(dog)
 
+    return filtered_dogs
 
 
 def non_max_suppression(boxes, iou_threshold):
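The new detect_multiple_dogs collects (xyxy, confidence) pairs for every YOLO box with class 16 and hands them to non_max_suppression(boxes, iou_threshold), which is defined further down in app.py and not shown in this hunk. As a rough orientation only, a minimal greedy IoU-based NMS over that input format could look like the sketch below; the names iou and non_max_suppression_sketch are illustrative and are not taken from app.py, and the Space's actual helper may differ.

def iou(box_a, box_b):
    # Intersection-over-union of two [x1, y1, x2, y2] boxes.
    x1 = max(box_a[0], box_b[0])
    y1 = max(box_a[1], box_b[1])
    x2 = min(box_a[2], box_b[2])
    y2 = min(box_a[3], box_b[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0

def non_max_suppression_sketch(boxes, iou_threshold):
    # boxes: list of ([x1, y1, x2, y2], confidence), as built in detect_multiple_dogs.
    # Greedy NMS: keep the highest-confidence box, discard remaining boxes that
    # overlap it beyond iou_threshold, then repeat on what is left.
    remaining = sorted(boxes, key=lambda b: b[1], reverse=True)
    kept = []
    while remaining:
        best = remaining.pop(0)
        kept.append(best)
        remaining = [b for b in remaining if iou(best[0], b[0]) < iou_threshold]
    return kept

For context, a hypothetical caller of the new coroutine (assuming model_yolo, non_max_suppression, and filter_detections are already defined in app.py; the input path is made up) might look like this. Each returned entry is a (cropped PIL image, confidence, [x1, y1, x2, y2]) tuple, with the whole frame returned at confidence 1.0 when no dog is detected.

import asyncio
from PIL import Image

async def main():
    image = Image.open("dogs.jpg").convert("RGB")  # hypothetical input file
    dogs = await detect_multiple_dogs(image, conf_threshold=0.35, iou_threshold=0.5)
    for cropped, confidence, (x1, y1, x2, y2) in dogs:
        print(f"dog at ({x1}, {y1}, {x2}, {y2}), confidence {confidence:.2f}")

asyncio.run(main())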