import torch
import numpy as np
import cv2
import os
BASE_DIR = os.path.abspath(os.getcwd())
model_plates = torch.hub.load('ultralytics/yolov5', 'custom',
                              path=os.path.join(BASE_DIR, 'detector', 'static', 'plates.pt'))
# model_plates.conf = 0.60          # NMS confidence threshold
# model_plates.iou = 0.60           # NMS IoU threshold
# model_plates.agnostic = False     # NMS class-agnostic
# model_plates.multi_label = False  # NMS multiple labels per box
# model_plates.classes = None       # (optional list) filter by class, e.g. [0, 15, 16] for COCO persons, cats and dogs
# model_plates.max_det = 1          # maximum number of detections per image
# model_plates.amp = False          # Automatic Mixed Precision (AMP) inference
model_chars = torch.hub.load('ultralytics/yolov5', 'custom',
                             path=os.path.join(BASE_DIR, 'detector', 'static', 'chars.pt'))


def pad_img_to_fit_bbox(img, x1, x2, y1, y2):
    # Pad the image with black pixels so an out-of-bounds bbox fits, then
    # shift the bbox into the padded frame. The pad offsets must be computed
    # *before* mutating x1/y1, otherwise x2/y2 are never shifted.
    pad_y = np.abs(np.minimum(0, y1))
    pad_x = np.abs(np.minimum(0, x1))
    img = np.pad(img, ((pad_y, np.maximum(y2 - img.shape[0], 0)),
                       (pad_x, np.maximum(x2 - img.shape[1], 0)),
                       (0, 0)), mode="constant")
    y1 += pad_y
    y2 += pad_y
    x1 += pad_x
    x2 += pad_x
    return img, x1, x2, y1, y2
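# Quick sanity check (hypothetical values, kept commented so it never runs on
# import): pad a 100x100 image so a box hanging off every border fits.
# _img = np.zeros((100, 100, 3), dtype=np.uint8)
# _img, _x1, _x2, _y1, _y2 = pad_img_to_fit_bbox(_img, -10, 110, -10, 110)
# assert _img.shape == (120, 120, 3) and (_x1, _y1, _x2, _y2) == (0, 0, 120, 120)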


def imcrop(img, bbox):
    # Crop bbox from img, padding first if the box extends past any border.
    x1, y1, x2, y2 = bbox
    if x1 < 0 or y1 < 0 or x2 > img.shape[1] or y2 > img.shape[0]:
        img, x1, x2, y1, y2 = pad_img_to_fit_bbox(img, x1, x2, y1, y2)
    return img[y1:y2, x1:x2, :]
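# Usage sketch (assumes `frame` is a BGR ndarray; the coordinates are made
# up): out-of-bounds boxes come back zero-padded instead of raising.
# patch = imcrop(frame, (-20, 50, 200, 300))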


def detect_plates(img):
    # Run the plate detector and return a list of
    # ((xmin, ymin), (xmax, ymax), crop) tuples, one per detected plate.
    detect = model_plates(img)
    records = detect.pandas().xyxy[0].to_dict(orient='records')
    plates = []
    for plate in records:
        xi, yi, xf, yf = int(plate['xmin']), int(plate['ymin']), int(plate['xmax']), int(plate['ymax'])
        crop = imcrop(img, (xi, yi, xf, yf))
        plates.append(((xi, yi), (xf, yf), crop))
    return plates
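# Usage sketch (hypothetical `frame`): outline every detected plate.
# for (x1, y1), (x2, y2), _crop in detect_plates(frame):
#     cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)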


def detect_chars(img):
    # Resize to the detector's input size, recognise the characters, and sort
    # the detections by xmin so the plate reads left to right.
    img = cv2.resize(img, (640, 480))
    detect = model_chars(img)
    records = detect.pandas().xyxy[0].to_dict(orient='records')
    rendered = np.squeeze(detect.render())  # image with the boxes drawn on it
    text = ''
    if records:
        records = sorted(records, key=lambda d: d['xmin'])
        text = ''.join([r.get('name') for r in records])
    return text, rendered
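# End-to-end sketch ('car.jpg' is a placeholder path, not part of this repo):
# detect the plates in a frame, then read the characters off each crop.
# frame = cv2.imread('car.jpg')
# for (x1, y1), (x2, y2), crop in detect_plates(frame):
#     text, rendered = detect_chars(crop)
#     print(f'plate at ({x1}, {y1})-({x2}, {y2}): {text}')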


def save_plates(img):
    # Detect plates and let YOLOv5 write the cropped regions to disk.
    detect = model_plates(img)
    detect.crop(save=True)
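# Note: with no save_dir argument, YOLOv5's results.crop(save=True) writes the
# crops under an auto-incremented runs/detect/exp*/crops/ directory relative
# to the current working directory.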


# def yolo_detections_to_norfair_detections(yolo_detections, track_points="centroid"):
#     """Convert YOLOv5 detections to norfair Detection objects."""
#     norfair_detections = []
#
#     if track_points == "centroid":
#         detections_as_xywh = yolo_detections.xywh[0]
#         for detection_as_xywh in detections_as_xywh:
#             centroid = np.array(
#                 [detection_as_xywh[0].item(), detection_as_xywh[1].item()]
#             )
#             scores = np.array([detection_as_xywh[4].item()])
#             norfair_detections.append(
#                 Detection(
#                     points=centroid,
#                     scores=scores,
#                     label=int(detection_as_xywh[-1].item()),
#                 )
#             )
#     elif track_points == "bbox":
#         detections_as_xyxy = yolo_detections.xyxy[0]
#         for detection_as_xyxy in detections_as_xyxy:
#             bbox = np.array(
#                 [
#                     [detection_as_xyxy[0].item(), detection_as_xyxy[1].item()],
#                     [detection_as_xyxy[2].item(), detection_as_xyxy[3].item()],
#                 ]
#             )
#             scores = np.array(
#                 [detection_as_xyxy[4].item(), detection_as_xyxy[4].item()]
#             )
#             norfair_detections.append(
#                 Detection(
#                     points=bbox, scores=scores, label=int(detection_as_xyxy[-1].item())
#                 )
#             )
#
#     return norfair_detections
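# Note (assumption): re-enabling the block above also requires
# `from norfair import Detection` at the top of this file.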