|
from model1 import reader, np, YOLO, car_detection, lp_detection |
|
import torch |
|
from PIL import Image |
|
import cv2 |
|
from torchvision import transforms |
|
|
|
# Character detector: YOLOv8 model that localizes individual characters on a plate crop.
char_dect = YOLO("models/yolov8n_lpchar_det.pt")

# Character recognizer: fully pickled torch model loaded onto CPU; downstream code
# feeds it 224x224 tensors and argmaxes over 36 classes (0-9, A-Z).
# NOTE(review): torch.load on a pickled checkpoint executes arbitrary code — only
# load trusted model files.
char_rec = torch.load("models/charrec.pt", map_location="cpu")
|
|
|
|
|
def detect_cars(inputs):
    """Run the car detector and return one BGR crop per detected car.

    Args:
        inputs: image(s) accepted by ``car_detection.predict`` (path, ndarray,
            or a list of either).

    Returns:
        list of numpy arrays, each a crop of a detected car (COCO class 2,
        confidence >= 0.5). Empty list when nothing is found.
    """
    results = car_detection.predict(source=inputs, classes=[2], conf=0.5, verbose=False)

    crops = []
    for result in results:
        frame = result.orig_img
        # xyxy boxes are floats; truncate to int pixel indices for slicing.
        for x1, y1, x2, y2 in result.boxes.xyxy.tolist():
            crops.append(frame[int(y1):int(y2), int(x1):int(x2)])
    return crops
|
|
|
|
|
def detect_lp(inputs):
    """Detect license plates, keeping at most one plate per input image.

    Args:
        inputs: image(s) accepted by ``lp_detection.predict`` (typically the
            car crops produced by ``detect_cars``).

    Returns:
        list of numpy arrays — the highest-ranked plate crop for each result,
        or a 100x100 black placeholder when a result has no detections.
    """
    results = lp_detection.predict(source=inputs, conf=0.5, verbose=False)

    plates = []
    for result in results:
        boxes = result.boxes.xyxy.tolist()
        if not boxes:
            # Keep output aligned with inputs: black placeholder for "no plate".
            plates.append(np.zeros((100, 100, 3), np.uint8))
            continue
        # Only the first (top-ranked) detection is kept per image.
        x1, y1, x2, y2 = boxes[0]
        plates.append(result.orig_img[int(y1):int(y2), int(x1):int(x2)])
    return plates
|
|
|
|
|
def chars_lp_det(inputs):
    """Detect individual characters on each plate crop.

    Args:
        inputs: image(s) accepted by ``char_dect.predict`` (typically the
            plate crops produced by ``detect_lp``).

    Returns:
        tuple (vis_lp, chars):
            vis_lp: list of plate images with green character boxes drawn
                (a 100x100 black placeholder if there were no results at all);
            chars: list (one entry per plate) of lists of character crops,
                ordered left to right.
    """
    vis_lp = []
    chars = []

    chars_results = char_dect.predict(source=inputs, conf=0.5, verbose=False)

    for chars_result in chars_results:
        # BUGFIX: YOLO returns boxes in detection order, not reading order.
        # Sort left-to-right by x1 so the recognizer assembles the plate
        # string in the order the characters appear on the plate.
        chars_boxes = sorted(chars_result.boxes.xyxy.tolist(), key=lambda b: b[0])

        vis = chars_result.orig_img.copy()
        c_list = []
        for box in chars_boxes:
            x1, y1, x2, y2 = (int(v) for v in box)
            cv2.rectangle(vis, (x1, y1), (x2, y2), (0, 255, 0), 1)
            # Crop from the untouched original so crops carry no drawn boxes.
            c_list.append(chars_result.orig_img[y1:y2, x1:x2])

        chars.append(c_list)
        vis_lp.append(vis)

    if not vis_lp:
        vis_lp.append(np.zeros((100, 100, 3), np.uint8))

    return vis_lp, chars
|
|
|
|
|
def detect_lp_text(inputs):
    """OCR each plate crop with the shared ``reader``.

    Args:
        inputs: iterable of plate-crop images.

    Returns:
        list of strings, one per input: the text of the first OCR hit, or
        "not found" when the reader returns nothing.
    """
    plate_number = []
    # Renamed from `input`, which shadowed the builtin.
    for plate_img in inputs:
        result = reader.readtext(plate_img)
        if not result:
            plate_number.append("not found")
        else:
            # readtext yields (bbox, text, confidence) tuples; keep the
            # first hit's text field.
            plate_number.append(result[0][1])
    return plate_number
|
|
|
def rec_lp_char(inputs):
    """Classify each plate's character crops and assemble plate strings.

    Args:
        inputs: list (one entry per plate) of lists of character-crop images.

    Returns:
        list of strings, one per plate: the concatenated predicted characters
        (0-9, A-Z), or "not found" when fewer than two crops were supplied.
    """
    # Class index i maps to alphabet[i]: digits 0-9 then letters A-Z.
    alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    to_tensor = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
    ])

    lptexts = []
    for char_crops in inputs:
        batch = [to_tensor(Image.fromarray(crop)) for crop in char_crops]
        if len(batch) <= 1:
            # Zero or one detected character: treat the plate as unreadable.
            lptexts.append("not found")
            continue
        logits = char_rec(torch.stack(batch))
        preds = torch.argmax(logits, dim=1).tolist()
        lptexts.append("".join(alphabet[int(p)] for p in preds))
    return lptexts
|
|
|
|
|
def run(inputs):
    """Full pipeline: frame -> car crops -> plate crops -> plate strings.

    Args:
        inputs: sequence whose first element is the frame to process.

    Returns:
        tuple (cars, vis_lp, lptexts). When no car is detected, returns two
        single-placeholder image lists and the string "not found" instead.
    """
    frame = inputs[0]

    cars = detect_cars(frame)
    if not cars:
        # Distinct placeholder arrays so callers mutating one don't affect the other.
        return (
            [np.zeros((100, 100, 3), np.uint8)],
            [np.zeros((100, 100, 3), np.uint8)],
            "not found",
        )

    lps = detect_lp(cars)
    vis_lp, chars_lp = chars_lp_det(lps)
    lptexts = rec_lp_char(chars_lp)

    return cars, vis_lp, lptexts
|
|
|
|