# suppress TensorFlow warning messages
import json
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# required libraries
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from local_utils import detect_lp, getPath
from os.path import splitext,basename
from keras.models import model_from_json
from keras.preprocessing.image import load_img, img_to_array
from keras.applications.mobilenet_v2 import preprocess_input
from sklearn.preprocessing import LabelEncoder
import glob
import gradio as gr
from transfer import load_model
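# Sort character contours left-to-right by the x-coordinate of their bounding boxes
# so the plate is read in natural order.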
def sort_contours(cnts, reverse=False):
    i = 0
    boundingBoxes = [cv2.boundingRect(c) for c in cnts]
    (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
                                        key=lambda b: b[1][i], reverse=reverse))
    return cnts
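# Run the character recognizer on a single cropped character image and decode the
# predicted class index back to its label (letter or digit).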
def predict_from_model(image, model, labels):
    image = cv2.resize(image, (80, 80))
    image = np.stack((image,) * 3, axis=-1)  # replicate the single channel to the 3-channel input the model expects
    prediction = labels.inverse_transform([np.argmax(model.predict(image[np.newaxis, :]))])
    return prediction
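# Gradio callback: detect the plate with WPOD-NET, segment candidate characters,
# classify each one, and return the concatenated plate string.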
def classify(img, resize=False, Dmax=650, Dmin=270):
    wpod_net_path = "wpod-net.json"
    wpod_net = load_model(wpod_net_path)
    ## preprocess image
    #img = cv2.imread(image_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = img / 255
    if resize:
        img = cv2.resize(img, (224, 224))
    ## get plate
    vehicle = img
    ratio = float(max(vehicle.shape[:2])) / min(vehicle.shape[:2])
    side = int(ratio * Dmin)
    bound_dim = min(side, Dmax)
    _, LpImg, _, cor = detect_lp(wpod_net, vehicle, bound_dim, lp_threshold=0.5)
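    # LpImg holds the plate crop(s) returned by WPOD-NET; continue only if at
    # least one plate region was found.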
    if len(LpImg):  # check that there is at least one license-plate image
        # Scale, take absolute values, and convert the result to 8-bit
        plate_image = cv2.convertScaleAbs(LpImg[0], alpha=255.0)
        # Convert to grayscale and blur the image
        gray = cv2.cvtColor(plate_image, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(gray, (7, 7), 0)
        # Apply inverted binary threshold (with Otsu)
        binary = cv2.threshold(blur, 180, 255,
                               cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
        kernel3 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        thre_mor = cv2.morphologyEx(binary, cv2.MORPH_DILATE, kernel3)
        cont, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        test_roi = plate_image.copy()
        crop_characters = []
        # Target size each segmented character is resized to before recognition
        digit_w, digit_h = 30, 60
        for c in sort_contours(cont):
            (x, y, w, h) = cv2.boundingRect(c)
            ratio = h / w
            if 1 <= ratio <= 3.5:  # only keep contours with a character-like aspect ratio
                if h / plate_image.shape[0] >= 0.5:  # keep contours taller than 50% of the plate height
                    # Draw a bounding box around the character
                    cv2.rectangle(test_roi, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    # Separate the character and prepare it for prediction
                    curr_num = thre_mor[y:y + h, x:x + w]
                    curr_num = cv2.resize(curr_num, dsize=(digit_w, digit_h))
                    _, curr_num = cv2.threshold(curr_num, 220, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
                    crop_characters.append(curr_num)
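        # The recognizer files below are loaded by relative path, so they are
        # assumed to sit next to this script in the repository.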
        #print("Detect {} letters...".format(len(crop_characters)))
        # Load model architecture, weights and labels
        json_file = open('MobileNets_character_recognition.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        model = model_from_json(loaded_model_json)
        model.load_weights("License_character_recognition_weight.h5")
        labels = LabelEncoder()
        labels.classes_ = np.load('license_character_classes.npy')
        #fig = plt.figure(figsize=(15,3))
        #cols = len(crop_characters)
        #grid = gridspec.GridSpec(ncols=cols,nrows=1,figure=fig)
        final_string = ''
        for i, character in enumerate(crop_characters):
            #fig.add_subplot(grid[i])
            title = np.array2string(predict_from_model(character, model, labels))
            #plt.title('{}'.format(title.strip("'[]"),fontsize=20))
            final_string += title.strip("'[]")
            #plt.axis(False)
            #plt.imshow(character,cmap='gray')
        if len(crop_characters) == 0:
            return "No Plate Detected"
        return final_string
    # WPOD-NET returned no plate region
    return "No Plate Detected"
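# Build the Gradio demo: a single image input, the recognized plate string as text output.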
gr.Interface(fn=classify,
             inputs=gr.inputs.Image(),
             outputs="text",
             title="Plate Number Recognition",
             examples=['29Z5550.jpeg', 'germany_car_plate.jpg', 'india_car_plate.jpg', 'turkey_car_plate.jpg', 'vietnam_car_rectangle_plate.jpg'],
             description="Automatically recognize the characters on a motor vehicle's number plate from a provided image. This can help the authorities automatically detect vehicles that violate a number-coding scheme.",
             allow_flagging="never").launch(inbrowser=True)
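# The example images above are referenced by relative path and are assumed to live
# alongside this script.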
#classify("C:/Users/JomerJuan/Documents/Deep Learning/Plate Number Recognition/Plate_examples/germany_car_plate.jpg",resize=False,Dmax=650, Dmin = 270)