jsjuan committed on
Commit
bbbe2ac
1 Parent(s): 9d8a36e

Create app.py

Files changed (1)
  1. app.py +136 -0
app.py ADDED
@@ -0,0 +1,136 @@
+ # suppress TensorFlow warning messages
+ import json
+ import os
+ from typing import final
+ os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+
+ # required libraries
+ import cv2
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import matplotlib.gridspec as gridspec
+ from local_utils import detect_lp, getPath
+ from os.path import splitext, basename
+ from keras.models import model_from_json
+ from keras.preprocessing.image import load_img, img_to_array
+ from keras.applications.mobilenet_v2 import preprocess_input
+ from sklearn.preprocessing import LabelEncoder
+ import glob
+ import gradio as gr
+ from transfer import load_model
+
+ def sort_contours(cnts, reverse=False):
+     # sort contours left to right by the x coordinate of their bounding boxes
+     i = 0
+     boundingBoxes = [cv2.boundingRect(c) for c in cnts]
+     (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
+                                         key=lambda b: b[1][i], reverse=reverse))
+     return cnts
+
+ def predict_from_model(image, model, labels):
+     # resize a character crop to 80x80, stack it into 3 channels and decode the model's prediction
+     image = cv2.resize(image, (80, 80))
+     image = np.stack((image,) * 3, axis=-1)
+     prediction = labels.inverse_transform([np.argmax(model.predict(image[np.newaxis, :]))])
+     return prediction
+
+ def classify(img, resize=False, Dmax=650, Dmin=270):
+     # load the WPOD-NET license plate detector
+     wpod_net_path = "wpod-net.json"
+     wpod_net = load_model(wpod_net_path)
+     ## preprocess the image
+     #img = cv2.imread(image_path)
+     img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+     img = img / 255
+     if resize:
+         img = cv2.resize(img, (224, 224))
+
+     ## detect the plate region
+     vehicle = img
+     ratio = float(max(vehicle.shape[:2])) / min(vehicle.shape[:2])
+     side = int(ratio * Dmin)
+     bound_dim = min(side, Dmax)
+     _, LpImg, _, cor = detect_lp(wpod_net, vehicle, bound_dim, lp_threshold=0.5)
+
+     if len(LpImg):  # check that at least one license plate image was found
+         # scale, take absolute values and convert the result to 8-bit
+         plate_image = cv2.convertScaleAbs(LpImg[0], alpha=255.0)
+
+         # convert to grayscale and blur the image
+         gray = cv2.cvtColor(plate_image, cv2.COLOR_BGR2GRAY)
+         blur = cv2.GaussianBlur(gray, (7, 7), 0)
+
+         # apply inverse binary thresholding (Otsu)
+         binary = cv2.threshold(blur, 180, 255,
+                                cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
+
+         kernel3 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
+         thre_mor = cv2.morphologyEx(binary, cv2.MORPH_DILATE, kernel3)
+
+         cont, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+
+         test_roi = plate_image.copy()
+         crop_characters = []
+         digit_w, digit_h = 30, 60
+
+         for c in sort_contours(cont):
+             (x, y, w, h) = cv2.boundingRect(c)
+             ratio = h / w
+             if 1 <= ratio <= 3.5:  # only keep contours with a character-like aspect ratio
+                 if h / plate_image.shape[0] >= 0.5:  # only keep contours taller than 50% of the plate
+                     # draw a bounding box around the character
+                     cv2.rectangle(test_roi, (x, y), (x + w, y + h), (0, 255, 0), 2)
+
+                     # separate the character and prepare it for prediction
+                     curr_num = thre_mor[y:y + h, x:x + w]
+                     curr_num = cv2.resize(curr_num, dsize=(digit_w, digit_h))
+                     _, curr_num = cv2.threshold(curr_num, 220, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
+                     crop_characters.append(curr_num)
+
+         #print("Detect {} letters...".format(len(crop_characters)))
+         # load the character-recognition model architecture, weights and labels
+         json_file = open('MobileNets_character_recognition.json', 'r')
+         loaded_model_json = json_file.read()
+         json_file.close()
+         model = model_from_json(loaded_model_json)
+         model.load_weights("License_character_recognition_weight.h5")
+
+         labels = LabelEncoder()
+         labels.classes_ = np.load('license_character_classes.npy')
+
+         #fig = plt.figure(figsize=(15,3))
+         #cols = len(crop_characters)
+         #grid = gridspec.GridSpec(ncols=cols, nrows=1, figure=fig)
+
+         # classify each cropped character and assemble the plate string
+         final_string = ''
+         for i, character in enumerate(crop_characters):
+             #fig.add_subplot(grid[i])
+             title = np.array2string(predict_from_model(character, model, labels))
+             #plt.title('{}'.format(title.strip("'[]"), fontsize=20))
+             final_string += title.strip("'[]")
+             #plt.axis(False)
+             #plt.imshow(character, cmap='gray')
+
+     # return the recognized string, or a message when no plate or no characters were found
+     if len(LpImg) == 0 or len(crop_characters) == 0:
+         return "No Plate Detected"
+     return final_string
+
+
+
+ gr.Interface(fn=classify,
+              inputs=gr.inputs.Image(),
+              outputs="text",
+              title="Plate Number Recognition",
+              examples=['29Z5550.jpeg', 'germany_car_plate.jpg', 'india_car_plate.jpg', 'turkey_car_plate.jpg', 'vietnam_car_rectangle_plate.jpg'],
+              description="Automatically recognizes the symbols on a motor vehicle's number plate from a provided image, helping the authorities detect vehicles that violate the number-coding scheme.",
+              allow_flagging="never").launch(inbrowser=True)
+
+ #classify("C:/Users/JomerJuan/Documents/Deep Learning/Plate Number Recognition/Plate_examples/germany_car_plate.jpg",resize=False,Dmax=650, Dmin = 270)
+
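
For a quick local check outside the Gradio UI, a minimal sketch along the lines of the commented-out call above could look like the following. It assumes classify and its imports are already in scope (for example, pasted at the bottom of app.py with the interface launch skipped) and that germany_car_plate.jpg from the examples list sits in the working directory. Note that classify expects an image array rather than a file path, so the file is read with cv2.imread first.

# minimal local test sketch (not part of the commit)
# assumptions: classify() is in scope and "germany_car_plate.jpg" exists locally
import cv2

test_image = cv2.imread("germany_car_plate.jpg")  # classify expects a NumPy image array
if test_image is None:
    raise FileNotFoundError("example image not found in the working directory")
print(classify(test_image, resize=False, Dmax=650, Dmin=270))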