jsjuan committed on
Commit ad3086d
1 Parent(s): 3ccdc52

Upload test.ipynb

Files changed (1)
  1. test.ipynb +356 -0
test.ipynb ADDED
@@ -0,0 +1,356 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# suppress TensorFlow warning messages\n",
+ "import os\n",
+ "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n",
+ "\n",
+ "# required libraries\n",
+ "import cv2\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "import matplotlib.gridspec as gridspec\n",
+ "from local_utils import detect_lp\n",
+ "from os.path import splitext, basename\n",
+ "from keras.models import model_from_json\n",
+ "from keras.preprocessing.image import load_img, img_to_array\n",
+ "from keras.applications.mobilenet_v2 import preprocess_input\n",
+ "from sklearn.preprocessing import LabelEncoder\n",
+ "import glob"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 63,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# load a Keras model from its JSON architecture and .h5 weights\n",
+ "def load_model(path):\n",
+ "    try:\n",
+ "        path = splitext(path)[0]\n",
+ "        with open('%s.json' % path, 'r') as json_file:\n",
+ "            model_json = json_file.read()\n",
+ "        model = model_from_json(model_json, custom_objects={})\n",
+ "        model.load_weights('%s.h5' % path)\n",
+ "        print(\"Loading model successfully...\")\n",
+ "        return model\n",
+ "    except Exception as e:\n",
+ "        print(e)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 64,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Loading model successfully...\n"
+ ]
+ }
+ ],
+ "source": [
+ "wpod_net_path = \"wpod-net.json\"\n",
+ "wpod_net = load_model(wpod_net_path)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 74,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def preprocess_image(image_path, resize=False):\n",
+ "    img = cv2.imread(image_path)\n",
+ "    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n",
+ "    img = img / 255\n",
+ "    if resize:\n",
+ "        img = cv2.resize(img, (224,224))\n",
+ "    return img\n",
+ "\n",
+ "# Dmin and Dmax bound the working dimension passed to detect_lp\n",
+ "def get_plate(image_path, Dmax=650, Dmin=270):\n",
+ "    vehicle = preprocess_image(image_path)\n",
+ "    ratio = float(max(vehicle.shape[:2])) / min(vehicle.shape[:2])\n",
+ "    side = int(ratio * Dmin)\n",
+ "    bound_dim = min(side, Dmax)\n",
+ "    _, LpImg, _, cor = detect_lp(wpod_net, vehicle, bound_dim, lp_threshold=0.5)\n",
+ "    return vehicle, LpImg, cor\n",
+ "\n",
+ "test_image_path = \"Plate_examples/india_car_plate.jpg\"\n",
+ "vehicle, LpImg, cor = get_plate(test_image_path)"
+ ]
+ },
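+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Optional sanity check (a minimal sketch, assuming LpImg from\n",
+ "# get_plate() above holds at least one detection): show the plate region.\n",
+ "if len(LpImg):\n",
+ "    plt.figure(figsize=(4,2))\n",
+ "    plt.axis(False)\n",
+ "    plt.imshow(LpImg[0])\n",
+ "    plt.show()"
+ ]
+ },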
+ {
+ "cell_type": "code",
+ "execution_count": 75,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "if len(LpImg): # check if there is at least one license plate image\n",
+ "    # scale, take absolute values, and convert the result to 8-bit\n",
+ "    plate_image = cv2.convertScaleAbs(LpImg[0], alpha=(255.0))\n",
+ "\n",
+ "    # convert to grayscale and blur the image\n",
+ "    gray = cv2.cvtColor(plate_image, cv2.COLOR_BGR2GRAY)\n",
+ "    blur = cv2.GaussianBlur(gray, (7,7), 0)\n",
+ "\n",
+ "    # apply inverse binary thresholding combined with Otsu's method\n",
+ "    binary = cv2.threshold(blur, 180, 255,\n",
+ "                           cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]\n",
+ "\n",
+ "    # dilate with a 3x3 rectangular kernel to thicken character strokes\n",
+ "    kernel3 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))\n",
+ "    thre_mor = cv2.morphologyEx(binary, cv2.MORPH_DILATE, kernel3)\n",
+ "\n",
+ "# plt.savefig(\"threshding.png\", dpi=300)"
+ ]
+ },
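+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# A minimal sketch to inspect the intermediate stages, assuming the\n",
+ "# previous cell ran and defined gray, blur, binary and thre_mor.\n",
+ "fig, axes = plt.subplots(1, 4, figsize=(12,3))\n",
+ "stages = [('gray', gray), ('blur', blur), ('binary', binary), ('dilated', thre_mor)]\n",
+ "for ax, (name, im) in zip(axes, stages):\n",
+ "    ax.set_title(name)\n",
+ "    ax.axis(False)\n",
+ "    ax.imshow(im, cmap='gray')\n",
+ "plt.show()"
+ ]
+ },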
+ {
+ "cell_type": "code",
+ "execution_count": 77,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Detect 10 letters...\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "<Figure size 720x432 with 0 Axes>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "# sort_contours() orders the character contours from left to right\n",
+ "def sort_contours(cnts, reverse=False):\n",
+ "    i = 0\n",
+ "    boundingBoxes = [cv2.boundingRect(c) for c in cnts]\n",
+ "    (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),\n",
+ "                                        key=lambda b: b[1][i], reverse=reverse))\n",
+ "    return cnts\n",
+ "\n",
+ "cont, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
+ "\n",
+ "# create a copy \"test_roi\" of plate_image to draw bounding boxes on\n",
+ "test_roi = plate_image.copy()\n",
+ "\n",
+ "# list used to collect the cropped character images\n",
+ "crop_characters = []\n",
+ "\n",
+ "# standard width and height of a character\n",
+ "digit_w, digit_h = 30, 60\n",
+ "\n",
+ "for c in sort_contours(cont):\n",
+ "    (x, y, w, h) = cv2.boundingRect(c)\n",
+ "    ratio = h/w\n",
+ "    if 1 <= ratio <= 3.5: # only keep contours with a character-like aspect ratio\n",
+ "        if h/plate_image.shape[0] >= 0.5: # only keep contours taller than 50% of the plate\n",
+ "            # draw a bounding box around the character\n",
+ "            cv2.rectangle(test_roi, (x, y), (x + w, y + h), (0, 255, 0), 2)\n",
+ "\n",
+ "            # crop the character and binarize it for prediction\n",
+ "            curr_num = thre_mor[y:y+h, x:x+w]\n",
+ "            curr_num = cv2.resize(curr_num, dsize=(digit_w, digit_h))\n",
+ "            _, curr_num = cv2.threshold(curr_num, 220, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
+ "            crop_characters.append(curr_num)\n",
+ "\n",
+ "print(\"Detect {} letters...\".format(len(crop_characters)))\n",
+ "fig = plt.figure(figsize=(10,6))\n",
+ "#plt.axis(False)\n",
+ "#plt.imshow(test_roi)\n",
+ "#plt.savefig('grab_digit_contour.png', dpi=300)"
+ ]
+ },
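+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# A minimal sketch to inspect the segmentation result, assuming\n",
+ "# crop_characters from the cell above is non-empty.\n",
+ "if crop_characters:\n",
+ "    fig = plt.figure(figsize=(10,2))\n",
+ "    grid = gridspec.GridSpec(ncols=len(crop_characters), nrows=1, figure=fig)\n",
+ "    for i, ch in enumerate(crop_characters):\n",
+ "        fig.add_subplot(grid[i])\n",
+ "        plt.axis(False)\n",
+ "        plt.imshow(ch, cmap='gray')\n",
+ "    plt.show()"
+ ]
+ },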
+ {
+ "cell_type": "code",
+ "execution_count": 78,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[INFO] Model loaded successfully...\n",
+ "[INFO] Labels loaded successfully...\n"
+ ]
+ }
+ ],
+ "source": [
+ "# load the character-recognition model architecture, weights and labels\n",
+ "json_file = open('MobileNets_character_recognition.json', 'r')\n",
+ "loaded_model_json = json_file.read()\n",
+ "json_file.close()\n",
+ "model = model_from_json(loaded_model_json)\n",
+ "model.load_weights(\"License_character_recognition_weight.h5\")\n",
+ "print(\"[INFO] Model loaded successfully...\")\n",
+ "\n",
+ "labels = LabelEncoder()\n",
+ "labels.classes_ = np.load('license_character_classes.npy')\n",
+ "print(\"[INFO] Labels loaded successfully...\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 79,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# preprocess an input character image and predict its label with the model\n",
+ "def predict_from_model(image, model, labels):\n",
+ "    image = cv2.resize(image, (80,80))\n",
+ "    image = np.stack((image,)*3, axis=-1) # replicate grayscale to 3 channels\n",
+ "    prediction = labels.inverse_transform([np.argmax(model.predict(image[np.newaxis,:]))])\n",
+ "    return prediction"
+ ]
+ },
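+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# A quick usage sketch: predict a single segmented character, assuming\n",
+ "# crop_characters, model and labels from the cells above.\n",
+ "if crop_characters:\n",
+ "    print(predict_from_model(crop_characters[0], model, labels))"
+ ]
+ },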
+ {
+ "cell_type": "code",
+ "execution_count": 80,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "MH12DE1433\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "<Figure size 1080x216 with 0 Axes>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "fig = plt.figure(figsize=(15,3))\n",
+ "cols = len(crop_characters)\n",
+ "grid = gridspec.GridSpec(ncols=cols, nrows=1, figure=fig)\n",
+ "\n",
+ "# concatenate the per-character predictions into the plate string\n",
+ "final_string = ''\n",
+ "for i, character in enumerate(crop_characters):\n",
+ "    #fig.add_subplot(grid[i])\n",
+ "    title = np.array2string(predict_from_model(character, model, labels))\n",
+ "    #plt.title(title.strip(\"'[]\"), fontsize=20)\n",
+ "    final_string += title.strip(\"'[]\")\n",
+ "    #plt.axis(False)\n",
+ "    #plt.imshow(character, cmap='gray')\n",
+ "\n",
+ "print(final_string)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[[[ 85 57 76]\n",
+ " [ 93 65 84]\n",
+ " [ 95 68 84]\n",
+ " ...\n",
+ " [173 151 145]\n",
+ " [170 148 143]\n",
+ " [169 147 142]]\n",
+ "\n",
+ " [[ 85 59 75]\n",
+ " [ 94 69 83]\n",
+ " [ 99 74 88]\n",
+ " ...\n",
+ " [173 151 145]\n",
+ " [171 149 144]\n",
+ " [170 148 143]]\n",
+ "\n",
+ " [[113 92 101]\n",
+ " [119 98 106]\n",
+ " [122 101 109]\n",
+ " ...\n",
+ " [174 152 146]\n",
+ " [172 150 145]\n",
+ " [171 149 144]]\n",
+ "\n",
+ " ...\n",
+ "\n",
+ " [[204 216 228]\n",
+ " [201 213 223]\n",
+ " [206 218 228]\n",
+ " ...\n",
+ " [102 76 62]\n",
+ " [ 94 68 54]\n",
+ " [ 92 66 52]]\n",
+ "\n",
+ " [[206 217 231]\n",
+ " [200 211 225]\n",
+ " [205 216 230]\n",
+ " ...\n",
+ " [ 99 73 61]\n",
+ " [ 92 68 56]\n",
+ " [ 93 69 57]]\n",
+ "\n",
+ " [[216 226 243]\n",
+ " [227 237 254]\n",
+ " [221 232 246]\n",
+ " ...\n",
+ " [ 92 66 54]\n",
+ " [ 86 62 50]\n",
+ " [ 91 67 55]]]\n"
+ ]
+ }
+ ],
+ "source": [
+ "img = cv2.imread(\"C:/Users/JomerJuan/Documents/Deep Learning/Plate Number Recognition/Plate_examples/germany_car_plate.jpg\")\n",
+ "img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n",
+ "print(img)"
+ ]
+ }
+ ],
+ "metadata": {
+ "interpreter": {
+ "hash": "6e8d6bc3219a43fccaa16aa1e841da92ab3f69dc51828f7269e34e3f0779a8af"
+ },
+ "kernelspec": {
+ "display_name": "Python 3.8.3 ('Plate_Number_Recognition': venv)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.8.3"
+ },
+ "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }