import gradio as gr
import cv2
import numpy as np
import tensorflow as tf


CHARS = "ABCDEFGHIJKLMNPQRSTUVWXYZ0123456789"  # plate charset; O is excluded (confusable with 0)
CHARS_DICT = {char: i for i, char in enumerate(CHARS)}
DECODE_DICT = {i: char for i, char in enumerate(CHARS)}

# Detection model: finds license plate bounding boxes.
interpreter = tf.lite.Interpreter(model_path='detection.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Recognition model: reads the characters from a cropped plate.
recog_interpreter = tf.lite.Interpreter(model_path='recognition2.tflite')
recog_input_details = recog_interpreter.get_input_details()
recog_output_details = recog_interpreter.get_output_details()
recog_interpreter.resize_tensor_input(recog_input_details[0]['index'], (1, 24, 94, 3))
recog_interpreter.allocate_tensors()
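
# Shapes used in this app (inferred from the calls below): the detection
# model receives a 1x320x320x3 float32 image, and the recognition model
# a 1x24x94x3 crop resized from the detected bounding box.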

def execute_text_recognition_tflite(boxes, frame, interpreter, input_details, output_details):
    # Crop the detected plate. Box coordinates are normalized, so scale them
    # by the actual frame dimensions rather than a hardcoded 1920x1080.
    height, width = frame.shape[:2]
    x1, x2, y1, y2 = boxes[1], boxes[3], boxes[0], boxes[2]
    save_frame = frame[
        max(0, int(y1 * height)) : min(height, int(y2 * height)),
        max(0, int(x1 * width)) : min(width, int(x2 * width)),
    ]

    # Resize the crop to the recognition model's 94x24 input and run inference.
    plate_crop = cv2.resize(save_frame, (94, 24))
    test_image = np.expand_dims(plate_crop / 256, axis=0).astype(np.float32)
    interpreter.set_tensor(input_details[0]['index'], test_image)
    interpreter.invoke()
    output_data = interpreter.get_tensor(output_details[0]['index'])

    # CTC-decode the model output; ctc_decode pads its result with -1,
    # so skip those entries when mapping indices back to characters.
    decoded = tf.keras.backend.ctc_decode(output_data, (24,), greedy=False)
    text = ""
    for i in np.array(decoded[0][0][0]):
        if i > -1:
            text += DECODE_DICT[i]

    # Nothing recognized: return a pair so the caller can unpack safely.
    if not text:
        return None, None

    # The leading characters of a plate are letters, so a '0' recognized
    # there is remapped to 'O' (which the charset deliberately omits).
    text = text[:3].replace("0", "O") + text[3:]

    return text, plate_crop

def greet(image):
    # Resize to the detection model's 320x320 input and normalize.
    resized = cv2.resize(image, (320, 320), interpolation=cv2.INTER_AREA)
    input_data = resized.astype(np.float32)          # 3D RGB float array
    input_data /= 255.                               # Normalize to [0, 1]
    input_data = np.expand_dims(input_data, axis=0)  # Batch dimension (4D)

    # Run plate detection.
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    output_data = interpreter.get_tensor(output_details[0]['index'])  # Confidences

    # Bounding boxes (normalized coordinates)
    boxes = interpreter.get_tensor(output_details[1]['index'])

    # Run text recognition on the first detection above the confidence threshold.
    for i, confidence in enumerate(output_data[0]):
        if confidence > 0.3:
            return execute_text_recognition_tflite(
                boxes[0][i], image,
                recog_interpreter, recog_input_details, recog_output_details,
            )

    # No plate found above the threshold.
    return None, None
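
# Minimal sketch of running the pipeline without the Gradio UI (assumes a
# hypothetical image file "car.jpg"; cv2.imread returns BGR, while Gradio
# hands the models RGB frames, hence the conversion):
#
#   img = cv2.cvtColor(cv2.imread("car.jpg"), cv2.COLOR_BGR2RGB)
#   plate_text, plate_crop = greet(img)
#   print(plate_text)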

image = gr.inputs.Image(shape=(1920, 1080))
output_image = gr.outputs.Image(type="auto", label="Output")


title = "Automatic licence plate detection and recognition"
description = "Gradio demo for an automatic licence plate recognition system. To use it, upload an image of a car with a visible licence plate, or click one of the examples to load it. Read more at the links below."
article = "<p style='text-align: center'><a href='https://ieeexplore.ieee.org/document/9071863'>Robust Real time Lightweight Automatic License plate Recognition System for Iranian License Plates</a> | <a href='https://github.com/clsandoval/LPRnet-keras'>Github Repo</a></p>"


iface = gr.Interface(
    fn=greet,
    inputs=image,
    outputs=["text", output_image],
    title=title,
    description=description,
    article=article,
    examples=[
        "3.jpg",
        "4.jpg",
    ],
)
iface.launch()
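
# To expose a temporary public URL when running locally, Gradio's standard
# share flag can be used instead:
#   iface.launch(share=True)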