import gradio as gr
import cv2
import numpy as np
import tensorflow as tf


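# Character set used by the recognition model; DECODE_DICT maps the CTC output
# indices back to these characters during decoding.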
CHARS = "ABCDEFGHIJKLMNPQRSTUVWXYZ0123456789"  # exclude O (easily confused with 0)
CHARS_DICT = {char:i for i, char in enumerate(CHARS)}
DECODE_DICT = {i:char for i, char in enumerate(CHARS)}

# Detection model: locates the license plate in the full image.
interpreter = tf.lite.Interpreter(model_path='detection.tflite')
#interpreter = tf.lite.Interpreter(model_path='lite-model_east-text-detector_fp16_1.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Recognition model: reads the characters from a cropped plate; it expects a
# single 24x94 RGB input.
recog_interpreter = tf.lite.Interpreter(model_path='recognition.tflite')
recog_input_details = recog_interpreter.get_input_details()
recog_output_details = recog_interpreter.get_output_details()
recog_interpreter.resize_tensor_input(recog_input_details[0]['index'], (1, 24, 94, 3))
recog_interpreter.allocate_tensors()


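# Optional preprocessing helpers; they are referenced only by the
# commented-out experiments in greet().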
def unsharp_mask(image, kernel_size=(5, 5), sigma=1.0, amount=1.0, threshold=0):
    """Return a sharpened version of the image, using an unsharp mask."""
    blurred = cv2.GaussianBlur(image, kernel_size, sigma)
    sharpened = float(amount + 1) * image - float(amount) * blurred
    sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))
    sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))
    sharpened = sharpened.round().astype(np.uint8)
    if threshold > 0:
        low_contrast_mask = np.absolute(image - blurred) < threshold
        np.copyto(sharpened, image, where=low_contrast_mask)
    return sharpened

def convdigplate(text):
    """Coerce an OCR result into the plate pattern LLLNLNN (letters at
    positions 0-2 and 4, digits elsewhere) by swapping commonly
    confused characters."""
    digit_to_letter = {'0': 'O', '1': 'I', '2': 'S', '3': 'E', '4': 'A',
                       '5': 'S', '6': 'B', '7': 'T', '8': 'B', '9': 'B'}
    letter_to_digit = {'A': '4', 'B': '8', 'C': '0', 'D': '0', 'E': '3', 'F': '3',
                       'G': '6', 'H': '4', 'I': '1', 'J': '6', 'K': '4', 'L': '1',
                       'M': '4', 'N': '4', 'O': '0', 'P': '8', 'Q': '0', 'R': '8',
                       'S': '2', 'T': '1', 'U': '0', 'V': '4', 'W': '3', 'X': '4',
                       'Y': '9', 'Z': '2'}

    # Trim reads longer than 7 characters down to the plate itself.
    if len(text) > 7:
        if len(text) >= 9:
            text = text[1:8]
        elif text[3].isdigit():
            text = text[:7]
        else:
            text = text[1:]

    temp = list(text)
    for index in range(len(temp)):
        if index < 3:
            # The first three positions must be letters.
            if temp[index].isdigit():
                temp[index] = digit_to_letter[temp[index]]
        elif not temp[index].isdigit() and index != 4:
            # The remaining positions are digits, except position 4 (a letter).
            temp[index] = letter_to_digit[temp[index]]

    return "".join(temp)
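
# For example, convdigplate("48C1D23") returns "ABC1D23"
# ('4' -> 'A', '8' -> 'B' in the letter positions).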
    
def increase_brightness(img, value):
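    """Brighten an image by adding `value` to the HSV value channel,
    saturating at 255."""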
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)

    lim = 255 - value
    v[v > lim] = 255
    v[v <= lim] += value

    final_hsv = cv2.merge((h, s, v))
    img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
    return img

def execute_text_recognition_tflite(boxes, frame, interpreter, input_details, output_details):
    # Boxes are normalized [ymin, xmin, ymax, xmax]; crop the plate region.
    height, width = frame.shape[0] - 1, frame.shape[1] - 1
    x1, x2, y1, y2 = boxes[1], boxes[3], boxes[0], boxes[2]
    save_frame = frame[
        max(0, int(y1 * height)) : min(height, int(y2 * height)),
        max(0, int(x1 * width)) : min(width, int(x2 * width))
    ]

    # Resize the crop to the 94x24 input the recognition model expects.
    test_image = cv2.resize(save_frame, (94, 24)) / 256  # scale to ~[0, 1)
    test_image = np.expand_dims(test_image, axis=0).astype(np.float32)
    interpreter.set_tensor(input_details[0]['index'], test_image)
    interpreter.invoke()
    output_data = interpreter.get_tensor(output_details[0]['index'])

    # Greedy CTC decoding; -1 entries are blank/padding and are skipped.
    decoded = tf.keras.backend.ctc_decode(output_data, (24,), greedy=True)
    text = ""
    for i in np.array(decoded[0][0][0]):
        if i > -1:
            text += DECODE_DICT[i]

    # Nothing recognized: return an empty result the caller can pass through.
    if not text:
        return None, None

    # Show the corrected plate alongside the raw OCR read.
    text = convdigplate(text) + " ( " + text + " ) "
    return text, cv2.resize(save_frame, (94, 24))

def greet(image):
    # Preprocessing experiments, kept for reference:
    #sharpened = unsharp_mask(image)
    #image = increase_brightness(sharpened, value=10) # 60 ->5qoOk.png #10 -> if8nC.png
    #image = cv2.resize(image, (720,480), interpolation=cv2.INTER_LINEAR)

    # Stretch the intensity range to 0-255.
    norm_img = np.zeros((image.shape[0], image.shape[1]))
    image = cv2.normalize(image, norm_img, 0, 255, cv2.NORM_MINMAX)

    # The detector expects a normalized 320x320 RGB batch.
    resized = cv2.resize(image, (320, 320), interpolation=cv2.INTER_LINEAR)
    input_data = resized.astype(np.float32)          # Set as 3D RGB float array
    input_data /= 255.                               # Normalize
    input_data = np.expand_dims(input_data, axis=0)  # Batch dimension (wrap in 4D)

    # Run detection.
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    output_data = interpreter.get_tensor(output_details[0]['index'])  # Class confidences
    boxes = interpreter.get_tensor(output_details[1]['index'])        # Bounding boxes

    # Recognize text in the first detection above the confidence threshold.
    for i, confidence in enumerate(output_data[0]):
        if confidence > 0.3:
            return execute_text_recognition_tflite(
                boxes[0][i], image,
                recog_interpreter, recog_input_details, recog_output_details,
            )
    return None, None  # No plate detected
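
# Note: gr.inputs / gr.outputs is the legacy Gradio namespace; Gradio 4.x
# removed it in favor of passing gr.Image(...) components directly.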
image = gr.inputs.Image(shape=(1920, 1080))
output_image = gr.outputs.Image(type="auto", label="Output")


title = "Automatic licence plate detection and recognition"
description = "Gradio demo for an automatic licence plate recognition system. To use it, simply upload your image of a car with a licence plate, or click one of the examples to load them. Read more at the links below."
article = "<p style='text-align: center'><a href='https://ieeexplore.ieee.org/document/9071863'>Robust Real time Lightweight Automatic License plate Recognition System for Iranian License Plates</a> | <a href='https://github.com/clsandoval/LPRnet-keras'>Github Repo</a></p>"


iface = gr.Interface(
    fn=greet,
    inputs=image,
    outputs=["text", output_image],
    title=title,
    description=description,
    article=article,
    examples=[
        "3.jpg",
        "4.jpg",
    ],
)
iface.launch()
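
# A minimal smoke test without the UI (a sketch, assuming a sample image such
# as "3.jpg" from the examples list is available; Gradio supplies RGB arrays,
# so convert from OpenCV's BGR order first):
#   img = cv2.cvtColor(cv2.imread("3.jpg"), cv2.COLOR_BGR2RGB)
#   text, crop = greet(img)
#   print(text)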