File size: 2,692 Bytes
c5a77dc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
from paddleocr import PaddleOCR
from PIL import Image

import json
import gradio as gr
import numpy as np
import cv2
import torch

# Load a custom YOLOv5 plate-detection checkpoint from a local .pt file via torch.hub.
# NOTE(review): requires network access to fetch the ultralytics/yolov5 repo on first run.
model = torch.hub.load('ultralytics/yolov5', 'custom', path='./best_Plate.pt')  # local model

def get_random_color():
    """Return a random RGB color as a tuple of three ints in [0, 255]."""
    return tuple(int(channel) for channel in np.random.randint(0, 256, 3))

def draw_ocr_bbox(image, boxes, colors):
    """Draw each OCR bounding polygon on the image in its paired color.

    Args:
        image: PIL image or array-like; converted once to an ndarray.
        boxes: sequence of polygons, each an array-like of (x, y) points.
        colors: sequence of color tuples, parallel to ``boxes``.

    Returns:
        numpy.ndarray: the image with closed 2-px polylines drawn on it.
    """
    # Convert once up front instead of re-wrapping (and copying) the image
    # with np.array() on every loop iteration; also dropped a stray debug print.
    canvas = np.array(image)
    for box, color in zip(boxes, colors):
        pts = np.asarray(box, dtype=np.int64).reshape(-1, 1, 2)
        canvas = cv2.polylines(canvas, [pts], True, color, 2)
    return canvas

def inference(img: Image.Image, lang, confidence):
    """Detect a license plate with YOLOv5, then OCR it with PaddleOCR.

    Args:
        img: input photo as a PIL image.
        lang: PaddleOCR language code (e.g. 'en', 'ar').
        confidence: minimum OCR score; lower-scored lines are dropped.

    Returns:
        tuple: (detection render ndarray, annotated plate PIL image or None,
        list of [bbox-json, rounded score, text] rows or None).
    """
    # NOTE(review): the OCR engine is rebuilt on every call because `lang`
    # can change between requests; cache per-language instances if this is slow.
    ocr = PaddleOCR(use_angle_cls=True, lang=lang, use_gpu=False)
    det_img = model(img)
    det_croppeds = det_img.crop(save=False)
    img_render = det_img.render()[0]
    if det_croppeds:
        # OCR the first detected plate crop instead of the whole photo.
        img = Image.fromarray(det_croppeds[0]['im'])

    results = ocr.ocr(np.array(img), cls=True)
    result = results[0]
    if result is None:  # was `== None` — identity comparison is the correct idiom
        return img_render, None, None

    image = img.convert('RGB')
    boxes = [line[0] for line in result]
    txts = [line[1][0] for line in result]
    scores = [line[1][1] for line in result]

    # Keep only lines above the confidence threshold; each gets a random
    # box color so adjacent detections are visually distinguishable.
    final_result = [
        dict(boxes=box, txt=txt, score=score, _c=get_random_color())
        for box, txt, score in zip(boxes, txts, scores)
        if score > confidence
    ]

    im_show = draw_ocr_bbox(image,
                            [item['boxes'] for item in final_result],
                            [item['_c'] for item in final_result])
    im_show = Image.fromarray(im_show)
    data = [[json.dumps(item['boxes']), round(item['score'], 3), item['txt']]
            for item in final_result]
    return img_render, im_show, data

# Copy shown in the Gradio page header.
title = 'License Plate'
description = 'Demo License Plate Recognition'

# Example rows for the demo gallery (currently disabled; the example
# below is kept as a template for [image path, language, confidence]).
examples = [
    # ['example_imgs/example.jpg','en', 0.5],
]

# Enlarge the input/output image panels in the Gradio layout.
css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"

if __name__ == '__main__':
    # Input widgets: source photo, OCR language, and score cutoff.
    input_widgets = [
        gr.Image(type='pil', label='Input'),
        gr.Dropdown(choices=['en', 'ar'], value='en', label='Language'),
        gr.Slider(0.1, 1, 0.5, step=0.1, label='Confidence Threshold'),
    ]
    # Output widgets mirror inference()'s 3-tuple:
    # full-frame detection render, cropped plate, and a result table.
    output_widgets = [
        gr.Image(type='pil', label='License Plate Detection'),
        gr.Image(type='pil', label='License Plate'),
        gr.Dataframe(headers=['bbox', 'score', 'text'], label='Result'),
    ]
    demo = gr.Interface(
        inference,
        input_widgets,
        output_widgets,
        title=title,
        description=description,
        # examples=examples,
        css=css,
    )
    demo.queue(max_size=10)
    demo.launch(debug=True, server_name="0.0.0.0")