import gradio as gr
import model1 as m1
import model2 as m2
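# model3 below expects a third pipeline module exposing the same run() interface
# as model1/model2; the module name here is assumed from that pattern
import model3 as m3
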
# Shared state: the latest detection results as parallel lists of car crops,
# licence-plate crops and recognised plate strings; counter is the index the
# next/prev buttons move through them.
cars = []
lps = []
lp_texts = []
counter = 0

# passes the submitted image to model 1 and returns the first detected car,
# licence-plate crop and plate text
def model1(image):
    global cars, lps, lp_texts, counter
    cars, lps, lp_texts = m1.run([image])
    counter = 0
    return cars[0], lps[0], lp_texts[0]

# passes the submitted image to model 2 and returns the first detected
# licence-plate crop and plate text
def model2(image):
    global lps, lp_texts, counter
    lps, lp_texts = m2.run([image])
    counter = 0
    return lps[0], lp_texts[0]

# passes the submitted image to model 3 (same interface as model 1); this
# pipeline is not wired into the UI below
def model3(image):
    global cars, lps, lp_texts, counter
    cars, lps, lp_texts = m3.run([image])
    counter = 0
    return cars[0], lps[0], lp_texts[0]

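# Navigation helpers: they only re-index the results cached by the last
# submit, so no model is re-run.  Because Python's % is non-negative for a
# positive modulus, counter % len(...) wraps in both directions, so the
# buttons cycle endlessly through the detections.
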
# show the next detected car / licence-plate pair
def next_img():
    global counter
    if not cars:  # nothing has been detected yet
        return None, None, ""
    counter += 1
    index = counter % len(cars)
    return cars[index], lps[index], lp_texts[index]

# show the previous detected car / licence-plate pair
def prev_img():
    global counter
    if not cars:  # nothing has been detected yet
        return None, None, ""
    counter -= 1
    index = counter % len(cars)
    return cars[index], lps[index], lp_texts[index]

# show the next detected licence plate (model 2 produces no car crop)
def next_img_lp():
    global counter
    if not lps:  # nothing has been detected yet
        return None, ""
    counter += 1
    index = counter % len(lps)
    return lps[index], lp_texts[index]

# show the previous detected licence plate
def prev_img_lp():
    global counter
    if not lps:  # nothing has been detected yet
        return None, ""
    counter -= 1
    index = counter % len(lps)
    return lps[index], lp_texts[index]

# this block builds the front end of the page: one Gradio tab per pipeline
with gr.Blocks() as demo:
gr.Markdown("## ANPR Project")
with gr.Tab("Model 1"):
gr.Markdown("Using 3 different ML models")
gr.Markdown("YOLOv8n for car dection + YOLOv8n for LP detection + easy ocr for text detection")
img = gr.Image(label="Input")
submit = gr.Button(value="submit")
with gr.Row():
car = gr.Image(label="Car")
lp = gr.Image(label="Licence Plate")
lp_text = gr.Text(label="Plate Number")
with gr.Row():
next = gr.Button(value="next")
prev = gr.Button(value="prev")
submit.click(model1, inputs=[img], outputs=[car, lp, lp_text])
next.click(next_img, outputs=[car, lp, lp_text])
prev.click(prev_img, outputs=[car, lp, lp_text])
with gr.Tab("Model 2"):
gr.Markdown("Using 2 different ML models")
gr.Markdown("YOLOv8m for car dection + easy ocr for text detection")
gr.Markdown("YOLOv8m for car dection is trained on a large dataset of 25K training images")
img2 = gr.Image(label="Input")
submit2 = gr.Button(value="submit")
with gr.Row():
lp2 = gr.Image(label="Licence Plate")
lp_text2 = gr.Text(label="Plate Number")
with gr.Row():
next2 = gr.Button(value="next")
prev2 = gr.Button(value="prev")
submit2.click(model2, inputs=[img2], outputs=[lp2, lp_text2])
next2.click(next_img_lp, outputs=[lp2, lp_text2])
prev2.click(prev_img_lp, outputs=[lp2, lp_text2])
demo.launch(share=False)
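
# A minimal sketch of the interface the pipeline modules are expected to
# expose, inferred from the calls above (the argument and result names here
# are assumptions, not the modules' documented API):
#   m1.run(images) -> (car_crops, lp_crops, lp_texts)  # three parallel lists
#   m2.run(images) -> (lp_crops, lp_texts)             # two parallel lists
# launch() starts a local web server and prints its URL; share=False keeps the
# demo local instead of creating a public Gradio link.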