# ANPR / app.py — Gradio demo app
# (Hugging Face Space upload residue: "Upload 18 files", commit 0a2675c, user tsaddev)
import gradio as gr
import model1 as m1
import model2 as m2
cars = []
lps = []
lp_texts = []
counter = 0
# this is the main function that passes the images to the model 1
def model1(image):
global cars, lps, lp_texts, counter
(cars, lps, lp_texts) = m1.run([image])
counter = 0
return cars[0], lps[0], lp_texts[0]
# this is the main function that passes the images to the model 1
def model2(image):
global lps, lp_texts, counter
(lps, lp_texts) = m2.run([image])
counter = 0
return lps[0], lp_texts[0]
# this is the main function that passes the images to the model 1
def model3(image):
global cars, lps, lp_texts, counter
(cars, lps, lp_texts) = m3.run([image])
counter = 0
return cars[0], lps[0], lp_texts[0]
# function to go to next detected car licence plate
def next_img():
global counter
counter += 1
index = int(counter % len(cars))
return cars[index], lps[index], lp_texts[index]
# function to go to prev detected car licence plate
def prev_img():
global counter
counter -= 1
index = int(counter % len(cars))
return cars[index], lps[index], lp_texts[index]
# function to go to next detected licence plate
def next_img_lp():
global counter
counter += 1
index = int(counter % len(lps))
return lps[index], lp_texts[index]
# function to go to prev detected licence plate
def prev_img_lp():
global counter
counter -= 1
index = int(counter % len(lps))
return lps[index], lp_texts[index]
# this code is responcible for the front end part of the page
with gr.Blocks() as demo:
gr.Markdown("## ANPR Project")
with gr.Tab("Model 1"):
gr.Markdown("Using 3 different ML models")
gr.Markdown("YOLOv8n for car dection + YOLOv8n for LP detection + easy ocr for text detection")
img = gr.Image(label="Input")
submit = gr.Button(value="submit")
with gr.Row():
car = gr.Image(label="Car")
lp = gr.Image(label="Licence Plate")
lp_text = gr.Text(label="Plate Number")
with gr.Row():
next = gr.Button(value="next")
prev = gr.Button(value="prev")
submit.click(model1, inputs=[img], outputs=[car, lp, lp_text])
next.click(next_img, outputs=[car, lp, lp_text])
prev.click(prev_img, outputs=[car, lp, lp_text])
with gr.Tab("Model 2"):
gr.Markdown("Using 2 different ML models")
gr.Markdown("YOLOv8m for car dection + easy ocr for text detection")
gr.Markdown("YOLOv8m for car dection is trained on a large dataset of 25K training images")
img2 = gr.Image(label="Input")
submit2 = gr.Button(value="submit")
with gr.Row():
lp2 = gr.Image(label="Licence Plate")
lp_text2 = gr.Text(label="Plate Number")
with gr.Row():
next2 = gr.Button(value="next")
prev2 = gr.Button(value="prev")
submit2.click(model2, inputs=[img2], outputs=[lp2, lp_text2])
next2.click(next_img_lp, outputs=[lp2, lp_text2])
prev2.click(prev_img_lp, outputs=[lp2, lp_text2])
demo.launch(share=False)