import os

import fastai.vision.all as fv
import gradio as gr
from PIL import Image, ImageDraw

# Load the exported fastai learner (expects model.pkl next to this script).
learn = fv.load_learner("model.pkl")


def call(image, step_size: int = 100, blocks: int = 4):
    # Resize the input to the size of the tiled canvas, so each block is step_size px square.
    original_image = Image.fromarray(image).resize((step_size * blocks, step_size * blocks))
    canvas = Image.new(mode="RGB", size=(step_size * blocks, step_size * blocks))
    draw = ImageDraw.Draw(canvas)

    # Classify each block and annotate it with the first letter of the predicted
    # class and the model's confidence as a percentage.
    for x in range(0, blocks * step_size, step_size):
        for y in range(0, blocks * step_size, step_size):
            cropped_image = original_image.crop((x, y, x + step_size, y + step_size))
            canvas.paste(cropped_image, (x, y))
            # fastai's predict returns (decoded class, class index, probabilities).
            prediction = learn.predict(cropped_image)
            marker = f"{prediction[0][0].upper()} {prediction[2][prediction[1].item()].item() * 100:.0f}"
            position = (x + 10, y + 10)
            bbox = draw.textbbox(position, marker, font=None)
            draw.rectangle(bbox, fill="white")
            draw.text(position, marker, font=None, fill="black")

    # Draw the grid lines separating the blocks.
    for x in range(0, blocks * step_size, step_size):
        # vertical line
        draw.line(((x, 0), (x, blocks * step_size)), fill=(128, 128, 128), width=3)
        # horizontal line
        draw.line(((0, x), (blocks * step_size, x)), fill=(128, 128, 128), width=3)

    return canvas


title = "Traffic Light Detector"
description = "Experiment with traffic light detection to evaluate the value of CAPTCHA security controls"

iface = gr.Interface(
    fn=call,
    inputs="image",
    outputs="image",
    title=title,
    description=description,
    examples=[
        os.path.join(os.path.dirname(__file__), "examples/1.jpg"),
        os.path.join(os.path.dirname(__file__), "examples/2.jpg"),
    ],
    thumbnail="thumbnail.webp",
)
iface.launch()
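
# A minimal local-usage sketch (an assumption, not part of the deployed app):
# with model.pkl and the example image in place, the tiling/annotation step can
# be exercised without the web UI, e.g. from a separate script or REPL:
#
#   import numpy as np
#   result = call(np.array(Image.open("examples/1.jpg")), step_size=100, blocks=4)
#   result.save("annotated.png")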