import json

import gradio as gr
import yolov5
from PIL import Image
from huggingface_hub import hf_hub_download

app_title = "Forklift Object Detection"
models_ids = [
    'keremberke/yolov5n-forklift',
    'keremberke/yolov5s-forklift',
    'keremberke/yolov5m-forklift',
]
# footer text rendered under the demo UI
article = "model | dataset | awesome-yolov5-models"

current_model_id = models_ids[-1]
model = yolov5.load(current_model_id)

examples = [
    ['test_images/32LSCZQDHZO7_jpg.rf.8fddaa4b5ed4db87d19a32d4554b9c23.jpg', 0.25, 'keremberke/yolov5m-forklift'],
    ['test_images/6URLZIZIQ6S0_jpg.rf.4661cb4082077e616ec94250eea6328f.jpg', 0.25, 'keremberke/yolov5m-forklift'],
    ['test_images/I91P1I5WNUZT_jpg.rf.c5c49a5f421751c30008a35e7b52087e.jpg', 0.25, 'keremberke/yolov5m-forklift'],
    ['test_images/JWF31R9STW0L_jpg.rf.a785b0107b333fe746fe1c4c8d2f744f.jpg', 0.25, 'keremberke/yolov5m-forklift'],
    ['test_images/LTDX8N8ZKBT2_jpg.rf.6e09889a432d15c19fa0fbdbb62d347f.jpg', 0.25, 'keremberke/yolov5m-forklift'],
    ['test_images/M_01685_png.rf.57a2823eabfa135c0a508d18faa70ce3.jpg', 0.25, 'keremberke/yolov5m-forklift'],
    ['test_images/M_03339_png.rf.f755ccc7bdf2a0ebc7e4553a0576ed50.jpg', 0.25, 'keremberke/yolov5m-forklift'],
    ['test_images/M_04045_png.rf.31bd5eed4b55dbcafe568210774cb5dc.jpg', 0.25, 'keremberke/yolov5m-forklift'],
    ['test_images/M_04302_png.rf.62eabd3a1cc0dbfcdffa9c5a9582f77c.jpg', 0.25, 'keremberke/yolov5m-forklift'],
    ['test_images/M_07555_png.rf.9c2d725a383658227bc87891f68fe975.jpg', 0.25, 'keremberke/yolov5m-forklift'],
    ['test_images/M_08430_png.rf.64508c4e583f64ac2cd431c99dc79834.jpg', 0.25, 'keremberke/yolov5m-forklift'],
    ['test_images/V75EBJ0AG2HV_jpg.rf.88822c95c57d6bfb33092eb5ec0a020c.jpg', 0.25, 'keremberke/yolov5m-forklift'],
]


def predict(image, threshold=0.25, model_id=None):
    # update model if required
    global current_model_id
    global model
    if model_id != current_model_id:
        model = yolov5.load(model_id)
        current_model_id = model_id

    # get model input size from the checkpoint's config on the Hub
    config_path = hf_hub_download(repo_id=model_id, filename="config.json")
    with open(config_path, "r") as f:
        config = json.load(f)
    input_size = config["input_size"]

    # perform inference and render detections onto the image
    model.conf = threshold
    results = model(image, size=input_size)
    numpy_image = results.render()[0]
    output_image = Image.fromarray(numpy_image)
    return output_image


gr.Interface(
    title=app_title,
    description="Created by 'keremberke'",
    article=article,
    fn=predict,
    inputs=[
        gr.Image(type="pil"),
        gr.Slider(maximum=1, step=0.01, value=0.25),
        gr.Dropdown(models_ids, value=models_ids[-1]),
    ],
    outputs=gr.Image(type="pil"),
    examples=examples,
    cache_examples=True if examples else False,
).launch(enable_queue=True)