import torch
import gradio as gr
from huggingface_hub import hf_hub_download
from PIL import Image

REPO_ID = "rgp/yolov5-street-view-detection-grayscale"
FILENAME = "best_gray.pt"

# Download the trained weights from the Hugging Face Hub
yolov5_weights = hf_hub_download(repo_id=REPO_ID, filename=FILENAME)

# Load the custom weights through the ultralytics/yolov5 torch.hub entry point
model = torch.hub.load('ultralytics/yolov5', 'custom', path=yolov5_weights, force_reload=True)


def object_detection(im, size=640):
    results = model(im)  # inference
    # results.print()  # print results to screen
    # results.show()   # display results
    # results.save()   # save as results1.jpg, results2.jpg... etc.
    results.render()  # draw boxes and labels onto results.ims
    return Image.fromarray(results.ims[0])


title = "Pedestrian and Transportation Detection on the Streets - Grayscale"
description = """This model is a small demo based on an analysis of 680 grayscale images."""
css = ".output-image, .input-image, .image-preview {height: 640px !important}"

input = gr.inputs.Image(shape=(640, 640), image_mode="RGB", source="upload", label="Image", optional=False)
output = gr.outputs.Image(type="pil", label="Output Image")
examples = [
    ["sample_images/image-2.jpeg"],
    ["sample_images/image-1.jpg"],
    ["sample_images/image-3.jpg"],
    ["sample_images/image-4.jpg"],
    ["sample_images/image-5.jpg"],
]

gr.Interface(
    fn=object_detection,
    inputs=input,
    outputs=output,
    title=title,
    description=description,
    examples=examples,
    css=css,
).launch(debug=True)
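
# A minimal standalone sketch (not part of the launched Space, so it is left commented out):
# assuming the weights above have been downloaded and one of the bundled sample images is
# available locally, the same model can be queried without the Gradio UI, e.g.:
#
#   img = Image.open("sample_images/image-1.jpg")
#   results = model(img, size=640)      # run inference at 640 px
#   results.save()                      # write annotated copies to runs/detect/exp*
#   print(results.pandas().xyxy[0])     # per-detection boxes, confidences and class names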