"""Gradio demo: YOLOv7 object detection on a single uploaded image.

Downloads two sample images at import time, then serves a Gradio
Interface that runs CPU inference with a Hugging Face-hosted YOLOv7
checkpoint and returns the image with detections drawn on it.
"""

import gradio as gr
import torch
import yolov7

# Sample images for the examples gallery (fetched once at startup).
torch.hub.download_url_to_file(
    'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg',
    'zidane.jpg',
)
torch.hub.download_url_to_file(
    'https://raw.githubusercontent.com/obss/sahi/main/tests/data/small-vehicles1.jpeg',
    'small-vehicles1.jpeg',
)

MODEL_CHOICES = [
    "StarAtNyte1/yolov7_custom",
]


def yolov7_inference(
    image=None,           # PIL.Image from the gr.inputs.Image component
    model_path=None,      # Hugging Face model id selected in the Dropdown
    image_size=640,       # inference resolution (pixels)
    conf_threshold=0.25,  # minimum detection confidence
    iou_threshold=0.45,   # NMS IoU threshold
):
    """Run YOLOv7 detection on one image.

    Loads the selected checkpoint on CPU, applies the confidence and
    IoU thresholds, and returns the rendered image (numpy array with
    bounding boxes drawn) for display.

    NOTE(review): the model is reloaded on every call; caching the
    loaded model per ``model_path`` would avoid repeated downloads.
    """
    model = yolov7.load(model_path, device="cpu", hf_model=True, trace=False)
    model.conf = conf_threshold
    model.iou = iou_threshold
    results = model([image], size=image_size)
    # render() draws boxes in place and returns a list of numpy images.
    return results.render()[0]


# FIX: the function takes five arguments and each example row carries
# five values, so the interface must expose five input components —
# the original registered only two, which breaks argument binding
# (especially with cache_examples=True).
inputs = [
    gr.inputs.Image(type="pil", label="Input Image"),
    gr.inputs.Dropdown(
        choices=MODEL_CHOICES,
        default=MODEL_CHOICES[0],
        label="Model",
    ),
    gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32,
                     label="Image Size"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05,
                     label="Confidence Threshold"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05,
                     label="IoU Threshold"),
]

# FIX: the function returns a rendered numpy array, not a file path,
# so the output component must use type="numpy".
outputs = gr.outputs.Image(type="numpy", label="Output Image")

title = ("Yolov7: Trainable bag-of-freebies sets new state-of-the-art "
         "for real-time object detectors")

# FIX: examples previously referenced models absent from the Dropdown
# choices; they now use the single available checkpoint.
examples = [
    ['small-vehicles1.jpeg', MODEL_CHOICES[0], 640, 0.25, 0.45],
    ['zidane.jpg', MODEL_CHOICES[0], 640, 0.25, 0.45],
]

demo_app = gr.Interface(
    fn=yolov7_inference,
    inputs=inputs,
    outputs=outputs,
    title=title,
    examples=examples,
    cache_examples=True,
    theme='huggingface',
)
demo_app.launch(debug=True, enable_queue=True)