import gradio as gr
import torch
import yolov7

# Images
#torch.hub.download_url_to_file('https://raw.githubusercontent.com/nihalbaig0/BD-Vehicle-Detection/main/images/bondor_to_kodomtoli.jpg', 'bondor_to_kodomtoli.jpg')
#torch.hub.download_url_to_file('https://raw.githubusercontent.com/nihalbaig0/BD-Vehicle-Detection/main/images/lamabazar_to_versitygate.jpg', 'lamabazar_to_versitygate.jpg')


def yolov7_inference(
    image: gr.inputs.Image = None,
    model_path: gr.inputs.Dropdown = None,
    image_size: gr.inputs.Slider = 640,
    conf_threshold: gr.inputs.Slider = 0.25,
    iou_threshold: gr.inputs.Slider = 0.45,
):
    """
    YOLOv7 inference function

    Args:
        image: Input image
        model_path: Path to the model
        image_size: Image size
        conf_threshold: Confidence threshold
        iou_threshold: IOU threshold

    Returns:
        Rendered image
    """
    # Load the model from the Hugging Face Hub and apply the detection thresholds
    model = yolov7.load(model_path, device="cpu", hf_model=True, trace=False)
    model.conf = conf_threshold
    model.iou = iou_threshold
    results = model([image], size=image_size)
    # Return the first (and only) image with predicted boxes drawn on it
    return results.render()[0]


inputs = [
    gr.inputs.Image(type="pil", label="Input Image"),
    gr.inputs.Dropdown(
        choices=[
            "nihalbaig0/yolov7",
        ],
        default="nihalbaig0/yolov7",
        label="Model",
    ),
    gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
]

outputs = gr.outputs.Image(type="filepath", label="Output Image")
title = "Project-350: BD Vehicle Detection for Autonomous Vehicle"

#examples = [['bondor_to_kodomtoli.jpg', 'nihalbaig0/yolov7', 640, 0.25, 0.45], ['lamabazar_to_versitygate.jpg', 'nihalbaig0/yolov7', 640, 0.25, 0.45]]

demo_app = gr.Interface(
    fn=yolov7_inference,
    inputs=inputs,
    outputs=outputs,
    title=title,
    #examples=examples,
    cache_examples=True,
    theme='darkhuggingface',
)
demo_app.launch(debug=True, enable_queue=True)
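
# Quick local check (a minimal sketch, kept commented out like the example lines above):
# calls yolov7_inference() directly, bypassing the UI, on one of the example images.
# Assumes 'bondor_to_kodomtoli.jpg' has been downloaded; the output file name
# 'prediction.jpg' is arbitrary. Uncomment and run before demo_app.launch() to test.
# from PIL import Image
# test_image = Image.open("bondor_to_kodomtoli.jpg")
# rendered = yolov7_inference(test_image, "nihalbaig0/yolov7", 640, 0.25, 0.45)
# Image.fromarray(rendered).save("prediction.jpg")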