import gradio as gr

from inference import combined_inference_live
from model_loader import EXTENDED_MODELS

# Gradio Web Interface
with gr.Blocks() as app:
    gr.Markdown("# Live Object Detection: Full vs. Slicing-Based Detection")
    gr.Markdown(
        """
**How It Works**:
- **Full Inference**: Runs a single pass over the entire frame.
- **Slicing Detection**: Divides the frame into patches, detects objects, applies Non-Maximum Suppression (NMS).
- **Outputs retain the original BGR format for color accuracy**.
"""
    )

    with gr.Row():
        webcam = gr.Image(sources="webcam", streaming=True, label="Webcam Feed")
        model_dd = gr.Dropdown(
            label="Select Model",
            choices=list(EXTENDED_MODELS.keys()),
            value="YOLOv8",
        )

    with gr.Row():
        conf_slider = gr.Slider(0, 1.0, value=0.25, step=0.05, label="Confidence Threshold")
        slice_slider = gr.Slider(128, 1024, value=512, step=1, label="Slice Size")
        overlap_slider = gr.Slider(0.0, 0.5, value=0.2, step=0.05, label="Overlap Rate")

    fi_output = gr.Image(label="Full Inference")
    slice_output = gr.Image(label="Slicing + NMS")

    # Stream each webcam frame through the combined inference pipeline and
    # display the full-frame and sliced results side by side.
    webcam.stream(
        fn=combined_inference_live,
        inputs=[webcam, model_dd, conf_slider, slice_slider, overlap_slider],
        outputs=[fi_output, slice_output],
    )

# Run the Gradio app
app.launch()
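

# ---------------------------------------------------------------------------
# Reference sketch (assumption, not the project's actual inference.py): the app
# expects `combined_inference_live(frame, model_name, conf, slice_size, overlap)`
# to return two annotated images — the full-inference result and the sliced+NMS
# result. The helper below shows one way that contract could be met with
# Ultralytics YOLO and SAHI; the helper name, the EXTENDED_MODELS weight lookup,
# and the SAHI-based slicing are illustrative assumptions only. It is never
# called by this app.
# ---------------------------------------------------------------------------
def _combined_inference_live_sketch(frame, model_name, conf, slice_size, overlap):
    """Illustrative only: full-frame YOLO pass plus SAHI sliced prediction."""
    import cv2
    from ultralytics import YOLO
    from sahi import AutoDetectionModel
    from sahi.predict import get_sliced_prediction

    # Assumed mapping from dropdown name to a weights path; a real
    # implementation would cache loaded models instead of reloading per frame.
    weights = EXTENDED_MODELS.get(model_name, "yolov8n.pt")

    # Full inference: a single pass over the whole frame, annotated by Ultralytics.
    full_result = YOLO(weights).predict(frame, conf=conf, verbose=False)[0]
    full_annotated = full_result.plot()

    # Sliced inference: SAHI tiles the frame, runs the detector on each tile,
    # and merges overlapping detections back into full-frame coordinates.
    sliced_model = AutoDetectionModel.from_pretrained(
        model_type="yolov8", model_path=weights, confidence_threshold=conf
    )
    sliced = get_sliced_prediction(
        frame,
        sliced_model,
        slice_height=slice_size,
        slice_width=slice_size,
        overlap_height_ratio=overlap,
        overlap_width_ratio=overlap,
    )
    sliced_annotated = frame.copy()
    for pred in sliced.object_prediction_list:
        bbox = pred.bbox
        cv2.rectangle(
            sliced_annotated,
            (int(bbox.minx), int(bbox.miny)),
            (int(bbox.maxx), int(bbox.maxy)),
            (0, 255, 0),
            2,
        )

    return full_annotated, sliced_annotated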