# NOTE(review): the three lines that stood here ("Spaces:" / "Runtime error")
# were status-page residue from a Hugging Face Space capture, not code; the
# Space reported a runtime error at scrape time.
| import gradio as gr | |
| import numpy as np | |
| import cv2 | |
| import os | |
| import tempfile | |
| ############################################ | |
| # 1. SINGLE-IMAGE GOIS INFERENCE (Imported) | |
| ############################################ | |
| from gois_core import run_inference | |
| """ | |
| Assumed signature of run_inference: | |
| run_inference( | |
| image: np.ndarray (RGB), | |
| model_name: str, | |
| coarse_size: int, | |
| fine_size: int, | |
| c_overlap: float, | |
| f_overlap: float, | |
| nms_thresh: float | |
| ) -> Tuple[ | |
| np.ndarray, # FI image | |
| np.ndarray, # GOIS image | |
| str, # FI HTML table | |
| str, # GOIS HTML table | |
| np.ndarray, # bar_chart image | |
| np.ndarray, # fi_pie image | |
| np.ndarray, # gois_pie image | |
| str # metrics table HTML | |
| ] | |
| """ | |
| ############################################ | |
| # 2. BATCH INFERENCE FOR MULTIPLE IMAGES | |
| ############################################ | |
def run_inference_batch(
    file_paths: list,
    model_name: str,
    coarse_size: int,
    fine_size: int,
    c_overlap: float,
    f_overlap: float,
    nms_thresh: float,
):
    """Run single-image GOIS inference over a batch of image files.

    Args:
        file_paths: List of image file paths (strings, as produced by
            ``gr.Files(type="filepath")``).
        model_name: Key selecting the detector from the model registry.
        coarse_size: Coarse slice size in pixels.
        fine_size: Fine slice size in pixels.
        c_overlap: Coarse slice overlap fraction.
        f_overlap: Fine slice overlap fraction.
        nms_thresh: NMS IoU threshold.

    Returns:
        tuple: ``(gallery, html)`` where ``gallery`` is a list of
        ``(image, caption)`` pairs alternating FI/GOIS per input image
        (or ``None`` on failure) and ``html`` is a summary metrics table
        (or an error message on failure).
    """
    if not file_paths:
        return None, "<p style='color:red;'>No images uploaded.</p>"

    combined_images = []  # (image, caption) pairs for the gallery
    metrics_rows = []     # one <tr> of HTML per successfully processed image

    for path in file_paths:
        img_name = os.path.basename(path)
        img = cv2.imread(path)  # BGR; None if the file is unreadable
        if img is None:
            # Skip unreadable/corrupt files rather than aborting the batch.
            continue
        # Model expects RGB input — presumably matches run_inference's
        # contract (see its documented signature); confirm if it changes.
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        # 1) Run the single-image inference. Per-image charts/tables other
        #    than the metrics HTML are not surfaced in batch mode.
        (
            fi_img,
            gois_img,
            _fi_table_html,
            _gois_table_html,
            _bar_chart,
            _fi_pie,
            _gois_pie,
            metrics_html,
        ) = run_inference(
            img_rgb, model_name, coarse_size, fine_size,
            c_overlap, f_overlap, nms_thresh
        )

        # 2) Add to gallery: (image, label)
        combined_images.append((fi_img, f"{img_name} - FI"))
        combined_images.append((gois_img, f"{img_name} - GOIS"))

        # 3) Collect metrics in a single row
        metrics_rows.append(
            f"<tr><td>{img_name}</td><td>{metrics_html or '(No metrics)'} </td></tr>"
        )

    # FIX: previously, a batch in which every file failed to load returned an
    # empty gallery and a header-only table with no feedback. Surface an
    # explicit error instead.
    if not combined_images:
        return None, "<p style='color:red;'>None of the uploaded files could be read as images.</p>"

    # 4) Build a single HTML table for the entire batch
    table_html = f"""
<table border="1" style="border-collapse: collapse; width:100%;">
<thead>
<tr><th>Image Name</th><th>Metrics</th></tr>
</thead>
<tbody>
{''.join(metrics_rows)}
</tbody>
</table>
"""
    return combined_images, table_html
| ############################################ | |
| # 3. EXTENDED MODEL REGISTRY (Placeholder) | |
| ############################################ | |
| from models_loader import EXTENDED_MODELS | |
| ############################################ | |
| # 4. OPTIONAL CUSTOM CSS | |
| ############################################ | |
# Stylesheet injected via gr.Blocks(css=CUSTOM_CSS) in build_app().
# Sections: page background/font; a gradient banner for the #custom-header
# div emitted by the header gr.HTML block; card-like tab panels via the
# .tabitem elem_class; and gradient tab-label styling (the checked/selected
# tab gets a lighter gradient).
CUSTOM_CSS = """
body {
background-color: #fafafa;
font-family: 'Inter', sans-serif;
}
#custom-header {
background: linear-gradient(135deg, #8EC5FC, #E0C3FC);
border-radius: 10px;
padding: 1rem;
text-align: center;
color: #fff;
}
#custom-header h1 { font-size: 2.0rem; margin-bottom: 0.3rem; }
#custom-header h3 { font-size: 1.2rem; margin-top: 0.2rem; }
.tabitem {
background-color: #ffffff;
border: 1px solid #ddd;
border-radius: 8px;
margin: 0.5rem 0;
padding: 1rem;
}
.gradio-tab .gradio-tabitem label {
background: linear-gradient(135deg, #ffc3a0, #ffafbd);
color: #3c3c3c !important;
border-radius: 5px 5px 0 0;
margin-right: 3px;
font-weight: 600 !important;
font-size: 0.95rem;
}
.gradio-tab .gradio-tabitem input:checked + label {
background: linear-gradient(135deg, #D3CCE3, #E9E4F0);
color: #000 !important;
}
"""
| ############################################ | |
| # 5. BUILD GRADIO INTERFACE | |
| ############################################ | |
| import gradio as gr | |
def build_app():
    """Assemble the Gradio Blocks demo UI.

    Layout: a styled header banner, then four tabs — Overview (docs),
    Single Image (FI vs. GOIS on one upload), Multiple Images (batch
    gallery + metrics table), and Recommended Configs (parameter
    guidance tables). Wires run_inference and run_inference_batch to
    the respective Run buttons.

    Returns:
        gr.Blocks: the constructed (not yet launched) demo.
    """
    # Soft theme; additional visual tweaks come from CUSTOM_CSS below.
    theme = gr.themes.Soft(
        primary_hue="indigo",
        secondary_hue="pink",
        neutral_hue="blue"
    )
    with gr.Blocks(css=CUSTOM_CSS, theme=theme) as demo:
        # 5.1 HEADER — #custom-header is styled by CUSTOM_CSS
        gr.HTML("""
<div id="custom-header">
<h1>GOIS vs. Full-Image Detection</h1>
<h3>Single & Multiple Images Comparison</h3>
</div>
""")
        ####################################
        # 5.2. OVERVIEW TAB
        ####################################
        with gr.Tab("Overview", elem_classes="tabitem"):
            gr.Markdown("""
### GOIS: Granular Object Inspection Strategy
**Granular Object Inspection Strategy (GOIS)** slices images (coarse → fine) to detect objects that might be missed in a single pass:
- Better handling of **occlusion** or partially visible objects.
- Fewer **boundary artifacts** when patch edges overlap.
- Can reduce **false positives** by skipping large uniform regions.
**Why This App?**
- **Single Image**: See side-by-side Full-Image (FI) detection vs. GOIS detection for one image.
- **Multiple Images**: Upload a batch of images and get a combined gallery + metrics table.
**Extended Models**:
- YOLOv8
- RT-DETR-l
- YOLOv8s-Worldv2
...and more from our custom registry.
**Speed vs. Accuracy**:
- GOIS is more thorough but can be slower due to multi-slicing.
- Adjust slice sizes and overlaps to balance performance.
---
""")
        ####################################
        # 5.3. SINGLE IMAGE TAB
        ####################################
        with gr.Tab("Single Image", elem_classes="tabitem"):
            gr.Markdown("#### Upload one image and view FI vs. GOIS outputs in the same tab")
            # Input row: image upload (numpy RGB array) + model selector
            with gr.Row():
                inp_image = gr.Image(label="Upload Image", type="numpy")
                model_dd_img = gr.Dropdown(
                    label="Select Model",
                    choices=list(EXTENDED_MODELS.keys()),
                    value="YOLOv8"
                )
            # Parameter sliders — ranges match the "Recommended Configs" tab
            with gr.Row():
                coarse_size_img = gr.Slider(512, 768, value=640, step=1, label="Coarse Slice Size")
                fine_size_img = gr.Slider(128, 384, value=256, step=1, label="Fine Slice Size")
                c_overlap_img = gr.Slider(0.1, 0.4, value=0.2, step=0.1, label="Coarse Overlap")
                f_overlap_img = gr.Slider(0.1, 0.4, value=0.2, step=0.1, label="Fine Overlap")
                nms_thresh_img = gr.Slider(0.3, 0.5, value=0.4, step=0.1, label="NMS Threshold")
            run_btn_img = gr.Button("Run Inference (Single Image)", variant="primary")
            # Output section in the same tab
            gr.Markdown("#### Results (FI vs. GOIS) + Metrics")
            with gr.Row():
                fi_out = gr.Image(label="FI-Det (Baseline)")
                gois_out = gr.Image(label="GOIS-Det")
            with gr.Row():
                fi_table_html = gr.HTML(label="FI Classes/Distribution")
                gois_table_html = gr.HTML(label="GOIS Classes/Distribution")
            with gr.Row():
                bar_chart = gr.Image(label="Bar Chart (FI vs. GOIS)")
                fi_pie_img = gr.Image(label="FI Pie Chart")
                gois_pie_img = gr.Image(label="GOIS Pie Chart")
            metrics_html = gr.HTML(label="Metrics Table (Single Image)")
            # Button binding — `outputs` order must match run_inference's
            # 8-tuple return order (see signature note at top of file).
            run_btn_img.click(
                fn=run_inference,
                inputs=[
                    inp_image,
                    model_dd_img,
                    coarse_size_img,
                    fine_size_img,
                    c_overlap_img,
                    f_overlap_img,
                    nms_thresh_img
                ],
                outputs=[
                    fi_out,
                    gois_out,
                    fi_table_html,
                    gois_table_html,
                    bar_chart,
                    fi_pie_img,
                    gois_pie_img,
                    metrics_html
                ]
            )
        ####################################
        # 5.4. MULTIPLE IMAGES TAB
        ####################################
        with gr.Tab("Multiple Images", elem_classes="tabitem"):
            gr.Markdown("#### Upload multiple images and compare FI vs. GOIS on each")
            # Input row: type="filepath" yields a list of path strings,
            # which is what run_inference_batch expects
            with gr.Row():
                batch_imgs = gr.Files(
                    label="Upload Multiple Images (JPG/PNG)",
                    file_count="multiple",
                    type="filepath"
                )
                model_dd_multi = gr.Dropdown(
                    label="Select Model",
                    choices=list(EXTENDED_MODELS.keys()),
                    value="YOLOv8"
                )
            # Parameter sliders (same ranges as the single-image tab)
            with gr.Row():
                coarse_size_multi = gr.Slider(512, 768, value=640, step=1, label="Coarse Slice Size")
                fine_size_multi = gr.Slider(128, 384, value=256, step=1, label="Fine Slice Size")
                c_overlap_multi = gr.Slider(0.1, 0.4, value=0.2, step=0.1, label="Coarse Overlap")
                f_overlap_multi = gr.Slider(0.1, 0.4, value=0.2, step=0.1, label="Fine Overlap")
                nms_thresh_multi = gr.Slider(0.3, 0.5, value=0.4, step=0.1, label="NMS Threshold")
            run_btn_multi = gr.Button("Run Inference (Batch)", variant="primary")
            # Outputs in the same tab: gallery of (image, caption) pairs
            # plus one HTML metrics table for the whole batch
            gr.Markdown("#### Batch Results & Metrics")
            batch_gallery = gr.Gallery(label="FI-Det vs GOIS-Det (All Images)")
            batch_metrics_html = gr.HTML(label="Batch Metrics Table")
            # Button binding — run_inference_batch returns (gallery, html)
            run_btn_multi.click(
                fn=run_inference_batch,
                inputs=[
                    batch_imgs,
                    model_dd_multi,
                    coarse_size_multi,
                    fine_size_multi,
                    c_overlap_multi,
                    f_overlap_multi,
                    nms_thresh_multi
                ],
                outputs=[
                    batch_gallery,
                    batch_metrics_html
                ]
            )
        ####################################
        # 5.5. RECOMMENDED CONFIGURATIONS TAB
        ####################################
        with gr.Tab("Recommended Configs"):
            gr.Markdown("""
### Suggested Parameter Settings (for GOIS)
| Config | Coarse Size | Fine Size | Coarse Overlap | Fine Overlap | NMS Thresh |
|---------|------------:|----------:|---------------:|-------------:|-----------:|
| **C1** | 512 px | 128 px | 0.1 | 0.1 | 0.3 |
| **C2** | 640 px | 256 px | 0.2 | 0.2 | 0.4 |
| **C3** | 768 px | 384 px | 0.3 | 0.3 | 0.5 |
**Tips**:
- **Coarse Size**: Larger slices → fewer patches → faster, but might miss tiny objects.
- **Fine Size**: Smaller slices → more detailed detection → slower.
- **Overlap**: Higher overlap → fewer boundary misses → more computations.
- **NMS Threshold**: Lower → more aggressive overlap removal → might drop some true positives.
""")
            gr.Markdown("""
### More Suggestions and Advanced Parameter Settings (for GOIS)
| Parameter | Recommended Values | Expanded Ranges | Effect on Performance | Handling Errors |
|-------------------------|---------------------|-----------------|-------------------------------------------------------------------|---------------------------------------------------------------|
| **Coarse Slice Size** | 512 - 768 | 256 - 1024 | Larger slices improve speed but may miss small objects | If small objects are missed, reduce slice size |
| **Fine Slice Size** | 128 - 384 | 64 - 512 | Smaller slices detect tiny objects better but increase computation| If too many duplicate detections occur, increase NMS |
| **Coarse Overlap** | 0.2 - 0.4 | 0.1 - 0.5 | Higher overlap reduces boundary artifacts but increases processing time | Increase overlap if objects near boundaries are missing |
| **Fine Overlap** | 0.4 - 0.6 | 0.3 - 0.7 | Higher overlap helps detect occluded objects but increases computation | Increase overlap to detect occluded objects but monitor speed |
| **NMS Threshold** | 0.3 - 0.5 | 0.2 - 0.6 | Lower values remove overlapping boxes but may discard true positives | If detections overlap, lower NMS; if detections disappear, increase NMS |
| **IoU Threshold** | 0.4 - 0.6 | 0.3 - 0.7 | Defines the box-overlap level for merging detections | Lower values reduce false positives; higher values may merge distinct objects |
| **Confidence Threshold**| 0.2 - 0.5 | 0.1 - 0.6 | Minimum confidence score required to keep detections | If too many false positives, increase the confidence threshold |
| **✅ Adaptive Slicing** | **Enabled** | Auto-Tuned | **Dynamically adjusts slice sizes based on object density** | **Helps avoid redundant computations and adapts to object distribution** |
| **✅ Multi-Scale Fusion** | **Enabled** | Auto-Tuned | **Merges detections from different scales for improved accuracy** | **Reduces false negatives for small or occluded objects** |
| **✅ Context Integration** | **Enabled** | Auto-Tuned | **Incorporates adjacent slice info to handle occlusions** | **Reduces missing objects near slice boundaries** |
| **✅ Class-Aware NMS** | **Enabled** | Auto-Tuned | Applies NMS per object class to preserve small-object diversity | Helps maintain multiple overlapping objects of different classes |
""")
    return demo
| ############################################ | |
| # 6. MAIN | |
| ############################################ | |
if __name__ == "__main__":
    # Construct the Blocks app and start the local Gradio server.
    build_app().launch()