lorebianchi98 committed on
Commit
b1322c5
·
1 Parent(s): ad13250

Updated app.py

Browse files
Files changed (1) hide show
  1. app.py +61 -23
app.py CHANGED
@@ -102,28 +102,66 @@ It can recognize subtle object differences such as **color, texture, and materia
102
  📘 [Training & evaluation code](https://github.com/lorebianchi98/FG-OVD/NoctOWL)
103
  """
104
 
105
- # --- Gradio Interface ---
106
- demo = gr.Interface(
107
- fn=query_image,
108
- inputs=[
109
- gr.Image(label="Input Image"),
110
- gr.Textbox(label="Text Queries (comma-separated)", placeholder="e.g., red shoes, striped shirt, yellow ball"),
111
- gr.Slider(0, 1, value=0.1, step=0.01, label="Score Threshold"),
112
- gr.Dropdown(
113
- choices=["NoctOWLv2-Base", "NoctOWLv2-Large"],
114
- label="Select Model",
115
- value=None,
116
- info="Select which model to use for detection",
117
- ),
118
- ],
119
- outputs=gr.AnnotatedImage(label="Detected Objects"),
120
 - title="NoctOWLv2 — Fine-Grained Zero-Shot Object Detection",
121
- description=description,
122
- examples=[
123
- ["assets/desciglio.jpg", "striped football shirt, plain red football shirt, yellow shoes, red shoes", 0.07],
124
- ["assets/pool.jpg", "white ball, blue ball, black ball, yellow ball", 0.1],
125
- ["assets/patio.jpg", "ceramic mug, glass mug, pink flowers, blue flowers", 0.09],
126
- ],
127
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
128
 
129
  demo.launch()
 
102
  📘 [Training & evaluation code](https://github.com/lorebianchi98/FG-OVD/NoctOWL)
103
  """
104
 
105
+ # --- Create Interface Layout ---
106
 + with gr.Blocks(title="NoctOWLv2 — Fine-Grained Zero-Shot Object Detection") as demo:
107
+ gr.Markdown(description)
108
+
109
+ with gr.Row():
110
+ with gr.Column():
111
+ input_image = gr.Image(label="Input Image")
112
+
113
+ text_queries = gr.Textbox(
114
+ label="Text Queries (comma-separated)",
115
+ placeholder="e.g., red shoes, striped shirt, yellow ball"
116
+ )
117
+
118
+ score_threshold = gr.Slider(
119
+ 0, 1, value=0.1, step=0.01, label="Score Threshold"
120
+ )
121
+
122
+ model_dropdown = gr.Dropdown(
123
+ choices=["NoctOWLv2-Base", "NoctOWLv2-Large"],
124
+ label="Select Model",
125
+ value=None,
126
+ info="Select which model to use for detection",
127
+ )
128
+
129
 + run_button = gr.Button("🚀 Run Detection", interactive=False)
130
+
131
+ with gr.Column():
132
+ output_image = gr.AnnotatedImage(label="Detected Objects")
133
+
134
+ # --- Enable / Disable Run Button ---
135
+ def toggle_button(model, text):
136
+ return gr.update(interactive=bool(model and text.strip()))
137
+
138
+ model_dropdown.change(
139
+ fn=toggle_button,
140
+ inputs=[model_dropdown, text_queries],
141
+ outputs=run_button,
142
+ )
143
+
144
+ text_queries.change(
145
+ fn=toggle_button,
146
+ inputs=[model_dropdown, text_queries],
147
+ outputs=run_button,
148
+ )
149
+
150
+ # --- Connect Button to Inference ---
151
+ run_button.click(
152
+ fn=query_image,
153
+ inputs=[input_image, text_queries, score_threshold, model_dropdown],
154
+ outputs=output_image,
155
+ )
156
+
157
+ # --- Example Images (without predefined model) ---
158
+ gr.Examples(
159
+ examples=[
160
+ ["assets/desciglio.jpg", "striped football shirt, plain red football shirt, yellow shoes, red shoes", 0.07],
161
+ ["assets/pool.jpg", "white ball, blue ball, black ball, yellow ball", 0.1],
162
+ ["assets/patio.jpg", "ceramic mug, glass mug, pink flowers, blue flowers", 0.09],
163
+ ],
164
+ inputs=[input_image, text_queries, score_threshold],
165
+ )
166
 
167
  demo.launch()