Update app.py
app.py
CHANGED
@@ -37,13 +37,14 @@ def fetch_image(image_url):
     except Exception as e:
         raise ValueError(f"Failed to fetch image from URL: {str(e)}")
 
-def predict(image_url, prompt):
+def predict(image_url):
     """
     Process the image URL and prompt, return annotated image data.
     """
     try:
         # Validate and fetch the image
         image = fetch_image(image_url)
+        prompt = "Detect all objects in the provided image and output their bounding box coordinates in the format <box>(x1,y1,x2,y2)</box>. Do not include any other text or descriptions. If multiple objects are detected, list each bounding box in a new <box> tag."
 
         # Call the Hugging Face Inference API
         stream = client.chat.completions.create(
@@ -87,16 +88,11 @@ def predict(image_url, prompt):
 def create_gradio_interface():
     with gr.Blocks(title="Object Detection Demo") as demo:
         gr.Markdown("# Object Detection with Bounding Boxes")
-        gr.Markdown("Provide an image URL
+        gr.Markdown("Provide an image URL detect objects and display bounding boxes.")
 
         with gr.Row():
             with gr.Column():
                 image_url = gr.Textbox(label="Image URL", placeholder="Enter a publicly accessible image URL")
-                prompt = gr.Textbox(
-                    label="Prompt",
-                    placeholder="e.g., 'Detect and label all objects in the image with bounding boxes.'",
-                    lines=3
-                )
                 submit_btn = gr.Button("Run Detection")
             with gr.Column():
                 output_image = gr.AnnotatedImage(label="Detected Objects")
@@ -104,7 +100,7 @@ def create_gradio_interface():
 
         submit_btn.click(
             fn=predict,
-            inputs=[image_url, prompt],
+            inputs=[image_url],
             outputs=[output_image, status]
         )
 
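
For context, the first hunk only shows the opening of the Inference API call. Below is a hedged sketch of what such a streamed chat-completion request typically looks like with huggingface_hub's InferenceClient when the new hard-coded prompt and the user-supplied image URL are sent to a vision-language model; the model name, max_tokens, and the run_detection wrapper are placeholders, not values from this commit.

# Sketch only: the full call in app.py is not visible in this diff.
# Assumes `client` is a huggingface_hub.InferenceClient and that the target
# model accepts OpenAI-style multimodal messages; the model name and
# max_tokens below are placeholders, not values from this commit.
from huggingface_hub import InferenceClient

client = InferenceClient()  # uses HF_TOKEN from the environment if available

def run_detection(image_url: str, prompt: str) -> str:
    stream = client.chat.completions.create(
        model="Qwen/Qwen2-VL-7B-Instruct",  # placeholder vision-language model
        messages=[{
            "role": "user",
            "content": [
                {"type": "text", "text": prompt},
                {"type": "image_url", "image_url": {"url": image_url}},
            ],
        }],
        stream=True,
        max_tokens=512,
    )
    # Concatenate the streamed chunks into a single response string.
    return "".join(chunk.choices[0].delta.content or "" for chunk in stream)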
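
The new prompt asks the model to emit each detection as a <box>(x1,y1,x2,y2)</box> tag, and the result is rendered with gr.AnnotatedImage, which takes a (base_image, [(bounding_box, label), ...]) tuple. The parsing step itself is outside this diff; a minimal sketch using a hypothetical parse_boxes helper, assuming the coordinates come back as integer pixel values in the fetched image's coordinate space.

# Hypothetical helper, not part of this commit: turns the model's
# <box>(x1,y1,x2,y2)</box> tags into the (bounding_box, label) pairs that
# gr.AnnotatedImage accepts. Assumes integer pixel coordinates.
import re

BOX_PATTERN = re.compile(r"<box>\((\d+),\s*(\d+),\s*(\d+),\s*(\d+)\)</box>")

def parse_boxes(model_text: str):
    """Return [((x1, y1, x2, y2), label), ...] for gr.AnnotatedImage."""
    annotations = []
    for i, match in enumerate(BOX_PATTERN.finditer(model_text), start=1):
        x1, y1, x2, y2 = (int(v) for v in match.groups())
        annotations.append(((x1, y1, x2, y2), f"object {i}"))
    return annotations

# Usage inside predict(), matching outputs=[output_image, status]:
#   annotations = parse_boxes(response_text)
#   return (image, annotations), f"Detected {len(annotations)} object(s)"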