Gabolozano committed on
Commit b3747be
1 Parent(s): ed4ede9

Update app.py

Files changed (1)
  1. app.py +16 -18
app.py CHANGED
@@ -29,13 +29,10 @@ def draw_detections(image, detections):
         x_max = box['xmax']
         y_max = box['ymax']

-        # Draw rectangles and label with a larger font size
+        # Draw rectangles and text with a larger font
         cv2.rectangle(np_image, (x_min, y_min), (x_max, y_max), (0, 255, 0), 2)
         label_text = f'{label} {score:.2f}'
-        label_size = cv2.getTextSize(label_text, cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2)[0]
-        label_x = x_min
-        label_y = y_min - label_size[1] if y_min - label_size[1] > 10 else y_min + label_size[1]
-        cv2.putText(np_image, label_text, (label_x, label_y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
+        cv2.putText(np_image, label_text, (x_min, y_min - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 3)

     # Convert BGR to RGB for displaying
     final_image = cv2.cvtColor(np_image, cv2.COLOR_BGR2RGB)
@@ -51,18 +48,19 @@ def get_pipeline_prediction(pil_image):
         print(f"An error occurred: {str(e)}")
         return pil_image, {"error": str(e)}

-# Setting up Gradio interface with tabs for the outputs
-demo = gr.Interface(
-    fn=get_pipeline_prediction,
-    inputs=gr.Image(label="Input image", type="pil"),
-    outputs=[
-        gr.Image(label="Annotated Image", type="pil"),
-        gr.JSON(label="Detected Objects")
-    ],
-    examples=[
-        ["sample1.jpg"], ["sample2.jpg"]  # Update the example paths as needed
-    ],
-    title="Object Detection Interface"
-)
+# Define the Gradio blocks interface
+with gr.Blocks() as demo:
+    gr.Markdown("## Object Detection")
+    with gr.Row():
+        inp_image = gr.Image(label="Input image", type="pil", tool=None)
+        btn_run = gr.Button('Run Detection')
+
+    with gr.Tab("Annotated Image"):
+        out_image = gr.Image()
+
+    with gr.Tab("Detection Results"):
+        out_json = gr.JSON()
+
+    btn_run.click(get_pipeline_prediction, inputs=inp_image, outputs=[out_image, out_json])

 demo.launch()
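
For reference, the first hunk drops the cv2.getTextSize measurement and simply places the label at a fixed 10-pixel offset above the box, with a thicker stroke. A minimal sketch of how draw_detections might look after this commit is shown below; the PIL-to-BGR conversion, the loop structure, and the detection dict layout (the 'score'/'label'/'box' keys produced by a transformers object-detection pipeline) are assumptions, since the diff only shows part of the function.

```python
import cv2
import numpy as np

def draw_detections(image, detections):
    # Assumed: `image` is a PIL image and `detections` is a list of dicts with
    # 'score', 'label' and 'box' keys, as returned by a transformers
    # object-detection pipeline. Only the loop body appears in the diff.
    np_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

    for detection in detections:
        score = detection['score']
        label = detection['label']
        box = detection['box']
        x_min = box['xmin']
        y_min = box['ymin']
        x_max = box['xmax']
        y_max = box['ymax']

        # Draw rectangles and text with a larger font
        cv2.rectangle(np_image, (x_min, y_min), (x_max, y_max), (0, 255, 0), 2)
        label_text = f'{label} {score:.2f}'
        cv2.putText(np_image, label_text, (x_min, y_min - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 3)

    # Convert BGR to RGB for displaying
    final_image = cv2.cvtColor(np_image, cv2.COLOR_BGR2RGB)
    return final_image
```

Note that a label drawn at y_min - 10 can land outside the frame for boxes touching the top edge, which is the case the removed getTextSize branch used to handle.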
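
The second hunk swaps the gr.Interface call for a gr.Blocks layout: a button triggers get_pipeline_prediction and the two outputs land in separate tabs. A runnable sketch of that wiring, under stated assumptions, is below; the imports, the facebook/detr-resnet-50 model choice, and the body of get_pipeline_prediction outside its except branch are not shown in the diff, and the tool=None argument is omitted here because gr.Image only accepts it in Gradio 3.x.

```python
import gradio as gr
from transformers import pipeline

# Assumed model setup: the diff does not show how the pipeline is created.
od_pipe = pipeline("object-detection", model="facebook/detr-resnet-50")

def get_pipeline_prediction(pil_image):
    try:
        predictions = od_pipe(pil_image)
        # draw_detections is the helper sketched above
        annotated = draw_detections(pil_image, predictions)
        return annotated, predictions
    except Exception as e:
        print(f"An error occurred: {str(e)}")
        return pil_image, {"error": str(e)}

# Define the Gradio blocks interface
with gr.Blocks() as demo:
    gr.Markdown("## Object Detection")
    with gr.Row():
        inp_image = gr.Image(label="Input image", type="pil")
        btn_run = gr.Button('Run Detection')

    with gr.Tab("Annotated Image"):
        out_image = gr.Image()

    with gr.Tab("Detection Results"):
        out_json = gr.JSON()

    btn_run.click(get_pipeline_prediction, inputs=inp_image, outputs=[out_image, out_json])

demo.launch()
```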