brxerq committed
Commit e691631
1 Parent(s): 575f619

Update app.py

Files changed (1)
app.py  +33 -30
app.py CHANGED
@@ -82,13 +82,18 @@ def perform_detection(image, interpreter, labels):
             detections.append([object_name, scores[i], xmin, ymin, xmax, ymax])
     return image
 
+def resize_image(image, size=640):
+    return cv2.resize(image, (size, size))
+
 def detect_image(input_image):
     image = np.array(input_image)
-    result_image = perform_detection(image, interpreter, labels)
-    return Image.fromarray(result_image)
+    resized_image = resize_image(image)
+    result_image = perform_detection(resized_image, interpreter, labels)
+    result_image_resized = resize_image(result_image)
+    return Image.fromarray(result_image_resized)
 
 def detect_video(input_video):
-    cap = cv2.VideoCapture(input_video.name)
+    cap = cv2.VideoCapture(input_video)
     frames = []
 
     while cap.isOpened():
@@ -96,15 +101,20 @@ def detect_video(input_video):
         if not ret:
             break
 
-        result_frame = perform_detection(frame, interpreter, labels)
-        frames.append(result_frame)
+        resized_frame = resize_image(frame)
+        result_frame = perform_detection(resized_frame, interpreter, labels)
+        result_frame_resized = resize_image(result_frame)
+        frames.append(result_frame_resized)
 
     cap.release()
 
+    if not frames:
+        raise ValueError("No frames were read from the video.")
+
     height, width, layers = frames[0].shape
     size = (width, height)
-    output_video_path = "result_" + input_video.name
-    out = cv2.VideoWriter(output_video_path, cv2.VideoWriter_fourcc(*'DIVX'), 15, size)
+    output_video_path = "result_" + os.path.basename(input_video)
+    out = cv2.VideoWriter(output_video_path, cv2.VideoWriter_fourcc(*'mp4v'), 15, size)
 
     for frame in frames:
         out.write(frame)
@@ -113,26 +123,19 @@ def detect_video(input_video):
 
     return output_video_path
 
-image_input = gr.inputs.Image(type="pil", label="Upload an image")
-image_output = gr.outputs.Image(type="pil", label="Detection Result")
-
-video_input = gr.inputs.Video(type="file", label="Upload a video")
-video_output = gr.outputs.Video(label="Detection Result")
-
-app = gr.Interface(
-    fn=detect_image,
-    inputs=image_input,
-    outputs=image_output,
-    live=True,
-    description="Object Detection on Images"
-)
-
-app_video = gr.Interface(
-    fn=detect_video,
-    inputs=video_input,
-    outputs=video_output,
-    live=True,
-    description="Object Detection on Videos"
-)
-
-gr.TabbedInterface([app, app_video], ["Image Detection", "Video Detection"]).launch()
+app = gr.Blocks()
+
+with app:
+    with gr.Tab("Image Detection"):
+        gr.Markdown("Upload an image for object detection")
+        image_input = gr.Image(type="pil", label="Upload an image")
+        image_output = gr.Image(type="pil", label="Detection Result")
+        gr.Button("Submit").click(fn=detect_image, inputs=image_input, outputs=image_output)
+
+    with gr.Tab("Video Detection"):
+        gr.Markdown("Upload a video for object detection")
+        video_input = gr.Video(label="Upload a video")
+        video_output = gr.Video(label="Detection Result")
+        gr.Button("Submit").click(fn=detect_video, inputs=video_input, outputs=video_output)
+
+app.launch()
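
Note: the new output_video_path line calls os.path.basename, so app.py needs os imported at module level. A minimal sketch of the setup these hunks assume; the model and label file names and the exact interpreter import are illustrative assumptions, not shown in this commit:

# Sketch only -- module-level setup assumed by the hunks above.
# "detect.tflite" and "labelmap.txt" are placeholder file names.
import os

import cv2
import numpy as np
import gradio as gr
from PIL import Image
from tflite_runtime.interpreter import Interpreter  # assumption: tflite_runtime is used

# perform_detection() reads from this interpreter and label list.
interpreter = Interpreter(model_path="detect.tflite")
interpreter.allocate_tensors()

with open("labelmap.txt") as f:
    labels = [line.strip() for line in f if line.strip()]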