apailang committed
Commit ea6da2d • 1 Parent(s): 261d154

Update app.py

Files changed (1):
  1. app.py +20 -22
app.py CHANGED
@@ -71,34 +71,32 @@ def detect_video(video):
     # Create a video capture object
     cap = cv2.VideoCapture(video)
 
+    nb_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+    frame_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    frame_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    fps = cap.get(cv2.CAP_PROP_FPS)
+
     # Process frames in a loop
     while cap.isOpened():
         ret, frame = cap.read()
         if not ret:
             break
-
-        # Expand dimensions since model expects images to have shape: [1, None, None, 3]
-        image_np_expanded = np.expand_dims(frame, axis=0)
-
-        # Run inference
-        output_dict = detection_model(image_np_expanded)
-
-        # Extract detections
-        boxes = output_dict['detection_boxes'][0].numpy()
-        scores = output_dict['detection_scores'][0].numpy()
-        classes = output_dict['detection_classes'][0].numpy().astype(np.int64)
-
-        # Draw bounding boxes and labels
+        for i in tqdm(range(nb_frames)):
+            ret, image_np = video_reader.read()
+            input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.uint8)
+            results = detection_model(input_tensor)
         image_np_with_detections = viz_utils.visualize_boxes_and_labels_on_image_array(
-            frame,
-            boxes,
-            classes,
-            scores,
-            category_index,
-            use_normalized_coordinates=True,
-            max_boxes_to_draw=20,
-            min_score_thresh=.5,
-            agnostic_mode=False)
+            image_np,
+            results['detection_boxes'][0].numpy(),
+            (results['detection_classes'][0].numpy()+ label_id_offset).astype(int),
+            results['detection_scores'][0].numpy(),
+            category_index,
+            use_normalized_coordinates=True,
+            max_boxes_to_draw=200,
+            min_score_thresh=.50,
+            agnostic_mode=False,
+            line_thickness=2)
+
 
         # Yield the processed frame
         yield image_np_with_detections
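
Note that, as committed, the new hunk reads frames from video_reader, which is not defined anywhere in this hunk, and it nests the tqdm frame loop inside the existing while cap.isOpened() loop, so the visualization call only ever sees the last frame read by the inner loop. Below is a minimal sketch of the per-frame flow the change appears to be aiming for, assuming detection_model, category_index, label_id_offset, and viz_utils are set up earlier in app.py (TF Object Detection API style) and that video_reader refers to the same capture object as cap; the helper name detect_video_frames is hypothetical.

    import cv2
    import numpy as np
    import tensorflow as tf
    from tqdm import tqdm
    from object_detection.utils import visualization_utils as viz_utils

    def detect_video_frames(video, detection_model, category_index, label_id_offset=1):
        # Open the input video and read its frame count so tqdm can show progress.
        cap = cv2.VideoCapture(video)
        nb_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

        for _ in tqdm(range(nb_frames)):
            ret, image_np = cap.read()
            if not ret:
                break

            # The detector expects a uint8 batch of shape [1, height, width, 3].
            input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.uint8)
            results = detection_model(input_tensor)

            # Draw boxes, class labels, and scores onto the frame (modified in place).
            viz_utils.visualize_boxes_and_labels_on_image_array(
                image_np,
                results['detection_boxes'][0].numpy(),
                (results['detection_classes'][0].numpy() + label_id_offset).astype(int),
                results['detection_scores'][0].numpy(),
                category_index,
                use_normalized_coordinates=True,
                max_boxes_to_draw=200,
                min_score_thresh=.50,
                agnostic_mode=False,
                line_thickness=2)

            # Yield the annotated frame so the caller (e.g. a streaming UI output)
            # can display or write it.
            yield image_np

        cap.release()

The sketch keeps only a single frame-count-bounded loop; whether the commit intends to retain both the while loop and the for loop is not clear from this hunk alone.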