gregarific committed
Commit a08d52a
1 Parent(s): 8f00547

Update app.py

Files changed (1)
  1. app.py +66 -9
app.py CHANGED
@@ -76,17 +76,74 @@ def predict2(image_np):
     result_pil_img = tf.keras.utils.array_to_img(image_np_with_detections[0])
 
     return result_pil_img
-
-
+###
+def predict_on_video(video_in_filepath, video_out_filepath, detection_model, category_index):
+    video_reader = cv2.VideoCapture(video_in_filepath)
+    frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))
+    fps = video_reader.get(cv2.CAP_PROP_FPS)
+
+    video_writer = cv2.VideoWriter(
+        video_out_filepath,
+        cv2.VideoWriter_fourcc(*'mp4v'),
+        fps,
+        (frame_w, frame_h)
+    )
+    while True:
+        ret, frame = video_reader.read()
+        if not ret:
+            break  # Break the loop if the video is finished
+
+        processed_frame = predict(frame)
+        processed_frame_np = np.array(processed_frame)
+        video_writer.write(processed_frame_np)
+
+    # Release camera and close windows
+    video_reader.release()
+    video_writer.release()
+    cv2.destroyAllWindows()
+    cv2.waitKey(1)
+    video_reader.release()
+    video_writer.release()
+    cv2.destroyAllWindows()
+    cv2.waitKey(1)
+
+# Function to process a video
+def process_video(video_path):
+    output_path = "output_video.mp4"  # Output path for the processed video
+    predict_on_video(video_path, output_path, detection_model, category_index)
+    return output_path
+
+# Specify paths to example images
+sample_images = [["sample1.jpg"], ["sample2.jpg"],
+                 ["sample3.jpg"]
+                ]
+###
 REPO_ID = "gregarific/assignmodel"
 detection_model = load_model()
 # pil_image = Image.open(image_path)
 # image_arr = pil_image_as_numpy_array(pil_image)
-
-# predicted_img = predict(image_arr)
-# predicted_img.save('predicted.jpg')
-
-gr.Interface(fn=predict,
+###
+tab1 = gr.Interface(fn=predict,
              inputs=gr.Image(type="pil"),
-             outputs=gr.Image(type="pil")
-             ).launch(share=True)
+             outputs=gr.Image(type="pil"),
+             examples=sample_images,
+             title="Image - Object Detection (WheelChair vs Motorized WheelChair)",
+             description='Model used: SSD MobileNet V1 320x320.'
+             )
+
+#gr.Interface(fn=predict,
+#             inputs=gr.Image(type="pil"),
+#             outputs=gr.Image(type="pil")
+#             ).launch(share=True)
+tab2 = gr.Interface(
+    fn=process_video,
+    inputs=gr.File(label="Upload a video"),
+    outputs=gr.File(label="output"),
+    title='Video - Object Detection (WheelChair Type)'
+)
+
+
+iface = gr.TabbedInterface([tab1, tab2], tab_names = ['Image','Video'], title='WheelChair Type Detection')
+
+iface.launch(share=True)
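
For context, the new video path in this commit follows the standard OpenCV read-process-write loop: open the input with cv2.VideoCapture, open a cv2.VideoWriter with the source's fps and frame size, run the detector on each decoded frame, and write the result until read() reports no more frames. The sketch below is a minimal, self-contained illustration of that pattern only; the process_frame callable, the file names, and the explicit BGR/RGB conversions are assumptions made for the example and are not code from this Space (its predict function receives the raw frame directly).

# Illustrative sketch of the read-process-write loop used by predict_on_video above.
# `process_frame`, the paths, and the BGR<->RGB conversions are assumptions for this
# example, not code from the Space.
import cv2
import numpy as np

def process_frame(rgb_frame: np.ndarray) -> np.ndarray:
    # Stand-in for a detector call; returns the frame unchanged.
    return rgb_frame

def run_on_video(in_path, out_path):
    reader = cv2.VideoCapture(in_path)
    fps = reader.get(cv2.CAP_PROP_FPS)
    w = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
    writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
    try:
        while True:
            ok, bgr = reader.read()
            if not ok:
                break  # end of stream
            rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)   # OpenCV decodes frames as BGR
            out = process_frame(rgb)
            writer.write(cv2.cvtColor(out, cv2.COLOR_RGB2BGR))  # VideoWriter expects BGR
    finally:
        reader.release()
        writer.release()

# Example: run_on_video("input.mp4", "output_video.mp4")

Releasing the reader and writer in a finally block ensures the output file is finalized even if a frame fails to process.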