Update app.py
--- a/app.py
+++ b/app.py
@@ -63,10 +63,6 @@ def predict(image_np):
 
     return result_pil_img
 
-# def predict2(pilimg):
-#     image = None
-#     image = load_image_into_numpy_array(pilimg)
-#     return predict(image)
 
 detection_model = load_model()
 
@@ -74,18 +70,64 @@ detection_model = load_model()
 sample_images = [["test_1.jpg"],["test_9.jpg"],["test_6.jpg"],["test_7.jpg"],
                  ["test_10.jpg"], ["test_11.jpg"],["test_8.jpg"]]
 
-
-# example_inputs = [Image.open(image) for image in sample_images]
-# example_outputs = [predict(input_image) for input_image in example_inputs]
-
-# Save the example output image
-# example_outputs[0].save("/home/user/app/predicted_1.jpg")
-
-iface = gr.Interface(fn=predict,
+tab1 = gr.Interface(fn=predict,
                     inputs=gr.Image(label='Upload an expressway image', type="pil"),
                     outputs=gr.Image(type="pil"),
                     title='Blue and Yellow Taxi detection in live expressway traffic conditions (data.gov.sg)',
                     examples = sample_images
                     )
 
+def predict_on_video(video_in_filepath, video_out_filepath, detection_model, category_index):
+    video_reader = cv2.VideoCapture(video_in_filepath)
+
+    frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))
+    fps = video_reader.get(cv2.CAP_PROP_FPS)
+
+    video_writer = cv2.VideoWriter(
+        video_out_filepath,
+        cv2.VideoWriter_fourcc(*'mp4v'),
+        fps,
+        (frame_w, frame_h)
+    )
+
+    while True:
+        ret, frame = video_reader.read()
+        if not ret:
+            break  # End of the video
+
+        # OpenCV reads frames as BGR; predict(image_np) takes just the
+        # image and expects the RGB layout the Gradio path supplies
+        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        processed_frame = predict(frame_rgb)
+
+        # Convert the PIL result back to a BGR array for the writer
+        processed_frame_np = cv2.cvtColor(np.array(processed_frame), cv2.COLOR_RGB2BGR)
+        video_writer.write(processed_frame_np)
+
+    # Release video reader and writer
+    video_reader.release()
+    video_writer.release()
+
+# Process an uploaded video and return the path of the annotated copy
+def process_video(video_path):
+    output_path = "output_video.mp4"  # Output path for the processed video
+    predict_on_video(video_path, output_path, detection_model, category_index)
+    return output_path
+
+# Video tab: gr.Video accepts an uploaded .mp4 and plays back the
+# processed clip returned as a filepath
+tab2 = gr.Interface(
+    fn=process_video,
+    inputs=gr.Video(label="Upload a video"),
+    outputs=gr.Video(),
+    title='Video Processing',
+    examples=["example_video.mp4"]
+)
+
+# gr.TabbedInterface combines the image and video Interfaces into one
+# tabbed app
+iface = gr.TabbedInterface([tab1, tab2], ["Image", "Video"])
+
+# Launch the interface
 iface.launch(share=True)
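
A practical wrinkle in the new video path: cv2.VideoCapture reports CAP_PROP_FPS as 0.0 for some containers and streams, and a cv2.VideoWriter created with fps=0 writes an unplayable file. A helper along these lines would guard the read; read_fps and its 25.0 default are illustrative choices, not part of the app above.

import cv2

def read_fps(video_reader, default_fps=25.0):
    # CAP_PROP_FPS comes back 0.0 when the container does not report a
    # frame rate; fall back to an assumed default rather than produce a
    # broken output file
    fps = video_reader.get(cv2.CAP_PROP_FPS)
    return fps if fps > 0 else default_fps

Inside predict_on_video, fps = read_fps(video_reader) would then replace the bare property read.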
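
To exercise the video pipeline without going through the Gradio UI, a throwaway driver such as the one below works. sample.mp4 is a placeholder clip name; the sketch assumes app.py defines detection_model and category_index at module level (as process_video already relies on) and that the iface.launch(share=True) call is guarded with if __name__ == "__main__": so that importing app does not start the server.

# Hypothetical smoke test for predict_on_video; "sample.mp4" is a
# placeholder and must exist next to the script
from app import predict_on_video, detection_model, category_index

predict_on_video("sample.mp4", "annotated_sample.mp4",
                 detection_model, category_index)
print("Wrote annotated_sample.mp4")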