Update app.py
app.py CHANGED
@@ -138,29 +138,11 @@ def detect_video(video):
     # Release resources
     cap.release()
 
-
-
-# video_path_1 = "data/c_base_detected.mp4"
-# video_path_2 = "data/c_tuned_detected.mp4"
-
-# return open(video_path_1, "rb"), open(video_path_2, "rb")
-
-def display_two_videos():
-    path1= os.path.join(os.path.dirname(__file__), "data/c_base_detected.mp4")
-    path2= os.path.join(os.path.dirname(__file__), "data/c_tuned_detected.mp4")
-    cap1 = cv2.VideoCapture(path1)
-    cap2 = cv2.VideoCapture(path2)
-
-    while True:
-        ret1, frame1 = cap1.read()
-        ret2, frame2 = cap2.read()
-        if not ret1 or not ret2:
-            break
-        yield frame1, frame2 # Yield frames from both videos as a tuple
-
-    # return open(path1, "rb"), open(path2, "rb")
+a = os.path.join(os.path.dirname(__file__), "data/c_base_detected.mp4")  # base model demo video
+b = os.path.join(os.path.dirname(__file__), "data/c_tuned_detected.mp4")  # tuned model demo video
 
-
+def video_demo(video1, video2):
+    return [video1, video2]
 
 label_id_offset = 0
 REPO_ID = "apailang/mytfodmodel"
@@ -231,12 +213,12 @@ tuned_image = gr.Interface(
 
 
 video = gr.Interface(
-    fn=
-    inputs=[
-
-
+    fn=video_demo,
+    inputs=[gr.Video(label="base model Video"), gr.Video(label="tuned model Video")],
+    outputs=[gr.Video(label="base model"), gr.Video(label="Tuned model")], # Specify video outputs
+    examples=[
+        [a, b]
     ],
-    outputs=[gr.Video(label="base model", interpretation="default"), gr.Video(label="Tuned model", interpretation="default")], # Specify video outputs
     title="Comparing base vs tuned detected video",
     description="using SSD mobile net V2 320x320. Model has been customed trained to detect Character of Luffy and Chopper"
 )
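Taken together, the commit drops the cv2 frame-yielding generator in favor of a function that simply returns file paths, which is what gr.Video components render. Below is a minimal, self-contained sketch of the pattern the new code lands on, assuming Gradio's Interface API; the mp4 paths are the demo files referenced in the diff.

```python
import os

import gradio as gr

# Demo clips referenced in the diff; adjust paths to your checkout.
a = os.path.join(os.path.dirname(__file__), "data/c_base_detected.mp4")
b = os.path.join(os.path.dirname(__file__), "data/c_tuned_detected.mp4")

def video_demo(video1, video2):
    # gr.Video outputs accept file paths, so passing the inputs
    # straight through is enough to play both clips side by side.
    return [video1, video2]

demo = gr.Interface(
    fn=video_demo,
    inputs=[gr.Video(label="base model Video"), gr.Video(label="tuned model Video")],
    outputs=[gr.Video(label="base model"), gr.Video(label="Tuned model")],
    examples=[[a, b]],
    title="Comparing base vs tuned detected video",
)

if __name__ == "__main__":
    demo.launch()
```

Returning paths instead of yielding frames also means the browser's native video player handles playback, rather than the app re-encoding frames on the server.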
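For completeness: the unchanged REPO_ID line points at a model repo on the Hugging Face Hub. The diff does not show how the weights are fetched; if the app follows the usual hf_hub_download pattern, it would look roughly like the sketch below, where the filename is a hypothetical placeholder, not taken from the repo.

```python
from huggingface_hub import hf_hub_download

REPO_ID = "apailang/mytfodmodel"

# "saved_model.pb" is a hypothetical filename -- the actual file
# inside the repo is not shown in this diff.
ckpt_path = hf_hub_download(repo_id=REPO_ID, filename="saved_model.pb")
print(ckpt_path)  # local cache path of the downloaded file
```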
|