apailang committed on
Commit
daf641a
β€’
1 Parent(s): 7262656

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -7
app.py CHANGED
@@ -138,13 +138,17 @@ def detect_video(video):
138
  # Release resources
139
  cap.release()
140
 
141
- def display_two_videos():
142
- # Replace these paths with the paths to your video files
143
- video_path_1 = "data/c_base_detected.mp4"
144
- video_path_2 = "data/c_tuned_detected.mp4"
145
 
 
 
 
146
  return open(video_path_1, "rb"), open(video_path_2, "rb")
147
 
 
148
  label_id_offset = 0
149
  REPO_ID = "apailang/mytfodmodel"
150
  detection_model = load_model(REPO_ID)
@@ -215,12 +219,16 @@ tuned_image = gr.Interface(
215
 
216
  video = gr.Interface(
217
  fn=display_two_videos,
 
 
 
 
218
  outputs=[gr.Video(label="base model"), gr.Video(label="Tuned model")], # Specify video outputs
219
- title="Two Videos Display",
220
- description="Displays two videos side by side."
221
  )
222
 
223
- demo = gr.TabbedInterface([base_image,tuned_image, video], ["Image (base model)","Image (tuned model)", "display Video"])
224
 
225
 
226
  if __name__ == "__main__":
 
138
  # Release resources
139
  cap.release()
140
 
141
+ # def display_two_videos():
142
+ # # Replace these paths with the paths to your video files
143
+ # video_path_1 = "data/c_base_detected.mp4"
144
+ # video_path_2 = "data/c_tuned_detected.mp4"
145
 
146
+ # return open(video_path_1, "rb"), open(video_path_2, "rb")
147
+
148
def display_two_videos(video_path_1, video_path_2):
    """Open the two pre-rendered detection videos for side-by-side display.

    Parameters are filesystem paths to the base-model and tuned-model
    output videos; returns a pair of binary file handles.

    NOTE(review): the handles are not closed here — presumably the Gradio
    framework consumes and releases them; confirm before reusing elsewhere.
    """
    paths = (video_path_1, video_path_2)
    base_handle, tuned_handle = (open(p, "rb") for p in paths)
    return base_handle, tuned_handle
150
 
151
+
152
  label_id_offset = 0
153
  REPO_ID = "apailang/mytfodmodel"
154
  detection_model = load_model(REPO_ID)
 
219
 
220
# Interface comparing the base-model and tuned-model detection videos.
# Bug fix: the original used `gr.inputs.Textbox(..., info=...)`. The
# `gr.inputs` namespace was deprecated in Gradio 3.x and removed in 4.x,
# and the `info=` kwarg exists only on the top-level `gr.Textbox`
# component — so the original raises AttributeError/TypeError on any
# Gradio version that supports `info`. Use `gr.Textbox` directly.
video = gr.Interface(
    fn=display_two_videos,
    inputs=[
        gr.Textbox(
            label="Path to detected base model Video",
            value="data/c_base_detected.mp4",
            info="video has been preprocessed",
        ),
        gr.Textbox(
            label="Path to tuned base model Video",
            value="data/c_tuned_detected.mp4",
            info="video has been preprocessed",
        ),
    ],
    outputs=[gr.Video(label="base model"), gr.Video(label="Tuned model")],  # Specify video outputs
    title="Comparing base vs tuned detected video",
    description="using SSD mobile net V2 320x320. Model has been customed trained to detect Character of Luffy and Chopper",
)
230
 
231
# Bundle the three Interfaces into one tabbed app; titles map 1:1 to tabs.
_tab_titles = ["Image (Base Model)", "Image (Tuned Model)", "Display Detected Video"]
demo = gr.TabbedInterface([base_image, tuned_image, video], _tab_titles)
232
 
233
 
234
  if __name__ == "__main__":