apailang committed
Commit ff9ef5b • 1 Parent(s): e2e1e4a

Update app.py

Files changed (1)
  1. app.py +31 -15
app.py CHANGED
@@ -138,6 +138,12 @@ def detect_video(video):
     # Release resources
     cap.release()
 
+def display_two_videos():
+    # Replace these paths with the paths to your video files
+    video_path_1 = "data/c_base_detected.mp4"
+    video_path_2 = "data/c_tuned_detected.mp4"
+
+    return open(video_path_1, "rb"), open(video_path_2, "rb")
 
 label_id_offset = 0
 REPO_ID = "apailang/mytfodmodel"
@@ -187,25 +193,35 @@ tuned_image = gr.Interface(
 
 
 
-a = os.path.join(os.path.dirname(__file__), "data/a.mp4")  # Video
-b = os.path.join(os.path.dirname(__file__), "data/b.mp4")  # Video
-c = os.path.join(os.path.dirname(__file__), "data/c.mp4")  # Video
-
-video_out_file = os.path.join(samples_folder, 'detected' + '.mp4')
-
-stt_demo = gr.Interface(
-    fn=detect_video,
-    inputs=gr.Video(),
-    outputs=gr.Video(type="mp4", label="Detected Video"),
-    examples=[
-        [a],
-        [b],
-        [c],
-    ],
-    cache_examples=False
+# a = os.path.join(os.path.dirname(__file__), "data/a.mp4")  # Video
+# b = os.path.join(os.path.dirname(__file__), "data/b.mp4")  # Video
+# c = os.path.join(os.path.dirname(__file__), "data/c.mp4")  # Video
+
+# video_out_file = os.path.join(samples_folder, 'detected' + '.mp4')
+
+# stt_demo = gr.Interface(
+#     fn=display_two_videos,
+#     inputs=gr.Video(),
+#     outputs=gr.Video(type="mp4", label="Detected Video"),
+#     examples=[
+#         [a],
+#         [b],
+#         [c],
+#     ],
+#     cache_examples=False
+# )
+
+video = gr.Interface(
+    fn=display_two_videos,
+    outputs=[gr.outputs.Video(label="base model"), gr.outputs.Video(lablel="Tuned model")],  # Specify video outputs
+    title="Two Videos Display",
+    description="Displays two videos side by side."
 )
 
-demo = gr.TabbedInterface([base_image, tuned_image, stt_demo], ["Image (base model)", "Image (tuned model)", "Video"])
+demo = gr.TabbedInterface([base_image, tuned_image, video], ["Image (base model)", "Image (tuned model)", "display Video"])
+
 
 if __name__ == "__main__":
     demo.launch()
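
For context, the new comparison tab amounts to the sketch below. This is an illustrative rewrite rather than the committed code: it assumes a recent Gradio release where the video component is gr.Video (gr.outputs.Video is deprecated) and where an output function may return plain file paths instead of open file handles; the data/*.mp4 paths and the tab wiring follow the commit.

import gradio as gr

def display_two_videos():
    # Return the two pre-rendered detection clips as file paths;
    # Gradio's Video output component accepts plain paths.
    return "data/c_base_detected.mp4", "data/c_tuned_detected.mp4"

video = gr.Interface(
    fn=display_two_videos,
    inputs=None,  # the function takes no inputs
    outputs=[
        gr.Video(label="Base model"),
        gr.Video(label="Tuned model"),
    ],
    title="Two Videos Display",
    description="Displays two videos side by side.",
)

if __name__ == "__main__":
    video.launch()

In the committed app this interface is registered as the third tab of gr.TabbedInterface, alongside the base-model and tuned-model image demos.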