Commit bd4225d
ysharma (HF staff) committed
1 Parent(s): c353466
Files changed (1):
  1. app.py +67 -27
app.py CHANGED
@@ -14,7 +14,7 @@ import cv2
 API_URL = "https://api-inference.huggingface.co/models/facebook/wav2vec2-base-960h"
 HF_TOKEN = os.environ["HF_TOKEN"]
 headers = {"Authorization": f"Bearer {HF_TOKEN}"}
-
+video_list = []
 
 def generate_transcripts(in_video): #generate_gifs(in_video, gif_transcript):
     print("********* Inside generate_transcripts() **********")
@@ -73,9 +73,10 @@ def generate_gifs(in_video, gif_transcript, words, words_timestamp, vid_speed):
     #generated .gif image
     #gif_out, vid_out = gen_moviepy_gif(in_video, start_seconds, end_seconds)
     print(f"vid_speed from SLider is : {vid_speed}")
-    slomo_vid = gen_moviepy_gif(in_video, start_seconds, end_seconds, float(vid_speed))
 
-    return slomo_vid
+    speededit_vids_list, concat_vid = gen_moviepy_gif(in_video, start_seconds, end_seconds, float(vid_speed), video_list)
+
+    return concat_vid #speededit_vids_list
 
 
 #calling the hosted model
@@ -160,30 +161,43 @@ def get_gif_timestamps(giflist_indxs, words_timestamp):
 
 
 #extracting the video and building and serving a .gif image
-def gen_moviepy_gif(in_video, start_seconds, end_seconds, vid_speed):
+def gen_moviepy_gif(in_video, start_seconds, end_seconds, vid_speed, vid_list):
     print("******** inside moviepy_gif () ***************")
     #sample
-    video_path = "./ShiaLaBeouf.mp4"
+    #video_path = "./ShiaLaBeouf.mp4"
     video = mp.VideoFileClip(in_video)
     #video = mp.VideoFileClip(video_path)
 
+    leftover_clip_start = video.subclip(0, int(start_seconds) + float("{:.2f}".format(1-start_seconds%1))).without_audio() #float("{:.2f}".format(1-a%1))
     final_clip = video.subclip(start_seconds, end_seconds)
-
+    leftover_clip_end = video.subclip(int(end_seconds) + float("{:.2f}".format(1-end_seconds%1)) ).without_audio() #end=None
+
     #slowmo
     print(f"vid_speed from calling function is : {vid_speed}")
-    slomo_clip = final_clip.fx(mp.vfx.speedx, vid_speed)
-    slomo_clip.write_videofile("slomo.mp4")
+    speededit_clip = final_clip.fx(mp.vfx.speedx, vid_speed)
+    speededit_clip = speededit_clip.without_audio()
+
+    #concat
+    concatenated_clip = mp.concatenate_videoclips([leftover_clip_start, speededit_clip, leftover_clip_end])
+    concatenated_clip.write_videofile("concat.mp4")
+
+    filename = f"speededit{len(vid_list)}"
+    speededit_clip.write_videofile(filename) #("speededit0.mp4")
+    vid_list.append(filename) #["speededit0.mp4"]
 
-    #writing to RAM
-    final_clip.write_gif("gifimage.gif") #, program='ffmpeg', tempfiles=True, fps=15, fuzz=3)
-    final_clip.write_videofile("gifimage.mp4")
+    if len(vid_list) == 1:
+        speededit_clip.write_videofile("slomo.mp4")
+
+    #writing to RAM - gif and smaller clip
+    #final_clip.write_gif("gifimage.gif") #, program='ffmpeg', tempfiles=True, fps=15, fuzz=3)
+    #final_clip.write_videofile("gifimage.mp4")
     final_clip.close()
     #reading in a variable
-    gif_img = mp.VideoFileClip("gifimage.gif")
+    #gif_img = mp.VideoFileClip("gifimage.gif")
     #gif_vid = mp.VideoFileClip("gifimage.mp4")
     #im = Image.open("gifimage.gif")
     #vid_cap = cv2.VideoCapture('gifimage.mp4')
-    return "slomo.mp4" #"gifimage.gif", "gifimage.mp4" #im, gif_img, gif_vid, vid_cap, #"gifimage.mp4"
+    return vid_list, "concat.mp4" #"slomo.mp4", "timelapse.mp4", #"gifimage.gif", "gifimage.mp4" #im, gif_img, gif_vid, vid_cap, #"gifimage.mp4"
 
 
 sample_video = ['./ShiaLaBeouf.mp4']
@@ -218,27 +232,53 @@ with demo:
     #to generate and display transcriptions for input video
     text_transcript = gr.Textbox(label="Transcripts", lines = 10, interactive = True )
 
-    #Just to move dgata between function hence keeping visible false
+    #Just to move data between function hence keeping visible false
    text_words = gr.Textbox(visible=False)
    text_wordstimestamps = gr.Textbox(visible=False)
+
+    with gr.Row():
+        button_transcript = gr.Button("Generate transcripts")
+
+    #For SlowMo
+    with gr.Row():
+        #to copy paste required gif transcript / or to populate by itself on pressing the button
+        text_slomo_transcript = gr.Textbox(label="Transcripts", placeholder="Copy paste transcripts here to create SlowMo Video" , lines = 5, interactive = True )
 
-    #to copy paste required gif transcript / or to populate by itslef on pressing the button
-    text_gif_transcript = gr.Textbox(label="Transcripts", placeholder="Copy paste transcripts here to create GIF image" , lines = 3, interactive = True )
-
-    def load_gif_text(text):
-        print("****** inside load_gif_text() ******")
-        print("text for gif is : ", text)
+    def load_slomo_text(text):
+        print("****** inside load_slomo_text() ******")
+        print("text for slomo video is : ", text)
        return text
 
-    text_transcript.change(load_gif_text, text_transcript, text_gif_transcript )
+    text_transcript.change(load_slomo_text, text_transcript, text_slomo_transcript )
 
    #out_gif = gr.Image(label="Generated GIF image")
-    out_slomo_vid = gr.Video(label="Generated GIF image")
+    out_slomo_vid = gr.Video(label="Generated SlowMo Video")
 
    with gr.Row():
-        button_transcript = gr.Button("Generate transcripts")
-        button_gifs = gr.Button("Create SloMo")
+        #button_transcript = gr.Button("Generate transcripts")
+        vid_speed_slomo = gr.Slider(0.1,0.9, step=0.1)
+        button_slomo = gr.Button("Create SloMo")
+
+    #For TimeLapse
+    with gr.Row():
+        #to copy paste required gif transcript / or to populate by itself on pressing the button
+        text_timelapse_transcript = gr.Textbox(label="Transcripts", placeholder="Copy paste transcripts here to create GIF image" , lines = 5, interactive = True )
+
+    def load_timelapse_text(text):
+        print("****** inside load_timelapse_text() ******")
+        print("text for timelapse video is : ", text)
+        return text
+
+    text_transcript.change(load_timelapse_text, text_transcript, text_timelapse_transcript )
+
+    #out_gif = gr.Image(label="Generated GIF image")
+    out_timelapse_vid = gr.Video(label="Generated TimeLapse Video")
 
+    with gr.Row():
+        #button_transcript = gr.Button("Generate transcripts")
+        vid_speed_timelapse = gr.Slider(0.1,0.9, step=0.1)
+        button_timelapse = gr.Button("Create TimeLapse")
+
    with gr.Row():
        #to render video example on mouse hover/click
        examples.render()
@@ -250,11 +290,11 @@ with demo:
 
    examples.click(load_examples, examples, input_video)
 
-    vid_speed = gr.Slider(0.1,0.9, step=0.1)
+    #vid_speed = gr.Slider(0.1,0.9, step=0.1)
 
 
    button_transcript.click(generate_transcripts, input_video, [text_transcript, text_words, text_wordstimestamps ])
-    button_gifs.click(generate_gifs, [input_video, text_gif_transcript, text_words, text_wordstimestamps, vid_speed], out_slomo_vid )
-
+    button_slomo.click(generate_gifs, [input_video, text_slomo_transcript, text_words, text_wordstimestamps, vid_speed_slomo], out_slomo_vid )
+    button_timelapse.click(generate_gifs, [out_slomo_vid, text_timelapse_transcript, text_words, text_wordstimestamps, vid_speed_timelapse], out_timelapse_vid )
 
 demo.launch(debug=True)
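For readers skimming the diff, here is a minimal standalone sketch of the speed-edit-and-splice idea the reworked gen_moviepy_gif applies: cut out the selected segment, re-time it with MoviePy's speedx effect, then concatenate it back between the untouched head and tail. It assumes MoviePy 1.x (moviepy.editor); the file name, timestamps, speed factor, and the helper name speed_edit_segment are illustrative, not taken from the Space.

import moviepy.editor as mp

def speed_edit_segment(in_path, start_s, end_s, factor, out_path="concat.mp4"):
    # Load the source clip (in_path is a placeholder, e.g. an uploaded video file)
    video = mp.VideoFileClip(in_path)

    head = video.subclip(0, start_s).without_audio()      # untouched lead-in
    segment = video.subclip(start_s, end_s)               # segment to re-time
    tail = video.subclip(end_s).without_audio()           # untouched remainder

    # factor < 1 slows the segment down (SloMo); factor > 1 would speed it up
    retimed = segment.fx(mp.vfx.speedx, factor).without_audio()

    # Splice the re-timed segment back between head and tail and render it
    mp.concatenate_videoclips([head, retimed, tail]).write_videofile(out_path)
    video.close()
    return out_path

# Example call with made-up values: slow seconds 4-9 of input.mp4 to half speed
# speed_edit_segment("input.mp4", 4.0, 9.0, 0.5)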
 
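On the UI side, the commit repeats one pattern twice: a speed slider plus a button per effect, both wired to the same generate_gifs handler, with the TimeLapse button reading the SlowMo output video as its input. A minimal sketch of that wiring, assuming the Gradio Blocks API, with a hypothetical process_video stand-in for generate_gifs and made-up slider labels:

import gradio as gr

def process_video(in_video, transcript_text, speed):
    # Hypothetical stand-in for generate_gifs(); in the Space this path goes
    # through gen_moviepy_gif() and returns the rendered "concat.mp4".
    return in_video

with gr.Blocks() as demo:
    input_video = gr.Video(label="Input video")
    text_transcript = gr.Textbox(label="Transcripts", lines=5)

    # One slider + button pair per effect; both reuse the same handler
    with gr.Row():
        vid_speed_slomo = gr.Slider(0.1, 0.9, step=0.1, label="SloMo speed")
        button_slomo = gr.Button("Create SloMo")
    out_slomo_vid = gr.Video(label="Generated SlowMo Video")

    with gr.Row():
        vid_speed_timelapse = gr.Slider(0.1, 0.9, step=0.1, label="TimeLapse speed")
        button_timelapse = gr.Button("Create TimeLapse")
    out_timelapse_vid = gr.Video(label="Generated TimeLapse Video")

    button_slomo.click(process_video,
                       [input_video, text_transcript, vid_speed_slomo],
                       out_slomo_vid)
    # As in the commit, the TimeLapse pass takes the SlowMo output as its input video
    button_timelapse.click(process_video,
                           [out_slomo_vid, text_transcript, vid_speed_timelapse],
                           out_timelapse_vid)

demo.launch()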