sxela committed on
Commit: 5b4dfa2
Parent: defabb6
Files changed (1)
  1. app.py +6 -5
app.py CHANGED
@@ -97,7 +97,7 @@ norm = transforms.Normalize(means,stds)
 norms = torch.tensor(means)[None,:,None,None].cuda()
 stds = torch.tensor(stds)[None,:,None,None].cuda()
 
-def inference_step(vid, start_sec, duration, out_fps):
+def inference_step(vid, start_sec, duration, out_fps, interpolate):
     clip = vid.get_clip(start_sec, start_sec + duration)
     video_arr = torch.from_numpy(clip['video']).permute(3, 0, 1, 2)
     audio_arr = np.expand_dims(clip['audio'], 0)
@@ -114,12 +114,12 @@ def inference_step(vid, start_sec, duration, out_fps):
     output = (output * stds + norms).clip(0, 1) * 255.
 
     output_video = output.permute(0, 2, 3, 1).float().detach().cpu().numpy()
-    output_video[1:] = output_video[1:]*(0.5) + output_video[:-1]*(0.5)
+    if interpolate == 'Yes': output_video[1:] = output_video[1:]*(0.5) + output_video[:-1]*(0.5)
 
     return output_video, audio_arr, out_fps, audio_fps
 
 
-def predict_fn(filepath, start_sec, duration, out_fps):
+def predict_fn(filepath, start_sec, duration, out_fps, interpolate):
     # out_fps=12
     vid = EncodedVideo.from_path(filepath)
     for i in range(duration):
@@ -127,7 +127,8 @@ def predict_fn(filepath, start_sec, duration, out_fps):
             vid = vid,
             start_sec = i + start_sec,
             duration = 1,
-            out_fps = out_fps
+            out_fps = out_fps,
+            interpolate = interpolate
         )
         gc.collect()
         if i == 0:
@@ -159,7 +160,7 @@ article = "<div style='text-align: center;'>ArcaneGan by <a href='https://twitte
 
 gr.Interface(
     predict_fn,
-    inputs=[gr.inputs.Video(), gr.inputs.Slider(minimum=0, maximum=300, step=1, default=0), gr.inputs.Slider(minimum=1, maximum=10, step=1, default=2), gr.inputs.Slider(minimum=12, maximum=30, step=6, default=24)],
+    inputs=[gr.inputs.Video(), gr.inputs.Slider(minimum=0, maximum=300, step=1, default=0), gr.inputs.Slider(minimum=1, maximum=10, step=1, default=2), gr.inputs.Slider(minimum=12, maximum=30, step=6, default=24), gr.inputs.Radio(choices=['Yes','No'], type="value", default='Yes', label='Remove flickering')],
     outputs=gr.outputs.Video(),
     title='ArcaneGAN On Videos',
     description = description,
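
For context on what the new flag does: when the 'Remove flickering' radio is set to 'Yes', each output frame is averaged 50/50 with the previous frame before the result is written out, which damps frame-to-frame flicker from the per-frame stylization. A minimal standalone sketch of that blending step, assuming a (T, H, W, C) frame array; the helper name below is illustrative and not part of app.py:

import numpy as np

def blend_consecutive_frames(frames):
    # Hypothetical helper, not in the repo: frames is a (T, H, W, C) array
    # of decoded video frames, like the output_video array in inference_step.
    out = frames.astype(np.float32)
    # Same operation as the diff's interpolate branch: every frame after the
    # first becomes a 50/50 mix with the frame that precedes it (the RHS is
    # evaluated before assignment, so original frame values are used).
    out[1:] = out[1:] * 0.5 + out[:-1] * 0.5
    return out

On the Gradio side, the radio's string value ('Yes' or 'No') is passed through predict_fn into inference_step unchanged, which is why the diff checks for the string 'Yes' rather than a boolean flag.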