wav2lip use /tmp as temp output
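This commit points Wav2Lip's two intermediate files, the extracted audio track (temp.wav) and the silent lip-synced video (result.avi), at /tmp, which is writable inside the Space's container; the pre-change paths are truncated in the extracted diff view below and are left as-is. The same idea in a more portable form, as a minimal sketch that is not part of the commit (all names below are illustrative):

    import os
    import tempfile

    # Resolve the platform's temp directory rather than hard-coding '/tmp'.
    # On the Linux-based Spaces runtime this resolves to /tmp, and it also
    # honours the TMPDIR environment variable.
    TEMP_DIR = tempfile.gettempdir()

    temp_audio = os.path.join(TEMP_DIR, 'temp.wav')    # extracted audio track
    temp_video = os.path.join(TEMP_DIR, 'result.avi')  # silent output video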
wav2lip/evaluation/gen_videos_from_filelist.py  CHANGED

@@ -164,9 +164,9 @@ def main():
     audio_src = os.path.join(data_root, audio_src) + '.mp4'
     video = os.path.join(data_root, video) + '.mp4'
 
-    command = 'ffmpeg -loglevel panic -y -i {} -strict -2 {}'.format(audio_src, '
+    command = 'ffmpeg -loglevel panic -y -i {} -strict -2 {}'.format(audio_src, '/tmp/temp.wav')
     subprocess.call(command, shell=True)
-    temp_audio = '
+    temp_audio = '/tmp/temp.wav'
 
     wav = audio.load_wav(temp_audio, 16000)
     mel = audio.melspectrogram(wav)

@@ -208,7 +208,7 @@ def main():
     for i, (img_batch, mel_batch, frames, coords) in enumerate(gen):
         if i == 0:
             frame_h, frame_w = full_frames[0].shape[:-1]
-            out = cv2.VideoWriter('
+            out = cv2.VideoWriter('/tmp/result.avi',
                 cv2.VideoWriter_fourcc(*'DIVX'), fps, (frame_w, frame_h))
 
         img_batch = torch.FloatTensor(np.transpose(img_batch, (0, 3, 1, 2))).to(device)

@@ -231,7 +231,7 @@ def main():
     vid = os.path.join(args.results_dir, '{}.mp4'.format(idx))
 
     command = 'ffmpeg -loglevel panic -y -i {} -i {} -strict -2 -q:v 1 {}'.format(temp_audio,
-        '
+        '/tmp/result.avi', vid)
     subprocess.call(command, shell=True)
 
 if __name__ == '__main__':
wav2lip/evaluation/real_videos_inference.py  CHANGED

@@ -214,9 +214,9 @@ def main():
     audio_src = os.path.join(args.data_root, audio_src)
     video = os.path.join(args.data_root, video)
 
-    command = 'ffmpeg -loglevel panic -y -i {} -strict -2 {}'.format(audio_src, '
+    command = 'ffmpeg -loglevel panic -y -i {} -strict -2 {}'.format(audio_src, '/tmp/temp.wav')
     subprocess.call(command, shell=True)
-    temp_audio = '
+    temp_audio = '/tmp/temp.wav'
 
     wav = audio.load_wav(temp_audio, 16000)
     mel = audio.melspectrogram(wav)

@@ -275,7 +275,7 @@ def main():
         if i == 0:
             frame_h, frame_w = full_frames[0].shape[:-1]
 
-            out = cv2.VideoWriter('
+            out = cv2.VideoWriter('/tmp/result.avi',
                 cv2.VideoWriter_fourcc(*'DIVX'), fps, (frame_w, frame_h))
 
         img_batch = torch.FloatTensor(np.transpose(img_batch, (0, 3, 1, 2))).to(device)

@@ -296,8 +296,8 @@ def main():
     out.release()
 
     vid = os.path.join(args.results_dir, '{}.mp4'.format(idx))
-    command = 'ffmpeg -loglevel panic -y -i {} -i {} -strict -2 -q:v 1 {}'.format('
-        '
+    command = 'ffmpeg -loglevel panic -y -i {} -i {} -strict -2 -q:v 1 {}'.format('/tmp/temp.wav',
+        '/tmp/result.avi', vid)
     subprocess.call(command, shell=True)
 
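Both evaluation scripts (and inference.py below) share the same two-stage output pattern, which is why the two paths change together in each file: cv2.VideoWriter writes a silent /tmp/result.avi, since OpenCV's writer encodes video only, and a second ffmpeg call then muxes the audio back in (the extracted /tmp/temp.wav here, args.audio in inference.py) to produce the final MP4.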
wav2lip/inference.py  CHANGED

@@ -253,7 +253,7 @@ def main():
     print ("Model loaded")
 
     frame_h, frame_w = full_frames[0].shape[:-1]
-    out = cv2.VideoWriter('
+    out = cv2.VideoWriter('/tmp/result.avi',
         cv2.VideoWriter_fourcc(*'DIVX'), fps, (frame_w, frame_h))
 
     img_batch = torch.FloatTensor(np.transpose(img_batch, (0, 3, 1, 2))).to(device)

@@ -273,7 +273,7 @@ def main():
 
     out.release()
 
-    command = 'ffmpeg -y -i {} -i {} -strict -2 -q:v 1 {}'.format(args.audio, '
+    command = 'ffmpeg -y -i {} -i {} -strict -2 -q:v 1 {}'.format(args.audio, '/tmp/result.avi', args.outfile)
     subprocess.call(command, shell=platform.system() != 'Windows')
 
 if __name__ == '__main__':
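With the new paths, the final mux step in inference.py expands to a command of roughly this shape (the audio and output paths are placeholders standing in for args.audio and args.outfile):

    ffmpeg -y -i input_audio.wav -i /tmp/result.avi -strict -2 -q:v 1 results/output.mp4

Note that inference.py passes shell=platform.system() != 'Windows' to subprocess.call, so the command string goes through the shell only on non-Windows hosts, whereas the evaluation scripts always use shell=True.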