visper / crop_videos.py
import json
import numpy as np
import os
from tqdm import tqdm
import subprocess
from glob import glob
import argparse
import time
from utils import crop_video, crop_face, write_video, crop_and_save_audio
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, as_completed
import sys
'''
Crop the untrimmed videos into multiple clips using the corresponding start and end times, bounding boxes, and face landmarks.

Usage:
python crop_videos.py --video_dir /path/to/25-fps-videos --save_path /path/to/save/the/clips --json /path/to/json/file

To save videos using ffmpeg, add "--use_ffmpeg True". This takes additional time but saves disk space.
To additionally save the audio as separate wav files, add "--save_audio True".
To merge the audio with the video and save a single mp4, add "--merge_audio True".
'''
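# The json file is expected to look roughly like the sketch below (assumed from how the
# metadata is consumed in this script; the exact per-clip keys holding the bounding boxes
# and landmarks depend on utils.crop_video and may differ in the released files):
#
# {
#   "<video_id>": [
#     {"start": 12.4, "end": 15.1, "label": "transcript of clip 0", ...},
#     {"start": 20.0, "end": 22.7, "label": "transcript of clip 1", ...}
#   ],
#   ...
# }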
def write_clip(metadata, vid_p, args):
    '''
    param metadata: list of clip dicts containing start, end, bounding boxes and landmarks
    param vid_p: path to the original untrimmed video at 25 fps
    param args: main args
    '''
    for k, clip in enumerate(metadata):
        # get the clip frames and the corresponding landmarks
        video, landmarks = crop_video(vid_p, clip)
        # get the cropped sequence around the mouth using the landmarks
        crop_seq = crop_face(video, landmarks)
        save_video_path = os.path.join(args.save_path, 'videos', vid_p.split('/')[-1][:-4], f'{str(k).zfill(5)}.mp4')
        save_audio_path = save_video_path.replace('.mp4', '.wav')
        # extract the audio part of the clip
        if args.save_audio or args.merge_audio:
            crop_and_save_audio(vid_p, save_audio_path, clip['start'], clip['end'])
        # write the clip to disk
        write_video(save_video_path, crop_seq, save_audio_path, merge_audio=args.merge_audio, use_ffmpeg=args.use_ffmpeg)
    return
def main(args):
    savepath = args.save_path
    json_path = args.json
    vid_dir = args.video_dir
    video_list = glob(os.path.join(vid_dir, '25_fps_videos*', '*.mp4'))
    print(f'Loading json file {json_path}')
    with open(json_path, 'r') as f:
        data = json.load(f)
    print(f'Total number of videos {len(video_list)}. Json length {len(data)}')
    video_ids = list(data.keys())
    futures = []
    writer_str = 'ffmpeg' if args.use_ffmpeg else 'cv2.VideoWriter'
    print(f'Using {writer_str} to save the cropped clips.')
    with tqdm(total=len(video_ids), file=sys.stdout) as progress:
        with ProcessPoolExecutor() as executor:
            for z in video_ids:
                # find the untrimmed video file that corresponds to this video id
                idx = [k for k, i in enumerate(video_list) if z in i]
                if not idx:
                    print(f'No video found for id {z}, skipping.')
                    progress.update()
                    continue
                metadata = data[z]
                vid_p = video_list[idx[0]]
                os.makedirs(os.path.join(savepath, 'videos', vid_p.split('/')[-1][:-4]), exist_ok=True)
                future = executor.submit(write_clip, metadata, vid_p, args)
                futures.append(future)
            for _ in as_completed(futures):
                progress.update()
    print('Cropping videos completed.')
    print('Getting the labels.')
    labels = {}
    for z in tqdm(video_ids):
        # resolve the video path for this id so the label entry points at the right clip folder
        idx = [k for k, i in enumerate(video_list) if z in i]
        if not idx:
            continue
        vid_p = video_list[idx[0]]
        metadata = data[z]
        for k, clip in enumerate(metadata):
            fi = os.path.join('videos', vid_p.split('/')[-1][:-4], f'{str(k).zfill(5)}.mp4')
            labels[fi] = clip['label']
    label_file = os.path.join(savepath, 'labels.json')
    with open(label_file, 'w', encoding='utf-8') as f:
        json.dump(labels, f)
def str2bool(v):
    # argparse's type=bool treats any non-empty string (including "False") as True,
    # so parse the documented "True"/"False" strings explicitly
    return str(v).lower() in ('true', '1', 'yes')


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='ViSpeR crop videos')
    parser.add_argument('--save_path', type=str, default='', help='Path for saving.')
    parser.add_argument('--json', type=str, default='', help='Path to the json file with the clip metadata.')
    parser.add_argument('--video_dir', type=str, default='', help='Path to the directory where the original videos are stored.')
    parser.add_argument('--save_audio', type=str2bool, default=False, help='Whether to save the audio as separate wav files.')
    parser.add_argument('--merge_audio', type=str2bool, default=False, help='Whether to merge the audio with the video when saving.')
    parser.add_argument('--use_ffmpeg', type=str2bool, default=False, help='Whether to use ffmpeg instead of cv2 for saving the video.')
    args = parser.parse_args()

    tic = time.time()
    main(args)
    print(f'Elapsed total time for processing: {time.time() - tic:.1f} seconds')
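# For reference, the expected output layout under --save_path (a sketch based on the paths
# built above; the actual folder and file names depend on the input videos and json):
#
#   <save_path>/
#       videos/
#           <video_name>/
#               00000.mp4   # cropped mouth-region clip
#               00000.wav   # only written when --save_audio is True
#               ...
#       labels.json         # maps "videos/<video_name>/<clip>.mp4" to its label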