yasserTII committed (verified)
Commit 1ce5cbe · Parent(s): 3591539

Delete utils.py

Files changed (1)
  1. utils.py +0 -262
utils.py DELETED
@@ -1,262 +0,0 @@
- import cv2
- import numpy as np
- from scipy import signal
- import os
- import subprocess
-
- CROP_SCALE = 0.4                      # relative padding around the detected face box
- WINDOW_MARGIN = 12                    # temporal window (frames) for landmark smoothing
- START_IDX, STOP_IDX = 3, 5            # mouth-corner indices in the 5-point landmarks
- STABLE_POINTS = (36, 45, 33, 48, 54)  # eye corners, nose tip, mouth corners (68-point scheme)
- CROP_HEIGHT, CROP_WIDTH = 96, 96      # size of the mouth ROI
-
- # PATH='/home/users/u100438/home200093/dataset_release/'
- # Mean-face landmarks used as the alignment reference.
- REFERENCE = np.load(os.path.join(os.path.dirname(__file__), '20words_mean_face.npy'))
-
-
- def crop_and_save_audio(mp4_path: str, saving_path: str, start_audio: float, end_audio: float) -> None:
-     """
-     Crops the original audio between the given start and end times and saves
-     it as a WAV file with a single channel and a 16 kHz sampling rate.
-
-     :param mp4_path: str, path to the original video.
-     :param saving_path: str, path where the audio will be saved. SHOULD END WITH .wav
-     :param start_audio: float, start time of the clip in seconds.
-     :param end_audio: float, end time of the clip in seconds.
-     :return: None.
-     """
-
-     # Extract and write the audio track.
-     command = f"ffmpeg -loglevel error -y -i {mp4_path} -ss {start_audio} -to {end_audio} -vn -acodec pcm_s16le -ar 16000 -ac 1 {saving_path}"
-     subprocess.call(command, shell=True)
-
-
- def crop_video(vid_path: str, clip_data: dict):
-     '''
-     Reads the frames of the video at vid_path between clip_data['start'] and
-     clip_data['end'] (in seconds), crops the faces using the bounding boxes in
-     clip_data['bboxs'], and returns the sequence of face crops together with
-     clip_data['landmarks'], both aligned to 224x224 resolution.
-     '''
-     cap = cv2.VideoCapture(vid_path)
-
-     frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-     frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-     num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
-     start_frame, end_frame = round(clip_data['start'] * 25), round(clip_data['end'] * 25)  # videos are assumed to be 25 fps
-     clip_frames = end_frame - start_frame
-     assert end_frame <= num_frames, f'End frame ({end_frame}) exceeds total number of frames ({num_frames})'
-
-     # Bounding boxes and landmarks are stored normalised; rescale to pixels.
-     landmarks_n, bboxs_n = np.array(clip_data['landmarks']), np.array(clip_data['bboxs'])
-     bboxs = np.multiply(bboxs_n, [frame_width, frame_height, frame_width, frame_height])
-     landmarks = np.multiply(landmarks_n, [frame_width, frame_height])
-     assert len(landmarks) == clip_frames, f'Landmarks length ({len(landmarks)}) does not match the number of frames in the clip ({clip_frames})'
-
-     dets = {'x': [], 'y': [], 's': []}
-     for det in bboxs:
-         dets['s'].append(max((det[3] - det[1]), (det[2] - det[0])) / 2)  # half the detection box size
-         dets['y'].append((det[1] + det[3]) / 2)  # crop center y
-         dets['x'].append((det[0] + det[2]) / 2)  # crop center x
-
-     # Smooth detections over time to reduce jitter.
-     dets['s'] = signal.medfilt(dets['s'], kernel_size=13)
-     dets['x'] = signal.medfilt(dets['x'], kernel_size=13)
-     dets['y'] = signal.medfilt(dets['y'], kernel_size=13)
-
-     image_seq = []
-     current_frame = start_frame
-     cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
-
-     while current_frame < end_frame:
-         ret, frame = cap.read()
-         count = current_frame - start_frame
-         current_frame += 1
-
-         if not ret:
-             break
-
-         bs = dets['s'][count]  # detection box size
-         bsi = int(bs * (1 + 2 * CROP_SCALE))  # pad frames by this amount
-
-         image = frame
-         lands = landmarks[count]
-
-         frame_ = np.pad(image, ((bsi, bsi), (bsi, bsi), (0, 0)), 'constant', constant_values=(110, 110))
-         my = dets['y'][count] + bsi  # bbox center y
-         mx = dets['x'][count] + bsi  # bbox center x
-
-         face = frame_[int(my - bs):int(my + bs * (1 + 2 * CROP_SCALE)), int(mx - bs * (1 + CROP_SCALE)):int(mx + bs * (1 + CROP_SCALE))]
-
-         # Translate and scale the landmarks in place so the returned array is
-         # aligned to the 224x224 crop.
-         lands[:, 0] -= int(mx - bs * (1 + CROP_SCALE) - bsi)
-         lands[:, 1] -= int(my - bs - bsi)
-         lands[:, 0] *= (224 / face.shape[1])
-         lands[:, 1] *= (224 / face.shape[0])
-
-         image_seq.append(cv2.resize(face, (224, 224)))
-
-     cap.release()
-     image_seq = np.array(image_seq)
-
-     return image_seq, landmarks
-
-
- def landmarks_interpolate(landmarks):
-     """landmarks_interpolate.
-
-     :param landmarks: List of per-frame landmarks; entries are None for frames
-         where detection failed (modified in place).
-     """
-     valid_frames_idx = [idx for idx, lm in enumerate(landmarks) if lm is not None]
-     if not valid_frames_idx:
-         return None
-     # Fill each gap between two valid frames by linear interpolation.
-     # NOTE: linear_interpolate is not defined in this file; a sketch of the
-     # assumed behaviour is given after the diff.
-     for idx in range(1, len(valid_frames_idx)):
-         if valid_frames_idx[idx] - valid_frames_idx[idx - 1] == 1:
-             continue
-         landmarks = linear_interpolate(landmarks, valid_frames_idx[idx - 1], valid_frames_idx[idx])
-     valid_frames_idx = [idx for idx, lm in enumerate(landmarks) if lm is not None]
-     # -- Corner case: pad frames at the beginning or the end where detection failed.
-     if valid_frames_idx:
-         landmarks[:valid_frames_idx[0]] = [landmarks[valid_frames_idx[0]]] * valid_frames_idx[0]
-         landmarks[valid_frames_idx[-1]:] = [landmarks[valid_frames_idx[-1]]] * (len(landmarks) - valid_frames_idx[-1])
-     valid_frames_idx = [idx for idx, lm in enumerate(landmarks) if lm is not None]
-     assert len(valid_frames_idx) == len(landmarks), "not every frame has landmarks"
-     return landmarks
-
-
- def crop_patch(image_seq, landmarks):
-     """crop_patch.
-
-     :param image_seq: numpy.ndarray, the sequence of aligned face frames.
-     :param landmarks: List, the interpolated landmarks.
-     """
-     sequence = []
-     for frame_idx, frame in enumerate(image_seq):
-         # Smooth the landmarks over a temporal window centred on this frame,
-         # keeping the frame's own mean landmark position.
-         window_margin = min(WINDOW_MARGIN // 2, frame_idx, len(landmarks) - 1 - frame_idx)
-         smoothed_landmarks = np.mean([landmarks[x] for x in range(frame_idx - window_margin, frame_idx + window_margin + 1)], axis=0)
-         smoothed_landmarks += landmarks[frame_idx].mean(axis=0) - smoothed_landmarks.mean(axis=0)
-         transformed_frame, transformed_landmarks = affine_transform(frame, smoothed_landmarks, REFERENCE)
-         sequence.append(cut_patch(transformed_frame, transformed_landmarks[START_IDX:STOP_IDX], CROP_HEIGHT // 2, CROP_WIDTH // 2))
-
-     return np.array(sequence)
-
-
- def affine_transform(frame, landmarks, reference,
-                      target_size=(256, 256),
-                      reference_size=(256, 256),
-                      stable_points=STABLE_POINTS,
-                      interpolation=cv2.INTER_LINEAR,
-                      border_mode=cv2.BORDER_CONSTANT,
-                      border_value=0):
-     """affine_transform.
-
-     :param frame: numpy.array, the input frame.
-     :param landmarks: List, the tracked (5-point) landmarks.
-     :param reference: numpy.array, the neutral reference landmarks.
-     :param target_size: tuple, size of the output image.
-     :param reference_size: tuple, size of the neutral reference frame.
-     :param stable_points: tuple, landmark indices for the stable points.
-     :param interpolation: interpolation method to be used.
-     :param border_mode: pixel extrapolation method.
-     :param border_value: value used in case of a constant border. By default, it is 0.
-     """
-     # The clip landmarks use the 5-point scheme, while the reference is
-     # 68-point; stable_points picks the matching reference points.
-     lands = [landmarks[x] for x in range(5)]
-
-     stable_reference = np.vstack([reference[x] for x in stable_points])
-     stable_reference[:, 0] -= (reference_size[0] - target_size[0]) / 2.0
-     stable_reference[:, 1] -= (reference_size[1] - target_size[1]) / 2.0
-
-     # Estimate a similarity transform from the stable points, then warp the
-     # face patch and the landmarks with it.
-     transform = cv2.estimateAffinePartial2D(np.vstack(lands), stable_reference, method=cv2.LMEDS)[0]
-     transformed_frame = cv2.warpAffine(
-         frame,
-         transform,
-         dsize=(target_size[0], target_size[1]),
-         flags=interpolation,
-         borderMode=border_mode,
-         borderValue=border_value,
-     )
-     transformed_landmarks = np.matmul(landmarks, transform[:, :2].transpose()) + transform[:, 2].transpose()
-
-     return transformed_frame, transformed_landmarks
-
-
- def cut_patch(img, landmarks, height, width, threshold=5):
-     """cut_patch.
-
-     :param img: ndarray, an input image.
-     :param landmarks: ndarray, the corresponding landmarks for the input image.
-     :param height: int, the distance from the centre to the side of the bounding box.
-     :param width: int, the distance from the centre to the side of the bounding box.
-     :param threshold: int, the maximum tolerated shift, in pixels, when clamping
-         the centre of the bounding box back inside the image.
-     """
-     center_x, center_y = np.mean(landmarks, axis=0)
-
-     # Clamp the crop centre so the patch stays inside the image; raise if the
-     # required shift exceeds the threshold.
-     if center_y - height < -threshold:
-         raise Exception('too much bias in height')
-     if center_y - height < 0:
-         center_y = height
-     if center_x - width < -threshold:
-         raise Exception('too much bias in width')
-     if center_x - width < 0:
-         center_x = width
-
-     if center_y + height > img.shape[0] + threshold:
-         raise Exception('too much bias in height')
-     if center_y + height > img.shape[0]:
-         center_y = img.shape[0] - height
-     if center_x + width > img.shape[1] + threshold:
-         raise Exception('too much bias in width')
-     if center_x + width > img.shape[1]:
-         center_x = img.shape[1] - width
-
-     cropped_img = np.copy(img[int(round(center_y) - round(height)): int(round(center_y) + round(height)),
-                               int(round(center_x) - round(width)): int(round(center_x) + round(width))])
-     return cropped_img
-
-
- def crop_face(image_seq, landmarks):
-     # Interpolate the landmarks so that every frame has a value.
-     preprocessed_landmarks = landmarks_interpolate(list(landmarks))
-     assert preprocessed_landmarks is not None, 'no frame in the clip has valid landmarks'
-     # Crop the face to obtain a sequence of 96x96 mouth ROIs.
-     crop_seq = crop_patch(image_seq, preprocessed_landmarks)
-
-     return crop_seq
-
- def merge_audio_video(tmp_path, audio_path, save_video_path):
-     # Merges the audio and video tracks of the clip, then removes the
-     # temporary video and the associated .wav file.
-     command = f"ffmpeg -loglevel error -y -i {tmp_path} -i {audio_path} -c:v libx264 -c:a aac -ar 16000 -ac 1 {save_video_path}"
-     subprocess.call(command, shell=True)
-     os.remove(tmp_path)
-     os.remove(audio_path)
-
- def convert_ffmpeg(vid_path):
-     # Re-encodes the MPEG-4 (mp4v) video as H.264 with ffmpeg. Saves disk
-     # space, but takes additional time.
-     tmp_path = vid_path[:-4] + 'temp2.mp4'
-     subprocess.call(f"cp {vid_path} {tmp_path}", shell=True)
-     subprocess.call(f"ffmpeg -loglevel error -i {tmp_path} -r 25 -vcodec libx264 -q:v 1 -y {vid_path}", shell=True)
-     os.remove(tmp_path)
-
-
- def write_video(save_video_path, crop_seq, audio_path=None, merge_audio=False, use_ffmpeg=False):
-     # Writes the clip video to disk, merging it with its audio track if enabled.
-     tmp_path = save_video_path.replace('.mp4', '_temp.mp4') if merge_audio else save_video_path
-     vid_writer = cv2.VideoWriter(tmp_path, cv2.VideoWriter_fourcc(*'mp4v'), 25, (CROP_WIDTH, CROP_HEIGHT))
-     for ci in crop_seq:
-         vid_writer.write(ci)
-     vid_writer.release()
-
-     if use_ffmpeg and not merge_audio:
-         convert_ffmpeg(tmp_path)
-
-     if merge_audio:
-         merge_audio_video(tmp_path, audio_path, save_video_path)
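Note: landmarks_interpolate calls linear_interpolate, which is neither defined nor imported in the deleted file; it presumably lives in a sibling module. A minimal sketch of the assumed behaviour, inferred from the call site (start_idx and stop_idx are valid frames, and the None entries between them are filled by linear blending); this is an assumption, not the repository's own implementation:

def linear_interpolate(landmarks, start_idx, stop_idx):
    # Assumed helper: fill the gap strictly between two valid frames by
    # linearly blending the endpoint landmark arrays (numpy arrays).
    start_landmarks = landmarks[start_idx]
    stop_landmarks = landmarks[stop_idx]
    delta = stop_landmarks - start_landmarks
    for idx in range(1, stop_idx - start_idx):
        landmarks[start_idx + idx] = start_landmarks + delta * (idx / (stop_idx - start_idx))
    return landmarks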
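crop_video converts clip times to frame indices with a hard-coded 25 fps, so the frame rate is worth verifying before processing a new video. A small guard using OpenCV (the helper name and tolerance are arbitrary choices, not part of the deleted file):

import cv2

def assert_25fps(vid_path: str, tol: float = 0.1) -> None:
    # Fail early if the container does not report (approximately) 25 fps.
    cap = cv2.VideoCapture(vid_path)
    fps = cap.get(cv2.CAP_PROP_FPS)
    cap.release()
    assert abs(fps - 25.0) <= tol, f'expected 25 fps, got {fps:.2f} for {vid_path}'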
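For reference, the deleted helpers formed a clip-extraction pipeline: crop_video reads and face-crops the frames at 224x224, crop_face reduces them to 96x96 mouth ROIs, and write_video (with crop_and_save_audio and merge_audio_video) writes the final clip. A minimal usage sketch; the paths are hypothetical, and bboxs and landmarks stand for per-frame annotations normalised to [0, 1], as crop_video expects:

from utils import crop_video, crop_face, crop_and_save_audio, write_video

# Hypothetical annotation for one clip of a 25 fps video.
clip_data = {
    'start': 1.2,            # seconds
    'end': 3.4,              # seconds
    'bboxs': bboxs,          # (num_frames, 4): normalised [x0, y0, x1, y1] per frame
    'landmarks': landmarks,  # (num_frames, 5, 2): normalised 5-point landmarks per frame
}

image_seq, lands = crop_video('example.mp4', clip_data)  # 224x224 face crops
mouth_rois = crop_face(image_seq, lands)                 # 96x96 mouth ROIs
crop_and_save_audio('example.mp4', 'clip.wav', clip_data['start'], clip_data['end'])
write_video('clip.mp4', mouth_rois, audio_path='clip.wav', merge_audio=True)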