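"""Prepare GRID lip-extraction jobs and run scripts.extract_lip over them.

For every GRID speaker s1..s34 whose extracted frames exist under
lip/GRID_imgs, each sentence directory is paired with the speaker's alignment
directory (lip/GRID_aligns) and an output directory (lip/GRID_lip_imgs). The
resulting work list is passed to extract_lip.run, either split across several
worker processes or run in a single process, depending on the PARALLEL flag.
"""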
import os.path
import sys

from scripts import extract_lip
from multiprocessing import Pool, Process, Queue
from Loader import GridLoader
from dataset import GridDataset
from models.LipNet import LipNet

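# Set to True to fan the extraction work out across multiple processes;
# False runs everything sequentially in this process.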
PARALLEL = False

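# Load GRID video paths via the project's GridLoader; the result is only
# printed below as a quick sanity check.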
loader = GridLoader()
video_paths = loader.load_video_paths(fetch_all_paths=False)
image_dirs, anno_dirs, target_dirs = [], [], []

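# For every sentence of every GRID speaker s1..s34, collect the directory of
# extracted frames plus the matching alignment and output directories.
# Speakers without a frame directory are skipped.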
for k in range(1, 35):
    speaker_name = f's{k}'
    image_dirpath = f'lip/GRID_imgs/{speaker_name}'
    annos_dirpath = f'lip/GRID_aligns/{speaker_name}'
    target_dirpath = f'lip/GRID_lip_imgs/{speaker_name}'

    if not os.path.exists(image_dirpath):
        continue

    sentences = os.listdir(image_dirpath)
    for sentence in sentences:
        sentence_dir = f'{image_dirpath}/{sentence}'
        print('Queueing sentence directory:', sentence_dir)

        image_dirs.append(sentence_dir)
        anno_dirs.append(annos_dirpath)
        target_dirs.append(target_dirpath)

print('First few video paths:', video_paths[:10])

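# Create any missing output directories before extraction starts.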
for dst in target_dirs:
    os.makedirs(dst, exist_ok=True)

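# One work item per sentence: (frame dir, alignment dir, output dir).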
data = list(zip(image_dirs, anno_dirs, target_dirs))
processes = []
n_p = 8
bs = len(data) // n_p

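# Split the work list into n_p chunks of roughly equal size; the last process
# takes whatever remains so no item is dropped.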
if PARALLEL:
    for i in range(n_p):
        if i == n_p - 1:
            bs = len(data)

        p = Process(target=extract_lip.run, args=(data[:bs],))
        data = data[bs:]
        p.start()
        processes.append(p)

    assert len(data) == 0  # every work item was handed to a worker process
    for p in processes:
        p.join()

else:
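    # Run the whole work list sequentially in this process.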
    extract_lip.run(data)