import Loader
import os

from sklearn.model_selection import train_test_split

TEST_FRAC = 0.2
RANDOM_SEED = 42

loader = Loader.GridLoader()
# we'll leave out speaker 34 for testing
video_paths = loader.load_video_paths(
    verbose=True, fetch_all_paths=False,
    verify_phonemes_length=True
)

new_video_paths = []
for video_path in video_paths:
    # split '<speaker_dir>/<sentence>.<ext>' into speaker and sentence parts
    sentence = os.path.basename(video_path)
    sentence, _ = os.path.splitext(sentence)
    # speaker dirs are named like 's1', so the number follows the first char
    speaker_name = os.path.basename(os.path.dirname(video_path))
    speaker_no = int(speaker_name[1:])
    # note: cache_key is computed but never used below
    cache_key = (speaker_no, sentence)
    new_video_paths.append(video_path)


video_paths = new_video_paths
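# If speaker 34 needs to be excluded here rather than inside GridLoader,
# a filter like this sketch would enforce it explicitly (assumption: the
# loader does not already drop the held-out speaker; HELD_OUT_SPEAKER is
# a hypothetical name):
# HELD_OUT_SPEAKER = 34
# video_paths = [
#     p for p in video_paths
#     if int(os.path.basename(os.path.dirname(p))[1:]) != HELD_OUT_SPEAKER
# ]
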
train_paths, validate_paths = train_test_split(
    video_paths,
    test_size=TEST_FRAC, random_state=RANDOM_SEED
)
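
# Optional sanity checks (sketch): train_test_split partitions the list,
# so the splits must cover every path exactly once with no overlap.
assert len(train_paths) + len(validate_paths) == len(video_paths)
assert not set(train_paths) & set(validate_paths)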


def get_speakers(filepaths):
    # unique speaker directory names (e.g. 's1'), sorted for stable output
    return sorted({os.path.basename(os.path.dirname(x)) for x in filepaths})


train_paths = sorted(train_paths)
validate_paths = sorted(validate_paths)

print(f'ALL_SPEAKERS: {get_speakers(video_paths)}')
print(f'TRAIN_PATHS: {len(train_paths)}')
print(f'TRAIN_SPEAKERS: {get_speakers(train_paths)}')
print(f'VALIDATE_PATHS: {len(validate_paths)}')
print(f'VALIDATE_SPEAKERS: {get_speakers(validate_paths)}')
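
# Assumption: the data/ directory already exists; create it defensively
# so the writes below cannot fail on a fresh checkout.
os.makedirs('data', exist_ok=True)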

with open('data/phonemes_train.txt', 'w') as f:
    f.write('\n'.join(train_paths))
with open('data/phonemes_val.txt', 'w') as f:
    f.write('\n'.join(validate_paths))
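
# Downstream consumers can recover a split with a read that mirrors the
# writes above (sketch only):
# with open('data/phonemes_train.txt') as f:
#     train_paths = f.read().splitlines()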