# torchnet/generate_phoneme_train_split.py
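"""
Split the loaded GRID corpus video paths (clips with verified phoneme
transcripts) into train and validation sets, then write the two path
lists to data/phonemes_train.txt and data/phonemes_val.txt.
"""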
import Loader
import numpy as np
import os
from sklearn.model_selection import train_test_split
TEST_FRAC = 0.2  # fraction of clips held out for validation
RANDOM_SEED = 42  # fixed seed so the split is reproducible
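# GridLoader is this repo's loader for the GRID audiovisual corpus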
loader = Loader.GridLoader()
# we'll leave out speaker 34 for testing
video_paths = loader.load_video_paths(
    verbose=True, fetch_all_paths=False,
    verify_phonemes_length=True
)
new_video_paths = []

for video_path in video_paths:
    # parse the sentence ID and speaker number from each path
    # (paths look like .../s<speaker_no>/<sentence>.<ext>); the
    # int() call raises if a path doesn't match that layout, so
    # this loop doubles as a sanity check on the loaded paths
    sentence = os.path.basename(video_path)
    sentence, _ = os.path.splitext(sentence)
    speaker_name = os.path.basename(os.path.dirname(video_path))
    speaker_no = int(speaker_name[1:])
    # cache_key is built but not used further in this script
    cache_key = (speaker_no, sentence)
    new_video_paths.append(video_path)

video_paths = new_video_paths
train_paths, validate_paths = train_test_split(
    video_paths, test_size=TEST_FRAC, random_state=RANDOM_SEED
)
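# NOTE: this is a clip-level split, so the same speakers can appear in
# both train and validation; the speaker printouts below make that
# overlap easy to check (only speaker 34 is reserved for testing)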
def get_speakers(filepaths):
    # a clip's speaker ID is the name of its parent directory
    return sorted({
        os.path.basename(os.path.dirname(x)) for x in filepaths
    })
train_paths = sorted(train_paths)
validate_paths = sorted(validate_paths)
print(f'ALL_SPEAKERS: {get_speakers(video_paths)}')
print(f'TRAIN_PATHS: {len(train_paths)}')
print(f'TRAIN_SPEAKERS: {get_speakers(train_paths)}')
print(f'VALIDATE_PATHS: {len(validate_paths)}')
print(f'VALIDATE_SPEAKERS: {get_speakers(validate_paths)}')
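# ensure the output directory exists so the writes below
# don't fail on a fresh checkout
os.makedirs('data', exist_ok=True)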
with open('data/phonemes_train.txt', 'w') as f:
    f.write('\n'.join(train_paths))
with open('data/phonemes_val.txt', 'w') as f:
    f.write('\n'.join(validate_paths))