init
- attach_speaker_embedding_s2s.py +156 -0
- main_s2s.sh +23 -2
- tokenize_dataset_s2s.py +0 -1
attach_speaker_embedding_s2s.py
ADDED
@@ -0,0 +1,156 @@
"""Attach MetaVoice speaker embeddings to a seamless-align S2S subset and push it to the Hub."""
import os
import shutil
import subprocess
from os.path import expanduser, join as p_join
from typing import Optional

import librosa
from librosa import feature
import numpy as np
import torch
from torch import nn
from soundfile import LibsndfileError
from datasets import load_dataset, DatasetDict, Audio

direction = os.getenv("DIRECTION", "enA-jaA")
sides = set(direction.split("-"))
dataset_id = os.getenv("DATASET_ID", 0)
num_proc = int(os.getenv("NUM_PROC", 1))
hf_org = os.getenv("HF_ORG", "asahi417")
hf_dataset = f"seamless-align-{direction}"
dataset = load_dataset(f"{hf_org}/{hf_dataset}", f"subset_{dataset_id}", split="train")
audio_loader = Audio()

checkpoint_url = "https://huggingface.co/datasets/asahi417/experiment-speaker-embedding/resolve/main/meta_voice_speaker_encoder.pt"
model_weight = p_join(expanduser("~"), ".cache", "experiment_speaker_embedding", "meta_voice_speaker_encoder.pt")


def wget(url: str, output_file: str):
    os.makedirs(os.path.dirname(output_file), exist_ok=True)
    subprocess.run(["wget", url, "-O", output_file])
    if not os.path.exists(output_file):
        raise ValueError(f"failed to download {url}")


class MetaVoiceSE(nn.Module):

    mel_window_length = 25  # ms
    mel_window_step = 10  # ms
    mel_n_channels = 40
    sampling_rate = 16000
    partials_n_frames = 160
    model_hidden_size = 256
    model_embedding_size = 256
    model_num_layers = 3

    def __init__(self):
        super().__init__()
        if not os.path.exists(model_weight):
            wget(checkpoint_url, model_weight)
        # Define the network.
        self.lstm = nn.LSTM(self.mel_n_channels, self.model_hidden_size, self.model_num_layers, batch_first=True)
        self.linear = nn.Linear(self.model_hidden_size, self.model_embedding_size)
        self.relu = nn.ReLU()
        # Load the pretrained weights.
        self.load_state_dict(torch.load(model_weight, map_location="cpu")["model_state"], strict=False)
        # Move to the target device and freeze for inference.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.to(self.device)
        self.eval()

    def compute_partial_slices(self, n_samples: int, rate: float, min_coverage: float):
        # Compute how many frames separate two partial utterances.
        samples_per_frame = int(self.sampling_rate * self.mel_window_step / 1000)
        n_frames = int(np.ceil((n_samples + 1) / samples_per_frame))
        frame_step = int(np.round((self.sampling_rate / rate) / samples_per_frame))
        # Compute the slices.
        wav_slices, mel_slices = [], []
        steps = max(1, n_frames - self.partials_n_frames + frame_step + 1)
        for i in range(0, steps, frame_step):
            mel_range = np.array([i, i + self.partials_n_frames])
            wav_range = mel_range * samples_per_frame
            mel_slices.append(slice(*mel_range))
            wav_slices.append(slice(*wav_range))
        # Evaluate whether extra padding is warranted or not.
        last_wav_range = wav_slices[-1]
        coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start)
        if coverage < min_coverage and len(mel_slices) > 1:
            return wav_slices[:-1], mel_slices[:-1]
        return wav_slices, mel_slices

    def get_speaker_embedding(self,
                              wav: np.ndarray,
                              sampling_rate: Optional[int] = None,
                              rate: float = 1.3,
                              min_coverage: float = 0.75) -> np.ndarray:
        if sampling_rate != self.sampling_rate:
            wav = librosa.resample(wav, orig_sr=sampling_rate, target_sr=self.sampling_rate)
        wav, _ = librosa.effects.trim(wav, top_db=20)
        wav_slices, mel_slices = self.compute_partial_slices(len(wav), rate, min_coverage)
        max_wave_length = wav_slices[-1].stop
        if max_wave_length >= len(wav):
            wav = np.pad(wav, (0, max_wave_length - len(wav)), "constant")
        # Wav -> mel spectrogram.
        frames = feature.melspectrogram(
            y=wav,
            sr=self.sampling_rate,
            n_fft=int(self.sampling_rate * self.mel_window_length / 1000),
            hop_length=int(self.sampling_rate * self.mel_window_step / 1000),
            n_mels=self.mel_n_channels,
        )
        mel = frames.astype(np.float32).T
        mel = np.array([mel[s] for s in mel_slices])
        # Inference: embed each partial utterance, then average and L2-normalize.
        with torch.no_grad():
            mel = torch.from_numpy(mel).to(self.device)
            _, (hidden, _) = self.lstm(mel)
            embeds_raw = self.relu(self.linear(hidden[-1]))
            partial_embeds = embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True)
            partial_embeds = partial_embeds.cpu().numpy()
        raw_embed = np.mean(partial_embeds, axis=0)
        return raw_embed / np.linalg.norm(raw_embed, 2)


speaker_embedder = MetaVoiceSE()


def error_file(example):
    # Keep only examples whose audio can actually be decoded.
    for side in sides:
        try:
            audio_loader.decode_example(example[f"{side}.audio"])
        except LibsndfileError:
            return False
    return True


print(f"Num examples: {len(dataset)}")
for s in sides:
    dataset = dataset.cast_column(f"{s}.audio", Audio(decode=False))
dataset = dataset.filter(error_file, num_proc=num_proc, desc="drop broken audio")
for s in sides:
    dataset = dataset.cast_column(f"{s}.audio", Audio())
print(f"Num examples (after filtering): {len(dataset)}")


def speaker_embedding(example):
    for side in sides:
        example[f"{side}.audio.speaker_embedding"] = speaker_embedder.get_speaker_embedding(
            example[f"{side}.audio"]["array"], example[f"{side}.audio"]["sampling_rate"]
        )
    return example


dataset = dataset.map(
    function=speaker_embedding,
    remove_columns=[f"{s}.audio" for s in sides]
    + [f"{s}.url" for s in sides]
    + [f"{s}.duration_start" for s in sides]
    + [f"{s}.duration_end" for s in sides],
    num_proc=num_proc,
    desc="attach speaker embedding dataset"
)
DatasetDict({"train": dataset}).push_to_hub(f"{hf_org}/{hf_dataset}.speaker-embedding.metavoice", config_name=f"subset_{dataset_id}")
cache_dir = f"{expanduser('~')}/.cache/huggingface/datasets/{hf_org}___{hf_dataset}/subset_{dataset_id}"
if os.path.exists(cache_dir):
    shutil.rmtree(cache_dir)
main_s2s.sh
CHANGED
@@ -64,7 +64,7 @@ export LINE_NO_START=0
 export LINE_NO_END=10
 python fetch_dataset_s2s.py
 # main
-for i in $(seq
+for i in $(seq 1 149);
 do
 export N_POOL=15
 export DATASET_ID=${i}
@@ -85,7 +85,7 @@ do
 python fetch_dataset_s2s.py
 done
 # tokenize
-for i in $(seq
+for i in $(seq 120 140);
 do
 export DATASET_ID=${i}
 export DIRECTION="enA-viA"
@@ -201,3 +201,24 @@ export DIRECTION="deA-enA"
 export LINE_NO_START=0
 export LINE_NO_END=10
 python fetch_dataset_s2s.py
+# main
+for i in $(seq 1 200);
+do
+export N_POOL=15
+export DATASET_ID=${i}
+export DIRECTION="deA-enA"
+export LINE_NO_START=$(((DATASET_ID-1) * 2500))
+export LINE_NO_END=$((DATASET_ID * 2500))
+echo ${LINE_NO_START}
+python fetch_dataset_s2s.py
+done
+for i in $(seq 201 394);
+do
+export N_POOL=15
+export DATASET_ID=${i}
+export DIRECTION="deA-enA"
+export LINE_NO_START=$(((DATASET_ID-1) * 2500))
+export LINE_NO_END=$((DATASET_ID * 2500))
+echo ${LINE_NO_START}
+python fetch_dataset_s2s.py
+done
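
The new deA-enA loops shard the manifest into fixed 2,500-line windows keyed by DATASET_ID. A small Python sketch of the same arithmetic (the shard_range helper is ours, for illustration only):

# Sketch of the sharding arithmetic above; shard_range is an illustrative name.
def shard_range(dataset_id: int, shard_size: int = 2500) -> tuple:
    # DATASET_ID=i covers manifest lines [(i-1)*2500, i*2500)
    start = (dataset_id - 1) * shard_size
    return start, start + shard_size

assert shard_range(1) == (0, 2500)
assert shard_range(394) == (982500, 985000)  # last shard in the seq 201 394 loop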
tokenize_dataset_s2s.py
CHANGED
@@ -11,7 +11,6 @@ from encodec_audio_tokenizer import EncodecTokenizer
 direction = os.getenv("DIRECTION", "enA-jaA")
 sides = set(direction.split("-"))
 dataset_id = os.getenv("DATASET_ID", 0)
-batch_size = int(os.getenv("BATCH_SIZE", 64))
 num_proc = int(os.getenv("NUM_PROC", 1))
 hf_org = os.getenv("HF_ORG", "asahi417")
 hf_dataset = f"seamless-align-{direction}"
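
With BATCH_SIZE removed, the tokenizer is configured entirely by DIRECTION, DATASET_ID, and NUM_PROC. A sketch of driving one shard from Python instead of the shell loop (the wrapper function is ours; only the env var names and the script name come from this diff):

# Sketch: run one tokenization shard via the env vars these scripts read.
import os
import subprocess

def run_shard(dataset_id: int, direction: str = "enA-jaA", num_proc: int = 1):
    # run_shard is an illustrative wrapper, not part of the commit.
    env = dict(os.environ,
               DATASET_ID=str(dataset_id),
               DIRECTION=direction,
               NUM_PROC=str(num_proc))
    subprocess.run(["python", "tokenize_dataset_s2s.py"], env=env, check=True)

# run_shard(120, direction="enA-viA")  # mirrors one step of the seq 120 140 loop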