|
"""Speaker embedding obtained via speaker verification training. |
|
- feature dimension: 256 |
|
- source: https://github.com/metavoiceio/metavoice-src |
|
""" |
|
import os
import subprocess
from os.path import join as p_join
from typing import Optional

import librosa
from librosa import feature
import numpy as np
import torch
from torch import nn

checkpoint_url = "https://huggingface.co/datasets/asahi417/experiment-speaker-embedding/resolve/main/meta_voice_speaker_encoder.pt.pt"
model_weight = p_join(os.path.expanduser('~'), ".cache", "experiment_speaker_embedding", "meta_voice_speaker_encoder.pt.pt")
|
|
def wget(url: str, output_file: Optional[str] = None):
    """Download `url` with the system wget binary (defaults to the URL's basename)."""
    if output_file is None:
        output_file = os.path.basename(url)
    os.makedirs(os.path.dirname(output_file) or ".", exist_ok=True)
    subprocess.run(["wget", url, "-O", output_file])
    if not os.path.exists(output_file):
        raise ValueError(f"failed to download {url}")
|
|
class MetaVoiceSE(nn.Module):
    """LSTM speaker encoder from metavoice-src; produces a 256-dimensional speaker embedding."""

    mel_window_length = 25  # mel window length (ms)
    mel_window_step = 10  # hop between mel windows (ms)
    mel_n_channels = 40
    sampling_rate = 16000
    partials_n_frames = 160  # mel frames per partial utterance (1.6 s at a 10 ms hop)
    model_hidden_size = 256
    model_embedding_size = 256
    model_num_layers = 3
|
    def __init__(self):
        super().__init__()
        # download the pretrained checkpoint on first use
        if not os.path.exists(model_weight):
            wget(checkpoint_url, model_weight)
        self.lstm = nn.LSTM(self.mel_n_channels, self.model_hidden_size, self.model_num_layers, batch_first=True)
        self.linear = nn.Linear(self.model_hidden_size, self.model_embedding_size)
        self.relu = nn.ReLU()
        # strict=False: the checkpoint may carry entries that are not used at inference time
        self.load_state_dict(torch.load(model_weight, map_location="cpu")["model_state"], strict=False)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.to(self.device)
        self.eval()
|
    def compute_partial_slices(self, n_samples: int, rate: float, min_coverage: float):
        """Split an utterance of `n_samples` samples into overlapping partial utterances.

        Returns matching lists of waveform slices and mel-frame slices, one pair per partial.
        """
        samples_per_frame = int(self.sampling_rate * self.mel_window_step / 1000)
        n_frames = int(np.ceil((n_samples + 1) / samples_per_frame))
        frame_step = int(np.round((self.sampling_rate / rate) / samples_per_frame))
        wav_slices, mel_slices = [], []
        steps = max(1, n_frames - self.partials_n_frames + frame_step + 1)
        for i in range(0, steps, frame_step):
            mel_range = np.array([i, i + self.partials_n_frames])
            wav_range = mel_range * samples_per_frame
            mel_slices.append(slice(*mel_range))
            wav_slices.append(slice(*wav_range))
        # drop the last partial if it covers less than `min_coverage` of a full window
        last_wav_range = wav_slices[-1]
        coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start)
        if coverage < min_coverage and len(mel_slices) > 1:
            return wav_slices[:-1], mel_slices[:-1]
        return wav_slices, mel_slices
|
    def get_speaker_embedding(self,
                              wav: np.ndarray,
                              sampling_rate: Optional[int] = None,
                              rate: float = 1.3,
                              min_coverage: float = 0.75) -> np.ndarray:
        """Compute an L2-normalized 256-dimensional speaker embedding for a mono waveform."""
        # resample to the model's 16 kHz rate; if sampling_rate is None, the input is assumed to be 16 kHz already
        if sampling_rate is not None and sampling_rate != self.sampling_rate:
            wav = librosa.resample(wav, orig_sr=sampling_rate, target_sr=self.sampling_rate)
        # trim leading/trailing silence, then pad so the last partial slice is fully covered
        wav, _ = librosa.effects.trim(wav, top_db=20)
        wav_slices, mel_slices = self.compute_partial_slices(len(wav), rate, min_coverage)
        max_wave_length = wav_slices[-1].stop
        if max_wave_length >= len(wav):
            wav = np.pad(wav, (0, max_wave_length - len(wav)), "constant")
        # 40-channel mel spectrogram with 25 ms windows and a 10 ms hop
        frames = feature.melspectrogram(
            y=wav,
            sr=self.sampling_rate,
            n_fft=int(self.sampling_rate * self.mel_window_length / 1000),
            hop_length=int(self.sampling_rate * self.mel_window_step / 1000),
            n_mels=self.mel_n_channels,
        )
        mel = frames.astype(np.float32).T
        mel = np.array([mel[s] for s in mel_slices])
        with torch.no_grad():
            # embed each partial utterance: last LSTM hidden state -> linear -> ReLU -> L2 normalization
            mel = torch.from_numpy(mel).to(self.device)
            _, (hidden, _) = self.lstm(mel)
            embeds_raw = self.relu(self.linear(hidden[-1]))
            partial_embeds = embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True)
            partial_embeds = partial_embeds.cpu().numpy()
        # average the partial embeddings and re-normalize to unit length
        raw_embed = np.mean(partial_embeds, axis=0)
        return raw_embed / np.linalg.norm(raw_embed, 2)
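
if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): "sample.wav" is a
    # hypothetical audio file used purely for illustration.
    encoder = MetaVoiceSE()
    wav_array, sr = librosa.load("sample.wav", sr=None)
    speaker_embedding = encoder.get_speaker_embedding(wav_array, sampling_rate=sr)
    print(speaker_embedding.shape)  # expected: (256,)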
|
|