"""Speaker embedding obtained via speaker verification training.
- feature dimension: 256
- source: https://github.com/metavoiceio/metavoice-src
"""
import os
import subprocess
from os.path import join as p_join
from typing import Optional

import librosa
from librosa import feature
import numpy as np
import torch
from torch import nn


checkpoint_url = "https://huggingface.co/datasets/asahi417/experiment-speaker-embedding/resolve/main/meta_voice_speaker_encoder.pt"
model_weight = p_join(os.path.expanduser('~'), ".cache", "experiment_speaker_embedding", "meta_voice_speaker_encoder.pt")


def wget(url: str, output_file: Optional[str] = None):
    """Download a file with the system `wget` command."""
    if output_file is None:
        output_file = os.path.basename(url)
    if os.path.dirname(output_file):
        os.makedirs(os.path.dirname(output_file), exist_ok=True)
    subprocess.run(["wget", url, "-O", output_file])
    if not os.path.exists(output_file):
        raise ValueError(f"failed to download {url}")


class MetaVoiceEmbedding(nn.Module):

    mel_window_length = 25
    mel_window_step = 10
    mel_n_channels = 40
    sampling_rate = 16000
    partials_n_frames = 160
    model_hidden_size = 256
    model_embedding_size = 256
    model_num_layers = 3

    def __init__(self):
        super().__init__()
        if not os.path.exists(model_weight):
            wget(checkpoint_url, model_weight)
        # Define the network
        self.lstm = nn.LSTM(self.mel_n_channels, self.model_hidden_size, self.model_num_layers, batch_first=True)
        self.linear = nn.Linear(self.model_hidden_size, self.model_embedding_size)
        self.relu = nn.ReLU()
        # Load weight
        self.load_state_dict(torch.load(model_weight, map_location="cpu")["model_state"], strict=False)
        # Get the target device
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.to(self.device)
        self.eval()

    def compute_partial_slices(self, n_samples: int, rate: float, min_coverage: float):
        # Compute how many frames separate two partial utterances
        samples_per_frame = int((self.sampling_rate * self.mel_window_step / 1000))
        n_frames = int(np.ceil((n_samples + 1) / samples_per_frame))
        frame_step = int(np.round((self.sampling_rate / rate) / samples_per_frame))
        # Compute the slices
        wav_slices, mel_slices = [], []
        steps = max(1, n_frames - self.partials_n_frames + frame_step + 1)
        for i in range(0, steps, frame_step):
            mel_range = np.array([i, i + self.partials_n_frames])
            wav_range = mel_range * samples_per_frame
            mel_slices.append(slice(*mel_range))
            wav_slices.append(slice(*wav_range))
        # Evaluate whether extra padding is warranted or not
        last_wav_range = wav_slices[-1]
        coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start)
        if coverage < min_coverage and len(mel_slices) > 1:
            return wav_slices[:-1], mel_slices[:-1]
        return wav_slices, mel_slices

    def get_speaker_embedding(self,
                              wav: np.ndarray,
                              sampling_rate: Optional[int] = None,
                              rate: float = 1.3,
                              min_coverage: float = 0.75) -> np.ndarray:
        """Return an L2-normalized 256-dim speaker embedding for a mono waveform."""
        # Resample only when the source sampling rate is known and differs from the model's.
        if sampling_rate is not None and sampling_rate != self.sampling_rate:
            wav = librosa.resample(wav, orig_sr=sampling_rate, target_sr=self.sampling_rate)
        # Trim leading and trailing silence.
        wav, _ = librosa.effects.trim(wav, top_db=20)
        wav_slices, mel_slices = self.compute_partial_slices(len(wav), rate, min_coverage)
        max_wave_length = wav_slices[-1].stop
        if max_wave_length >= len(wav):
            wav = np.pad(wav, (0, max_wave_length - len(wav)), "constant")
        # Wav -> Mel spectrogram
        frames = feature.melspectrogram(
            y=wav,
            sr=self.sampling_rate,
            n_fft=int(self.sampling_rate * self.mel_window_length / 1000),
            hop_length=int(self.sampling_rate * self.mel_window_step / 1000),
            n_mels=self.mel_n_channels,
        )
        mel = frames.astype(np.float32).T
        mel = np.array([mel[s] for s in mel_slices])
        # inference
        with torch.no_grad():
            mel = torch.from_numpy(mel).to(self.device)
            _, (hidden, _) = self.lstm(mel)
            embeds_raw = self.relu(self.linear(hidden[-1]))
            partial_embeds = embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True)
        partial_embeds = partial_embeds.cpu().numpy()
        raw_embed = np.mean(partial_embeds, axis=0)
        return raw_embed / np.linalg.norm(raw_embed, 2)
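

# --- Usage sketch (illustrative, not part of the original repository) ---
# Shows how the class above is meant to be called: load an audio file with
# librosa and compute a speaker embedding. The path "sample.wav" is a
# hypothetical placeholder for any mono speech file.
if __name__ == "__main__":
    encoder = MetaVoiceEmbedding()
    # sr=None keeps the file's native sampling rate; get_speaker_embedding resamples as needed.
    audio, sr = librosa.load("sample.wav", sr=None)
    embedding = encoder.get_speaker_embedding(audio, sampling_rate=sr)
    print(embedding.shape)  # expected: (256,)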