asahi417 committed
Commit 65f033e
1 Parent(s): e56ab10
.idea/.gitignore ADDED
@@ -0,0 +1,8 @@
+ # Default ignored files
+ /shelf/
+ /workspace.xml
+ # Editor-based HTTP Client requests
+ /httpRequests/
+ # Datasource local storage ignored files
+ /dataSources/
+ /dataSources.local.xml
.idea/experiment-speaker-embedding.iml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <module type="PYTHON_MODULE" version="4">
+ <component name="NewModuleRootManager">
+ <content url="file://$MODULE_DIR$" />
+ <orderEntry type="jdk" jdkName="Python 3.9 (distil) (8)" jdkType="Python SDK" />
+ <orderEntry type="sourceFolder" forTests="false" />
+ </component>
+ </module>
.idea/inspectionProfiles/Project_Default.xml ADDED
@@ -0,0 +1,15 @@
+ <component name="InspectionProjectProfileManager">
+ <profile version="1.0">
+ <option name="myName" value="Project Default" />
+ <inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
+ <option name="ignoredPackages">
+ <value>
+ <list size="2">
+ <item index="0" class="java.lang.String" itemvalue="pandas" />
+ <item index="1" class="java.lang.String" itemvalue="requests" />
+ </list>
+ </value>
+ </option>
+ </inspection_tool>
+ </profile>
+ </component>
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
+ <component name="InspectionProjectProfileManager">
+ <settings>
+ <option name="USE_PROJECT_PROFILE" value="false" />
+ <version value="1.0" />
+ </settings>
+ </component>
.idea/misc.xml ADDED
@@ -0,0 +1,7 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+ <component name="Black">
+ <option name="sdkName" value="Python 3.9 (distil) (8)" />
+ </component>
+ <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.9 (distil) (8)" project-jdk-type="Python SDK" />
+ </project>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+ <component name="ProjectModuleManager">
+ <modules>
+ <module fileurl="file://$PROJECT_DIR$/.idea/experiment-speaker-embedding.iml" filepath="$PROJECT_DIR$/.idea/experiment-speaker-embedding.iml" />
+ </modules>
+ </component>
+ </project>
.idea/vcs.xml ADDED
@@ -0,0 +1,6 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+ <component name="VcsDirectoryMappings">
+ <mapping directory="" vcs="Git" />
+ </component>
+ </project>
model_meta_voice.py ADDED
@@ -0,0 +1,104 @@
+ """Speaker embedding obtained via speaker verification training.
+ - feature dimension: 256
+ - source: https://github.com/metavoiceio/metavoice-src
+ """
+ import os
+ import subprocess
+ from os.path import join as p_join
+ from typing import Optional
+
+ import librosa
+ from librosa import feature
+ import numpy as np
+ import torch
+ from torch import nn
+
+
+ checkpoint_url = "https://huggingface.co/datasets/asahi417/experiment-speaker-embedding/resolve/main/speaker_encoder.pt"
+ model_weight = p_join(os.path.expanduser('~'), ".cache", "experiment_speaker_embedding", "speaker_encoder.pt")
+
+
+ def wget(url: str, output_file: Optional[str] = None):
+     os.makedirs(os.path.dirname(output_file), exist_ok=True)
+     subprocess.run(["wget", url, "-O", output_file])
+     if not os.path.exists(output_file):
+         raise ValueError(f"failed to download {url}")
+
+
+ class MetaVoiceSE(nn.Module):
+
+     mel_window_length = 25
+     mel_window_step = 10
+     mel_n_channels = 40
+     sampling_rate = 16000
+     partials_n_frames = 160
+     model_hidden_size = 256
+     model_embedding_size = 256
+     model_num_layers = 3
+
+     def __init__(self):
+         super().__init__()
+         if not os.path.exists(model_weight):
+             wget(checkpoint_url, model_weight)
+         # Define the network
+         self.lstm = nn.LSTM(self.mel_n_channels, self.model_hidden_size, self.model_num_layers, batch_first=True)
+         self.linear = nn.Linear(self.model_hidden_size, self.model_embedding_size)
+         self.relu = nn.ReLU()
+         # Load weight
+         self.load_state_dict(torch.load(model_weight, map_location="cpu")["model_state"], strict=False)
+         # Get the target device
+         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         self.to(self.device)
+         self.eval()
+
+     def compute_partial_slices(self, n_samples: int, rate, min_coverage):
+         # Compute how many frames separate two partial utterances
+         samples_per_frame = int((self.sampling_rate * self.mel_window_step / 1000))
+         n_frames = int(np.ceil((n_samples + 1) / samples_per_frame))
+         frame_step = int(np.round((self.sampling_rate / rate) / samples_per_frame))
+         # Compute the slices
+         wav_slices, mel_slices = [], []
+         steps = max(1, n_frames - self.partials_n_frames + frame_step + 1)
+         for i in range(0, steps, frame_step):
+             mel_range = np.array([i, i + self.partials_n_frames])
+             wav_range = mel_range * samples_per_frame
+             mel_slices.append(slice(*mel_range))
+             wav_slices.append(slice(*wav_range))
+         # Evaluate whether extra padding is warranted or not
+         last_wav_range = wav_slices[-1]
+         coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start)
+         if coverage < min_coverage and len(mel_slices) > 1:
+             return wav_slices[:-1], mel_slices[:-1]
+         return wav_slices, mel_slices
+
+     def get_speaker_embedding(self,
+                               wav: np.ndarray,
+                               sampling_rate: Optional[int] = None,
+                               rate: float = 1.3,
+                               min_coverage: float = 0.75) -> np.ndarray:
+         if sampling_rate != self.sampling_rate:
+             wav = librosa.resample(wav, orig_sr=sampling_rate, target_sr=self.sampling_rate)
+         wav, _ = librosa.effects.trim(wav, top_db=20)
+         wav_slices, mel_slices = self.compute_partial_slices(len(wav), rate, min_coverage)
+         max_wave_length = wav_slices[-1].stop
+         if max_wave_length >= len(wav):
+             wav = np.pad(wav, (0, max_wave_length - len(wav)), "constant")
+         # Wav -> Mel spectrogram
+         frames = feature.melspectrogram(
+             y=wav,
+             sr=self.sampling_rate,
+             n_fft=int(self.sampling_rate * self.mel_window_length / 1000),
+             hop_length=int(self.sampling_rate * self.mel_window_step / 1000),
+             n_mels=self.mel_n_channels,
+         )
+         mel = frames.astype(np.float32).T
+         mel = np.array([mel[s] for s in mel_slices])
+         # inference
+         with torch.no_grad():
+             mel = torch.from_numpy(mel).to(self.device)
+             _, (hidden, _) = self.lstm(mel)
+             embeds_raw = self.relu(self.linear(hidden[-1]))
+             partial_embeds = embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True)
+         partial_embeds = partial_embeds.cpu().numpy()
+         raw_embed = np.mean(partial_embeds, axis=0)
+         return raw_embed / np.linalg.norm(raw_embed, 2)
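
A minimal usage sketch for MetaVoiceSE above, assuming librosa is installed and the sample.wav added in this commit sits in the working directory; the checkpoint is downloaded to ~/.cache/experiment_speaker_embedding/ on first instantiation:

import librosa
from model_meta_voice import MetaVoiceSE

# load at the file's native sampling rate; the model resamples to 16 kHz internally
wav, sr = librosa.load("sample.wav", sr=None)
model = MetaVoiceSE()
embedding = model.get_speaker_embedding(wav, sampling_rate=sr)
print(embedding.shape)  # expected: (256,), L2-normalized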
model_pyannote_embedding.py ADDED
@@ -0,0 +1,33 @@
+ """Pyannote speaker embedding model.
+ - pip install pyannote.audio
+ - feature dimension: 512
+ - source: https://huggingface.co/pyannote/embedding
+ """
+ from typing import Optional, Union, Tuple
+ import torch
+ import numpy as np
+ from pyannote.audio import Model
+ from pyannote.audio import Inference
+ from pyannote.audio.core.inference import fix_reproducibility, map_with_specifications
+
+
+ class PyannoteSE:
+
+     def __init__(self):
+         self.model = Model.from_pretrained("pyannote/embedding")
+         self.inference = Inference(self.model, window="whole")
+
+     def get_speaker_embedding(self, wav: np.ndarray, sampling_rate: Optional[int] = None) -> np.ndarray:
+         wav = torch.as_tensor(wav.reshape(1, -1))
+         fix_reproducibility(self.inference.device)
+         if self.inference.window == "sliding":
+             return self.inference.slide(wav, sampling_rate, hook=None)
+
+         outputs: Union[np.ndarray, Tuple[np.ndarray]] = self.inference.infer(wav[None])
+
+         def __first_sample(outputs: np.ndarray, **kwargs) -> np.ndarray:
+             return outputs[0]
+
+         return map_with_specifications(
+             self.model.specifications, __first_sample, outputs
+         )
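
Note: pyannote/embedding is a gated checkpoint, so Model.from_pretrained typically needs a Hugging Face access token, and the underlying model is trained on 16 kHz audio while get_speaker_embedding above does not resample. A hedged sketch of the authenticated setup (the token value is a placeholder):

from pyannote.audio import Model, Inference

# accept the model's user conditions on the Hub first; "hf_xxx" is a placeholder token
model = Model.from_pretrained("pyannote/embedding", use_auth_token="hf_xxx")
inference = Inference(model, window="whole")
embedding = inference("sample.wav")  # 512-dimensional vector for the whole file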
model_w2v_bert.py ADDED
@@ -0,0 +1,28 @@
+ """Meta's w2vBERT based speaker embedding.
+ - feature dimension: 1024
+ - source: https://huggingface.co/facebook/w2v-bert-2.0
+ """
+ from typing import Optional
+
+ import torch
+ import librosa
+ import numpy as np
+ from transformers import Wav2Vec2BertModel, AutoFeatureExtractor
+
+
+ class W2VBertSE:
+     def __init__(self):
+         self.processor = AutoFeatureExtractor.from_pretrained("facebook/w2v-bert-2.0")
+         self.model = Wav2Vec2BertModel.from_pretrained("facebook/w2v-bert-2.0")
+         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         self.model.to(self.device)
+         self.model.eval()
+
+     def get_speaker_embedding(self, wav: np.ndarray, sampling_rate: Optional[int] = None) -> np.ndarray:
+         # audio file is decoded on the fly
+         if sampling_rate != self.processor.sampling_rate:
+             wav = librosa.resample(wav, orig_sr=sampling_rate, target_sr=self.processor.sampling_rate)
+         inputs = self.processor(wav, sampling_rate=self.processor.sampling_rate, return_tensors="pt")
+         with torch.no_grad():
+             outputs = self.model(**{k: v.to(self.device) for k, v in inputs.items()})
+         return outputs.last_hidden_state.mean(1).cpu().numpy()[0]
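
W2VBertSE mean-pools the last hidden states of a general-purpose speech encoder rather than a dedicated speaker-verification model. A minimal sketch of calling it on the bundled sample, assuming transformers and librosa are installed:

import librosa
from model_w2v_bert import W2VBertSE

wav, sr = librosa.load("sample.wav", sr=None)
model = W2VBertSE()
embedding = model.get_speaker_embedding(wav, sampling_rate=sr)
print(embedding.shape)  # expected: (1024,)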
sample.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9378ee67487807492fe471b1c82384ac275198b6022da5ba01995af77dce90a
+ size 465004
test.py ADDED
@@ -0,0 +1,24 @@
+ import librosa
+ from model_meta_voice import MetaVoiceSE
+ from model_pyannote_embedding import PyannoteSE
+ from model_w2v_bert import W2VBertSE
+
+
+ def test():
+     wav, sr = librosa.load("sample.wav")
+     print("MetaVoiceSE")
+     model = MetaVoiceSE()
+     v = model.get_speaker_embedding(wav, sr)
+     print(v.shape)
+     print("PyannoteSE")
+     model = PyannoteSE()
+     v = model.get_speaker_embedding(wav, sr)
+     print(v.shape)
+     print("W2VBertSE")
+     model = W2VBertSE()
+     v = model.get_speaker_embedding(wav, sr)
+     print(v.shape)
+
+
+ if __name__ == '__main__':
+     test()
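
test() above only checks output shapes; a hedged follow-up sketch of how the embeddings might be compared in practice, using cosine similarity between the two halves of sample.wav (the split is purely illustrative):

import numpy as np
import librosa
from model_meta_voice import MetaVoiceSE


def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))


wav, sr = librosa.load("sample.wav", sr=None)
model = MetaVoiceSE()
half = len(wav) // 2
# embeddings of two halves of the same recording should score close to 1.0
print(cosine_similarity(
    model.get_speaker_embedding(wav[:half], sr),
    model.get_speaker_embedding(wav[half:], sr),
))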