csukuangfj committed
Commit f53860a
Parent(s): cef699e

add models

Browse files:
- .gitattributes +1 -0
- GigaAM%20License_NC.pdf +1 -0
- README.md +10 -0
- decoder.onnx +3 -0
- encoder.int8.onnx +3 -0
- export-onnx-rnnt.py +119 -0
- joiner.onnx +3 -0
- run-rnnt.sh +50 -0
- test-onnx-rnnt.py +270 -0
- test_wavs/example.wav +3 -0
- test_wavs/long_example.wav +3 -0
- tokens.txt +513 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.wav filter=lfs diff=lfs merge=lfs -text
GigaAM%20License_NC.pdf ADDED
@@ -0,0 +1 @@
+Entry not found
README.md ADDED
@@ -0,0 +1,10 @@
+# Introduction
+
+This folder contains scripts for converting models from
+https://github.com/salute-developers/GigaAM
+to sherpa-onnx.
+
+The ASR models in this folder are for Russian speech recognition.
+
+Please see the license of the models at
+https://github.com/salute-developers/GigaAM/blob/main/GigaAM%20License_NC.pdf
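The exported files in this repository (encoder.int8.onnx, decoder.onnx, joiner.onnx, tokens.txt) are meant to be consumed by sherpa-onnx. Below is a minimal sketch of what that could look like with the sherpa-onnx Python package; the exact keyword arguments of OfflineRecognizer.from_transducer and the model_type value are assumptions here, so check the sherpa-onnx documentation for the installed version:

# Minimal sketch (not part of this commit): decode test_wavs/example.wav with
# the exported models via sherpa-onnx. Argument names/values are assumptions;
# verify them against the sherpa-onnx docs for your version.
import sherpa_onnx
import soundfile as sf

recognizer = sherpa_onnx.OfflineRecognizer.from_transducer(
    encoder="./encoder.int8.onnx",
    decoder="./decoder.onnx",
    joiner="./joiner.onnx",
    tokens="./tokens.txt",
    num_threads=1,
    model_type="nemo_transducer",  # assumption: treat the NeMo export as a NeMo transducer
)

audio, sample_rate = sf.read("./test_wavs/example.wav", dtype="float32", always_2d=True)
stream = recognizer.create_stream()
stream.accept_waveform(sample_rate, audio[:, 0])  # first channel only
recognizer.decode_stream(stream)
print(stream.result.text)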
decoder.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5bc934fb277e777c055416bff25cba175865163fdc806fda8dd716001b625885
+size 3945078
encoder.int8.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b8543fbae136cdca47d023deb6d2893dc97e40ba001c3491b84bfef2222dcd9
+size 274772325
export-onnx-rnnt.py ADDED
@@ -0,0 +1,119 @@
+#!/usr/bin/env python3
+# Copyright 2024 Xiaomi Corp. (authors: Fangjun Kuang)
+
+from typing import Dict
+
+import onnx
+import torch
+import torchaudio
+from nemo.collections.asr.models import EncDecRNNTBPEModel
+from nemo.collections.asr.modules.audio_preprocessing import (
+    AudioToMelSpectrogramPreprocessor as NeMoAudioToMelSpectrogramPreprocessor,
+)
+from nemo.collections.asr.parts.preprocessing.features import (
+    FilterbankFeaturesTA as NeMoFilterbankFeaturesTA,
+)
+from onnxruntime.quantization import QuantType, quantize_dynamic
+
+
+def add_meta_data(filename: str, meta_data: Dict[str, str]):
+    """Add meta data to an ONNX model. It is changed in-place.
+
+    Args:
+      filename:
+        Filename of the ONNX model to be changed.
+      meta_data:
+        Key-value pairs.
+    """
+    model = onnx.load(filename)
+    while len(model.metadata_props):
+        model.metadata_props.pop()
+
+    for key, value in meta_data.items():
+        meta = model.metadata_props.add()
+        meta.key = key
+        meta.value = str(value)
+
+    onnx.save(model, filename)
+
+
+class FilterbankFeaturesTA(NeMoFilterbankFeaturesTA):
+    def __init__(self, mel_scale: str = "htk", wkwargs=None, **kwargs):
+        if "window_size" in kwargs:
+            del kwargs["window_size"]
+        if "window_stride" in kwargs:
+            del kwargs["window_stride"]
+
+        super().__init__(**kwargs)
+
+        self._mel_spec_extractor: torchaudio.transforms.MelSpectrogram = (
+            torchaudio.transforms.MelSpectrogram(
+                sample_rate=self._sample_rate,
+                win_length=self.win_length,
+                hop_length=self.hop_length,
+                n_mels=kwargs["nfilt"],
+                window_fn=self.torch_windows[kwargs["window"]],
+                mel_scale=mel_scale,
+                norm=kwargs["mel_norm"],
+                n_fft=kwargs["n_fft"],
+                f_max=kwargs.get("highfreq", None),
+                f_min=kwargs.get("lowfreq", 0),
+                wkwargs=wkwargs,
+            )
+        )
+
+
+class AudioToMelSpectrogramPreprocessor(NeMoAudioToMelSpectrogramPreprocessor):
+    def __init__(self, mel_scale: str = "htk", **kwargs):
+        super().__init__(**kwargs)
+        kwargs["nfilt"] = kwargs["features"]
+        del kwargs["features"]
+        self.featurizer = (
+            FilterbankFeaturesTA(  # Deprecated arguments; kept for config compatibility
+                mel_scale=mel_scale,
+                **kwargs,
+            )
+        )
+
+
+@torch.no_grad()
+def main():
+    model = EncDecRNNTBPEModel.from_config_file("./rnnt_model_config.yaml")
+    ckpt = torch.load("./rnnt_model_weights.ckpt", map_location="cpu")
+    model.load_state_dict(ckpt, strict=False)
+    model.eval()
+
+    with open("./tokens.txt", "w", encoding="utf-8") as f:
+        for i, s in enumerate(model.joint.vocabulary):
+            f.write(f"{s} {i}\n")
+        f.write(f"<blk> {i+1}\n")
+    print("Saved to tokens.txt")
+
+    model.encoder.export("encoder.onnx")
+    model.decoder.export("decoder.onnx")
+    model.joint.export("joiner.onnx")
+
+    meta_data = {
+        "vocab_size": model.decoder.vocab_size,  # not including the blank
+        "pred_rnn_layers": model.decoder.pred_rnn_layers,
+        "pred_hidden": model.decoder.pred_hidden,
+        "normalize_type": "",
+        "subsampling_factor": 4,
+        "model_type": "EncDecRNNTBPEModel",
+        "version": "1",
+        "model_author": "https://github.com/salute-developers/GigaAM",
+        "license": "https://github.com/salute-developers/GigaAM/blob/main/GigaAM%20License_NC.pdf",
+        "language": "Russian",
+        "is_giga_am": 1,
+    }
+    add_meta_data("encoder.onnx", meta_data)
+
+    quantize_dynamic(
+        model_input="encoder.onnx",
+        model_output="encoder.int8.onnx",
+        weight_type=QuantType.QUInt8,
+    )
+
+
+if __name__ == "__main__":
+    main()
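add_meta_data() above stores the key/value pairs in the encoder's ONNX metadata_props; onnxruntime exposes them again via get_modelmeta(), which is exactly how test-onnx-rnnt.py below picks up pred_rnn_layers and pred_hidden. A minimal sketch of reading them back:

# Read back the metadata written by add_meta_data("encoder.onnx", meta_data).
import onnxruntime as ort

sess = ort.InferenceSession("encoder.onnx", providers=["CPUExecutionProvider"])
meta = sess.get_modelmeta().custom_metadata_map  # Dict[str, str]; values are stored as strings
print(meta["vocab_size"], meta["pred_rnn_layers"], meta["pred_hidden"])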
joiner.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bbb7135757e3957f9f1473236d429bd44145d76209fb11e7b56e01c85e2dcb83
+size 2055583
run-rnnt.sh ADDED
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+# Copyright 2024 Xiaomi Corp. (authors: Fangjun Kuang)
+
+set -ex
+
+function install_nemo() {
+  curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
+  python3 get-pip.py
+
+  pip install torch==2.4.0 torchaudio==2.4.0 -f https://download.pytorch.org/whl/torch_stable.html
+
+  pip install -qq wget text-unidecode "matplotlib>=3.3.2" onnx onnxruntime pybind11 Cython einops kaldi-native-fbank soundfile librosa
+  pip install -qq ipython
+
+  # sudo apt-get install -q -y sox libsndfile1 ffmpeg python3-pip ipython
+
+  BRANCH='main'
+  python3 -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]
+
+  pip install numpy==1.26.4
+}
+
+function download_files() {
+  # curl -SL -O https://n-ws-q0bez.s3pd12.sbercloud.ru/b-ws-q0bez-jpv/GigaAM/rnnt_model_weights.ckpt
+  # curl -SL -O https://n-ws-q0bez.s3pd12.sbercloud.ru/b-ws-q0bez-jpv/GigaAM/rnnt_model_config.yaml
+  # curl -SL -O https://n-ws-q0bez.s3pd12.sbercloud.ru/b-ws-q0bez-jpv/GigaAM/example.wav
+  # curl -SL -O https://n-ws-q0bez.s3pd12.sbercloud.ru/b-ws-q0bez-jpv/GigaAM/long_example.wav
+  # curl -SL -O https://n-ws-q0bez.s3pd12.sbercloud.ru/b-ws-q0bez-jpv/GigaAM/tokenizer_all_sets.tar
+
+  curl -SL -O https://huggingface.co/csukuangfj/tmp-files/resolve/main/GigaAM/rnnt/rnnt_model_weights.ckpt
+  curl -SL -O https://huggingface.co/csukuangfj/tmp-files/resolve/main/GigaAM/rnnt/rnnt_model_config.yaml
+  curl -SL -O https://huggingface.co/csukuangfj/tmp-files/resolve/main/GigaAM/example.wav
+  curl -SL -O https://huggingface.co/csukuangfj/tmp-files/resolve/main/GigaAM/long_example.wav
+  curl -SL -O https://huggingface.co/csukuangfj/tmp-files/resolve/main/GigaAM%20License_NC.pdf
+  curl -SL -O https://huggingface.co/csukuangfj/tmp-files/resolve/main/GigaAM/rnnt/tokenizer_all_sets.tar
+  tar -xf tokenizer_all_sets.tar && rm tokenizer_all_sets.tar
+  ls -lh
+  echo "---"
+  ls -lh tokenizer_all_sets
+  echo "---"
+}
+
+install_nemo
+download_files
+
+python3 ./export-onnx-rnnt.py
+ls -lh
+python3 ./test-onnx-rnnt.py
+rm -v encoder.onnx
+ls -lh
test-onnx-rnnt.py ADDED
@@ -0,0 +1,270 @@
+#!/usr/bin/env python3
+# Copyright 2024 Xiaomi Corp. (authors: Fangjun Kuang)
+
+import argparse
+from pathlib import Path
+
+import kaldi_native_fbank as knf
+import librosa
+import numpy as np
+import onnxruntime as ort
+import soundfile as sf
+import torch
+
+
+def create_fbank():
+    opts = knf.FbankOptions()
+    opts.frame_opts.dither = 0
+    opts.frame_opts.remove_dc_offset = False
+    opts.frame_opts.preemph_coeff = 0
+    opts.frame_opts.window_type = "hann"
+
+    # Even though GigaAM uses 400 for fft, here we use 512
+    # since kaldi-native-fbank only support fft for power of 2.
+    opts.frame_opts.round_to_power_of_two = True
+
+    opts.mel_opts.low_freq = 0
+    opts.mel_opts.high_freq = 8000
+    opts.mel_opts.num_bins = 64
+
+    fbank = knf.OnlineFbank(opts)
+    return fbank
+
+
+def compute_features(audio, fbank):
+    assert len(audio.shape) == 1, audio.shape
+    fbank.accept_waveform(16000, audio)
+    ans = []
+    processed = 0
+    while processed < fbank.num_frames_ready:
+        ans.append(np.array(fbank.get_frame(processed)))
+        processed += 1
+    ans = np.stack(ans)
+    return ans
+
+
+def display(sess):
+    print("==========Input==========")
+    for i in sess.get_inputs():
+        print(i)
+    print("==========Output==========")
+    for i in sess.get_outputs():
+        print(i)
+
+
+"""
+==========Input==========
+NodeArg(name='audio_signal', type='tensor(float)', shape=['audio_signal_dynamic_axes_1', 64, 'audio_signal_dynamic_axes_2'])
+NodeArg(name='length', type='tensor(int64)', shape=['length_dynamic_axes_1'])
+==========Output==========
+NodeArg(name='outputs', type='tensor(float)', shape=['outputs_dynamic_axes_1', 768, 'outputs_dynamic_axes_2'])
+NodeArg(name='encoded_lengths', type='tensor(int64)', shape=['encoded_lengths_dynamic_axes_1'])
+==========Input==========
+NodeArg(name='targets', type='tensor(int32)', shape=['targets_dynamic_axes_1', 'targets_dynamic_axes_2'])
+NodeArg(name='target_length', type='tensor(int32)', shape=['target_length_dynamic_axes_1'])
+NodeArg(name='states.1', type='tensor(float)', shape=[1, 'states.1_dim_1', 320])
+NodeArg(name='onnx::LSTM_3', type='tensor(float)', shape=[1, 1, 320])
+==========Output==========
+NodeArg(name='outputs', type='tensor(float)', shape=['outputs_dynamic_axes_1', 320, 'outputs_dynamic_axes_2'])
+NodeArg(name='prednet_lengths', type='tensor(int32)', shape=['prednet_lengths_dynamic_axes_1'])
+NodeArg(name='states', type='tensor(float)', shape=[1, 'states_dynamic_axes_1', 320])
+NodeArg(name='74', type='tensor(float)', shape=[1, 'states_dynamic_axes_1', 320])
+==========Input==========
+NodeArg(name='encoder_outputs', type='tensor(float)', shape=['encoder_outputs_dynamic_axes_1', 768, 'encoder_outputs_dynamic_axes_2'])
+NodeArg(name='decoder_outputs', type='tensor(float)', shape=['decoder_outputs_dynamic_axes_1', 320, 'decoder_outputs_dynamic_axes_2'])
+==========Output==========
+NodeArg(name='outputs', type='tensor(float)', shape=['outputs_dynamic_axes_1', 'outputs_dynamic_axes_2', 'outputs_dynamic_axes_3', 513])
+"""
+
+
+class OnnxModel:
+    def __init__(
+        self,
+        encoder: str,
+        decoder: str,
+        joiner: str,
+    ):
+        self.init_encoder(encoder)
+        display(self.encoder)
+        self.init_decoder(decoder)
+        display(self.decoder)
+        self.init_joiner(joiner)
+        display(self.joiner)
+
+    def init_encoder(self, encoder):
+        session_opts = ort.SessionOptions()
+        session_opts.inter_op_num_threads = 1
+        session_opts.intra_op_num_threads = 1
+
+        self.encoder = ort.InferenceSession(
+            encoder,
+            sess_options=session_opts,
+            providers=["CPUExecutionProvider"],
+        )
+
+        meta = self.encoder.get_modelmeta().custom_metadata_map
+        self.normalize_type = meta["normalize_type"]
+        print(meta)
+
+        self.pred_rnn_layers = int(meta["pred_rnn_layers"])
+        self.pred_hidden = int(meta["pred_hidden"])
+
+    def init_decoder(self, decoder):
+        session_opts = ort.SessionOptions()
+        session_opts.inter_op_num_threads = 1
+        session_opts.intra_op_num_threads = 1
+
+        self.decoder = ort.InferenceSession(
+            decoder,
+            sess_options=session_opts,
+            providers=["CPUExecutionProvider"],
+        )
+
+    def init_joiner(self, joiner):
+        session_opts = ort.SessionOptions()
+        session_opts.inter_op_num_threads = 1
+        session_opts.intra_op_num_threads = 1
+
+        self.joiner = ort.InferenceSession(
+            joiner,
+            sess_options=session_opts,
+            providers=["CPUExecutionProvider"],
+        )
+
+    def get_decoder_state(self):
+        batch_size = 1
+        state0 = torch.zeros(self.pred_rnn_layers, batch_size, self.pred_hidden).numpy()
+        state1 = torch.zeros(self.pred_rnn_layers, batch_size, self.pred_hidden).numpy()
+        return state0, state1
+
+    def run_encoder(self, x: np.ndarray):
+        # x: (T, C)
+        x = torch.from_numpy(x)
+        x = x.t().unsqueeze(0)
+        # x: [1, C, T]
+        x_lens = torch.tensor([x.shape[-1]], dtype=torch.int64)
+
+        (encoder_out, out_len) = self.encoder.run(
+            [
+                self.encoder.get_outputs()[0].name,
+                self.encoder.get_outputs()[1].name,
+            ],
+            {
+                self.encoder.get_inputs()[0].name: x.numpy(),
+                self.encoder.get_inputs()[1].name: x_lens.numpy(),
+            },
+        )
+        # [batch_size, dim, T]
+        return encoder_out
+
+    def run_decoder(
+        self,
+        token: int,
+        state0: np.ndarray,
+        state1: np.ndarray,
+    ):
+        target = torch.tensor([[token]], dtype=torch.int32).numpy()
+        target_len = torch.tensor([1], dtype=torch.int32).numpy()
+
+        (
+            decoder_out,
+            decoder_out_length,
+            state0_next,
+            state1_next,
+        ) = self.decoder.run(
+            [
+                self.decoder.get_outputs()[0].name,
+                self.decoder.get_outputs()[1].name,
+                self.decoder.get_outputs()[2].name,
+                self.decoder.get_outputs()[3].name,
+            ],
+            {
+                self.decoder.get_inputs()[0].name: target,
+                self.decoder.get_inputs()[1].name: target_len,
+                self.decoder.get_inputs()[2].name: state0,
+                self.decoder.get_inputs()[3].name: state1,
+            },
+        )
+        return decoder_out, state0_next, state1_next
+
+    def run_joiner(
+        self,
+        encoder_out: np.ndarray,
+        decoder_out: np.ndarray,
+    ):
+        # encoder_out: [batch_size, dim, 1]
+        # decoder_out: [batch_size, dim, 1]
+        logit = self.joiner.run(
+            [
+                self.joiner.get_outputs()[0].name,
+            ],
+            {
+                self.joiner.get_inputs()[0].name: encoder_out,
+                self.joiner.get_inputs()[1].name: decoder_out,
+            },
+        )[0]
+        # logit: [batch_size, 1, 1, vocab_size]
+        return logit
+
+
+def main():
+    model = OnnxModel("encoder.int8.onnx", "decoder.onnx", "joiner.onnx")
+
+    id2token = dict()
+    with open("./tokens.txt", encoding="utf-8") as f:
+        for line in f:
+            t, idx = line.split()
+            id2token[int(idx)] = t
+
+    fbank = create_fbank()
+    audio, sample_rate = sf.read("./example.wav", dtype="float32", always_2d=True)
+    audio = audio[:, 0]  # only use the first channel
+    if sample_rate != 16000:
+        audio = librosa.resample(
+            audio,
+            orig_sr=sample_rate,
+            target_sr=16000,
+        )
+        sample_rate = 16000
+
+    tail_padding = np.zeros(sample_rate * 2)
+
+    audio = np.concatenate([audio, tail_padding])
+
+    blank = len(id2token) - 1
+    ans = [blank]
+    state0, state1 = model.get_decoder_state()
+    decoder_out, state0_next, state1_next = model.run_decoder(ans[-1], state0, state1)
+
+    features = compute_features(audio, fbank)
+    print("audio.shape", audio.shape)
+    print("features.shape", features.shape)
+
+    encoder_out = model.run_encoder(features)
+    # encoder_out: [batch_size, dim, T]
+    for t in range(encoder_out.shape[2]):
+        encoder_out_t = encoder_out[:, :, t : t + 1]
+        logits = model.run_joiner(encoder_out_t, decoder_out)
+        logits = torch.from_numpy(logits)
+        logits = logits.squeeze()
+        idx = torch.argmax(logits, dim=-1).item()
+        if idx != blank:
+            ans.append(idx)
+            state0 = state0_next
+            state1 = state1_next
+            decoder_out, state0_next, state1_next = model.run_decoder(
+                ans[-1], state0, state1
+            )
+
+    ans = ans[1:]  # remove the first blank
+    print(ans)
+    tokens = [id2token[i] for i in ans]
+    underline = "▁"
+    # underline = b"\xe2\x96\x81".decode()
+    text = "".join(tokens).replace(underline, " ").strip()
+    print("./example.wav")
+    print(text)
+
+
+if __name__ == "__main__":
+    main()
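Note that test-onnx-rnnt.py imports argparse and pathlib but decodes a hard-coded ./example.wav (the copy downloaded by run-rnnt.sh; the committed test files live under test_wavs/). A minimal sketch of how the wav path could instead be taken from the command line; get_args() is a hypothetical helper, and main() would use args.wav in place of the literal path:

# Hypothetical addition (not part of this commit): make the wav file a command-line argument.
import argparse


def get_args():
    parser = argparse.ArgumentParser(description="Test the exported GigaAM RNN-T ONNX models")
    parser.add_argument(
        "--wav",
        type=str,
        default="./test_wavs/example.wav",
        help="Path to the wav file to decode (resampled to 16 kHz if needed)",
    )
    return parser.parse_args()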
test_wavs/example.wav ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8aaaa18a5098d7c6de0595ae7ac1e64cacd0d4022af3595213bdaf23be77e69
+size 361324
test_wavs/long_example.wav ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1868ece0195dfa9fc2394be24865d1133c8452a8292a397db45ba8c3ed9e01e3
+size 2280044
tokens.txt ADDED
@@ -0,0 +1,513 @@
+<unk> 0
+▁в 1
+▁п 2
+то 3
+▁с 4
+▁н 5
+▁к 6
+▁д 7
+▁о 8
+ст 9
+но 10
+▁и 11
+▁т 12
+▁м 13
+ра 14
+ен 15
+ро 16
+▁по 17
+▁на 18
+ет 19
+ли 20
+ак 21
+ер 22
+го 23
+ть 24
+▁б 25
+▁ч 26
+ль 27
+на 28
+ем 29
+▁э 30
+ри 31
+от 32
+ва 33
+ни 34
+▁з 35
+ла 36
+▁у 37
+ко 38
+ка 39
+ло 40
+▁не 41
+ре 42
+▁то 43
+ся 44
+во 45
+ны 46
+▁е 47
+▁это 48
+▁ко 49
+▁что 50
+▁а 51
+ле 52
+да 53
+ти 54
+де 55
+▁вот 56
+ки 57
+ди 58
+ам 59
+▁я 60
+те 61
+▁про 62
+▁как 63
+▁так 64
+▁да 65
+ру 66
+не 67
+ви 68
+ени 69
+▁мо 70
+же 71
+та 72
+сть 73
+▁ра 74
+чи 75
+▁во 76
+▁за 77
+▁со 78
+▁бы 79
+лю 80
+ма 81
+се 82
+ча 83
+ля 84
+▁до 85
+хо 86
+▁г 87
+ци 88
+▁ну 89
+ста 90
+▁ф 91
+▁при 92
+сь 93
+▁вы 94
+▁го 95
+▁се 96
+че 97
+▁но 98
+ми 99
+▁об 100
+▁от 101
+ве 102
+▁де 103
+за 104
+до 105
+▁все 106
+жи 107
+ры 108
+си 109
+ше 110
+сто 111
+▁те 112
+▁ка 113
+сти 114
+▁мы 115
+му 116
+ду 117
+ере 118
+лу 119
+по 120
+ще 121
+ку 122
+▁есть 123
+ой 124
+ень 125
+ну 126
+ца 127
+льно 128
+ско 129
+зо 130
+ски 131
+▁бу 132
+ши 133
+▁ма 134
+ется 135
+ня 136
+мо 137
+▁кото 138
+со 139
+▁ре 140
+ги 141
+▁бо 142
+▁он 143
+▁ни 144
+жа 145
+▁х 146
+жно 147
+га 148
+па 149
+бо 150
+ту 151
+ный 152
+енно 153
+▁из 154
+вы 155
+пи 156
+▁раз 157
+дца 158
+ные 159
+ют 160
+▁ли 161
+▁там 162
+ств 163
+вер 164
+ке 165
+▁ва 166
+▁зна 167
+мот 168
+са 169
+▁хо 170
+дет 171
+гда 172
+ять 173
+▁пре 174
+ме 175
+▁мне 176
+ери 177
+▁ви 178
+ты 179
+ения 180
+▁ро 181
+ря 182
+ение 183
+чно 184
+зи 185
+▁оч 186
+▁си 187
+вать 188
+▁которы 189
+▁ми 190
+▁ты 191
+▁фи 192
+ша 193
+каза 194
+▁под 195
+▁сам 196
+лько 197
+клю 198
+▁ме 199
+щи 200
+бы 201
+▁ве 202
+би 203
+▁очень 204
+▁пос 205
+тому 206
+вет 207
+дцать 208
+зы 209
+фи 210
+ных 211
+сли 212
+▁же 213
+бя 214
+ная 215
+▁пере 216
+▁сто 217
+▁ст 218
+▁два 219
+▁гово 220
+▁они 221
+▁для 222
+▁ин 223
+ете 224
+▁лю 225
+▁па 226
+нима 227
+пе 228
+▁сво 229
+ба 230
+▁три 231
+шь 232
+▁мен 233
+ут 234
+▁ку 235
+ска 236
+ной 237
+▁нас 238
+▁бе 239
+емь 240
+ного 241
+▁если 242
+бе 243
+це 244
+▁пер 245
+▁че 246
+▁вклю 247
+▁ста 248
+ство 249
+ему 250
+мен 251
+▁са 252
+▁сей 253
+▁будет 254
+▁ки 255
+час 256
+пу 257
+▁еще 258
+▁смот 259
+▁сейчас 260
+▁ча 261
+ды 262
+▁его 263
+дин 264
+рем 265
+▁эти 266
+▁ру 267
+▁уже 268
+▁она 269
+▁кон 270
+про 271
+гра 272
+сси 273
+▁дру 274
+▁меня 275
+ции 276
+ходи 277
+▁мож 278
+рет 279
+лы 280
+ное 281
+▁или 282
+▁пра 283
+ят 284
+▁жи 285
+▁таки 286
+ние 287
+бот 288
+ться 289
+▁включи 290
+ров 291
+▁су 292
+тельно 293
+▁ба 294
+чу 295
+об 296
+▁ш 297
+▁просто 298
+су 299
+мер 300
+ей 301
+▁филь 302
+▁когда 303
+▁сказа 304
+▁зак 305
+мы 306
+ая 307
+ран 308
+▁ше 309
+тель 310
+▁потому 311
+чески 312
+▁боль 313
+он 314
+▁вам 315
+▁вос 316
+дь 317
+▁од 318
+ую 319
+▁рас 320
+▁дол 321
+▁чтобы 322
+▁чет 323
+▁пока 324
+▁дев 325
+лся 326
+▁было 327
+деся 328
+нов 329
+▁пять 330
+став 331
+▁пред 332
+▁каки 333
+ое 334
+лен 335
+▁можно 336
+▁зде 337
+▁один 338
+▁здесь 339
+тся 340
+▁говори 341
+йте 342
+▁которые 343
+ния 344
+том 345
+гу 346
+▁воз 347
+▁ис 348
+▁тебя 349
+▁най 350
+▁сери 351
+тов 352
+▁ле 353
+▁ду 354
+ор 355
+▁нет 356
+зыва 357
+дела 358
+зу 359
+тив 360
+десят 361
+ха 362
+▁му 363
+ской 364
+▁работ 365
+▁дела 366
+лове 367
+ция 368
+лет 369
+▁может 370
+▁ди 371
+ным 372
+ственно 373
+бер 374
+бу 375
+▁врем 376
+▁им 377
+▁ско 378
+▁оп 379
+▁где 380
+▁сезо 381
+▁только 382
+год 383
+▁пи 384
+▁теле 385
+жен 386
+▁четы 387
+▁челове 388
+▁тоже 389
+рой 390
+лее 391
+▁ц 392
+▁та 393
+▁после 394
+▁дж 395
+▁росси 396
+▁быть 397
+▁сдела 398
+ром 399
+сы 400
+▁фильм 401
+▁хот 402
+лись 403
+▁сло 404
+ства 405
+▁такой 406
+▁этот 407
+▁ю 408
+рав 409
+стви 410
+ал 411
+лось 412
+▁смотре 413
+ков 414
+▁семь 415
+▁коне 416
+▁этом 417
+▁ту 418
+▁полу 419
+▁кра 420
+▁тем 421
+при 422
+ную 423
+нал 424
+▁обра 425
+ом 426
+▁том 427
+шке 428
+▁был 429
+сно 430
+▁их 431
+реть 432
+кт 433
+годня 434
+▁четыре 435
+чит 436
+▁этого 437
+▁ска 438
+▁чем 439
+▁сегодня 440
+▁хоро 441
+▁конечно 442
+▁кар 443
+▁тв 444
+▁инт 445
+вой 446
+сте 447
+лей 448
+▁ти 449
+▁даже 450
+▁надо 451
+сот 452
+▁смотрешке 453
+▁пу 454
+ном 455
+▁вс 456
+рова 457
+▁какие 458
+лем 459
+ент 460
+лют 461
+вый 462
+▁сбер 463
+▁сказать 464
+ский 465
+она 466
+▁нам 467
+ского 468
+▁того 469
+нь 470
+▁ее 471
+▁ха 472
+▁какой 473
+▁стра 474
+▁каж 475
+▁ 476
+о 477
+е 478
+а 479
+т 480
+и 481
+н 482
+с 483
+р 484
+в 485
+л 486
+к 487
+м 488
+д 489
+у 490
+п 491
+ь 492
+я 493
+ы 494
+ч 495
+б 496
+з 497
+г 498
+й 499
+ж 500
+х 501
+ю 502
+ш 503
+э 504
+ц 505
+ф 506
+щ 507
+n 508
+ъ 509
+a 510
+ё 511
+<blk> 512
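tokens.txt contains 513 entries: the 512 BPE tokens from model.joint.vocabulary plus the <blk> entry appended by export-onnx-rnnt.py, matching the joiner's output dimension of 513 shown in the NodeArg dump above. A minimal sketch of a consistency check over the file:

# Sanity-check tokens.txt against the assumptions made by the scripts above:
# 513 entries, contiguous ids 0..512, and <blk> as the last (blank) token.
id2token = {}
with open("./tokens.txt", encoding="utf-8") as f:
    for line in f:
        t, idx = line.split()
        id2token[int(idx)] = t

assert len(id2token) == 513, len(id2token)
assert sorted(id2token) == list(range(513))
assert id2token[len(id2token) - 1] == "<blk>", id2token[len(id2token) - 1]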