Update app.py
Browse files
app.py
CHANGED
@@ -1,4 +1,4 @@
|
|
1 |
-
import numpy as np
|
2 |
|
3 |
import streamlit as st
|
4 |
import librosa
|
@@ -28,8 +28,8 @@ def load_model():
|
|
28 |
return session, onnx_model, input_names, output_names
|
29 |
|
30 |
def inference(re_im, session, onnx_model, input_names, output_names):
|
31 |
-
inputs = {input_names[i]: np.zeros([d.dim_value for d in _input.type.tensor_type.shape.dim],
|
32 |
-
dtype=np.float32)
|
33 |
for i, _input in enumerate(onnx_model.graph.input)
|
34 |
}
|
35 |
|
@@ -42,7 +42,7 @@ def inference(re_im, session, onnx_model, input_names, output_names):
|
|
42 |
inputs[input_names[3]] = mlp_state
|
43 |
output_audio.append(out)
|
44 |
|
45 |
-
output_audio = torch.tensor(np.concatenate(output_audio, 0))
|
46 |
output_audio = output_audio.permute(1, 0, 2).contiguous()
|
47 |
output_audio = torch.view_as_complex(output_audio)
|
48 |
output_audio = torch.istft(output_audio, window, stride, window=hann)
|
@@ -51,16 +51,16 @@ def inference(re_im, session, onnx_model, input_names, output_names):
|
|
51 |
def visualize(hr, lr, recon, sr):
|
52 |
sr = sr
|
53 |
window_size = 1024
|
54 |
-
window = np.hanning(window_size)
|
55 |
|
56 |
stft_hr = librosa.core.spectrum.stft(hr, n_fft=window_size, hop_length=512, window=window)
|
57 |
-
stft_hr = 2 * np.abs(stft_hr) / np.sum(window)
|
58 |
|
59 |
stft_lr = librosa.core.spectrum.stft(lr, n_fft=window_size, hop_length=512, window=window)
|
60 |
-
stft_lr = 2 * np.abs(stft_lr) / np.sum(window)
|
61 |
|
62 |
stft_recon = librosa.core.spectrum.stft(recon, n_fft=window_size, hop_length=512, window=window)
|
63 |
-
stft_recon = 2 * np.abs(stft_recon) / np.sum(window)
|
64 |
|
65 |
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharey=True, sharex=True, figsize=(16, 12))
|
66 |
ax1.title.set_text('Оригинальный сигнал')
|
|
|
1 |
+
import numpy as numpy
|
2 |
|
3 |
import streamlit as st
|
4 |
import librosa
|
|
|
28 |
return session, onnx_model, input_names, output_names
|
29 |
|
30 |
def inference(re_im, session, onnx_model, input_names, output_names):
|
31 |
+
inputs = {input_names[i]: numpy.zeros([d.dim_value for d in _input.type.tensor_type.shape.dim],
|
32 |
+
dtype=numpy.float32)
|
33 |
for i, _input in enumerate(onnx_model.graph.input)
|
34 |
}
|
35 |
|
|
|
42 |
inputs[input_names[3]] = mlp_state
|
43 |
output_audio.append(out)
|
44 |
|
45 |
+
output_audio = torch.tensor(numpy.concatenate(output_audio, 0))
|
46 |
output_audio = output_audio.permute(1, 0, 2).contiguous()
|
47 |
output_audio = torch.view_as_complex(output_audio)
|
48 |
output_audio = torch.istft(output_audio, window, stride, window=hann)
|
|
|
51 |
def visualize(hr, lr, recon, sr):
|
52 |
sr = sr
|
53 |
window_size = 1024
|
54 |
+
window = numpy.hanning(window_size)
|
55 |
|
56 |
stft_hr = librosa.core.spectrum.stft(hr, n_fft=window_size, hop_length=512, window=window)
|
57 |
+
stft_hr = 2 * numpy.abs(stft_hr) / numpy.sum(window)
|
58 |
|
59 |
stft_lr = librosa.core.spectrum.stft(lr, n_fft=window_size, hop_length=512, window=window)
|
60 |
+
stft_lr = 2 * numpy.abs(stft_lr) / numpy.sum(window)
|
61 |
|
62 |
stft_recon = librosa.core.spectrum.stft(recon, n_fft=window_size, hop_length=512, window=window)
|
63 |
+
stft_recon = 2 * numpy.abs(stft_recon) / numpy.sum(window)
|
64 |
|
65 |
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharey=True, sharex=True, figsize=(16, 12))
|
66 |
ax1.title.set_text('Оригинальный сигнал')
|