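"""Gradio demo for DeepAFx-ST: style transfer of audio effects with
differentiable signal processing. Loads two pretrained checkpoints from the
Hugging Face Hub (speech, trained on LibriTTS; music, trained on Jamendo) and
restyles an input recording to match the production style of a reference."""
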
import gradio as gr
import numpy as np
import resampy
import torch
import torchaudio
from huggingface_hub import hf_hub_download

from deepafx_st.system import System

# Load the two pretrained style-transfer systems from the Hugging Face Hub:
# one checkpoint for speech (LibriTTS) and one for music (Jamendo).
system_speech = System.load_from_checkpoint(
    hf_hub_download("nateraw/deepafx-st-libritts-autodiff", "lit_model.ckpt"), batch_size=1
).eval()
system_music = System.load_from_checkpoint(
    hf_hub_download("nateraw/deepafx-st-jamendo-autodiff", "lit_model.ckpt"), batch_size=1
).eval()
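# hf_hub_download caches the checkpoints locally, so repeated launches reuse
# the downloaded files instead of fetching them again.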

gpu = torch.cuda.is_available()

if gpu:
    system_speech.to("cuda")
    system_music.to("cuda")


def process(input_path, reference_path, model):
    """Restyle `input_path` to match the production style of `reference_path`."""
    system = system_speech if model == "speech" else system_music

    # load audio data; torchaudio returns a (channels, samples) tensor,
    # so keep only the first channel to avoid concatenating stereo
    # channels end to end when the signal is flattened below
    x, x_sr = torchaudio.load(input_path)
    r, r_sr = torchaudio.load(reference_path)
    x = x[0:1, :]
    r = r[0:1, :]

    # the models operate at 24 kHz; resample the input if needed
    if x_sr != 24000:
        print("Resampling to 24000 Hz...")
        x_24000 = torch.tensor(resampy.resample(x.view(-1).numpy(), x_sr, 24000))
        x_24000 = x_24000.view(1, -1)
    else:
        x_24000 = x

    if r_sr != 24000:
        print("Resampling to 24000 Hz...")
        r_24000 = torch.tensor(resampy.resample(r.view(-1).numpy(), r_sr, 24000))
        r_24000 = r_24000.view(1, -1)
    else:
        r_24000 = r

    # crop to the first 5 seconds, then peak normalize to -12 dBFS
    # (scale the peak to 10^(-12/20) ≈ 0.25; assumes non-silent audio)
    x_24000 = x_24000[0:1, : 24000 * 5]
    x_24000 /= x_24000.abs().max()
    x_24000 *= 10 ** (-12 / 20.0)
    x_24000 = x_24000.view(1, 1, -1)

    # same 5-second crop and -12 dBFS peak normalization for the reference
    r_24000 = r_24000[0:1, : 24000 * 5]
    r_24000 /= r_24000.abs().max()
    r_24000 *= 10 ** (-12 / 20.0)
    r_24000 = r_24000.view(1, 1, -1)

    if gpu:
        x_24000 = x_24000.to("cuda")
        r_24000 = r_24000.to("cuda")

    with torch.no_grad():
        # run style transfer; only the processed audio y_hat is used below
        y_hat, p, e = system(x_24000, r_24000)

    # peak normalize the processed audio
    y_hat = y_hat.view(1, -1)
    y_hat /= y_hat.abs().max()

    # Squeeze to (T,), convert to numpy, and scale to the int16 range
    out_audio = (32767 * y_hat).squeeze(0).detach().cpu().numpy().astype(np.int16)

    return 24000, out_audio
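
# A minimal sketch of calling process() directly, outside the Gradio UI
# (the file paths are hypothetical, and soundfile is an extra dependency):
#
#   sr, audio = process("input.wav", "reference.wav", "speech")
#   import soundfile as sf
#   sf.write("styled.wav", audio, sr)  # writes 16-bit PCM from the int16 array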


gr.Interface(
    fn=process,
    inputs=[
        gr.Audio(type="filepath", label="Input Audio"),
        gr.Audio(type="filepath", label="Reference Audio"),
        gr.Dropdown(["speech", "music"], value="speech", label="Model"),
    ],
    outputs="audio",
    examples=[
        [
            hf_hub_download("nateraw/examples", "voice_raw.wav", repo_type="dataset", cache_dir="./data"),
            hf_hub_download("nateraw/examples", "voice_produced.wav", repo_type="dataset", cache_dir="./data"),
            "speech",
        ],
        [
            hf_hub_download("nateraw/examples", "nys_of_mind.wav", repo_type="dataset", cache_dir="./data"),
            hf_hub_download("nateraw/examples", "world_is_yours_highpass.wav", repo_type="dataset", cache_dir="./data"),
            "music",
        ],
    ],
    title="DeepAFx-ST",
    description=(
        "Gradio demo of DeepAFx-ST, which performs style transfer of audio effects with differentiable signal"
        " processing. Upload an input recording and a reference recording whose style you want to transfer, or pick"
        " one of the examples. Read more at the link below."
    ),
    article=(
        "<div style='text-align: center;'><a href='https://github.com/adobe-research/DeepAFx-ST' target='_blank'>Github Repo</a>"
        " <center><img src='https://visitor-badge.glitch.me/badge?page_id=nateraw_deepafx-st' alt='visitor"
        " badge'></center></div>"
    ),
    allow_flagging="never",
    cache_examples=False
).launch()