import os
from tsmnet import Stretcher
import gradio as gr
from gradio import processing_utils
# import torch
import torchaudio
import yt_dlp

model_root = './weights'
yt_dl_dir = 'yt-audio'
available_models = ['speech', 'pop-music', 'classical-music']
working_sr = 22050

def prepare_models():
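    """Load a Stretcher model for each available weight file under `model_root`."""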
    return {
        weight: Stretcher(os.path.join(model_root, f'{weight}.pt'))
        for weight in available_models
    }
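
# Illustrative sketch of using a Stretcher directly (assumes the call signature used in run() below):
#   stretcher = Stretcher(os.path.join(model_root, 'speech.pt'))
#   stretched = stretcher(waveform, 0.5)  # waveform resampled to `working_sr`, played back at 0.5x speed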

def download_yt_audio(url):
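    """Download the audio track of a YouTube video as WAV and return the local file path (cached by video id)."""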
    # purge outdated audio files (older than 1 day)
    os.system(f'find {yt_dl_dir} -type f -mtime +1 -delete')

    ydl_opts = {
        'format': 'm4a/bestaudio/best',
        'postprocessors': [{  # Extract audio using ffmpeg
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'wav',
        }],
        'outtmpl': f"{yt_dl_dir}/%(id)s.%(ext)s"
    }

    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
        try:
            ydl.cache.remove()
            meta = ydl.extract_info(url, download=False)
            audio_file = os.path.join(yt_dl_dir, meta['id'] + '.wav')
            if not os.path.isfile(audio_file):
                ydl.download([url])

        except yt_dlp.DownloadError as error:
            raise gr.Error(f'Failed to download from YouTube: {error}')

    return audio_file


def prepare_audio_file(rec, audio_file, yt_url):
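    """Return the first available audio source: microphone recording, uploaded file, or YouTube URL."""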
    if rec is not None:
        return rec
    if audio_file is not None:
        return audio_file
    if yt_url != '':
        return download_yt_audio(yt_url)
    raise gr.Error('No audio found!')


def run(rec, audio_file, yt_url, speed, model, start_time, end_time):
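    """Gradio callback: load the chosen audio source, resample it to `working_sr`,
    trim it to [start_time, end_time] seconds, and time-stretch it with the selected model."""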
    audio_file = prepare_audio_file(rec, audio_file, yt_url)

    x, sr = torchaudio.load(audio_file)
    x = torchaudio.transforms.Resample(orig_freq=sr, new_freq=working_sr)(x)
    sr = working_sr

    x = x[:, int(start_time * sr):int(end_time * sr)]

    if speed != 1:
        x = models[model](x, speed).cpu()

    # save to a separate file so the cached source audio (e.g. a downloaded YouTube track) is not overwritten
    out_file = os.path.splitext(audio_file)[0] + '-tsm.wav'
    torchaudio.save(out_file, x, sr)
    return processing_utils.audio_from_file(out_file)


# @@@@@@@ Start of the program @@@@@@@@

models = prepare_models()
os.makedirs(yt_dl_dir, exist_ok=True)

with gr.Blocks() as demo:
    gr.Markdown('# TSM-Net')
    gr.Markdown('---')
    with gr.Row():
        with gr.Column():
            with gr.Tab('From microphone'):
                rec_box = gr.Audio(label='Recording', source='microphone', type='filepath')
            with gr.Tab('From file'):
                audio_file_box = gr.Audio(label='Audio sample', type='filepath')
            with gr.Tab('From YouTube'):
                yt_url_box = gr.Textbox(label='YouTube URL', placeholder='https://youtu.be/q6EoRBvdVPQ')

            rec_box.change(lambda: [None] * 2, outputs=[audio_file_box, yt_url_box])
            audio_file_box.change(lambda: [None] * 2, outputs=[rec_box, yt_url_box])
            yt_url_box.input(lambda: [None] * 2, outputs=[rec_box, audio_file_box])

            speed_box = gr.Slider(label='Playback speed', minimum=0, maximum=2, value=1)
            with gr.Accordion('Fine-grained settings', open=False):
                with gr.Tab('Trim audio sample (sec)'):
                    # gr.Markdown('### Trim audio sample (sec)')
                    with gr.Row():
                        start_time_box = gr.Number(label='Start', value=0)
                        end_time_box = gr.Number(label='End', value=60)
                model_box = gr.Dropdown(label='Model weight', choices=available_models, value=available_models[0])

            submit_btn = gr.Button('Submit')

        with gr.Column():
            with gr.Accordion('Hint', open=False):
                gr.Markdown('You can find more settings under **Fine-grained settings**.')
                gr.Markdown('- Waiting too long? Try adjusting the start/end timestamps')
                gr.Markdown('- Low audio quality? Try switching to a more suitable model weight')
            outputs = gr.Audio(label='Output')

        submit_btn.click(fn=run, inputs=[
            rec_box,
            audio_file_box,
            yt_url_box,
            speed_box,
            model_box,
            start_time_box,
            end_time_box,
        ], outputs=outputs)

    with gr.Accordion('Read more ...', open=False):
        gr.Markdown('---')
        gr.Markdown(
            'We propose a novel approach to time-scale modification of audio '
            'signals. Traditional methods rely on the framing technique, while '
            'the spectral approach uses the short-time Fourier transform to '
            'preserve frequency content during temporal stretching. TSM-Net, our '
            'neural-network model, encodes the raw audio into a high-level latent '
            'representation that we call the Neuralgram, in which one vector '
            'represents 1024 audio samples. It is inspired by the framing '
            'technique but addresses its clipping artifacts. Since the Neuralgram '
            'is a two-dimensional matrix of real values, we can apply existing '
            'image resizing techniques to it and decode the result with our '
            'neural decoder to obtain time-scaled audio. Both the encoder and '
            'decoder are trained with GANs, which show fair generalization '
            'ability on scaled Neuralgrams. Our method yields few artifacts and '
            'opens new possibilities for research in modern time-scale '
            'modification. Please find more details in our '
            '<a href="https://arxiv.org/abs/2210.17152" target="_blank">paper</a>.'
        )

demo.queue(4)
demo.launch(server_name='0.0.0.0')