import gradio as gr
import json
import random
import torch
import wavio
from tqdm import tqdm
from huggingface_hub import snapshot_download
from models import AudioDiffusion, DDPMScheduler
from audioldm.audio.stft import TacotronSTFT
from audioldm.variational_autoencoder import AutoencoderKL
from pydub import AudioSegment
from gradio import Markdown
import spaces

max_64_bit_int = 2**63 - 1

# Automatic device detection
if torch.cuda.is_available():
    device_type = "cuda"
    device_selection = "cuda:0"
else:
    device_type = "cpu"
    device_selection = "cpu"

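# Thin wrapper that bundles the Tango2 VAE, STFT front-end, latent diffusion model and DDPM scheduler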
class Tango:
    def __init__(self, name="declare-lab/tango2-full", device=device_selection):
        
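        # Download the checkpoint files from the Hugging Face Hub (cached locally after the first call)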
        path = snapshot_download(repo_id=name)
        
        vae_config = json.load(open("{}/vae_config.json".format(path)))
        stft_config = json.load(open("{}/stft_config.json".format(path)))
        main_config = json.load(open("{}/main_config.json".format(path)))
        
        self.vae = AutoencoderKL(**vae_config).to(device)
        self.stft = TacotronSTFT(**stft_config).to(device)
        self.model = AudioDiffusion(**main_config).to(device)
        
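        # Load the pretrained weights into each sub-model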
        vae_weights = torch.load("{}/pytorch_model_vae.bin".format(path), map_location=device)
        stft_weights = torch.load("{}/pytorch_model_stft.bin".format(path), map_location=device)
        main_weights = torch.load("{}/pytorch_model_main.bin".format(path), map_location=device)
        
        self.vae.load_state_dict(vae_weights)
        self.stft.load_state_dict(stft_weights)
        self.model.load_state_dict(main_weights)

        print ("Successfully loaded checkpoint from:", name)
        
        self.vae.eval()
        self.stft.eval()
        self.model.eval()
        
        self.scheduler = DDPMScheduler.from_pretrained(main_config["scheduler_name"], subfolder="scheduler")
        
    def chunks(self, lst, n):
        # Yield successive n-sized chunks from a list
        for i in range(0, len(lst), n):
            yield lst[i:i + n]
        
    def generate(self, prompt, steps=100, guidance=3, samples=3, disable_progress=True):
        # Generate audio for a single prompt string
        with torch.no_grad():
            latents = self.model.inference([prompt], self.scheduler, steps, guidance, samples, disable_progress=disable_progress)
            mel = self.vae.decode_first_stage(latents)
            wave = self.vae.decode_to_waveform(mel)
        return wave
    
    def generate_for_batch(self, prompts, steps=200, guidance=3, samples=1, batch_size=8, disable_progress=True):
        # Generate audio for a list of prompt strings
        outputs = []
        for k in tqdm(range(0, len(prompts), batch_size)):
            batch = prompts[k: k+batch_size]
            with torch.no_grad():
                latents = self.model.inference(batch, self.scheduler, steps, guidance, samples, disable_progress=disable_progress)
                mel = self.vae.decode_first_stage(latents)
                wave = self.vae.decode_to_waveform(mel)
                outputs += [item for item in wave]
        if samples == 1:
            return outputs
        else:
            return list(self.chunks(outputs, samples))

# Initialize TANGO

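# Load on CPU first, then move each sub-model to the detected device (GPU when available)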
tango = Tango(device="cpu")
tango.vae.to(device_type)
tango.stft.to(device_type)
tango.model.to(device_type)
    
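# On a Hugging Face ZeroGPU Space, this decorator requests a GPU for up to 120 seconds per call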
@spaces.GPU(duration=120)
def gradio_generate(
    prompt,
    output_format="wav",
    output_number=3,
    steps=100,
    guidance=3,
    is_randomize_seed=True,
    seed=123
):
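    # Draw a fresh seed when randomization is requested so repeated runs differ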
    if is_randomize_seed:
        seed = random.randint(0, max_64_bit_int)

    random.seed(seed)
    torch.manual_seed(seed)

    output_wave = tango.generate(prompt, steps, guidance, output_number)
    # output_filename = f"{prompt.replace(' ', '_')}_{steps}_{guidance}"[:250] + ".wav"

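    # Write the first generated waveform as 16-bit PCM WAV at 16 kHz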
    output_filename_1 = "tmp1.wav"
    wavio.write(output_filename_1, output_wave[0], rate = 16000, sampwidth = 2)

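    # Optionally re-encode to MP3 with pydub (requires ffmpeg)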
    if (output_format == "mp3"):
        AudioSegment.from_wav("tmp1.wav").export("tmp1.mp3", format = "mp3")
        output_filename_1 = "tmp1.mp3"

    if (2 <= output_number):
        output_filename_2 = "tmp2.wav"
        wavio.write(output_filename_2, output_wave[1], rate = 16000, sampwidth = 2)

        if (output_format == "mp3"):
            AudioSegment.from_wav("tmp2.wav").export("tmp2.mp3", format = "mp3")
            output_filename_2 = "tmp2.mp3"
    else:
        output_filename_2 = None

    if (output_number == 3):
        output_filename_3 = "tmp3.wav"
        wavio.write(output_filename_3, output_wave[2], rate = 16000, sampwidth = 2)

        if (output_format == "mp3"):
            AudioSegment.from_wav("tmp3.wav").export("tmp3.mp3", format = "mp3")
            output_filename_3 = "tmp3.mp3"
    else:
        output_filename_3 = None

    return [
        gr.update(value = output_filename_1),
        gr.update(value = output_filename_2, visible = (2 <= output_number)),
        gr.update(value = output_filename_3, visible = (output_number == 3))
    ]

# description_text = """
# <p><a href="https://huggingface.co/spaces/declare-lab/tango/blob/main/app.py?duplicate=true"> <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> For faster inference without waiting in queue, you may duplicate the space and upgrade to a GPU in the settings. <br/><br/>
# Generate audio using TANGO by providing a text prompt.
# <br/><br/>Limitations: TANGO is trained on the small AudioCaps dataset so it may not generate good audio \
# samples related to concepts that it has not seen in training (e.g. singing). For the same reason, TANGO \
# is not always able to finely control its generations over textual control prompts. For example, \
# the generations from TANGO for prompts Chopping tomatoes on a wooden table and Chopping potatoes \
# on a metal table are very similar. \
# <br/><br/>We are currently training another version of TANGO on larger datasets to enhance its generalization, \
# compositional and controllable generation ability.
# <br/><br/>We recommend using a guidance scale of 3. The default number of steps is set to 100. More steps generally lead to better quality of generated audios but will take longer.
# <br/><br/>
# <h1> ChatGPT-enhanced audio generation</h1>
# <br/>
# As TANGO consists of an instruction-tuned LLM, it is able to process complex sound descriptions allowing us to provide more detailed instructions to improve the generation quality.
# For example, ``A boat is moving on the sea'' vs ``The sound of the water lapping against the hull of the boat or splashing as you move through the waves''. The latter is obtained by prompting ChatGPT to explain the sound generated when a boat moves on the sea.
# Using this ChatGPT-generated description of the sound, TANGO provides superior results.
# <p/>
# """
description_text = """
<p><a href="https://huggingface.co/spaces/declare-lab/tango2-full/blob/main/app.py?duplicate=true"> <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> For faster inference without waiting in queue, you may duplicate the space and upgrade to a GPU in the settings. <br/><br/>
Generate audio using Tango2 by providing a text prompt. Tango2 builds on Tango and was trained on <a href="https://huggingface.co/datasets/declare-lab/audio-alpaca">Audio-alpaca</a>.
<br/><br/> This is the demo of Tango2 for text-to-audio generation: <a href="https://arxiv.org/abs/2404.09956">Read our paper</a>.
<p/>
"""
# Gradio input and output components
input_text = gr.Textbox(lines=2, label="Prompt")
output_format = gr.Radio(label = "Output format", info = "The file you can download", choices = ["mp3", "wav"], value = "wav")
output_number = gr.Slider(label = "Number of generations", info = "1, 2 or 3 output files", minimum = 1, maximum = 3, value = 3, step = 1, interactive = True)
output_audio_1 = gr.Audio(label="Generated Audio #1/3", type="filepath")
output_audio_2 = gr.Audio(label="Generated Audio #2/3", type="filepath")
output_audio_3 = gr.Audio(label="Generated Audio #3/3", type="filepath")
denoising_steps = gr.Slider(minimum=10, maximum=200, value=100, step=1, label="Steps", interactive=True)
guidance_scale = gr.Slider(minimum=1, maximum=10, value=3, step=0.1, label="Guidance Scale", interactive=True)
randomize_seed = gr.Checkbox(label = "\U0001F3B2 Randomize seed", value = True, info = "If checked, result is always different")
seed = gr.Slider(minimum = 0, maximum = max_64_bit_int, step = 1, randomize = True, label = "Seed")

# Gradio interface
gr_interface = gr.Interface(
    fn=gradio_generate,
    inputs=[input_text, output_format, output_number, denoising_steps, guidance_scale, randomize_seed, seed],
    outputs=[output_audio_1, output_audio_2, output_audio_3],
    title="Tango 2: Aligning Diffusion-based Text-to-Audio Generations through Direct Preference Optimization",
    description=description_text,
    allow_flagging="never",
    examples=[
        ["Quiet speech and then and airplane flying away", "wav", 1, 100, 3, False, 123],
        ["A bicycle peddling on dirt and gravel followed by a man speaking then laughing", "wav", 1, 100, 3, False, 123],
        ["Ducks quack and water splashes with some animal screeching in the background", "wav", 1, 100, 3, False, 123],
        ["Describe the sound of the ocean", "wav", 1, 100, 3, False, 123],
        ["A woman and a baby are having a conversation", "wav", 1, 100, 3, False, 123],
        ["A man speaks followed by a popping noise and laughter", "wav", 1, 100, 3, False, 123],
        ["A cup is filled from a faucet", "wav", 1, 100, 3, False, 123],
        ["An audience cheering and clapping", "wav", 1, 100, 3, False, 123],
        ["Rolling thunder with lightning strikes", "wav", 1, 100, 3, False, 123],
        ["A dog barking and a cat mewing and a racing car passes by", "wav", 1, 100, 3, False, 123],
        ["Gentle water stream, birds chirping and sudden gun shot", "wav", 1, 100, 3, False, 123],
        ["A man talking followed by a goat baaing then a metal gate sliding shut as ducks quack and wind blows into a microphone.", "wav", 1, 100, 3, False, 123],
        ["A dog barking", "wav", 1, 100, 3, False, 123],
        ["A cat meowing", "wav", 1, 100, 3, False, 123],
        ["Wooden table tapping sound while water pouring", "wav", 1, 100, 3, False, 123],
        ["Applause from a crowd with distant clicking and a man speaking over a loudspeaker", "wav", 1, 100, 3, False, 123],
        ["two gunshots followed by birds flying away while chirping", "wav", 1, 100, 3, False, 123],
        ["Whistling with birds chirping", "wav", 1, 100, 3, False, 123],
        ["A person snoring", "wav", 1, 100, 3, False, 123],
        ["Motor vehicles are driving with loud engines and a person whistles", "wav", 1, 100, 3, False, 123],
        ["People cheering in a stadium while thunder and lightning strikes", "wav", 1, 100, 3, False, 123],
        ["A helicopter is in flight", "wav", 1, 100, 3, False, 123],
        ["A dog barking and a man talking and a racing car passes by", "wav", 1, 100, 3, False, 123],
    ],
    cache_examples="lazy", # Turn on to cache.
)

# Launch Gradio app
gr_interface.queue(10).launch()