soujanyaporia committed on
Commit
f372519
1 Parent(s): 75ffc27

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -15
app.py CHANGED
@@ -52,13 +52,13 @@ class Tango:
52
  for i in range(0, len(lst), n):
53
  yield lst[i:i + n]
54
 
55
- def generate(self, prompt, steps=100, guidance=3, samples=1, disable_progress=True):
56
  """ Genrate audio for a single prompt string. """
57
  with torch.no_grad():
58
  latents = self.model.inference([prompt], self.scheduler, steps, guidance, samples, disable_progress=disable_progress)
59
  mel = self.vae.decode_first_stage(latents)
60
  wave = self.vae.decode_to_waveform(mel)
61
- return wave[0]
62
 
63
  def generate_for_batch(self, prompts, steps=200, guidance=3, samples=1, batch_size=8, disable_progress=True):
64
  """ Genrate audio for a list of prompt strings. """
@@ -82,15 +82,41 @@ tango.vae.to(device_type)
82
  tango.stft.to(device_type)
83
  tango.model.to(device_type)
84
 
85
- @spaces.GPU(duration=60)
86
  def gradio_generate(prompt, steps, guidance):
87
  output_wave = tango.generate(prompt, steps, guidance)
88
  # output_filename = f"{prompt.replace(' ', '_')}_{steps}_{guidance}"[:250] + ".wav"
89
- output_filename = "temp.wav"
90
- wavio.write(output_filename, output_wave, rate=16000, sampwidth=2)
91
-
92
- return output_filename
93
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
94
  description_text = """
95
  <p><a href="https://huggingface.co/spaces/declare-lab/tango/blob/main/app.py?duplicate=true"> <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> For faster inference without waiting in queue, you may duplicate the space and upgrade to a GPU in the settings. <br/><br/>
96
  Generate audio using TANGO by providing a text prompt.
@@ -101,15 +127,11 @@ For example, ``A boat is moving on the sea'' vs ``The sound of the water lapping
101
  Using this ChatGPT-generated description of the sound, TANGO provides superior results.
102
  <p/>
103
  """
104
- # description_text = """
105
- # <p><a href="https://huggingface.co/spaces/declare-lab/tango2/blob/main/app.py?duplicate=true"> <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> For faster inference without waiting in queue, you may duplicate the space and upgrade to a GPU in the settings. <br/><br/>
106
- # Generate audio using Tango2 by providing a text prompt. Tango2 was built from Tango and was trained on <a href="https://huggingface.co/datasets/declare-lab/audio-alpaca">Audio-alpaca</a>
107
- # <br/><br/> This is the demo for Tango2 for text to audio generation: <a href="https://arxiv.org/abs/2404.09956">Read our paper.</a>
108
- # <p/>
109
- # """
110
  # Gradio input and output components
111
  input_text = gr.Textbox(lines=2, label="Prompt")
112
- output_audio = gr.Audio(label="Generated Audio", type="filepath")
 
 
113
  denoising_steps = gr.Slider(minimum=100, maximum=200, value=100, step=1, label="Steps", interactive=True)
114
  guidance_scale = gr.Slider(minimum=1, maximum=10, value=3, step=0.1, label="Guidance Scale", interactive=True)
115
 
@@ -117,7 +139,7 @@ guidance_scale = gr.Slider(minimum=1, maximum=10, value=3, step=0.1, label="Guid
117
  gr_interface = gr.Interface(
118
  fn=gradio_generate,
119
  inputs=[input_text, denoising_steps, guidance_scale],
120
- outputs=[output_audio],
121
  title="Tango 2: Aligning Diffusion-based Text-to-Audio Generations through Direct Preference Optimization",
122
  description=description_text,
123
  allow_flagging=False,
 
52
  for i in range(0, len(lst), n):
53
  yield lst[i:i + n]
54
 
55
+ def generate(self, prompt, steps=100, guidance=3, samples=3, disable_progress=True):
56
  """ Genrate audio for a single prompt string. """
57
  with torch.no_grad():
58
  latents = self.model.inference([prompt], self.scheduler, steps, guidance, samples, disable_progress=disable_progress)
59
  mel = self.vae.decode_first_stage(latents)
60
  wave = self.vae.decode_to_waveform(mel)
61
+ return wave
62
 
63
  def generate_for_batch(self, prompts, steps=200, guidance=3, samples=1, batch_size=8, disable_progress=True):
64
  """ Genrate audio for a list of prompt strings. """
 
82
  tango.stft.to(device_type)
83
  tango.model.to(device_type)
84
 
85
@spaces.GPU(duration=120)
def gradio_generate(prompt, steps, guidance):
    """Generate three audio samples for `prompt` and write each to a wav file.

    Args:
        prompt: text description of the desired audio.
        steps: number of denoising steps passed through to `tango.generate`.
        guidance: classifier-free guidance scale.

    Returns:
        A list of three wav file paths, one per generated sample, in order.
    """
    # tango.generate defaults to samples=3, so output_wave carries 3 waveforms.
    output_wave = tango.generate(prompt, steps, guidance)
    # NOTE(review): the fixed filenames tmp1_.wav..tmp3_.wav are racy if two
    # requests run concurrently — consider tempfile.NamedTemporaryFile.
    # Kept as-is here to preserve existing behavior.
    output_filenames = []
    for i in range(3):
        filename = f"tmp{i + 1}_.wav"
        wavio.write(filename, output_wave[i], rate=16000, sampwidth=2)
        output_filenames.append(filename)
    return output_filenames
100
+
101
+ # description_text = """
102
+ # <p><a href="https://huggingface.co/spaces/declare-lab/tango/blob/main/app.py?duplicate=true"> <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> For faster inference without waiting in queue, you may duplicate the space and upgrade to a GPU in the settings. <br/><br/>
103
+ # Generate audio using TANGO by providing a text prompt.
104
+ # <br/><br/>Limitations: TANGO is trained on the small AudioCaps dataset so it may not generate good audio \
105
+ # samples related to concepts that it has not seen in training (e.g. singing). For the same reason, TANGO \
106
+ # is not always able to finely control its generations over textual control prompts. For example, \
107
+ # the generations from TANGO for prompts Chopping tomatoes on a wooden table and Chopping potatoes \
108
+ # on a metal table are very similar. \
109
+ # <br/><br/>We are currently training another version of TANGO on larger datasets to enhance its generalization, \
110
+ # compositional and controllable generation ability.
111
+ # <br/><br/>We recommend using a guidance scale of 3. The default number of steps is set to 100. More steps generally lead to better quality of generated audios but will take longer.
112
+ # <br/><br/>
113
+ # <h1> ChatGPT-enhanced audio generation</h1>
114
+ # <br/>
115
+ # As TANGO consists of an instruction-tuned LLM, it is able to process complex sound descriptions allowing us to provide more detailed instructions to improve the generation quality.
116
+ # For example, ``A boat is moving on the sea'' vs ``The sound of the water lapping against the hull of the boat or splashing as you move through the waves''. The latter is obtained by prompting ChatGPT to explain the sound generated when a boat moves on the sea.
117
+ # Using this ChatGPT-generated description of the sound, TANGO provides superior results.
118
+ # <p/>
119
+ # """
120
  description_text = """
121
  <p><a href="https://huggingface.co/spaces/declare-lab/tango/blob/main/app.py?duplicate=true"> <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> For faster inference without waiting in queue, you may duplicate the space and upgrade to a GPU in the settings. <br/><br/>
122
  Generate audio using TANGO by providing a text prompt.
 
127
  Using this ChatGPT-generated description of the sound, TANGO provides superior results.
128
  <p/>
129
  """
 
 
 
 
 
 
130
  # Gradio input and output components
131
  input_text = gr.Textbox(lines=2, label="Prompt")
132
+ output_audio_1 = gr.Audio(label="Generated Audio #1/3", type="filepath")
133
+ output_audio_2 = gr.Audio(label="Generated Audio #2/3", type="filepath")
134
+ output_audio_3 = gr.Audio(label="Generated Audio #3/3", type="filepath")
135
  denoising_steps = gr.Slider(minimum=100, maximum=200, value=100, step=1, label="Steps", interactive=True)
136
  guidance_scale = gr.Slider(minimum=1, maximum=10, value=3, step=0.1, label="Guidance Scale", interactive=True)
137
 
 
139
  gr_interface = gr.Interface(
140
  fn=gradio_generate,
141
  inputs=[input_text, denoising_steps, guidance_scale],
142
+ outputs=[output_audio_1, output_audio_2, output_audio_3],
143
  title="Tango 2: Aligning Diffusion-based Text-to-Audio Generations through Direct Preference Optimization",
144
  description=description_text,
145
  allow_flagging=False,