fffiloni committed on
Commit
8f90609
1 Parent(s): d1d7297

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -2
app.py CHANGED
@@ -109,6 +109,17 @@ def get_audioldm(prompt):
109
  audio_result = extract_audio(result)
110
  return audio_result
111
 
 
 
 
 
 
 
 
 
 
 
 
112
 
113
  import re
114
  import torch
@@ -157,6 +168,9 @@ def infer(image_in, chosen_model):
157
  elif chosen_model == "AudioLDM-2" :
158
  gr.Info("Now calling AudioLDM-2 for music...")
159
  music_o = get_magnet(musical_prompt)
 
 
 
160
 
161
  return musical_prompt, music_o
162
 
@@ -185,11 +199,12 @@ with gr.Blocks(css=css) as demo:
185
  type = "filepath",
186
  elem_id = "image-in"
187
  )
188
- chosen_model = gr.Radio(
189
  label = "Choose a model",
190
  choices = [
191
  "MAGNet",
192
- "AudioLDM-2"
 
193
  ],
194
  value = "MAGNet"
195
  )
 
109
  audio_result = extract_audio(result)
110
  return audio_result
111
 
112
def get_riffusion(prompt):
    """Generate music from a text prompt via the remote Riffusion Gradio Space.

    Sends *prompt* to the spectrogram-to-music Space and returns the path of
    the generated audio file.

    Args:
        prompt: Musical description to synthesize.

    Returns:
        The second element of the Space's prediction tuple — the generated
        audio file path.
    """
    riffusion_client = Client(
        "https://fffiloni-spectrogram-to-music.hf.space/--replicas/1qwjx/"
    )
    prediction = riffusion_client.predict(
        prompt,  # 'Musical prompt' Textbox
        "",      # 'Negative prompt' Textbox (unused)
        "",      # 'parameter_4' Audio filepath (unused)
        10,      # 'Duration in seconds' Slider (range 5-10)
        api_name="/predict",
    )
    print(prediction)
    # The Space returns a tuple; index 1 holds the audio result.
    return prediction[1]
123
 
124
  import re
125
  import torch
 
168
  elif chosen_model == "AudioLDM-2" :
169
  gr.Info("Now calling AudioLDM-2 for music...")
170
  music_o = get_magnet(musical_prompt)
171
+ elif chosen_model == "Riffusion" :
172
+ gr.Info("Now calling Riffusion for music...")
173
+ music_o = get_riffusion(musical_prompt)
174
 
175
  return musical_prompt, music_o
176
 
 
199
  type = "filepath",
200
  elem_id = "image-in"
201
  )
202
+ chosen_model = gr.Dropdown(
203
  label = "Choose a model",
204
  choices = [
205
  "MAGNet",
206
+ "AudioLDM-2",
207
+ "Riffusion"
208
  ],
209
  value = "MAGNet"
210
  )