lauraibnz committed on
Commit
f0a6291
1 Parent(s): ac7b00d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -6
app.py CHANGED
@@ -15,10 +15,7 @@ pipe = AudioLDMControlNetPipeline.from_pretrained("cvssp/audioldm-m-full", contr
15
  pipe = pipe.to(device)
16
 
17
  def predict(midi_file=None, prompt="", negative_prompt="", audio_length_in_s=5, controlnet_conditioning_scale=1, num_inference_steps=20, guess_mode=False):
18
- if midi_file:
19
- midi_file = midi_file.name
20
- else:
21
- midi_file = "test.mid"
22
  midi = PrettyMIDI(midi_file)
23
  audio = pipe(
24
  prompt,
@@ -35,9 +32,9 @@ demo = gr.Interface(fn=predict, inputs=[
35
  gr.File(file_types=[".mid"]),
36
  "text",
37
  gr.Textbox(label="negative prompt"),
38
- gr.Slider(0, 30, value=5, step=5, label="duration (seconds)"),
39
  gr.Slider(0.0, 1.0, value=1.0, step=0.1, label="conditioning scale"),
40
  gr.Slider(0, 50, value=20, step=0.1, label="inference steps"),
41
  gr.Checkbox(label="guess mode")
42
- ], outputs="audio")
43
  demo.launch()
 
15
  pipe = pipe.to(device)
16
 
17
  def predict(midi_file=None, prompt="", negative_prompt="", audio_length_in_s=5, controlnet_conditioning_scale=1, num_inference_steps=20, guess_mode=False):
18
+ midi_file = midi_file.name
 
 
 
19
  midi = PrettyMIDI(midi_file)
20
  audio = pipe(
21
  prompt,
 
32
  gr.File(file_types=[".mid"]),
33
  "text",
34
  gr.Textbox(label="negative prompt"),
35
+ gr.Slider(0, 30, value=10, step=5, label="duration (seconds)"),
36
  gr.Slider(0.0, 1.0, value=1.0, step=0.1, label="conditioning scale"),
37
  gr.Slider(0, 50, value=20, step=0.1, label="inference steps"),
38
  gr.Checkbox(label="guess mode")
39
+ ], outputs="audio", examples=["test.mid", "piano"], cache_examples=True)
40
  demo.launch()