lauraibnz committed
Commit a43e51c
Parent: 5eec126

Update app.py

Files changed (1): app.py (+5 −2)
app.py CHANGED
@@ -13,8 +13,9 @@ else:
 controlnet = ControlNetModel.from_pretrained("lauraibnz/midi-audioldm", torch_dtype=torch_dtype)
 pipe = AudioLDMControlNetPipeline.from_pretrained("cvssp/audioldm-m-full", controlnet=controlnet, torch_dtype=torch_dtype)
 pipe = pipe.to(device)
+generator = torch.Generator(device)
 
-def predict(midi_file=None, prompt="", negative_prompt="", audio_length_in_s=5, controlnet_conditioning_scale=1, num_inference_steps=20, guess_mode=False):
+def predict(midi_file=None, prompt="", negative_prompt="", audio_length_in_s=5, seed=0, controlnet_conditioning_scale=1, num_inference_steps=20, guess_mode=False):
     midi_file = midi_file.name
     midi = PrettyMIDI(midi_file)
     audio = pipe(
@@ -25,6 +26,7 @@ def predict(midi_file=None, prompt="", negative_prompt="", audio_length_in_s=5,
         num_inference_steps=num_inference_steps,
         controlnet_conditioning_scale=float(controlnet_conditioning_scale),
         guess_mode=guess_mode,
+        generator=generator.manual_seed(int(seed)),
     )
     return (16000, audio.audios.T)
 
@@ -33,8 +35,9 @@ demo = gr.Interface(fn=predict, inputs=[
     "text",
     gr.Textbox(label="negative prompt"),
     gr.Slider(0, 30, value=5, step=5, label="duration (seconds)"),
+    gr.Number(value=0, label="seed"),
     gr.Slider(0.0, 1.0, value=1.0, step=0.1, label="conditioning scale"),
     gr.Slider(0, 50, value=20, step=0.1, label="inference steps"),
     gr.Checkbox(label="guess mode")
-], outputs="audio", examples=[["S00.mid", "piano", "", 10, 1.0, 20, False]], cache_examples=True, title="🎹 MIDI-AudioLDM", description="MIDI-AudioLDM is a MIDI-conditioned text-to-audio model based on the project [AudioLDM](https://huggingface.co/spaces/haoheliu/audioldm-text-to-audio-generation). The model has been conditioned using the ControlNet architecture and has been developed within Hugging Face’s [🧨 Diffusers](https://huggingface.co/docs/diffusers/) framework. Once trained, MIDI-AudioLDM accepts a MIDI file and a text prompt as inputs and returns an audio file, which is an interpretation of the MIDI based on the given text description. This enables detailed control over different musical aspects such as notes, mood and timbre.", theme=gr.themes.Base())
+], outputs="audio", examples=[["S00.mid", "piano", "", 10, 0, 1.0, 20, False]], cache_examples=True, title="🎹 MIDI-AudioLDM", description="MIDI-AudioLDM is a MIDI-conditioned text-to-audio model based on the project [AudioLDM](https://huggingface.co/spaces/haoheliu/audioldm-text-to-audio-generation). The model has been conditioned using the ControlNet architecture and has been developed within Hugging Face’s [🧨 Diffusers](https://huggingface.co/docs/diffusers/) framework. Once trained, MIDI-AudioLDM accepts a MIDI file and a text prompt as inputs and returns an audio file, which is an interpretation of the MIDI based on the given text description. This enables detailed control over different musical aspects such as notes, mood and timbre.", theme=gr.themes.Default(primary_hue="blue"))
 demo.launch()
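
For context, the seeding pattern this commit introduces is the usual Diffusers idiom: torch.Generator.manual_seed() reseeds the generator and returns it, so chaining it inline in the pipe(...) call makes the diffusion sampling deterministic, and the same MIDI file, prompt, and seed reproduce the same audio. A minimal sketch of the mechanism, independent of the pipeline (the tensor shape is arbitrary):

import torch

# Reseeding before each draw restarts the random stream, so two
# draws from the same seed are bit-identical; that is the property
# the new seed input exposes to users.
device = "cuda" if torch.cuda.is_available() else "cpu"
generator = torch.Generator(device)

a = torch.randn(8, generator=generator.manual_seed(0), device=device)
b = torch.randn(8, generator=generator.manual_seed(0), device=device)
assert torch.equal(a, b)

Note also that inserting a new input component (the gr.Number for the seed) requires each row in examples to gain a value at the matching position, which is why the example row grows from [..., 10, 1.0, 20, False] to [..., 10, 0, 1.0, 20, False].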