import gradio as gr
import pretty_midi
import matplotlib.pyplot as plt
import numpy as np
import soundfile as sf
import cv2
import imageio
import sys
import subprocess
import os
import torch
from model import init_ldm_model
from model.model_sdf import Diffpro_SDF
from model.sampler_sdf import SDFSampler
import pickle
from train.train_params import params_chord_lsh_cond
from generation.gen_utils import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_path = 'results/test/model_with_chord_lsh_cond_and_rhythm_onset_and_null_sep/chkpts/weights_best.pt'
chord_list = list(CHORD_DICTIONARY.keys())
def get_shape(file_path):
if file_path.endswith('.jpg'):
img = cv2.imread(file_path)
return img.shape # (height, width, channels)
elif file_path.endswith('.mp4'):
vid = imageio.get_reader(file_path)
return vid.get_meta_data()['size'] # (width, height)
else:
raise ValueError("Unsupported file type")
# Function to convert MIDI to WAV
def midi_to_wav(midi, output_file):
# Synthesize the waveform from the MIDI using pretty_midi
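    # Note: midi.fluidsynth() requires the pyfluidsynth package; pretty_midi
    # uses its bundled default SoundFont when none is given.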
audio_data = midi.fluidsynth()
# Write the waveform to a WAV file
sf.write(output_file, audio_data, samplerate=44100)
def update_musescore_image(selected_prompt):
    # Map the selected prompt (e.g. "example 1") to its score image path
    idx = selected_prompt.split()[-1]
    return f"samples/diy_examples/example{idx}/example{idx}.jpg"
# Generate music, either from a preset example or from a DIY background condition
def generate_music(prompt, tempo, num_samples=1, mode="example", rhythm_control="Yes"):
ldm_model = init_ldm_model(params_chord_lsh_cond, debug_mode=False)
model = Diffpro_SDF.load_trained(ldm_model, model_path).to(device)
sampler = SDFSampler(model.ldm, 64, 64, is_autocast=False, device=device, debug_mode=False)
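    # NOTE: the LDM and sampler are rebuilt on every call; caching them at module
    # scope would avoid reloading the checkpoint for each request.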
if mode=="example":
if prompt == "example 1":
background_condition = np.load("samples/diy_examples/example1/example1.npy")
tempo=70
elif prompt == "example 2":
background_condition = np.load("samples/diy_examples/example2/example2.npy")
elif prompt == "example 3":
background_condition = np.load("samples/diy_examples/example3/example3.npy")
elif prompt == "example 4":
background_condition = np.load("samples/diy_examples/example4/example4.npy")
background_condition = np.tile(background_condition, (num_samples,1,1,1))
background_condition = torch.Tensor(background_condition).to(device)
else:
background_condition = np.tile(prompt, (num_samples,1,1,1))
background_condition = torch.Tensor(background_condition).to(device)
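    # Layout of background_condition channels (as used below): [0:2] rhythm/onset
    # chord roll, [2:4] chord roll stored negated (recovered as -x-1), and, when
    # present, [4:6] lead-sheet (melody) roll.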
if rhythm_control!="Yes":
background_condition[:,0:2] = background_condition[:,2:4]
# generate samples
output_x = sampler.generate(background_cond=background_condition, batch_size=num_samples,
same_noise_all_measure=False, X0EditFunc=X0EditFunc,
use_classifier_free_guidance=True, use_lsh=True, reduce_extra_notes=False,
rhythm_control=rhythm_control)
output_x = torch.clamp(output_x, min=0, max=1)
output_x = output_x.cpu().numpy()
# save samples
for i in range(num_samples):
full_roll = extend_piano_roll(output_x[i]) # accompaniment roll
full_chd_roll = extend_piano_roll(-background_condition[i,2:4,:,:].cpu().numpy()-1) # chord roll
full_lsh_roll = None
if background_condition.shape[1]>=6:
if background_condition[:,4:6,:,:].min()>=0:
full_lsh_roll = extend_piano_roll(background_condition[i,4:6,:,:].cpu().numpy())
midi_file = piano_roll_to_midi(full_roll, full_chd_roll, full_lsh_roll, bpm=tempo)
filename = f"output_{i}.mid"
save_midi(midi_file, filename)
        # Render the MIDI file to WAV with TiMidity++ (must be installed and on PATH)
        subprocess.run(['timidity', f'output_{i}.mid', '-Ow', '-o', f'output_{i}.wav'])
return 'output_0.mid', 'output_0.wav', midi_file
# Function to visualize MIDI notes
def visualize_midi(midi):
# Get piano roll from MIDI
roll = midi.get_piano_roll(fs=100)
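    # get_piano_roll returns a (128, T) array sampled at fs frames per second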
# Plot the piano roll
plt.figure(figsize=(10, 4))
plt.imshow(roll, aspect='auto', origin='lower', cmap='gray_r', interpolation='nearest')
plt.title("Piano Roll")
plt.xlabel("Time")
plt.ylabel("Pitch")
plt.colorbar()
    # Save the plot as an image and close the figure so figures don't accumulate
    output_image_path = "piano_roll.png"
    plt.savefig(output_image_path)
    plt.close()
    return output_image_path
def plot_rhythm(rhythm_str, label):
if rhythm_str=="null rhythm":
return None
fig, ax = plt.subplots(figsize=(6, 2))
    # Keep only the first 16 characters (one per 16th note)
rhythm_str = rhythm_str[:16]
# Convert string to a list of 0s and 1s
rhythm = [0 if bit=="0" else 1 for bit in rhythm_str]
# Define the x axis for the 16 sixteenth notes
x = list(range(1, 17)) # 1 to 16 sixteenth notes
# Plot each note (1 as filled circle, 0 as empty circle)
for i, bit in enumerate(rhythm):
if bit == 1:
ax.scatter(i + 1, 1, color='black', s=100, label="Note" if i == 0 else "")
else:
ax.scatter(i + 1, 1, edgecolor='black', facecolor='none', s=100, label="Rest" if i == 0 else "")
    # Separate groups of four 16th notes with vertical dashed lines
    for i in range(4, 16, 4):
        ax.axvline(x=i + 0.5, color='grey', linestyle='--')
# Remove solid vertical grid lines by setting the grid off
ax.grid(False)
# Formatting the plot
ax.set_xlim(0.5, 16.5)
ax.set_ylim(0.8, 1.2)
ax.set_xticks(x)
ax.set_yticks([])
ax.set_xlabel("16th Notes")
ax.set_title("Rhythm Pattern")
fig.savefig(f'samples/diy_examples/rhythm_plot_{label}.png')
plt.close(fig)
return f'samples/diy_examples/rhythm_plot_{label}.png'
def adjust_rhythm_string(s):
# Truncate if longer than 16 characters
if len(s) > 16:
return s[:16]
# Pad with zeros if shorter than 16 characters
else:
return s.ljust(16, '0')
def rhythm_string_to_array(s):
# Ensure the string is 16 characters long
s = s[:16].ljust(16, '0') # Truncate or pad with '0' to make it 16 characters
    # Convert to a 0/1 onset array, treating any non-'0' character as an onset
    arr = np.array([1 if char != '0' else 0 for char in s], dtype=int)
    # Scale onsets by a fixed metrical-weight pattern (heavier on strong beats)
    arr = arr * np.array([3, 1, 2, 1, 3, 1, 2, 1, 3, 1, 2, 1, 3, 1, 2, 1])
return arr
# Gradio main function
def generate_from_example(prompt):
    # Any value other than "Yes" disables rhythm control inside generate_music
    midi_output, audio_output, midi = generate_music(prompt, tempo=80, mode="example", rhythm_control=False)
piano_roll_image = visualize_midi(midi)
return audio_output, piano_roll_image
def generate_diy(m1_chord, m2_chord, m3_chord, m4_chord,
m1_rhythm, m2_rhythm, m3_rhythm, m4_rhythm, tempo):
print("\n\n\n",m1_chord,type(m1_chord), "\n\n\n")
test_chd_roll = np.concatenate([np.tile(CHORD_DICTIONARY[m1_chord], (16, 1)),
np.tile(CHORD_DICTIONARY[m2_chord], (16, 1)),
np.tile(CHORD_DICTIONARY[m3_chord], (16, 1)),
np.tile(CHORD_DICTIONARY[m4_chord], (16, 1))])
rhythms = [m1_rhythm, m2_rhythm, m3_rhythm, m4_rhythm]
chd_roll = np.concatenate([test_chd_roll[np.newaxis,:,:], test_chd_roll[np.newaxis,:,:]], axis=0)
chd_roll = circular_extend(chd_roll)
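    # Chord channels are stored negated; generate_music recovers them as -x-1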
chd_roll = -chd_roll-1
real_chd_roll = chd_roll
melody_roll = -np.ones_like(chd_roll)
if "null rhythm" not in rhythms:
rhythm_full = []
for i in range(len(rhythms)):
rhythm = adjust_rhythm_string(rhythms[i])
rhythm = rhythm_string_to_array(rhythm)
rhythm_full.append(rhythm)
rhythm_full = np.concatenate(rhythm_full, axis=0)
onset_roll = test_chd_roll*rhythm_full[:, np.newaxis]
sustain_roll = np.zeros_like(onset_roll)
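        # Wherever the rhythm has no onset, hold the chord as a sustain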
no_onset_pos = np.all(onset_roll == 0, axis=-1)
sustain_roll[no_onset_pos] = test_chd_roll[no_onset_pos]
real_chd_roll = np.concatenate([onset_roll[np.newaxis,:,:], sustain_roll[np.newaxis,:,:]], axis=0)
real_chd_roll = circular_extend(real_chd_roll)
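    # Final condition: [0:2] rhythm-shaped chord (onset/sustain), [2:4] negated
    # chord roll, [4:6] melody placeholder (-1 everywhere means no melody given)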
background_condition = np.concatenate([real_chd_roll, chd_roll, melody_roll], axis=0)
midi_output, audio_output, midi = generate_music(background_condition, tempo, mode="diy")
piano_roll_image = visualize_midi(midi)
return midi_output, audio_output, piano_roll_image
# Prompt list
prompt_list = ["example 1", "example 2", "example 3", "example 4"]
rhythm_list = ["null rhythm", "1010101010101010", "1011101010111010","1111101010111010","1010001010101010","1010101000101010"]
custom_css = """
.custom-row1 {
background-color: #fdebd0;
padding: 10px;
border-radius: 5px;
}
.custom-row2 {
background-color: #d1f2eb;
padding: 10px;
border-radius: 5px;
}
.custom-grey {
background-color: #f0f0f0;
padding: 10px;
border-radius: 5px;
}
.custom-purple {
background-color: #d7bde2;
padding: 10px;
border-radius: 5px;
}
.audio_waveform-container {
display: none !important;
}
"""
with gr.Blocks(css=custom_css) as demo:
gr.Markdown("# <div style='text-align: center;font-size:40px'> Efficient Fine-Grained Guidance for Diffusion-Based Symbolic Music Generation <div style='text-align: center;'>")
gr.Markdown("<span style='font-size:25px;'> We introduce **Fine-Grained Guidance (FG)**, an efficient approach for symbolic music generation using **diffusion models**. Our method enhances guidance through:\
\n &emsp; (1) Fine-grained conditioning during training,\
\n &emsp; (2) Fine-grained control during the diffusion sampling process.\
\n In particular, **sampling control** ensures tonal accuracy in every generated sample, allowing our model to produce music with high precision, consistent rhythmic patterns,\
    and even stylistic variations that align with user intent.</span>")
gr.Markdown("<span style='font-size:25px;color: red'> At the bottom of this page, we provide an interactive space for you to try our model by yourself! <span>")
gr.Markdown("\n\n\n")
gr.Markdown("# 1. Accompaniment Generation given Melody and Chord")
gr.Markdown("<span style='font-size:20px;'> In each example, the left column displays the melody provided as inputs to the model.\
The right column showcases music samples generated by the model.<span>")
with gr.Column(elem_classes="custom-row1"):
gr.Markdown("## Example 1")
with gr.Row():
with gr.Column():
gr.Markdown("<span style='font-size:20px;'> With the following melody as condition <span>")
example1_mel = gr.Audio(value="samples/diy_examples/example1/example_1_mel.wav", label="Melody", scale = 5)
with gr.Column():
gr.Markdown("<span style='font-size:20px;'> Generated Accompaniments <span>")
example1_audio = gr.Audio(value="samples/diy_examples/example1/sample1.wav", label="Generated Accompaniment", scale = 5)
with gr.Column(elem_classes="custom-row2"):
gr.Markdown("## Example 2")
with gr.Row():
with gr.Column():
gr.Markdown("<span style='font-size:20px;'> With the following melody as condition <span>")
example1_mel = gr.Audio(value="samples/diy_examples/example2/example_2_mel.wav", label="Melody", scale = 5)
with gr.Column():
gr.Markdown("<span style='font-size:20px;'> Generated Accompaniments <span>")
example1_audio = gr.Audio(value="samples/diy_examples/example2/sample1.wav", label="Generated Accompaniment", scale = 5)
with gr.Column(elem_classes="custom-row1"):
gr.Markdown("## Example 3")
with gr.Row():
with gr.Column():
gr.Markdown("<span style='font-size:20px;'> With the following melody as condition <span>")
example1_mel = gr.Audio(value="samples/diy_examples/example3/example_3_mel.wav", label="Melody", scale = 5)
with gr.Column():
gr.Markdown("<span style='font-size:20px;'> Generated Accompaniments <span>")
example1_audio = gr.Audio(value="samples/diy_examples/example3/sample1.wav", label="Generated Accompaniment", scale = 5)
with gr.Column(elem_classes="custom-row2"):
gr.Markdown("## Example 4")
with gr.Row():
with gr.Column():
gr.Markdown("<span style='font-size:20px;'> With the following melody as condition <span>")
example1_mel = gr.Audio(value="samples/diy_examples/example4/example_4_mel.wav", label="Melody", scale = 5)
with gr.Column():
gr.Markdown("<span style='font-size:20px;'> Generated Accompaniments <span>")
example1_audio = gr.Audio(value="samples/diy_examples/example4/sample1.wav", label="Generated Accompaniment", scale = 5)
gr.HTML("<div style='height: 50px;'></div>")
gr.Markdown("# \n\n\n")
gr.Markdown("# 2. Style-Controlled Music Generation")
gr.Markdown("<span style='font-size:20px;'>Our approach enables controllable stylization in music generation. The sampling control is able to\
ensure that all generated notes strictly adhere to the target musical style's scale.\
This allows the model to generate music in specific styles — even those that were not present in \
the training data.<span>")
gr.Markdown("<span style='font-size:20px;'> Below, we demonstrate several examples of style-controlled music generation for:\
\n &emsp; (1) Dorian Mode: (with scale being A-B-C-D-E-F#-G);\
\n &emsp; (2) Chinese Style: (with scale being C-D-E-G-A). <span>")
with gr.Column(elem_classes="custom-row1"):
gr.Markdown("## Dorian Mode")
gr.Markdown("<span style='font-size:20px;'> The following are two examples generated by our method <span>")
with gr.Row():
with gr.Column(elem_classes="custom-grey"):
gr.Markdown("<span style='font-size:20px;'> Example 1 <span>")
example1_mel = gr.Audio(value="samples/different_styles/dorian_1.wav", scale = 5)
with gr.Column(elem_classes="custom-grey"):
gr.Markdown("<span style='font-size:20px;'> Example 2 <span>")
example1_audio = gr.Audio(value="samples/different_styles/dorian_2.wav", scale = 5)
with gr.Column(elem_classes="custom-row2"):
gr.Markdown("## Chinese Style")
gr.Markdown("<span style='font-size:20px;'> The following are two examples generated by our method <span>")
with gr.Row():
with gr.Column(elem_classes="custom-grey"):
gr.Markdown("<span style='font-size:20px;'> Example 1 <span>")
example1_mel = gr.Audio(value="samples/different_styles/chinese_1.wav", scale = 5)
with gr.Column(elem_classes="custom-grey"):
gr.Markdown("<span style='font-size:20px;'> Example 2 <span>")
example1_audio = gr.Audio(value="samples/different_styles/chinese_2.wav", scale = 5)
gr.HTML("<div style='height: 50px;'></div>")
gr.Markdown("\n\n\n")
gr.Markdown("# 3. Demonstrating the Effectiveness of Sampling Control by Comparison")
gr.Markdown("<span style='font-size:20px;'> We demonstrate the impact of sampling control in an **accompaniment generation** task, given a melody and chord progression.\
\n Each example generates accompaniments with and without sampling control using the same random seed, ensuring that the two results are comparable.\
\n Sampling control effectively removes or replaces harmonically conflicting notes, ensuring tonal consistency.\
\n We provide music sheets and audio files for both versions.<span>")
gr.Markdown("<span style='font-size:20px;'> Comparison of the results indicates that sampling control not only eliminates out-of-key notes but also enhances \
the overall coherence and harmonic consistency of the accompaniments.\
This highlights the effectiveness of our approach in maintaining musical coherence. <span>")
with gr.Column(elem_classes="custom-row1"):
gr.Markdown("## Example 1")
with gr.Row(elem_classes="custom-grey"):
gr.Markdown("<span style='font-size:20px;'> With pre-defined melody and chord as follows<span>")
            with gr.Column(scale=2, min_width=10):
                gr.Markdown("Melody Sheet")
                example1_mel_sheet = gr.Image(value="samples/control_vs_uncontrol/example_1_mel_chd.jpg", label="Music Sheet of Melody and Chord", scale=1, min_width=10)
            with gr.Column(scale=1, min_width=10):
                gr.Markdown("Melody Audio")
                example1_mel_audio = gr.Audio(value="samples/control_vs_uncontrol/example_1_mel_chd.wav", label="Melody, wav", waveform_options=gr.WaveformOptions(show_recording_waveform=False), scale=1, min_width=10)
gr.Markdown("## Generated Accompaniments")
with gr.Row(elem_classes="custom-grey"):
gr.Markdown("<span style='font-size:20px;'> Without sampling control<span>")
with gr.Column(scale=2, min_width=300):
gr.Markdown("Music Sheet")
example1_sheet = gr.Image(value="samples/control_vs_uncontrol/example_1_acc_uncontrol.jpg", label="Music Sheet of Melody and Chord", scale=1, min_width=10)
with gr.Column(scale=1, min_width=150):
gr.Markdown("Audio")
example1_melody = gr.Audio(value="samples/control_vs_uncontrol/example_1_acc_uncontrol.wav", scale = 1, min_width=10)
gr.Markdown("\n\n\n")
with gr.Row(elem_classes="custom-grey"):
with gr.Column(scale=1, min_width=150):
gr.Markdown("<span style='font-size:20px;'>With sampling control<span>")
with gr.Column(scale=2, min_width=300):
gr.Markdown("Music Sheet")
example1_sheet = gr.Image(value="samples/control_vs_uncontrol/example_1_acc_control.jpg", label="Music Sheet of Melody and Chord", scale=1, min_width=10)
with gr.Column(scale=1, min_width=150):
gr.Markdown("Audio")
example1_melody = gr.Audio(value="samples/control_vs_uncontrol/example_1_acc_control.wav", scale = 1, min_width=10)
with gr.Column(elem_classes="custom-row2"):
gr.Markdown("## Example 2")
with gr.Row(elem_classes="custom-grey"):
gr.Markdown("<span style='font-size:20px;'> With pre-defined melody and chord as follows<span>")
with gr.Column(scale=2, min_width=10, ):
gr.Markdown("Melody Sheet")
example1_sheet = gr.Image(value="samples/control_vs_uncontrol/example_2_mel_chd.jpg", label="Music Sheet of Melody and Chord", scale=1, min_width=10)
with gr.Column(scale=1, min_width=10, ):
gr.Markdown("Melody Audio")
example1_melody = gr.Audio(value="samples/control_vs_uncontrol/example_2_mel_chd.wav", label="Melody, wav", waveform_options=gr.WaveformOptions(show_recording_waveform=False), scale = 1, min_width=10)
gr.Markdown("## Generated Accompaniments")
with gr.Row(elem_classes="custom-grey"):
gr.Markdown("<span style='font-size:20px;'> Without sampling control<span>")
with gr.Column(scale=2, min_width=300):
gr.Markdown("Music Sheet")
example1_sheet = gr.Image(value="samples/control_vs_uncontrol/example_2_acc_uncontrol.jpg", label="Music Sheet of Melody and Chord", scale=1, min_width=10)
with gr.Column(scale=1, min_width=150):
gr.Markdown("Audio")
example1_melody = gr.Audio(value="samples/control_vs_uncontrol/example_2_acc_uncontrol.wav", scale = 1, min_width=10)
gr.Markdown("\n\n\n")
with gr.Row(elem_classes="custom-grey"):
with gr.Column(scale=1, min_width=150):
gr.Markdown("<span style='font-size:20px;'>With sampling control<span>")
with gr.Column(scale=2, min_width=300):
gr.Markdown("Music Sheet")
example1_sheet = gr.Image(value="samples/control_vs_uncontrol/example_2_acc_control.jpg", label="Music Sheet of Melody and Chord", scale=1, min_width=10)
with gr.Column(scale=1, min_width=150):
gr.Markdown("Audio")
example1_melody = gr.Audio(value="samples/control_vs_uncontrol/example_2_acc_control.wav", scale = 1, min_width=10)
    # Section 4: interactive generation by users
gr.HTML("<div style='height: 50px;'></div>")
gr.Markdown("\n\n\n")
gr.Markdown("# <span style='color: red;'> 4. DIY in real time! </span>")
gr.Markdown("<span style='font-size:20px;'> Here is an interactive tool for you to try our model and generate by yourself.\
You can generate new accompaniments for given melody and chord conditions <span>")
gr.Markdown("### <span style='color: blue;'> Currently this space is supported with Hugging Face CPU and on average,\
it takes about 15 seconds to generate a 4-measure music piece. However, if other users are generating\
music at the same time, one may enter a queue, which could slow down the process significantly.\
If that happens, feel free to refresh the page. We appreciate your patience and understanding.\
</span>")
with gr.Column(elem_classes="custom-purple"):
gr.Markdown("### Select an example to generate music given melody and chord condition")
with gr.Row():
with gr.Column():
prompt_selector = gr.Dropdown(choices=prompt_list, label="Select an example", value="example 1")
gr.Markdown("### This is the melody to be conditioned on:")
condition_musescore = gr.Image("samples/diy_examples/example1/example1.jpg", label="melody, chord, and rhythm condition")
prompt_selector.change(fn=update_musescore_image, inputs=prompt_selector, outputs=condition_musescore)
with gr.Column():
generate_button = gr.Button("Generate")
gr.Markdown("### Generation results:")
audio_output = gr.Audio(label="Generated Music")
piano_roll_output = gr.Image(label="Generated Piano Roll")
generate_button.click(
fn=generate_from_example,
inputs=[prompt_selector],
outputs=[audio_output, piano_roll_output]
)
# Launch Gradio interface
if __name__ == "__main__":
demo.launch()