#=======================================================================================
# https://huggingface.co/spaces/asigalov61/Imagen-POP-Music-Medley-Diffusion-Transformer
#=======================================================================================
import os
import time as reqtime
import datetime
from pytz import timezone

import torch

# NOTE: despite the space name, this script runs an autoregressive music
# transformer, not an Imagen diffusion model, so the unused imagen_pytorch
# imports were dropped. TransformerWrapper, Decoder and AutoregressiveWrapper
# are assumed to come from the x_transformer module bundled with asigalov61's
# spaces (tegridy-tools); note that model.generate() below uses return_prime /
# verbose keywords that the pip x-transformers package does not provide.
from x_transformer_1_23_2 import TransformerWrapper, Decoder, AutoregressiveWrapper

import spaces
import gradio as gr

from midi_to_colab_audio import midi_to_colab_audio
import TMIDIX
# =================================================================================================
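# GenerateSong generates a single continuation from a pre-tokenized seed melody.
#
# Input : input_melody_seed_number -- an integer index into the global
#         seed_melodies_data list (loaded in __main__ below).
# Output: (midi_title, midi_summary, midi_file_path, (sample_rate, audio),
#         score_plot) -- wired one-to-one to the Gradio outputs below.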
@spaces.GPU
def GenerateSong(input_melody_seed_number):
    print('=' * 70)
    print('Req start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    start_time = reqtime.time()

    print('Loading model...')

    SEQ_LEN = 2560
    PAD_IDX = 514
    DEVICE = 'cuda'

    # instantiate the model
    model = TransformerWrapper(
        num_tokens = PAD_IDX + 1,
        max_seq_len = SEQ_LEN,
        attn_layers = Decoder(dim = 1024, depth = 24, heads = 16, attn_flash = True)
    )

    model = AutoregressiveWrapper(model, ignore_index = PAD_IDX)

    model.to(DEVICE)

    print('=' * 70)
    print('Loading model checkpoint...')

    model.load_state_dict(
        torch.load('Melody2Song_Seq2Seq_Music_Transformer_Trained_Model_28482_steps_0.719_loss_0.7865_acc.pth',
                   map_location=DEVICE))

    print('=' * 70)

    model.eval()

    # the original if/else assigned torch.bfloat16 on both branches,
    # so a single assignment is equivalent
    dtype = torch.bfloat16

    ctx = torch.amp.autocast(device_type=DEVICE, dtype=dtype)

    print('Done!')
    print('=' * 70)
    seed_melody = seed_melodies_data[input_melody_seed_number]

    print('Input melody seed number:', input_melody_seed_number)
    print('-' * 70)

    #==================================================================

    print('=' * 70)
    print('Sample output events', seed_melody[:16])
    print('=' * 70)
    print('Generating...')

    # use DEVICE here as well, rather than a hard-coded 'cuda' literal
    x = torch.tensor(seed_melody, dtype=torch.long, device=DEVICE)[None, ...]

    with ctx:
        with torch.inference_mode():
            out = model.generate(x,
                                 1024,
                                 temperature=0.9,
                                 return_prime=False,
                                 verbose=False)

    output = out[0].tolist()

    print('=' * 70)
    print('Done!')
    print('=' * 70)
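    # Token decoding scheme (inferred from the loop below): the vocabulary packs
    # MIDI events into integer ranges -- tokens 1..127 advance time in 32 ms
    # steps (assuming the ms resolution of TMIDIX song events), 129..255 set
    # note duration as (token - 128) * 32, and 257..511 encode pitch and channel
    # as (token - 256) = channel * 128 + pitch. Token 514 is the padding index.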
    #===============================================================================

    print('Rendering results...')
    print('=' * 70)
    print('Sample INTs', output[:15])
    print('=' * 70)

    out1 = output

    if len(out1) != 0:

        song = out1
        song_f = []

        time = 0
        dur = 0
        vel = 90
        pitch = 0
        channel = 0

        patches = [0] * 16
        patches[3] = 40

        for ss in song:

            if 0 < ss < 128:
                time += (ss * 32)

            if 128 < ss < 256:
                dur = (ss-128) * 32

            if 256 < ss < 512:
                pitch = (ss-256) % 128
                channel = (ss-256) // 128

                if channel == 1:
                    channel = 3
                    vel = 110 + (pitch % 12)
                    song_f.append(['note', time, dur, channel, pitch, vel, 40])

                else:
                    vel = 80 + (pitch % 12)
                    channel = 0
                    song_f.append(['note', time, dur, channel, pitch, vel, 0])
    fn1 = "Melody2Song-Seq2Seq-Music-Transformer-Composition"

    detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,
                                                              output_signature = 'Melody2Song Seq2Seq Music Transformer',
                                                              output_file_name = fn1,
                                                              track_name='Project Los Angeles',
                                                              list_of_MIDI_patches=patches
                                                              )

    new_fn = fn1+'.mid'

    audio = midi_to_colab_audio(new_fn,
                                soundfont_path=soundfont,
                                sample_rate=16000,
                                volume_scale=10,
                                output_for_gradio=True
                                )
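    # NOTE: the sample_rate passed to midi_to_colab_audio above must match the
    # rate in the (16000, audio) tuple handed to gr.Audio below.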
    print('Done!')
    print('=' * 70)

    #========================================================

    output_midi_title = str(fn1)
    output_midi_summary = str(song_f[:3])
    output_midi = str(new_fn)
    output_audio = (16000, audio)

    output_plot = TMIDIX.plot_ms_SONG(song_f, plot_title=output_midi, return_plt=True)

    print('Output MIDI file name:', output_midi)
    print('Output MIDI title:', output_midi_title)
    print('Output MIDI summary:', output_midi_summary)
    print('=' * 70)

    #========================================================

    print('-' * 70)
    print('Req end time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    print('-' * 70)
    print('Req execution time:', (reqtime.time() - start_time), 'sec')

    return output_midi_title, output_midi_summary, output_midi, output_audio, output_plot
# =================================================================================================
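# A minimal headless usage sketch (an assumption -- the @spaces.GPU decorator is
# a pass-through outside of HF Spaces), with the same globals __main__ sets up:
#
#   PDT = timezone('US/Pacific')
#   soundfont = "SGM-v2.01-YamahaGrand-Guit-Bass-v2.7.sf2"
#   seed_melodies_data = ...  # see the loader in __main__
#   title, summary, midi_path, (sr, audio), plot = GenerateSong(8)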
if __name__ == "__main__":

    PDT = timezone('US/Pacific')

    print('=' * 70)
    print('App start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    print('=' * 70)

    soundfont = "SGM-v2.01-YamahaGrand-Guit-Bass-v2.7.sf2"
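    # NOTE (assumption): GenerateSong() indexes into seed_melodies_data, which
    # this file never defined. It is presumably a pickled list of tokenized seed
    # melodies shipped with the space; the file name below is a guess based on
    # the model checkpoint's naming, loaded with TMIDIX's standard pickle reader.
    seed_melodies_data = TMIDIX.Tegridy_Any_Pickle_File_Reader(
        'Melody2Song_Seq2Seq_Music_Transformer_Seed_Melodies_Data')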
    app = gr.Blocks()

    with app:

        gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Imagen POP Music Medley Diffusion Transformer</h1>")
        gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Generate unique POP music medleys with the Imagen diffusion transformer</h1>")

        gr.Markdown("![Visitors](https://api.visitorbadge.io/api/visitors?path=asigalov61.Imagen-POP-Music-Medley-Diffusion-Transformer&style=flat)\n\n"
                    "This is a demo for the MIDI Images dataset\n\n"
                    "Please see the [MIDI Images](https://huggingface.co/datasets/asigalov61/MIDI-Images) Hugging Face repo for more information\n\n"
                   )

        # Renamed from input_num_medley_comps so the slider matches the handler's
        # argument: GenerateSong() expects an index into seed_melodies_data, so
        # the upper bound must stay below its length (assumption: the data holds
        # at least 65 seed melodies).
        input_melody_seed_number = gr.Slider(1, 64, value=8, step=1, label="Seed melody number")

        run_btn = gr.Button("Generate POP Medley", variant="primary")

        gr.Markdown("## Generation results")

        output_midi_title = gr.Textbox(label="Output MIDI title")
        output_midi_summary = gr.Textbox(label="Output MIDI summary")
        output_audio = gr.Audio(label="Output MIDI audio", format="wav", elem_id="midi_audio")
        output_plot = gr.Plot(label="Output MIDI score plot")
        output_midi = gr.File(label="Output MIDI file", file_types=[".mid"])

        run_event = run_btn.click(GenerateSong, [input_melody_seed_number],
                                  [output_midi_title, output_midi_summary, output_midi,
                                   output_audio, output_plot])

    app.queue().launch()