# MIDI Mamba-159M — Gradio demo Space (generates MIDI with a Mamba LM and
# renders it to audio). NOTE(review): the original file began with scraped
# page text ("Spaces:" / "Runtime error") that is not valid Python; it has
# been replaced by this header comment.
# --- Environment bootstrap -------------------------------------------------
# This script targets a fresh Colab/Spaces VM, so it installs its own
# dependencies at import time instead of relying on a requirements file.
import subprocess
import sys

# causal-conv1d is installed first: mamba-ssm needs it at build time.
subprocess.check_call([sys.executable, "-m", "pip", "install", 'causal-conv1d'])
subprocess.check_call([sys.executable, "-m", "pip", "install", 'miditok', 'mamba-ssm', 'gradio'])
# timidity renders the generated MIDI to WAV (used by generate()).
subprocess.check_call(["apt-get", "install", "timidity", "-y"])

import os

# Locale plus CUDA library paths for the mamba-ssm custom kernels on
# Colab-style hosts (set before importing torch/mamba_ssm).
os.environ['LC_ALL'] = "en_US.UTF-8"
os.environ['LD_LIBRARY_PATH'] = "/usr/lib64-nvidia"
os.environ['LIBRARY_PATH'] = "/usr/local/cuda/lib64/stubs"

import gradio as gr
import torch
from mamba_ssm import Mamba
from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel
from mamba_ssm.models.config_mamba import MambaConfig
import numpy as np

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
if torch.cuda.is_available():
    # Refresh the dynamic-linker cache so the NVIDIA driver libs resolve.
    subprocess.check_call(['ldconfig', '/usr/lib64-nvidia'])

# Download the checkpoint and tokenizer into the CWD only when missing.
if not os.path.isfile("MIDI_Mamba-159M_1536VS.pt"):
    subprocess.check_call(['wget', 'https://huggingface.co/krystv/MIDI_Mamba-159M/resolve/main/MIDI_Mamba-159M_1536VS.pt'])
if not os.path.isfile("tokenizer_1536mix_BPE.json"):
    subprocess.check_call(['wget', 'https://huggingface.co/krystv/MIDI_Mamba-159M/resolve/main/tokenizer_1536mix_BPE.json'])

# 159M-parameter Mamba LM over a 1536-entry BPE MIDI vocabulary.
mc = MambaConfig()
mc.d_model = 768
mc.n_layer = 42
mc.vocab_size = 1536

from miditok import MIDILike, REMI, TokenizerConfig
from pathlib import Path
import torch

tokenizer = REMI(params='tokenizer_1536mix_BPE.json')
mf = MambaLMHeadModel(config=mc, device=device)
# BUG FIX: wget above saves the checkpoint into the current working
# directory, not /content, so load it by its relative path.
mf.load_state_dict(torch.load("MIDI_Mamba-159M_1536VS.pt", map_location=device))
# Social profile links shown beneath the generate button.
twitter_follow_link = "https://twitter.com/iamhemantindia"
instagram_follow_link = "https://instagram.com/iamhemantindia"

# Centered icon bar; {0}/{1} are substituted with the URLs above.
_SOCIAL_HTML_TEMPLATE = """
<div style='text-align: center;'>
<a href="{0}" target="_blank" style="margin-right: 5px;">
<img src="https://img.icons8.com/fluent/24/000000/twitter.png" alt="Follow on Twitter"/>
</a>
<a href="{1}" target="_blank">
<img src="https://img.icons8.com/fluent/24/000000/instagram-new.png" alt="Follow on Instagram"/>
</a>
</div>
"""
custom_html = _SOCIAL_HTML_TEMPLATE.format(twitter_follow_link, instagram_follow_link)
def generate(number, top_k_selector, top_p_selector, temperature_selector):
    """Sample tokens from the model, decode them to MIDI and render an MP3.

    Args:
        number: maximum generation length in tokens (cast to int).
        top_k_selector: top-k sampling cutoff.
        top_p_selector: nucleus (top-p) sampling threshold.
        temperature_selector: softmax temperature.

    Returns:
        Path of the rendered audio file ("output.mp3"). Also writes
        "output.mid" and "output.wav" into the CWD as side effects.
    """
    # Seed generation with token id 1 (assumed BOS — confirm against the
    # tokenizer config); generation stops at eos_token_id=2.
    input_ids = torch.tensor([[1]]).to(device)
    out = mf.generate(
        input_ids=input_ids,
        max_length=int(number),
        temperature=temperature_selector,
        top_p=top_p_selector,
        top_k=top_k_selector,
        eos_token_id=2,
    )
    # Decode the sampled token ids back into a MIDI object and save it.
    # (A dead `np.array(out.to('cpu')).shape` expression statement was removed.)
    midi = tokenizer.decode(np.array(out[0].to('cpu')))
    midi.dump_midi('output.mid')
    # Render MIDI -> WAV with timidity, then WAV -> MP3 with ffmpeg.
    timidity_cmd = ['timidity', 'output.mid', '-Ow', '-o', 'output.wav']
    subprocess.check_call(timidity_cmd)
    ffmpeg_cmd = ['ffmpeg', '-y', '-f', 'wav', '-i', 'output.wav', 'output.mp3']
    subprocess.check_call(ffmpeg_cmd)
    return "output.mp3"
# text_box = gr.Textbox(label="Enter Text") | |
def generate_and_save(number, top_k_selector, top_p_selector, temperature_selector, generate_button, custom_html_wid):
    """Gradio click handler: run generation, then refresh the output widgets.

    The trailing widget arguments are passed through by the event wiring but
    are not read here; the button is returned unchanged so gradio re-renders it.
    """
    mp3_path = generate(number, top_k_selector, top_p_selector, temperature_selector)
    audio_widget = gr.Audio(mp3_path, autoplay=True)
    midi_widget = gr.File(label="Download MIDI", value="output.mid")
    return audio_widget, midi_widget, generate_button
# iface = gr.Interface(fn=generate_and_save, | |
# inputs=[number_selector,top_k_selector,top_p_selector, temperature_selector,generate_button,custom_html_wid], | |
# outputs=[output_box,download_midi_button], | |
# title="MIDI Mamba-159M",submit_btn=False, | |
# clear_btn=False, | |
# description="MIDI Mamba is a Mamba based model trained on MIDI data collected from open internet to train music model.", | |
# allow_flagging=False,) | |
# --- Gradio UI -------------------------------------------------------------
with gr.Blocks() as b1:
    gr.Markdown("<h1 style='text-align: center;'>MIDI Mamba-159M <h1/> ")
    gr.Markdown("<h3 style='text-align: center;'>MIDI Mamba is a Mamba based model trained on MIDI data collected from open internet to train music model. <br> by Hemant Kumar<h3/>")
    with gr.Row():
        with gr.Column():
            # Left column: sampling controls for generate().
            number_selector = gr.Number(label="Select Length of output",value=512)
            top_p_selector = gr.Slider(label="Select Top P", minimum=0, maximum=1.0, step=0.05, value=0.9)
            temperature_selector = gr.Slider(label="Select Temperature", minimum=0, maximum=1.0, step=0.1, value=0.9)
            top_k_selector = gr.Slider(label="Select Top K", minimum=1, maximum=1536, step=1, value=30)
            generate_button = gr.Button(value="Generate",variant="primary")
            custom_html_wid = gr.HTML(custom_html)
        with gr.Column():
            # Right column: rendered MP3 playback and the raw MIDI download.
            output_box = gr.Audio("output.mp3",autoplay=True,)
            download_midi_button = gr.File(label="Download MIDI")
    # Wire the button to generate_and_save; inputs/outputs must match its
    # parameter list and 3-tuple return (audio, midi file, button).
    generate_button.click(generate_and_save,inputs=[number_selector,top_k_selector,top_p_selector, temperature_selector,generate_button,custom_html_wid],outputs=[output_box,download_midi_button,generate_button])
# share=True exposes a temporary public gradio.live URL for the demo.
b1.launch(share=True)