music-gen-kit / app.py
import gradio as gr
import numpy as np
import pretty_midi
import tensorflow as tf
from tensorflow.keras.models import load_model

# Load the trained note-prediction model from the local SavedModel directory.
model = load_model('model/')

def predict_next_note(notes, keras_model, temperature=1.0):
    """Sample the next note as a (pitch, step, duration) triple."""
    assert temperature > 0
    inputs = tf.expand_dims(notes, 0)
    predictions = keras_model.predict(inputs)
    pitch_logits = predictions['pitch']
    step = predictions['step']
    duration = predictions['duration']
    # Temperature scaling: higher values flatten the distribution,
    # producing more random pitch choices.
    pitch_logits /= temperature
    pitch = tf.random.categorical(pitch_logits, num_samples=1)
    pitch = tf.squeeze(pitch, axis=-1)
    duration = tf.squeeze(duration, axis=-1)
    step = tf.squeeze(step, axis=-1)
    # Step and duration are times, so clamp them to be non-negative.
    step = tf.maximum(0, step)
    duration = tf.maximum(0, duration)
    return int(pitch.numpy()), float(step.numpy()), float(duration.numpy())
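
# Usage sketch (hypothetical seed values; assumes the saved model accepts a
# single (pitch, step, duration) triple, as generate_music feeds it below):
#   seed = np.array([60.0, 0.5, 0.5])
#   pitch, step, duration = predict_next_note(seed, model, temperature=1.0)
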
def notes_to_midi(notes, out_file, instrument_name="Acoustic Grand Piano"):
    """Write rows of [pitch, step, duration, start, end] to a MIDI file."""
    pm = pretty_midi.PrettyMIDI()
    instrument = pretty_midi.Instrument(
        program=pretty_midi.instrument_name_to_program(instrument_name))
    for note_data in notes:
        note = pretty_midi.Note(velocity=100, pitch=int(note_data[0]),
                                start=note_data[3], end=note_data[4])
        instrument.notes.append(note)
    pm.instruments.append(instrument)
    pm.write(out_file)
    return out_file
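
# Usage sketch: one middle-C note held for half a second; only the pitch,
# start, and end columns of each row are read by this function.
#   notes_to_midi([[60, 0.0, 0.5, 0.0, 0.5]], 'demo.mid')
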
def generate_music(input_text):
    # Parse the comma-separated seed explicitly (avoids the legacy
    # np.fromstring text parser).
    input_sequence = np.array([float(x) for x in input_text.split(',')])
    temperature = 2.0
    num_predictions = 120
    generated_notes = []
    prev_start = 0
    for _ in range(num_predictions):
        # Condition on the most recent (pitch, step, duration) triple.
        pitch, step, duration = predict_next_note(input_sequence[-3:], model,
                                                  temperature)
        start = prev_start + step
        end = start + duration
        generated_notes.append([pitch, step, duration, start, end])
        input_sequence = np.append(input_sequence, [pitch, step, duration])
        prev_start = start
    output_file_name = 'output.mid'
    notes_to_midi(generated_notes, output_file_name)
    return output_file_name
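
# Usage sketch (hypothetical seed string): a "pitch,step,duration" triple such
# as "60,0.5,0.5" seeds 120 sampled notes and returns the path 'output.mid'.
#   generate_music("60,0.5,0.5")
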
# Define the Gradio interface. generate_music returns a .mid path, which the
# 'audio' component cannot decode, so expose it as a downloadable file.
iface = gr.Interface(fn=generate_music, inputs='text', outputs='file')
iface.launch()