yiwv committed on
Commit
dd75008
1 Parent(s): d6e9adb

first commit

Browse files
Files changed (3) hide show
  1. app.py +67 -0
  2. model_training.ipynb +0 -0
  3. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from tensorflow.keras.models import load_model
3
+ import numpy as np
4
+ import pretty_midi
5
+ import tensorflow as tf
6
+
7
+
8
# Load the trained Keras model from the local 'model/' directory
# (SavedModel layout — presumably exported by model_training.ipynb; TODO confirm).
model = load_model('model/')
9
+
10
+
11
def predict_next_note(notes, keras_model, temperature=1.0):
    """Sample the next note from the model's predictions.

    Args:
        notes: Input features for the model; a batch dimension is added
            before prediction. Assumed to be the trailing window of
            [pitch, step, duration] values — TODO confirm the exact input
            shape the trained model expects.
        keras_model: Trained Keras model whose `predict` returns a dict
            with 'pitch' logits and 'step'/'duration' regressions.
        temperature: Softmax temperature (> 0); higher values flatten the
            pitch distribution, increasing randomness.

    Returns:
        Tuple ``(pitch, step, duration)`` as ``(int, float, float)``.

    Raises:
        ValueError: If ``temperature`` is not strictly positive.
    """
    # Validate with an explicit raise: `assert` is stripped under `python -O`.
    if temperature <= 0:
        raise ValueError("temperature must be > 0")

    inputs = tf.expand_dims(notes, 0)  # add batch dimension

    # Bug fix: use the model passed by the caller, not the module-level
    # global `model` (the parameter was previously ignored).
    predictions = keras_model.predict(inputs)
    pitch_logits = predictions['pitch']
    step = predictions['step']
    duration = predictions['duration']

    # Temperature-scaled categorical sampling over the pitch logits.
    pitch_logits /= temperature
    pitch = tf.random.categorical(pitch_logits, num_samples=1)
    pitch = tf.squeeze(pitch, axis=-1)
    duration = tf.squeeze(duration, axis=-1)
    step = tf.squeeze(step, axis=-1)

    # Clamp to non-negative so the generated timeline never moves backwards.
    step = tf.maximum(0, step)
    duration = tf.maximum(0, duration)

    return int(pitch.numpy()), float(step.numpy()), float(duration.numpy())
29
+
30
+
31
def notes_to_midi(notes, out_file, instrument_name="Acoustic Grand Piano", velocity=100):
    """Write generated notes to a MIDI file.

    Args:
        notes: Iterable of note rows laid out as
            ``[pitch, step, duration, start, end]``; only indices 0 (pitch),
            3 (start) and 4 (end) are read here.
        out_file: Destination path for the MIDI file.
        instrument_name: General MIDI instrument name for the single track.
        velocity: Note-on velocity applied to every note. Generalized from
            the previously hard-coded ``100``; the default preserves the old
            behavior.

    Returns:
        ``out_file``, for caller convenience.
    """
    pm = pretty_midi.PrettyMIDI()
    program = pretty_midi.instrument_name_to_program(instrument_name)
    instrument = pretty_midi.Instrument(program=program)

    for note_data in notes:
        instrument.notes.append(
            pretty_midi.Note(
                velocity=velocity,
                pitch=int(note_data[0]),
                start=note_data[3],
                end=note_data[4],
            )
        )

    pm.instruments.append(instrument)
    pm.write(out_file)
    return out_file
42
+
43
+
44
def generate_music(input_text):
    """Generate a MIDI file from a comma-separated seed sequence.

    Args:
        input_text: Comma-separated numbers forming the seed sequence —
            presumably repeated [pitch, step, duration] triples; TODO
            confirm against the training notebook.

    Returns:
        The path of the written MIDI file (``'output.mid'``).
    """
    # np.fromstring is deprecated for text parsing (and removed in newer
    # NumPy); parse the comma-separated values explicitly instead.
    input_sequence = np.array(
        [float(v) for v in input_text.split(',') if v.strip()], dtype=float
    )
    temperature = 2.0
    num_predictions = 120
    generated_notes = []
    prev_start = 0

    for _ in range(num_predictions):
        # Feed only the trailing 3 values (one note's features — TODO confirm
        # the trained model's expected window size) and accumulate absolute times.
        pitch, step, duration = predict_next_note(input_sequence[-3:], model, temperature)
        start = prev_start + step
        end = start + duration
        generated_notes.append([pitch, step, duration, start, end])
        input_sequence = np.append(input_sequence, [pitch, step, duration])
        prev_start = start

    output_file_name = 'output.mid'
    notes_to_midi(generated_notes, output_file_name)

    return output_file_name
63
+
64
+
65
# Define the Gradio interface: text input (comma-separated seed values)
# mapped to an audio output, then launch the web app.
iface = gr.Interface(fn=generate_music, inputs='text', outputs='audio')
iface.launch()
model_training.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ gradio
2
+ pyfluidsynth
3
+ pretty-midi
4
+ tensorflow