awacke1 commited on
Commit
ed24b9d
1 Parent(s): c7a6e60

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +137 -0
app.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import note_seq
import numpy as np
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the GPT-2-style checkpoint trained on tokenized 4-bar chorales.
# Both calls download from the Hugging Face hub at import time (network required).
tokenizer = AutoTokenizer.from_pretrained("TristanBehrens/js-fakes-4bars")
model = AutoModelForCausalLM.from_pretrained("TristanBehrens/js-fakes-4bars")

# Timing constants at a fixed 120 BPM:
# a 16th note is a quarter of a beat, and one beat lasts 60/120 seconds.
NOTE_LENGTH_16TH_120BPM = 0.25 * 60 / 120
# One 4/4 bar = 4 beats at 120 BPM.
BAR_LENGTH_120BPM = 4.0 * 60 / 120
SAMPLE_RATE=44100  # audio sample rate in Hz used for synthesis and playback
13
def token_sequence_to_note_sequence(token_sequence, use_program=True, use_drums=True, instrument_mapper=None, only_piano=False):
    """Convert a flat token string/list (PIECE_START, TRACK_START, BAR_START,
    NOTE_ON=..., TIME_DELTA=..., NOTE_OFF=..., ...) into a note_seq NoteSequence.

    Args:
        token_sequence: Whitespace-separated token string, or a list of tokens.
        use_program: If True, INST=<n> tokens set the MIDI program of later notes.
        use_drums: If True, INST=DRUMS routes later notes to the drum channel.
        instrument_mapper: Optional dict remapping INST values before use.
        only_piano: If True, force all non-drum notes to instrument/program 0.

    Returns:
        A note_seq NoteSequence at 120 BPM containing the rendered notes.
    """
    if isinstance(token_sequence, str):
        token_sequence = token_sequence.split()
    note_sequence = empty_note_sequence()

    # Render all notes.
    current_program = 1
    current_is_drum = False
    current_instrument = 0
    track_count = 0
    # Initialize the per-track/per-bar state up front: a malformed generated
    # sequence can emit NOTE_ON / TIME_DELTA / BAR_END before TRACK_START or
    # BAR_START, which would otherwise raise UnboundLocalError.
    current_bar_index = 0
    current_time = 0.0
    current_notes = {}
    for token in token_sequence:
        if token == "PIECE_START":
            pass
        elif token == "PIECE_END":
            print("The end.")
            break
        elif token == "TRACK_START":
            current_bar_index = 0
            track_count += 1
        elif token == "TRACK_END":
            pass
        elif token == "KEYS_START":
            pass
        elif token == "KEYS_END":
            pass
        elif token.startswith("KEY="):
            pass
        elif token.startswith("INST"):
            instrument = token.split("=")[-1]
            if instrument != "DRUMS" and use_program:
                if instrument_mapper is not None:
                    if instrument in instrument_mapper:
                        instrument = instrument_mapper[instrument]
                current_program = int(instrument)
                current_instrument = track_count
                current_is_drum = False
            if instrument == "DRUMS" and use_drums:
                current_instrument = 0
                current_program = 0
                current_is_drum = True
        elif token == "BAR_START":
            # Bars are laid out on a fixed grid at 120 BPM.
            current_time = current_bar_index * BAR_LENGTH_120BPM
            current_notes = {}
        elif token == "BAR_END":
            current_bar_index += 1
        elif token.startswith("NOTE_ON"):
            pitch = int(token.split("=")[-1])
            note = note_sequence.notes.add()
            note.start_time = current_time
            # Default duration of a quarter note; shortened later by NOTE_OFF.
            note.end_time = current_time + 4 * NOTE_LENGTH_16TH_120BPM
            note.pitch = pitch
            note.instrument = current_instrument
            note.program = current_program
            note.velocity = 80
            note.is_drum = current_is_drum
            current_notes[pitch] = note
        elif token.startswith("NOTE_OFF"):
            pitch = int(token.split("=")[-1])
            # Only close notes that were opened in the current bar.
            if pitch in current_notes:
                note = current_notes[pitch]
                note.end_time = current_time
        elif token.startswith("TIME_DELTA"):
            delta = float(token.split("=")[-1]) * NOTE_LENGTH_16TH_120BPM
            current_time += delta
        elif token.startswith("DENSITY="):
            pass
        elif token == "[PAD]":
            pass
        else:
            # Unknown tokens are silently ignored.
            pass

    # Make the instruments right: renumber instruments so each distinct
    # (program, is_drum) pair gets its own consecutive instrument index.
    instruments_drums = []
    for note in note_sequence.notes:
        pair = [note.program, note.is_drum]
        if pair not in instruments_drums:
            instruments_drums += [pair]
        note.instrument = instruments_drums.index(pair)

    if only_piano:
        for note in note_sequence.notes:
            if not note.is_drum:
                note.instrument = 0
                note.program = 0

    return note_sequence
103
+
104
def empty_note_sequence(qpm=120.0, total_time=0.0):
    """Build a blank NoteSequence with one tempo mark and standard PPQ ticks."""
    sequence = note_seq.protobuf.music_pb2.NoteSequence()
    tempo = sequence.tempos.add()
    tempo.qpm = qpm
    sequence.ticks_per_quarter = note_seq.constants.STANDARD_PPQ
    sequence.total_time = total_time
    return sequence
110
+
111
def process(text):
    """Continue the token prompt with the language model and synthesize audio.

    Args:
        text: A token-string prompt, e.g. "PIECE_START".

    Returns:
        A (sample_rate, int16_waveform) tuple, the format gradio's 'audio'
        output component expects.
    """
    input_ids = tokenizer.encode(text, return_tensors="pt")
    generated_ids = model.generate(input_ids, max_length=500)
    generated_sequence = tokenizer.decode(generated_ids[0])

    # Convert text of notes to audio.
    note_sequence = token_sequence_to_note_sequence(generated_sequence)
    synth = note_seq.midi_synth.synthesize
    array_of_floats = synth(note_sequence, sample_rate=SAMPLE_RATE)

    # Scale the float waveform into int16 range with ~3 dB (1/sqrt(2)) of
    # headroom; clip before the cast so hot samples can't wrap around.
    # (The original code also built an unused note_seq.plot_sequence figure
    # here; it was dead code and has been removed.)
    array_of_floats /= 1.414
    array_of_floats *= 32767
    int16_data = np.clip(array_of_floats, -32768, 32767).astype(np.int16)
    return SAMPLE_RATE, int16_data
125
+
126
title = "Music generation with GPT-2"

# Gradio UI wiring: a text prompt of music tokens in, synthesized audio out.
# NOTE(review): gr.inputs.Textbox is the legacy pre-3.x gradio input API;
# kept as-is to match the gradio version this app was written against.
iface = gr.Interface(
    fn=process,
    inputs=[gr.inputs.Textbox(default="PIECE_START")],
    outputs=['audio'],
    title=title,
    examples=[["PIECE_START"], ["PIECE_START STYLE=JSFAKES GENRE=JSFAKES TRACK_START INST=48 BAR_START NOTE_ON=61"]],
    article="This demo is inspired in the notebook from https://huggingface.co/TristanBehrens/js-fakes-4bars"
)

# debug=True surfaces server tracebacks in the console while the app runs.
iface.launch(debug=True)