asigalov61 committed on
Commit
fa9d7e5
·
verified ·
1 Parent(s): 87056df

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +118 -28
app.py CHANGED
@@ -21,10 +21,12 @@ from midi_to_colab_audio import midi_to_colab_audio
21
  # =================================================================================================
22
 
23
  @spaces.GPU
24
- def Generate_Rock_Song(input_melody_seed_number):
 
25
  print('=' * 70)
26
  print('Req start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
27
  start_time = reqtime.time()
 
28
 
29
  print('Loading model...')
30
 
@@ -63,7 +65,97 @@ def Generate_Rock_Song(input_melody_seed_number):
63
 
64
  print('Done!')
65
  print('=' * 70)
66
- seed_melody = seed_melodies_data[input_melody_seed_number]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
  print('Input melody seed number:', input_melody_seed_number)
68
  print('-' * 70)
69
 
@@ -71,7 +163,7 @@ def Generate_Rock_Song(input_melody_seed_number):
71
 
72
  print('=' * 70)
73
 
74
- print('Sample output events', seed_melody[:16])
75
  print('=' * 70)
76
  print('Generating...')
77
 
@@ -105,44 +197,42 @@ def Generate_Rock_Song(input_melody_seed_number):
105
  song_f = []
106
 
107
  time = 0
108
- dur = 0
109
- vel = 90
110
- pitch = 0
111
  channel = 0
112
-
113
- patches = [0] * 16
114
- patches[3] = 40
 
115
 
116
  for ss in song:
117
 
118
- if 0 < ss < 128:
119
-
120
- time += (ss * 32)
121
 
 
 
122
  if 128 < ss < 256:
 
 
 
 
 
 
 
123
 
124
- dur = (ss-128) * 32
125
 
126
- if 256 < ss < 512:
127
 
128
- pitch = (ss-256) % 128
129
 
130
- channel = (ss-256) // 128
131
 
132
- if channel == 1:
133
- channel = 3
134
- vel = 110 + (pitch % 12)
135
- song_f.append(['note', time, dur, channel, pitch, vel, 40])
136
-
137
- else:
138
- vel = 80 + (pitch % 12)
139
- channel = 0
140
- song_f.append(['note', time, dur, channel, pitch, vel, 0])
141
 
142
- fn1 = "Melody2Song-Seq2Seq-Music-Transformer-Composition"
143
 
144
  detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,
145
- output_signature = 'Melody2Song Seq2Seq Music Transformer',
146
  output_file_name = fn1,
147
  track_name='Project Los Angeles',
148
  list_of_MIDI_patches=patches
@@ -224,7 +314,7 @@ if __name__ == "__main__":
224
  output_plot = gr.Plot(label="Output MIDI score plot")
225
  output_midi = gr.File(label="Output MIDI file", file_types=[".mid"])
226
 
227
- run_event = run_btn.click(Generate_Rock_Song, [input_melody_seed_number],
228
  [output_midi_title, output_midi_summary, output_midi, output_audio, output_plot])
229
 
230
  gr.Examples(
 
21
  # =================================================================================================
22
 
23
  @spaces.GPU
24
+ def Generate_Rock_Song(input_midi, input_melody_seed_number):
25
+
26
  print('=' * 70)
27
  print('Req start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
28
  start_time = reqtime.time()
29
+ print('=' * 70)
30
 
31
  print('Loading model...')
32
 
 
65
 
66
  print('Done!')
67
  print('=' * 70)
68
+
69
+ #==================================================================
70
+
71
+ fn = os.path.basename(input_midi)
72
+ fn1 = fn.split('.')[0]
73
+
74
+ #===============================================================================
75
+ # Raw single-track ms score
76
+
77
+ raw_score = TMIDIX.midi2single_track_ms_score(input_midi)
78
+
79
+ #===============================================================================
80
+ # Enhanced score notes
81
+
82
+ escore_notes = TMIDIX.advanced_score_processor(raw_score, return_enhanced_score_notes=True)[0]
83
+
84
+ escore_notes = [e for e in escore_notes if e[6] < 72 or e[6] == 128]
85
+
86
+ #=======================================================
87
+ # PRE-PROCESSING
88
+
89
+ #===============================================================================
90
+ # Augmented enhanced score notes
91
+
92
+ escore_notes = TMIDIX.augment_enhanced_score_notes(escore_notes, timings_divider=32, legacy_timings=True)
93
+
94
+ #===============================================================================
95
+
96
+ dscore = TMIDIX.enhanced_delta_score_notes(escore_notes)
97
+
98
+ cscore = TMIDIX.chordify_score(dscore)
99
+
100
+ #===============================================================================
101
+
102
+ score_toks = []
103
+ control_toks = []
104
+ prime_toks = []
105
+
106
+ for c in cscore:
107
+
108
+ ctime = c[0][0]
109
+
110
+ #=================================================================
111
+
112
+ chord = sorted(c, key=lambda x: -x[5])
113
+
114
+ gnotes = []
115
+ gdrums = []
116
+
117
+ for k, v in groupby(chord, key=lambda x: x[5]):
118
+ if k == 128:
119
+ gdrums.extend(sorted(v, key=lambda x: x[3], reverse=True))
120
+ else:
121
+ gnotes.append(sorted(v, key=lambda x: x[3], reverse=True))
122
+
123
+ #=================================================================
124
+
125
+ chord_toks = []
126
+ ctoks = []
127
+ ptoks = []
128
+
129
+ chord_toks.append(ctime)
130
+ ptoks.append(ctime)
131
+
132
+ if gdrums:
133
+ chord_toks.extend([e[3]+128 for e in gdrums] + [128])
134
+ ptoks.extend([e[3]+128 for e in gdrums] + [128])
135
+
136
+ else:
137
+ chord_toks.append(128)
138
+ ptoks.append(128)
139
+
140
+ if gnotes:
141
+ for g in gnotes:
142
+
143
+ durs = [e[1] // 4 for e in g]
144
+ clipped_dur = max(1, min(31, min(durs)))
145
+
146
+ chan = max(0, min(8, g[0][5] // 8))
147
+
148
+ chan_dur_tok = ((chan * 32) + clipped_dur) + 256
149
+
150
+ ctoks.append([chan_dur_tok, len(g)])
151
+
152
+ ptoks.append(chan_dur_tok)
153
+ ptoks.extend([e[3]+544 for e in g])
154
+
155
+ score_toks.append(chord_toks)
156
+ control_toks.append(ctoks)
157
+ prime_toks.append(ptoks)
158
+
159
  print('Input melody seed number:', input_melody_seed_number)
160
  print('-' * 70)
161
 
 
163
 
164
  print('=' * 70)
165
 
166
+ print('Sample output events', prime_toks[:16])
167
  print('=' * 70)
168
  print('Generating...')
169
 
 
197
  song_f = []
198
 
199
  time = 0
200
+ dur = 32
 
 
201
  channel = 0
202
+ pitch = 60
203
+ vel = 90
204
+
205
+ patches = [0, 10, 19, 24, 35, 40, 52, 56, 65, 9, 73, 46, 0, 0, 0, 0]
206
 
207
  for ss in song:
208
 
209
+ if 0 <= ss < 128:
 
 
210
 
211
+ time += ss * 32
212
+
213
  if 128 < ss < 256:
214
+
215
+ song_f.append(['note', time, 32, 9, ss-128, 110, 128])
216
+
217
+ if 256 < ss < 544:
218
+
219
+ dur = ((ss-256) % 32) * 4 * 32
220
+ channel = (ss-256) // 32
221
 
222
+ if 544 < ss < 672:
223
 
224
+ patch = channel * 8
225
 
226
+ pitch = ss-544
227
 
228
+ song_f.append(['note', time, dur, channel, pitch, vel, patch])
229
 
230
+ song_f, patches, overflow_patches = TMIDIX.patch_enhanced_score_notes(song_f)
 
 
 
 
 
 
 
 
231
 
232
+ fn1 = "Guided-Rock-Music-Transformer-Composition"
233
 
234
  detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,
235
+ output_signature = 'Guided Rock Music Transformer',
236
  output_file_name = fn1,
237
  track_name='Project Los Angeles',
238
  list_of_MIDI_patches=patches
 
314
  output_plot = gr.Plot(label="Output MIDI score plot")
315
  output_midi = gr.File(label="Output MIDI file", file_types=[".mid"])
316
 
317
+ run_event = run_btn.click(Generate_Rock_Song, [input_midi, input_melody_seed_number],
318
  [output_midi_title, output_midi_summary, output_midi, output_audio, output_plot])
319
 
320
  gr.Examples(