asigalov61 committed
Commit: 3d4162a
Parent(s): 3ac4fa8

Update app.py

Files changed (1):
  1. app.py (+53 / -237)
app.py CHANGED
@@ -45,9 +45,7 @@ def GenerateAccompaniment(input_midi, input_num_tokens, input_conditioning_type,
 
     escore_notes = TMIDIX.advanced_score_processor(raw_score, return_enhanced_score_notes=True)[0]
 
-    no_drums_escore_notes = [e for e in escore_notes if e[6] < 80]
-
-    if len(no_drums_escore_notes) > 0:
+    if len(escore_notes) > 0:
 
         #=======================================================
         # PRE-PROCESSING
@@ -55,258 +53,76 @@ def GenerateAccompaniment(input_midi, input_num_tokens, input_conditioning_type,
         #===============================================================================
         # Augmented enhanced score notes
 
-        no_drums_escore_notes = TMIDIX.augment_enhanced_score_notes(no_drums_escore_notes)
-
-        cscore = TMIDIX.chordify_score([1000, no_drums_escore_notes])
-
-        clean_cscore = []
-
-        for c in cscore:
-            pitches = []
-            cho = []
-            for cc in c:
-                if cc[4] not in pitches:
-                    cho.append(cc)
-                    pitches.append(cc[4])
-
-            clean_cscore.append(cho)
-
-        #=======================================================
-        # FINAL PROCESSING
-
-        melody_chords = []
-        chords = []
-        times = [0]
-        durs = []
-
-        #=======================================================
-        # MAIN PROCESSING CYCLE
-        #=======================================================
-
-        pe = clean_cscore[0][0]
-
-        first_chord = True
-
-        for c in clean_cscore:
-
-            # Chords
-
-            c.sort(key=lambda x: x[4], reverse=True)
-
-            tones_chord = sorted(set([cc[4] % 12 for cc in c]))
-
-            try:
-                chord_token = TMIDIX.ALL_CHORDS_SORTED.index(tones_chord)
-            except:
-                checked_tones_chord = TMIDIX.check_and_fix_tones_chord(tones_chord)
-                chord_token = TMIDIX.ALL_CHORDS_SORTED.index(checked_tones_chord)
-
-            melody_chords.extend([chord_token+384])
-
-            if input_strip_notes:
-                if len(tones_chord) > 1:
-                    chords.extend([chord_token+384])
-
-            else:
-                chords.extend([chord_token+384])
-
-            if first_chord:
-                melody_chords.extend([0])
-                first_chord = False
-
-            for e in c:
-
-                #=======================================================
-                # Timings...
-
-                time = e[1]-pe[1]
-
-                dur = e[2]
-
-                if time != 0 and time % 2 != 0:
-                    time += 1
-                if dur % 2 != 0:
-                    dur += 1
-
-                delta_time = int(max(0, min(255, time)) / 2)
-
-                # Durations
-
-                dur = int(max(0, min(255, dur)) / 2)
-
-                # Pitches
-
-                ptc = max(1, min(127, e[4]))
-
-                #=======================================================
-                # FINAL NOTE SEQ
-
-                # Writing final note asynchronously
-
-                if delta_time != 0:
-                    melody_chords.extend([delta_time, dur+128, ptc+256])
-                    if input_strip_notes:
-                        if len(c) > 1:
-                            times.append(delta_time)
-                            durs.append(dur+128)
-                    else:
-                        times.append(delta_time)
-                        durs.append(dur+128)
-                else:
-                    melody_chords.extend([dur+128, ptc+256])
-
-                pe = e
-
-        #==================================================================
-
-        print('=' * 70)
-
-        print('Sample output events', melody_chords[:5])
-        print('=' * 70)
-        print('Generating...')
-
-        output = []
-
-        max_chords_limit = 8
-        temperature=0.9
-        num_memory_tokens=4096
-
-        output = []
-
-        idx = 0
-
-        for c in chords[:input_num_tokens]:
-
-            output.append(c)
-
-            if input_conditioning_type == 'Chords-Times' or input_conditioning_type == 'Chords-Times-Durations':
-                output.append(times[idx])
-
-            if input_conditioning_type == 'Chords-Times-Durations':
-                output.append(durs[idx])
-
-            x = torch.tensor([output] * 1, dtype=torch.long, device='cuda')
-
-            o = 0
-
-            ncount = 0
-
-            while o < 384 and ncount < max_chords_limit:
-                with ctx:
-                    out = model.generate(x[-num_memory_tokens:],
-                                         1,
-                                         temperature=temperature,
-                                         return_prime=False,
-                                         verbose=False)
-
-                o = out.tolist()[0][0]
-
-                if 256 <= o < 384:
-                    ncount += 1
-
-                if o < 384:
-                    x = torch.cat((x, out), 1)
-
-            outy = x.tolist()[0][len(output):]
-
-            output.extend(outy)
-
-            idx += 1
-
-            if idx == len(chords[:input_num_tokens])-1:
-                break
-
-        print('=' * 70)
-        print('Done!')
-        print('=' * 70)
-
-        #===============================================================================
-        print('Rendering results...')
-
-        print('=' * 70)
-        print('Sample INTs', output[:12])
-        print('=' * 70)
-
-        out1 = output
-
-        if len(out1) != 0:
-
-            song = out1
-            song_f = []
-
-            time = 0
-            dur = 0
-            vel = 90
-            pitch = 0
-            channel = 0
-
-            patches = [0] * 16
-
-            channel = 0
-
-            for ss in song:
-
-                if 0 <= ss < 128:
-
-                    time += ss * 32
-
-                if 128 <= ss < 256:
-
-                    dur = (ss-128) * 32
-
-                if 256 <= ss < 384:
-
-                    pitch = (ss-256)
-
-                    vel = max(40, pitch)
-
-                    song_f.append(['note', time, dur, channel, pitch, vel, 0])
-
-            fn1 = "Chords-Progressions-Transformer-Composition"
-
-            detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,
-                                                                      output_signature = 'Chords Progressions Transformer',
-                                                                      output_file_name = fn1,
-                                                                      track_name='Project Los Angeles',
-                                                                      list_of_MIDI_patches=patches
-                                                                      )
-
-            new_fn = fn1+'.mid'
-
-            audio = midi_to_colab_audio(new_fn,
-                                        soundfont_path=soundfont,
-                                        sample_rate=16000,
-                                        volume_scale=10,
-                                        output_for_gradio=True
-                                        )
-
-            print('Done!')
-            print('=' * 70)
-
-            #========================================================
-
-            output_midi_title = str(fn1)
-            output_midi_summary = str(song_f[:3])
-            output_midi = str(new_fn)
-            output_audio = (16000, audio)
-
-            output_plot = TMIDIX.plot_ms_SONG(song_f, plot_title=output_midi, return_plt=True)
-
-            print('Output MIDI file name:', output_midi)
-            print('Output MIDI title:', output_midi_title)
-            print('Output MIDI summary:', '')
-            print('=' * 70)
-
-        #========================================================
-
-        print('-' * 70)
-        print('Req end time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
-        print('-' * 70)
-        print('Req execution time:', (reqtime.time() - start_time), 'sec')
-
-        return output_midi_title, output_midi_summary, output_midi, output_audio, output_plot
+        escore_notes = TMIDIX.augment_enhanced_score_notes(escore_notes)
+
+        #===============================================================================
+        # Recalculate timings
+
+        escore_notes = TMIDIX.recalculate_score_timings(escore_notes)
+        #===============================================================================
+        # Add melody
+
+        output = TMIDIX.add_melody_to_enhanced_score_notes(escore_notes, melody_channel=15, melody_start_chord=15)
+
+        print('=' * 70)
+        print('Done!')
+        print('=' * 70)
+
+        #===============================================================================
+        print('Rendering results...')
+
+        print('=' * 70)
+        print('Sample INTs', output[:12])
+        print('=' * 70)
+
+        output_score, patches, overflow_patches = TMIDIX.patch_enhanced_score_notes(output)
+
+        fn1 = "Chords-Progressions-Transformer-Composition"
+
+        detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(output_score,
+                                                                  output_signature = 'MIDI Melody',
+                                                                  output_file_name = fn1,
+                                                                  track_name='Project Los Angeles',
+                                                                  list_of_MIDI_patches=patches,
+                                                                  timings_multiplier=16
+                                                                  )
+
+        new_fn = fn1+'.mid'
+
+        audio = midi_to_colab_audio(new_fn,
+                                    soundfont_path=soundfont,
+                                    sample_rate=16000,
+                                    volume_scale=10,
+                                    output_for_gradio=True
+                                    )
+
+        print('Done!')
+        print('=' * 70)
+
+        #========================================================
+
+        output_midi_title = str(fn1)
+        output_midi_summary = str(output_score[:3])
+        output_midi = str(new_fn)
+        output_audio = (16000, audio)
+
+        output_plot = TMIDIX.plot_ms_SONG(output_score, plot_title=output_midi, return_plt=True)
+
+        print('Output MIDI file name:', output_midi)
+        print('Output MIDI title:', output_midi_title)
+        print('Output MIDI summary:', '')
+        print('=' * 70)
+
+        #========================================================
+
+        print('-' * 70)
+        print('Req end time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
+        print('-' * 70)
+        print('Req execution time:', (reqtime.time() - start_time), 'sec')
+
+        return output_midi_title, output_midi_summary, output_midi, output_audio, output_plot
 
 # =================================================================================================
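For reference, the generation path after this commit reduces to the sketch below. It uses only the TMIDIX calls that appear in the added lines; the render_melody() wrapper, its arguments, and the assumption that raw_score has already been loaded earlier in GenerateAccompaniment() are illustrative and not part of app.py itself.

import TMIDIX  # tegridy-tools module already imported by the Space


def render_melody(raw_score, out_name='Chords-Progressions-Transformer-Composition'):
    # Enhanced score notes from the raw ms score (loaded earlier in app.py, outside this diff)
    escore_notes = TMIDIX.advanced_score_processor(raw_score, return_enhanced_score_notes=True)[0]

    if len(escore_notes) > 0:

        # Pre-processing introduced by this commit: augment, then recalculate timings
        escore_notes = TMIDIX.augment_enhanced_score_notes(escore_notes)
        escore_notes = TMIDIX.recalculate_score_timings(escore_notes)

        # Add a melody line on MIDI channel 15 (replaces the old transformer token-generation loop)
        output = TMIDIX.add_melody_to_enhanced_score_notes(escore_notes,
                                                           melody_channel=15,
                                                           melody_start_chord=15)

        # Assign patches and write the result out as a .mid file
        output_score, patches, overflow_patches = TMIDIX.patch_enhanced_score_notes(output)

        TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(output_score,
                                                 output_signature='MIDI Melody',
                                                 output_file_name=out_name,
                                                 track_name='Project Los Angeles',
                                                 list_of_MIDI_patches=patches,
                                                 timings_multiplier=16)

        return out_name + '.mid'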