josuelmet committed
Commit c5b6ebe
1 Parent(s): bad17f3

Upload _Generation.py

Files changed (1):
  1. _Generation.py +662 -0
_Generation.py ADDED
@@ -0,0 +1,662 @@
+ import guitarpro
+ from guitarpro import *
+ from matplotlib import pyplot as plt
+ import mgzip
+ import numpy as np
+ import os
+ from os.path import join
+ import pickle
+ from tqdm import tqdm
+
+ import tensorflow as tf
+ from tensorflow import keras
+ from keras.callbacks import ModelCheckpoint
+ from keras.models import Sequential
+ from keras.layers import Activation, Dense, LSTM, Dropout, Flatten
+
+ from _Decompressor import SongWriter
+
+
+ # Define some constants:
+
+ # PITCH[i] = the pitch associated with midi note number i.
+ # For example, PITCH[69] = 'A4'
+ PITCH = {val: str(GuitarString(number=0, value=val)) for val in range(128)}
+ # MIDI[string] = the midi number associated with the note described by string.
+ # For example, MIDI['A4'] = 69.
+ MIDI = {str(GuitarString(number=0, value=val)): val for val in range(128)}
+
+
+ # Generation helper methods:
+ def thirty_seconds_to_duration(count):
+     if count % 3 == 0:
+         # If the note is dotted, do 48 // count, and return isDotted = True.
+         return (48 // count, True)
+     else:
+         # If the note is not dotted, do 32 // count, and return isDotted = False.
+         return (32 // count, False)
+
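+ # For example, 12 thirty-seconds form a dotted quarter, and 8 form a plain quarter:
+ #   thirty_seconds_to_duration(12) -> (4, True)
+ #   thirty_seconds_to_duration(8)  -> (4, False)
+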
+ def quantize_thirty_seconds(value):
+
+     # 32nd-note values of each fundamental type of note (not including 64th-notes, of course).
+     vals = np.array([32,  # whole
+                      24,  # dotted half
+                      16,  # half
+                      12,  # dotted quarter
+                      8,   # quarter
+                      6,   # dotted eighth
+                      4,   # eighth
+                      3,   # dotted sixteenth
+                      2,   # sixteenth
+                      1])  # thirty-second
+
+     list_out = []
+
+     # Greedily take the largest note value that still fits in the remaining time.
+     for v in vals:
+         if v <= value:
+             list_out.append(thirty_seconds_to_duration(v))
+             value -= v
+
+     return np.array(list_out)
+
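+ # For example, 7 thirty-seconds decompose greedily into a dotted eighth (6)
+ # plus a thirty-second (1):
+ #   quantize_thirty_seconds(7) -> [(8, True), (32, False)]
+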
+
+
+ def adjust_to_4_4(prediction_output):
+     '''
+     Adjust prediction output to be in 4/4 time.
+     Then, separate the beats into measures.
+     '''
+
+     # This will be the new prediction output
+     new_prediction_output = []
+
+     time = 0
+     for beat in prediction_output:
+
+         # Calculate the fraction of a measure encompassed by the current beat / chord.
+         beat_time = (1 / beat[1]) * (1 + 0.5 * beat[2])
+
+         # Calculate the fraction of a measure taken up by all notes in the measure.
+         # Calculate any residual time to see if this measure (in 4/4 time) is longer than 1 measure.
+         measure_time = time + beat_time
+         leftover_time = measure_time % 1
+
+         # If the measure count (i.e., the measure integer) has changed and there is significant left-over beat time:
+         if (int(measure_time) > int(time)) and (leftover_time > 1/128):
+
+             # Calculate the initial 32nd notes encompassed by this beat in the current measure.
+             this_measure_thirty_seconds = int(32 * (1 - time % 1))
+             # Calculate the remaining 32nd notes encompassed by this beat in the next measure.
+             next_measure_thirty_seconds = int(32 * leftover_time)
+
+             # Get the Duration object parameters for this measure and the next measure.
+             this_measure_durations = quantize_thirty_seconds(this_measure_thirty_seconds)
+             next_measure_durations = quantize_thirty_seconds(next_measure_thirty_seconds)
+
+             #print(f'{{ {32 / beat[1]}')
+             for duration_idx, duration in enumerate(this_measure_durations):
+                 time += (1 / duration[0]) * (1 + 0.5 * duration[1])
+
+                 #print(time, '\t', time * 32)
+
+                 # Only the first slice keeps the chord; the rest are tied to it.
+                 chord = beat[0] if duration_idx == 0 else 'tied'
+
+                 new_prediction_output.append((chord, duration[0], duration[1], beat[3]))
+
+             for duration in next_measure_durations:
+                 time += (1 / duration[0]) * (1 + 0.5 * duration[1])
+
+                 #print(time, '\t', time * 32)
+
+                 new_prediction_output.append(('tied', duration[0], duration[1], beat[3]))
+
+             continue
+
+         time += beat_time
+         new_prediction_output.append((beat[0], beat[1], beat[2], beat[3]))
+
+         #print(time, '\t', time * 32)
+
+     '''
+     # Code for debugging
+
+     time = 0
+     time2 = 0
+     idx = 0
+
+     for idx2, beat2 in enumerate(new_prediction_output[:100]):
+         beat = prediction_output[idx]
+
+         if time == time2:
+             print(beat[0], '\t', time, '\t\t', beat2[0], '\t', time2)
+
+             idx += 1
+
+             time += (1 / beat[1]) * (1 + 0.5 * beat[2])
+
+         else:
+             print('\t\t\t\t', beat2[0], '\t', time2)
+
+         time2 += (1 / beat2[1]) * (1 + 0.5 * beat2[2])
+     ''';
+
+     # Use the previously calculated cumulative time as the number of measures in the new 4/4 song.
+     num_measures = int(np.ceil(time))
+
+     song = np.empty(num_measures, dtype=object)
+
+     time = 0
+     m_idx = 0
+
+     timestamps = []
+
+     for beat in new_prediction_output:
+         #print(time)
+         timestamps.append(time)
+
+         m_idx = int(time)
+
+         if song[m_idx] is None:
+             song[m_idx] = [beat]
+         else:
+             song[m_idx].append(beat)
+
+         time += (1 / beat[1]) * (1 + 0.5 * beat[2])
+
+     print(f'4/4 adjusted correctly: {set(range(num_measures)).issubset(set(timestamps))}')
+
+     return song
+
+
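+ # Example of the barline handling above: a half note (duration 2, not dotted)
+ # starting at time = 0.75 overflows the measure by 0.25, so it is split into a
+ # quarter note tied to a quarter note in the next measure (beat[3] is carried
+ # through unchanged as x):
+ #   (chord, 2, 0, x) at time 0.75 -> (chord, 4, False, x), ('tied', 4, False, x)
+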
+ class Generator:
+     def __init__(self, num_tracks_to_generate=5, as_fingerings=True, sequence_length=100):
+         with mgzip.open(join('data', 'notes_data.pickle.gz'), 'rb') as filepath:
+             self.notes = pickle.load(filepath)
+             self.note_to_int = pickle.load(filepath)
+             self.int_to_note = pickle.load(filepath)
+             self.n_vocab = pickle.load(filepath)
+         self.NUM_TRACKS_TO_GENERATE = num_tracks_to_generate
+         self.as_fingerings = as_fingerings
+         self.sequence_length = sequence_length
+
+         with mgzip.open(join('data', 'track_data.pickle.gz'), 'rb') as filepath:
+             self.track_data = pickle.load(filepath)
+
+         self.model = keras.models.load_model('minigpt')
+
+         self.ints = np.array([self.note_to_int[x] for x in self.notes])
+
+
+     def generate_track(self, track_idx=None):
+
+         if track_idx is None:
+             # Choose a random track
+             track_idx = np.random.choice(len(self.track_data))
+
+         # Get the note indices corresponding to the beginning and ending of the track
+         song_note_idx_first = self.track_data.loc[track_idx]['noteStartIdx']
+         song_note_idx_last = self.track_data.loc[track_idx+1]['noteStartIdx']
+
+         # Choose a random starting point within the track
+         start_idx = np.random.randint(low=song_note_idx_first,
+                                       high=song_note_idx_last)
+
+         # Choose a number of initial notes to select from the track, at most 100.
+         num_initial_notes = np.random.choice(min(100, song_note_idx_last - start_idx))
+
+         # Select the initial notes (tokens)
+         start_tokens = [_ for _ in self.ints[start_idx:start_idx+num_initial_notes]]
+
+         max_tokens = 100
+
+         def sample_from(logits, top_k=10):
+             # Keep only the top_k most likely tokens, renormalize them with a
+             # softmax, and sample one of them.
+             logits, indices = tf.math.top_k(logits, k=top_k, sorted=True)
+             indices = np.asarray(indices).astype("int32")
+             preds = keras.activations.softmax(tf.expand_dims(logits, 0))[0]
+             preds = np.asarray(preds).astype("float32")
+             return np.random.choice(indices, p=preds)
+
+         num_tokens_generated = 0
+         tokens_generated = []
+
+         while num_tokens_generated <= max_tokens:
+             # Pad (or truncate) the prompt to exactly sequence_length tokens.
+             pad_len = self.sequence_length - len(start_tokens)
+             sample_index = len(start_tokens) - 1
+             if pad_len < 0:
+                 x = start_tokens[:self.sequence_length]
+                 sample_index = self.sequence_length - 1
+             elif pad_len > 0:
+                 x = start_tokens + [0] * pad_len
+             else:
+                 x = start_tokens
+             x = np.array([x])
+             y, _ = self.model.predict(x)
+             sample_token = sample_from(y[0][sample_index])
+             tokens_generated.append(sample_token)
+             start_tokens.append(sample_token)
+             num_tokens_generated = len(tokens_generated)
+
+         # start_tokens already had each sampled token appended in the loop above,
+         # so it alone holds the seed plus the generated continuation.
+         generated_notes = [self.int_to_note[num] for num in start_tokens]
+
+         return track_idx, generated_notes
+
+
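+ # Note: generate_track returns the seed track's index along with the generated
+ # note tokens, so the seed's tempo, instrument, and tuning can be looked up
+ # again in track_data when the batch is saved (see save_tracks below).
+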
+     def generate_track_batch(self, artist=None):
+
+         # Use an integer dtype so these indices can be used with track_data.loc below.
+         self.track_indices = np.zeros(self.NUM_TRACKS_TO_GENERATE, dtype=int)
+         self.tracks = np.zeros(self.NUM_TRACKS_TO_GENERATE, dtype=object)
+
+         for i in tqdm(range(self.NUM_TRACKS_TO_GENERATE)):
+             if artist is None:
+                 idx, t = self.generate_track()
+             else:
+                 idx, t = self.generate_track(track_idx=np.random.choice(list(self.track_data[self.track_data.artist==artist].index)))
+             self.track_indices[i] = idx
+             self.tracks[i] = t
+
+
+     def save_tracks(self, filepath='_generation.gp5'):
+
+         songWriter = SongWriter(initialTempo=self.track_data.loc[self.track_indices[0]]['tempo'])
+
+         for idx in range(len(self.tracks)):
+             new_track = adjust_to_4_4(self.tracks[idx])
+
+             # Get the tempo and tuning (lowest string note) of the song:
+             #print( track_data.loc[track_indices[idx]])
+             tempo = self.track_data.loc[self.track_indices[idx]]['tempo']
+             instrument = self.track_data.loc[self.track_indices[idx]]['instrument']
+             name = self.track_data.loc[self.track_indices[idx]]['song']
+             lowest_string = self.track_data.loc[self.track_indices[idx]]['tuning']
+
+             if not self.as_fingerings:
+                 # Get all the unique pitch values from the new track
+                 pitchnames = set.union(*[set([beat[0].split('_')[0] for beat in measure]) for measure in new_track])
+                 pitchnames.discard('rest')  # Ignore rests
+                 pitchnames.discard('tied')  # Ignore tied notes
+                 pitchnames.discard('dead')  # Ignore dead/ghost notes
+                 lowest_string = min([MIDI[pitch] for pitch in pitchnames])  # Get the lowest MIDI value / pitch
+                 lowest_string = min(lowest_string, MIDI['E2'])  # Don't allow any tunings higher than standard.
+
+             # Standard tuning
+             tuning = {1: MIDI['E4'],
+                       2: MIDI['B3'],
+                       3: MIDI['G3'],
+                       4: MIDI['D3'],
+                       5: MIDI['A2'],
+                       6: MIDI['E2']}
+
+             if lowest_string <= MIDI['B1']:
+                 # 7-string guitar case
+                 tuning[7] = MIDI['B1']
+                 downtune = MIDI['B1'] - lowest_string
+             else:
+                 # Downtune the tuning by however much is necessary.
+                 downtune = MIDI['E2'] - lowest_string
+
+             tuning = {k: v - downtune for k, v in tuning.items()}  # Adjust to the new tuning
+
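+             # For example, if the lowest required note is D2, two semitones below
+             # standard low E2, then downtune = MIDI['E2'] - MIDI['D2'] = 2 and
+             # every string is shifted down by two semitones.
+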
+             # Write the track to the song writer
+             songWriter.decompress_track(new_track, tuning, tempo=tempo, instrument=instrument, name=name, as_fingerings=self.as_fingerings)
+
+         songWriter.write(filepath)
+         print('Finished')
+
+
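+ # Example usage (a minimal sketch; assumes the pickled data in data/ and the
+ # 'minigpt' model directory exist, and that 'Metallica' is an artist name
+ # actually present in track_data):
+ #
+ #   generator = Generator(num_tracks_to_generate=5)
+ #   generator.generate_track_batch(artist='Metallica')
+ #   generator.save_tracks(filepath='_generation.gp5')
+
+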
+ # Older, module-level version of the generation code above, kept for reference:
+ '''
+
+ def init_generator():
+     global NUM_TRACKS_TO_GENERATE, notes, note_to_int, int_to_note, n_vocab, track_data, model, ints
+
+     with mgzip.open('data\\notes_data.pickle.gz', 'rb') as filepath:
+         notes = pickle.load(filepath)
+         note_to_int = pickle.load(filepath)
+         int_to_note = pickle.load(filepath)
+         n_vocab = pickle.load(filepath)
+
+     with mgzip.open('data\\track_data.pickle.gz', 'rb') as filepath:
+         track_data = pickle.load(filepath)
+
+     #with mgzip.open('output\\generated_songs.pickle.gz', 'rb') as filepath:
+     #    track_indices = pickle.load(filepath)
+     #    tracks = pickle.load(filepath)
+
+     model = keras.models.load_model('minigpt')
+
+     ints = np.array([note_to_int[x] for x in notes])
+
+
+ def generate_track(track_idx=None):
+     global track_data, ints, int_to_note
+
+     if track_idx is None:
+         # Choose a random track
+         track_idx = np.random.choice(len(track_data))
+
+     # Get the note indices corresponding to the beginning and ending of the track
+     song_note_idx_first = track_data.loc[track_idx]['noteStartIdx']
+     song_note_idx_last = track_data.loc[track_idx+1]['noteStartIdx']
+
+     # Choose a random starting point within the track
+     start_idx = np.random.randint(low=song_note_idx_first,
+                                   high=song_note_idx_last)
+
+     # Choose a number of initial notes to select from the track, at most 100.
+     num_initial_notes = np.random.choice(min(100, song_note_idx_last - start_idx))
+
+     # Select the initial notes (tokens)
+     start_tokens = [_ for _ in ints[start_idx:start_idx+num_initial_notes]]
+
+     max_tokens = 100
+
+     def sample_from(logits, top_k=10):
+         logits, indices = tf.math.top_k(logits, k=top_k, sorted=True)
+         indices = np.asarray(indices).astype("int32")
+         preds = keras.activations.softmax(tf.expand_dims(logits, 0))[0]
+         preds = np.asarray(preds).astype("float32")
+         return np.random.choice(indices, p=preds)
+
+     num_tokens_generated = 0
+     tokens_generated = []
+
+     while num_tokens_generated <= max_tokens:
+         pad_len = maxlen - len(start_tokens)
+         sample_index = len(start_tokens) - 1
+         if pad_len < 0:
+             x = start_tokens[:maxlen]
+             sample_index = maxlen - 1
+         elif pad_len > 0:
+             x = start_tokens + [0] * pad_len
+         else:
+             x = start_tokens
+         x = np.array([x])
+         y, _ = model.predict(x)
+         sample_token = sample_from(y[0][sample_index])
+         tokens_generated.append(sample_token)
+         start_tokens.append(sample_token)
+         num_tokens_generated = len(tokens_generated)
+
+     generated_notes = [int_to_note[num] for num in np.concatenate((start_tokens, tokens_generated))]
+
+     return track_idx, generated_notes
+
+
+ def generate_track_batch(artist=None):
+     global track_indices, tracks, NUM_TRACKS_TO_GENERATE, track_data
+
+     track_indices = np.zeros(NUM_TRACKS_TO_GENERATE)
+     tracks = np.zeros(NUM_TRACKS_TO_GENERATE, dtype=object)
+
+     for i in tqdm(range(NUM_TRACKS_TO_GENERATE)):
+         if artist is None:
+             idx, t = generate_track()
+         else:
+             idx, t = generate_track(track_idx=np.random.choice(list(track_data[track_data.artist==artist].index)))
+         track_indices[i] = idx
+         tracks[i] = t
+
+
+ # Generation helper methods:
+ def thirty_seconds_to_duration(count):
+     if count % 3 == 0:
+         # If the note is dotted, do 48 // count, and return isDotted = True.
+         return (48 // count, True)
+     else:
+         # If the note is not dotted, do 32 // count, and return isDotted = False.
+         return (32 // count, False)
+
+
+ def quantize_thirty_seconds(value):
+
+     # 32nd-note values of each fundamental type of note (not including 64th-notes, of course).
+     vals = np.array([32,  # whole
+                      24,  # dotted half
+                      16,  # half
+                      12,  # dotted quarter
+                      8,   # quarter
+                      6,   # dotted eighth
+                      4,   # eighth
+                      3,   # dotted sixteenth
+                      2,   # sixteenth
+                      1])  # thirty-second
+
+     list_out = []
+
+     for v in vals:
+         if v <= value:
+             list_out.append(thirty_seconds_to_duration(v))
+             value -= v
+
+     return np.array(list_out)
+
+
+ def adjust_to_4_4(prediction_output):
+
+     # Adjust prediction output to be in 4/4 time.
+     # Then, separate the beats into measures.
+
+     # This will be the new prediction output
+     new_prediction_output = []
+
+     time = 0
+     for beat in prediction_output:
+
+         # Calculate the fraction of a measure encompassed by the current beat / chord.
+         beat_time = (1 / beat[1]) * (1 + 0.5 * beat[2])
+
+         # Calculate the fraction of a measure taken up by all notes in the measure.
+         # Calculate any residual time to see if this measure (in 4/4 time) is longer than 1 measure.
+         measure_time = time + beat_time
+         leftover_time = measure_time % 1
+
+         # If the measure count (i.e., the measure integer) has changed and there is significant left-over beat time:
+         if (int(measure_time) > int(time)) and (leftover_time > 1/128):
+
+             # Calculate the initial 32nd notes encompassed by this beat in the current measure.
+             this_measure_thirty_seconds = int(32 * (1 - time % 1))
+             # Calculate the remaining 32nd notes encompassed by this beat in the next measure.
+             next_measure_thirty_seconds = int(32 * leftover_time)
+
+             # Get the Duration object parameters for this measure and the next measure.
+             this_measure_durations = quantize_thirty_seconds(this_measure_thirty_seconds)
+             next_measure_durations = quantize_thirty_seconds(next_measure_thirty_seconds)
+
+             #print(f'{{ {32 / beat[1]}')
+             for duration_idx, duration in enumerate(this_measure_durations):
+                 time += (1 / duration[0]) * (1 + 0.5 * duration[1])
+
+                 #print(time, '\t', time * 32)
+
+                 chord = beat[0] if duration_idx == 0 else 'tied'
+
+                 new_prediction_output.append((chord, duration[0], duration[1]))
+
+             for duration in next_measure_durations:
+                 time += (1 / duration[0]) * (1 + 0.5 * duration[1])
+
+                 #print(time, '\t', time * 32)
+
+                 new_prediction_output.append(('tied', duration[0], duration[1]))
+
+             continue
+
+         time += beat_time
+         new_prediction_output.append((beat[0], beat[1], beat[2]))
+
+         #print(time, '\t', time * 32)
+
+     # Code for debugging
+
+     #time = 0
+     #time2 = 0
+     #idx = 0
+
+     #for idx2, beat2 in enumerate(new_prediction_output[:100]):
+     #    beat = prediction_output[idx]
+
+     #    if time == time2:
+     #        print(beat[0], '\t', time, '\t\t', beat2[0], '\t', time2)
+
+     #        idx += 1
+
+     #        time += (1 / beat[1]) * (1 + 0.5 * beat[2])
+
+     #    else:
+     #        print('\t\t\t\t', beat2[0], '\t', time2)
+
+     #    time2 += (1 / beat2[1]) * (1 + 0.5 * beat2[2])
+
+     # Use the previously calculated cumulative time as the number of measures in the new 4/4 song.
+     num_measures = int(np.ceil(time))
+
+     song = np.empty(num_measures, dtype=object)
+
+     time = 0
+     m_idx = 0
+
+     timestamps = []
+
+     for beat in new_prediction_output:
+         #print(time)
+         timestamps.append(time)
+
+         m_idx = int(time)
+
+         if song[m_idx] is None:
+             song[m_idx] = [beat]
+         else:
+             song[m_idx].append(beat)
+
+         time += (1 / beat[1]) * (1 + 0.5 * beat[2])
+
+     print(f'4/4 adjusted correctly: {set(range(num_measures)).issubset(set(timestamps))}')
+
+     return song
+
+
+ def save_tracks(filepath='_generation.gp5'):
+     global track_data, track_indices, tracks
+
+     songWriter = SongWriter(initialTempo=track_data.loc[track_indices[0]]['tempo'])
+
+     for idx in range(len(tracks)):
+         new_track = adjust_to_4_4(tracks[idx])
+
+         # Get the tempo and tuning (lowest string note) of the song:
+         #print( track_data.loc[track_indices[idx]])
+         tempo = track_data.loc[track_indices[idx]]['tempo']
+         instrument = track_data.loc[track_indices[idx]]['instrument']
+         name = track_data.loc[track_indices[idx]]['song']
+         lowest_string = track_data.loc[track_indices[idx]]['tuning']
+
+         if not as_fingerings:
+             # Get all the unique pitch values from the new track
+             pitchnames = set.union(*[set([beat[0].split('_')[0] for beat in measure]) for measure in new_track])
+             pitchnames.discard('rest')  # Ignore rests
+             pitchnames.discard('tied')  # Ignore tied notes
+             pitchnames.discard('dead')  # Ignore dead/ghost notes
+             lowest_string = min([MIDI[pitch] for pitch in pitchnames])  # Get the lowest MIDI value / pitch
+             lowest_string = min(lowest_string, MIDI['E2'])  # Don't allow any tunings higher than standard.
+
+         # Standard tuning
+         tuning = {1: MIDI['E4'],
+                   2: MIDI['B3'],
+                   3: MIDI['G3'],
+                   4: MIDI['D3'],
+                   5: MIDI['A2'],
+                   6: MIDI['E2']}
+
+         if lowest_string <= MIDI['B1']:
+             # 7-string guitar case
+             tuning[7] = MIDI['B1']
+             downtune = MIDI['B1'] - lowest_string
+         else:
+             # Downtune the tuning by however much is necessary.
+             downtune = MIDI['E2'] - lowest_string
+
+         tuning = {k: v - downtune for k, v in tuning.items()}  # Adjust to the new tuning
+
+         # Write the track to the song writer
+         songWriter.decompress_track(new_track, tuning, tempo=tempo, instrument=instrument, name=name, as_fingerings=as_fingerings)
+
+     songWriter.write(filepath)
+     print('Finished')
+ '''