import collections
import io

import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from PIL import Image
import note_seq

# Seconds per minute: at 60 BPM a quarter-note pulse lasts exactly one second.
BPM_1_SECOND = 60


def token_sequence_to_note_sequence(token_sequence, use_program=True, use_drums=False, instrument_mapper=None, only_guitar=True):
    if isinstance(token_sequence, str):
        token_sequence = token_sequence.split()

    note_sequence = empty_note_sequence()

    # Time-signature defaults (4/4); updated when a TIME_SIGNATURE token is seen.
    numerator = 4
    denominator = 4

    # Render all notes.
    current_program = 1
    current_is_drum = False
    current_instrument = 0
    track_count = 0
    for token in token_sequence:
        if token == "PIECE_START":
            pass
        elif token == "PIECE_END":
            print("The end.")
            break
        elif token.startswith("TIME_SIGNATURE="):
            time_signature_str = token.split("=")[-1]
            numerator = int(time_signature_str.split("_")[0])
            denominator = int(time_signature_str.split("_")[-1])
            time_signature = note_sequence.time_signatures.add()
            time_signature.numerator = numerator
            time_signature.denominator = denominator
        elif token.startswith("BPM="):
            bpm_str = token.split("=")[-1]
            bpm = int(bpm_str)
            note_sequence.tempos[0].qpm = bpm
            pulse_duration, bar_duration = duration_in_sec(bpm, numerator, denominator)
        elif token == "TRACK_START":
            current_bar_index = 0
            track_count += 1
        elif token == "TRACK_END":
            pass
        elif token == "KEYS_START":
            pass
        elif token == "KEYS_END":
            pass
        elif token.startswith("KEY="):
            pass
        elif token.startswith("INST"):
            instrument = token.split("=")[-1]
            if instrument != "DRUMS" and use_program:
                if instrument_mapper is not None:
                    if instrument in instrument_mapper:
                        instrument = instrument_mapper[instrument]
                current_program = int(instrument)
                current_instrument = track_count
                current_is_drum = False
            if instrument == "DRUMS" and use_drums:
                current_instrument = 0
                current_program = 0
                current_is_drum = True
        elif token == "BAR_START":
            current_time = current_bar_index * bar_duration
            current_notes = {}
        elif token == "BAR_END":
            current_bar_index += 1
        elif token.startswith("NOTE_ON"):
            pitch = int(token.split("=")[-1])
            note = note_sequence.notes.add()
            note.start_time = current_time
            # Provisional end time; overwritten when the matching NOTE_OFF arrives.
            note.end_time = current_time + denominator * pulse_duration
            note.pitch = pitch
            note.instrument = current_instrument
            note.program = current_program
            note.velocity = 80
            note.is_drum = current_is_drum
            current_notes[pitch] = note
        elif token.startswith("NOTE_OFF"):
            pitch = int(token.split("=")[-1])
            if pitch in current_notes:
                note = current_notes[pitch]
                note.end_time = current_time
        elif token.startswith("TIME_DELTA"):
            # TIME_DELTA values are in sixteenth notes, i.e. quarters of a quarter-note pulse.
            delta = float(token.split("=")[-1]) * 0.25 * pulse_duration
            current_time += delta
        elif token.startswith("DENSITY="):
            pass
        elif token == "[PAD]":
            pass
        else:
            # print(f"Ignored token {token}.")
            pass

    # Make the instruments right: give each (program, is_drum) pair its own instrument index.
    instruments_drums = []
    for note in note_sequence.notes:
        pair = [note.program, note.is_drum]
        if pair not in instruments_drums:
            instruments_drums += [pair]
        note.instrument = instruments_drums.index(pair)

    if only_guitar:
        for note in note_sequence.notes:
            if not note.is_drum:
                # General MIDI program 24 (zero-indexed) is Acoustic Guitar (nylon).
                note.instrument = 24
                note.program = 24

    return note_sequence


# Calculate the duration in seconds of a quarter-note pulse and of a full bar.
def duration_in_sec(bpm, numerator, denominator):
    pulse_duration = BPM_1_SECOND / bpm
    number_of_quarters_per_bar = (4 / denominator) * numerator
    bar_duration = pulse_duration * number_of_quarters_per_bar
    return pulse_duration, bar_duration


def empty_note_sequence(qpm=120, total_time=0.0):
    note_sequence = note_seq.protobuf.music_pb2.NoteSequence()
    note_sequence.tempos.add().qpm = qpm
    # note_sequence.ticks_per_quarter = note_seq.constants.STANDARD_PPQ
    note_sequence.total_time = total_time
    return note_sequence


# Generate the piano roll.
def sequence_to_pandas_dataframe(sequence):
    pd_dict = collections.defaultdict(list)
    for note in sequence.notes:
        pd_dict["start_time"].append(note.start_time)
        pd_dict["end_time"].append(note.end_time)
        pd_dict["duration"].append(note.end_time - note.start_time)
        pd_dict["pitch"].append(note.pitch)
    return pd.DataFrame(pd_dict)


def dataframe_to_pianoroll_img(df):
    fig = plt.figure(figsize=(8, 5))
    ax = fig.add_subplot(111)
    # Invisible scatter sets the axis limits; the notes themselves are drawn as rectangles.
    ax.scatter(df.start_time, df.pitch, c="white")
    for _, row in df.iterrows():
        ax.add_patch(Rectangle((row["start_time"], row["pitch"] - 0.4), row["duration"], 0.4, color="black"))
    plt.xlabel('Seconds', fontsize=18)
    plt.ylabel('MIDI pitch', fontsize=16)
    return fig


def fig2img(fig):
    """Convert a Matplotlib figure to a PIL Image and return it."""
    buf = io.BytesIO()
    fig.savefig(buf, format="png")
    buf.seek(0)
    img = Image.open(buf)
    return img


def create_image_from_note_sequence(sequence):
    df_sequence = sequence_to_pandas_dataframe(sequence)
    fig = dataframe_to_pianoroll_img(df_sequence)
    img = fig2img(fig)
    return img
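

# Usage sketch: a minimal example of how these helpers chain together, assuming
# the token vocabulary parsed above. The token string and the output filename
# below are illustrative assumptions, not data from the original module; real
# input would come from whatever tokenizer produced this vocabulary.
if __name__ == "__main__":
    example_tokens = (
        "PIECE_START TIME_SIGNATURE=4_4 BPM=120 "
        "TRACK_START INST=0 BAR_START "
        "NOTE_ON=60 TIME_DELTA=4 NOTE_OFF=60 "
        "NOTE_ON=64 TIME_DELTA=4 NOTE_OFF=64 "
        "BAR_END TRACK_END PIECE_END"
    )
    # Parse tokens into a note_seq NoteSequence, then render it as a piano roll.
    sequence = token_sequence_to_note_sequence(example_tokens)
    image = create_image_from_note_sequence(sequence)
    image.save("piano_roll.png")  # hypothetical output path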