import collections
import io

import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from PIL import Image

import note_seq

# Seconds per minute, used to convert a BPM value into the duration in
# seconds of one quarter-note pulse.
BPM_1_SECOND = 60

def token_sequence_to_note_sequence(token_sequence, 
                                    use_program=True, 
                                    use_drums=False, 
                                    instrument_mapper=None, 
                                    only_guitar=True):

    if isinstance(token_sequence, str):
        token_sequence = token_sequence.split()

    note_sequence = empty_note_sequence()

    # Defaults, used when the corresponding tokens are missing or appear
    # later in the stream than they are first needed.
    numerator, denominator = 4, 4
    pulse_duration, bar_duration = duration_in_sec(120, numerator, denominator)
    current_time = 0.0
    current_bar_index = 0
    current_notes = {}

    # Render all notes.
    current_program = 1
    current_is_drum = False
    current_instrument = 0
    track_count = 0
    for token_index, token in enumerate(token_sequence):

        if token == "PIECE_START":
            pass
        elif token == "PIECE_END":
            print("The end.")
            break
        elif token.startswith("TIME_SIGNATURE="):
            time_signature_str = token.split("=")[-1]
            numerator = int(time_signature_str.split("_")[0])
            denominator = int(time_signature_str.split("_")[-1])
            time_signature = note_sequence.time_signatures.add()
            time_signature.numerator = numerator
            time_signature.denominator = denominator
        elif token.startswith("BPM="):
            bpm_str = token.split("=")[-1]
            bpm = int(bpm_str)
            note_sequence.tempos[0].qpm = bpm
            pulse_duration, bar_duration = duration_in_sec(
                bpm, numerator, denominator
            )
        elif token == "TRACK_START":
            current_bar_index = 0
            track_count += 1
        elif token == "TRACK_END":
            pass
        elif token == "KEYS_START":
            pass
        elif token == "KEYS_END":
            pass
        elif token.startswith("KEY="):
            pass
        elif token.startswith("INST"):
            instrument = token.split("=")[-1]
            if instrument != "DRUMS" and use_program:
                if instrument_mapper is not None:
                    if instrument in instrument_mapper:
                        instrument = instrument_mapper[instrument]
                current_program = int(instrument)
                current_instrument = track_count
                current_is_drum = False
            if instrument == "DRUMS" and use_drums:
                current_instrument = 0
                current_program = 0
                current_is_drum = True
        elif token == "BAR_START":
            current_time = (current_bar_index * bar_duration)
            current_notes = {}
        elif token == "BAR_END":
            current_bar_index += 1
        elif token.startswith("NOTE_ON"):
            pitch = int(token.split("=")[-1])
            note = note_sequence.notes.add()
            note.start_time = current_time
            # Provisional end time; overwritten by the matching NOTE_OFF token.
            note.end_time = current_time + denominator * pulse_duration
            note.pitch = pitch
            note.instrument = current_instrument
            note.program = current_program
            note.velocity = 80
            note.is_drum = current_is_drum
            current_notes[pitch] = note
        elif token.startswith("NOTE_OFF"):
            pitch = int(token.split("=")[-1])
            if pitch in current_notes:
                note = current_notes[pitch]
                note.end_time = current_time
        elif token.startswith("TIME_DELTA"):
            # TIME_DELTA is counted in sixteenth-note steps, i.e. 0.25 of a
            # quarter-note pulse.
            delta = float(token.split("=")[-1]) * 0.25 * pulse_duration
            current_time += delta
        elif token.startswith("DENSITY="):
            pass
        elif token == "[PAD]":
            pass
        else:
            # Silently ignore any unrecognized token.
            pass

    # Make the instruments right.
    instruments_drums = []
    for note in note_sequence.notes:
        pair = [note.program, note.is_drum]
        if pair not in instruments_drums:
            instruments_drums += [pair]
        note.instrument = instruments_drums.index(pair)

    if only_guitar:
        for note in note_sequence.notes:
            if not note.is_drum:
                # MIDI program 24 (zero-based) is Acoustic Guitar (nylon).
                note.instrument = 24
                note.program = 24

    return note_sequence
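
# The parser above expects tokens in roughly this grammar (reconstructed from
# the branches it handles; the exact vocabulary depends on the tokenizer that
# produced the sequence):
#
#     PIECE_START TIME_SIGNATURE=<num>_<den> BPM=<bpm>
#     TRACK_START INST=<program|DRUMS>
#       BAR_START (NOTE_ON=<pitch> | NOTE_OFF=<pitch> | TIME_DELTA=<steps>)* BAR_END
#       ... more bars ...
#     TRACK_END
#     ... more tracks ...
#     PIECE_END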

def duration_in_sec(bpm, numerator, denominator):
    """Return the duration in seconds of one quarter-note pulse and one bar."""
    pulse_duration = BPM_1_SECOND / bpm
    number_of_quarters_per_bar = (4 / denominator) * numerator
    bar_duration = pulse_duration * number_of_quarters_per_bar
    return pulse_duration, bar_duration
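
# Example: at 120 BPM in 3/4 time, a quarter-note pulse lasts 60 / 120 = 0.5 s
# and a bar holds (4 / 4) * 3 = 3 quarter notes, so it lasts 1.5 s:
#
#     duration_in_sec(120, 3, 4)  # -> (0.5, 1.5)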

def empty_note_sequence(qpm=120, total_time=0.0):
    note_sequence = note_seq.protobuf.music_pb2.NoteSequence()
    note_sequence.tempos.add().qpm = qpm
    #note_sequence.ticks_per_quarter = note_seq.constants.STANDARD_PPQ
    note_sequence.total_time = total_time
    return note_sequence

# Piano-roll rendering: NoteSequence -> pandas DataFrame -> Matplotlib figure.
def sequence_to_pandas_dataframe(sequence):
    pd_dict = collections.defaultdict(list)
    for note in sequence.notes:
        pd_dict["start_time"].append(note.start_time)
        pd_dict["end_time"].append(note.end_time)
        pd_dict["duration"].append(note.end_time - note.start_time)
        pd_dict["pitch"].append(note.pitch)
        
    return pd.DataFrame(pd_dict)

def dataframe_to_pianoroll_img(df):
    fig = plt.figure(figsize=(8, 5))
    ax = fig.add_subplot(111)
    # Invisible scatter so the axes autoscale to the note extents.
    ax.scatter(df.start_time, df.pitch, c="white")
    for _, row in df.iterrows():
        ax.add_patch(Rectangle((row["start_time"], row["pitch"] - 0.4),
                               row["duration"], 0.4, color="black"))
    ax.set_xlabel("Seconds", fontsize=18)
    ax.set_ylabel("MIDI pitch", fontsize=18)
    return fig

def fig2img(fig):
    """Convert a Matplotlib figure to a PIL Image and return it."""
    buf = io.BytesIO()
    fig.savefig(buf, format="png")
    buf.seek(0)
    img = Image.open(buf)
    return img

def create_image_from_note_sequence(sequence):
    df_sequence = sequence_to_pandas_dataframe(sequence)
    fig = dataframe_to_pianoroll_img(df_sequence)
    img = fig2img(fig)
    return img
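
# Minimal usage sketch. The token string below is illustrative only: it follows
# the grammar the parser handles, but a real sequence would come from whichever
# model or dataset tokenizer produced it.
if __name__ == "__main__":
    tokens = (
        "PIECE_START TIME_SIGNATURE=4_4 BPM=120 "
        "TRACK_START INST=0 BAR_START "
        "NOTE_ON=60 TIME_DELTA=4 NOTE_OFF=60 "
        "NOTE_ON=64 TIME_DELTA=4 NOTE_OFF=64 "
        "BAR_END TRACK_END PIECE_END"
    )
    ns = token_sequence_to_note_sequence(tokens)
    img = create_image_from_note_sequence(ns)
    img.save("piano_roll.png")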