|
"""Utility functions for handling MIDI data in an easy to read/manipulate |
|
format |
|
|
|
""" |
|
from __future__ import print_function |
|
from warnings import warn |
|
|
|
import mido |
|
import numpy as np |
|
import math |
|
import warnings |
|
import collections |
|
import copy |
|
import functools |
|
import six |
|
import pathlib |
|
from heapq import merge |
|
import io |
|
|
|
import os |
|
import pkg_resources |
|
|
|
from .instrument import Instrument |
|
from .containers import (KeySignature, TimeSignature, Lyric, Note, |
|
PitchBend, ControlChange, Text) |
|
from .utilities import (key_name_to_key_number, qpm_to_bpm, note_number_to_hz) |
|
from .fluidsynth import get_fluidsynth_instance |
|
|
|
|
|
MAX_TICK = 1e7 |
|
|
|
|
|
class PrettyMIDI(object): |
|
"""A container for MIDI data in an easily-manipulable format. |
|
|
|
Parameters |
|
---------- |
|
midi_file : str or file |
|
Path or file pointer to a MIDI file. |
|
        Default ``None``, which creates an empty object with the supplied
        values for resolution and initial tempo.
|
resolution : int |
|
Resolution of the MIDI data, when no file is provided. |
|
initial_tempo : float |
|
Initial tempo for the MIDI data, when no file is provided. |
|
charset : str |
|
Charset of the MIDI. |
|
|
|
Attributes |
|
---------- |
|
instruments : list |
|
List of :class:`pretty_midi.Instrument` objects. |
|
key_signature_changes : list |
|
List of :class:`pretty_midi.KeySignature` objects. |
|
time_signature_changes : list |
|
List of :class:`pretty_midi.TimeSignature` objects. |
|
lyrics : list |
|
List of :class:`pretty_midi.Lyric` objects. |
|
text_events : list |
|
List of :class:`pretty_midi.Text` objects. |
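
    Examples
    --------
    A minimal sketch of typical usage; ``example.mid`` is a placeholder
    path, not a file shipped with the library.

    >>> import pretty_midi
    >>> midi_data = pretty_midi.PrettyMIDI('example.mid')
    >>> end_time = midi_data.get_end_time()
    >>> tempo_estimate = midi_data.estimate_tempo()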
|
""" |
|
|
|
    def __init__(self, midi_file=None, resolution=220, initial_tempo=120.,
                 charset='latin1'):
|
"""Initialize either by populating it with MIDI data from a file or |
|
from scratch with no data. |
|
|
|
""" |
|
if midi_file is not None: |
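
            # Load in the MIDI data using the mido module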
|
|
|
            if (isinstance(midi_file, six.string_types) or
                    isinstance(midi_file, pathlib.PurePath)):
|
|
|
midi_data = mido.MidiFile(filename=midi_file, charset=charset) |
|
else: |
|
|
|
midi_data = mido.MidiFile(file=midi_file, charset=charset) |
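
            # Convert tick values in midi_data to absolute, a useful thing.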
|
|
|
|
|
for track in midi_data.tracks: |
|
tick = 0 |
|
for event in track: |
|
event.time += tick |
|
tick = event.time |
|
|
|
|
|
self.resolution = midi_data.ticks_per_beat |
|
|
|
|
|
self._load_tempo_changes(midi_data) |
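
            # Find the largest tick in the file so that the tick-to-time
            # mapping can be computed up to that point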
|
|
|
|
|
max_tick = max([max([e.time for e in t]) |
|
for t in midi_data.tracks]) + 1 |
|
|
|
|
|
if max_tick > MAX_TICK: |
|
raise ValueError(('MIDI file has a largest tick of {},' |
|
' it is likely corrupt'.format(max_tick))) |
|
|
|
|
|
self._update_tick_to_time(max_tick) |
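
            # Populate the lists of key/time signature changes, lyrics and
            # text events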
|
|
|
|
|
self._load_metadata(midi_data) |
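
            # Check that tempo, key and time signature change events only
            # appear on track 0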
|
|
|
|
|
|
|
if any(e.type in ('set_tempo', 'key_signature', 'time_signature') |
|
for track in midi_data.tracks[1:] for e in track): |
|
warnings.warn( |
|
"Tempo, Key or Time signature change events found on " |
|
"non-zero tracks. This is not a valid type 0 or type 1 " |
|
"MIDI file. Tempo, Key or Time Signature may be wrong.", |
|
RuntimeWarning) |
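
            # Populate the list of instruments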
|
|
|
|
|
self._load_instruments(midi_data) |
|
|
|
self._charset = charset |
|
else: |
|
self.resolution = resolution |
|
|
|
|
|
self._tick_scales = [(0, 60.0/(initial_tempo*self.resolution))] |
|
|
|
self.__tick_to_time = [0] |
|
|
|
self.instruments = [] |
|
|
|
self.key_signature_changes = [] |
|
|
|
self.time_signature_changes = [] |
|
|
|
self.lyrics = [] |
|
|
|
self.text_events = [] |
|
|
|
self._charset = charset |
|
|
|
def _load_tempo_changes(self, midi_data): |
|
"""Populates ``self._tick_scales`` with tuples of |
|
``(tick, tick_scale)`` loaded from ``midi_data``. |
|
|
|
Parameters |
|
---------- |
|
midi_data : midi.FileReader |
|
MIDI object from which data will be read. |
|
""" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
self._tick_scales = [(0, 60.0/(120.0*self.resolution))] |
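
        # For SMF file type 0, all events are on track 0.
        # For type 1, all tempo events should be on track 1.
        # So, just look at events on track 0.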
|
|
|
|
|
|
|
|
|
for event in midi_data.tracks[0]: |
|
if event.type == 'set_tempo': |
|
|
|
if event.time == 0: |
|
bpm = 6e7/event.tempo |
|
self._tick_scales = [(0, 60.0/(bpm*self.resolution))] |
|
else: |
|
|
|
_, last_tick_scale = self._tick_scales[-1] |
|
tick_scale = 60.0/((6e7/event.tempo)*self.resolution) |
|
|
|
if tick_scale != last_tick_scale: |
|
self._tick_scales.append((event.time, tick_scale)) |
|
|
|
def _load_metadata(self, midi_data): |
|
"""Populates ``self.time_signature_changes`` with ``TimeSignature`` |
|
objects, ``self.key_signature_changes`` with ``KeySignature`` objects, |
|
``self.lyrics`` with ``Lyric`` objects and ``self.text_events`` with |
|
``Text`` objects. |
|
|
|
Parameters |
|
---------- |
|
midi_data : midi.FileReader |
|
MIDI object from which data will be read. |
|
""" |
|
|
|
|
|
|
|
self.key_signature_changes = [] |
|
self.time_signature_changes = [] |
|
self.lyrics = [] |
|
self.text_events = [] |
|
|
|
for event in midi_data.tracks[0]: |
|
if event.type == 'key_signature': |
|
key_obj = KeySignature( |
|
key_name_to_key_number(event.key), |
|
self.__tick_to_time[event.time]) |
|
self.key_signature_changes.append(key_obj) |
|
|
|
elif event.type == 'time_signature': |
|
ts_obj = TimeSignature(event.numerator, |
|
event.denominator, |
|
self.__tick_to_time[event.time]) |
|
self.time_signature_changes.append(ts_obj) |
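
        # Search for lyrics and text events on all tracks; collect each
        # track's events separately so they can be merged in time order.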
|
|
|
|
|
|
|
tracks_with_lyrics = [] |
|
tracks_with_text_events = [] |
|
for track in midi_data.tracks: |
|
|
|
lyrics = [] |
|
text_events = [] |
|
for event in track: |
|
if event.type == 'lyrics': |
|
lyrics.append(Lyric( |
|
event.text, self.__tick_to_time[event.time])) |
|
elif event.type == 'text': |
|
text_events.append(Text( |
|
event.text, self.__tick_to_time[event.time])) |
|
|
|
if lyrics: |
|
tracks_with_lyrics.append(lyrics) |
|
if text_events: |
|
tracks_with_text_events.append(text_events) |
|
|
|
|
|
self.lyrics = list(merge(*tracks_with_lyrics, key=lambda x: x.time)) |
|
self.text_events = list(merge(*tracks_with_text_events, key=lambda x: x.time)) |
|
|
|
|
|
def _update_tick_to_time(self, max_tick): |
|
"""Creates ``self.__tick_to_time``, a class member array which maps |
|
ticks to time starting from tick 0 and ending at ``max_tick``. |
|
|
|
Parameters |
|
---------- |
|
max_tick : int |
|
Last tick to compute time for. If ``self._tick_scales`` contains a |
|
tick which is larger than this value, it will be used instead. |
|
|
|
""" |
|
|
|
|
|
max_scale_tick = max(ts[0] for ts in self._tick_scales) |
|
max_tick = max_tick if max_tick > max_scale_tick else max_scale_tick |
|
|
|
self.__tick_to_time = np.zeros(max_tick + 1) |
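
        # Keep track of the end time of the last tick in the previous
        # tempo interval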
|
|
|
last_end_time = 0 |
|
|
|
for (start_tick, tick_scale), (end_tick, _) in \ |
|
zip(self._tick_scales[:-1], self._tick_scales[1:]): |
|
|
|
ticks = np.arange(end_tick - start_tick + 1) |
|
self.__tick_to_time[start_tick:end_tick + 1] = (last_end_time + |
|
tick_scale*ticks) |
|
|
|
last_end_time = self.__tick_to_time[end_tick] |
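
        # Compute times for the ticks after the final tempo change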
|
|
|
|
|
start_tick, tick_scale = self._tick_scales[-1] |
|
ticks = np.arange(max_tick + 1 - start_tick) |
|
self.__tick_to_time[start_tick:] = (last_end_time + |
|
tick_scale*ticks) |
|
|
|
def _load_instruments(self, midi_data): |
|
"""Populates ``self.instruments`` using ``midi_data``. |
|
|
|
Parameters |
|
---------- |
|
midi_data : midi.FileReader |
|
MIDI object from which data will be read. |
|
""" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
instrument_map = collections.OrderedDict() |
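
        # Store a similar mapping to instruments storing "straggler" events,
        # i.e. events which appear before we want to create an Instrument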
|
|
|
|
|
stragglers = {} |
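
        # This dict will map track indices to any track names encountered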
|
|
|
track_name_map = collections.defaultdict(str) |
|
|
|
def __get_instrument(program, channel, track, create_new): |
|
"""Gets the Instrument corresponding to the given program number, |
|
drum/non-drum type, channel, and track index. If no such |
|
instrument exists, one is created. |
|
|
|
""" |
|
|
|
|
|
if (program, channel, track) in instrument_map: |
|
return instrument_map[(program, channel, track)] |
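
            # If there's a straggler for this channel/track, return it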
|
|
|
|
|
if not create_new and (channel, track) in stragglers: |
|
return stragglers[(channel, track)] |
|
|
|
if create_new: |
|
is_drum = (channel == 9) |
|
instrument = Instrument( |
|
program, is_drum, track_name_map[track_idx]) |
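
                # If any events appeared for this instrument before now,
                # include them in the new instrument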
|
|
|
|
|
if (channel, track) in stragglers: |
|
straggler = stragglers[(channel, track)] |
|
instrument.control_changes = straggler.control_changes |
|
instrument.pitch_bends = straggler.pitch_bends |
|
|
|
instrument_map[(program, channel, track)] = instrument |
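
            # Otherwise, create a "straggler" instrument which holds events
            # appearing before we actually create a proper new instrument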
|
|
|
|
|
|
|
else: |
|
|
|
                instrument = Instrument(
                    program, name=track_name_map[track_idx])
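
                # Note that stragglers ignore program number, because we want
                # to store all events on a track which appear before the
                # first note-on, regardless of program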
|
|
|
|
|
|
|
stragglers[(channel, track)] = instrument |
|
return instrument |
|
|
|
for track_idx, track in enumerate(midi_data.tracks): |
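
            # Keep track of last note on location:
            # key = (channel, note), value = list of (tick, velocity)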
|
|
|
|
|
|
|
last_note_on = collections.defaultdict(list) |
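
            # Keep track of which instrument is playing in each channel;
            # initialize to program 0 for all channels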
|
|
|
|
|
current_instrument = np.zeros(16, dtype=np.int32) |
|
for event in track: |
|
|
|
if event.type == 'track_name': |
|
|
|
track_name_map[track_idx] = event.name |
|
|
|
if event.type == 'program_change': |
|
|
|
current_instrument[event.channel] = event.program |
|
|
|
elif event.type == 'note_on' and event.velocity > 0: |
|
|
|
note_on_index = (event.channel, event.note) |
|
last_note_on[note_on_index].append(( |
|
event.time, event.velocity)) |
|
|
|
elif event.type == 'note_off' or (event.type == 'note_on' and |
|
event.velocity == 0): |
|
|
|
key = (event.channel, event.note) |
|
if key in last_note_on: |
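
                        # A single note-off may close multiple note-ons at
                        # this pitch from earlier ticks.  If a note-on
                        # occurred at this same tick, keep it open instead;
                        # zero-length notes are never created.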
|
|
|
|
|
|
|
|
|
|
|
|
|
end_tick = event.time |
|
open_notes = last_note_on[key] |
|
|
|
notes_to_close = [ |
|
(start_tick, velocity) |
|
for start_tick, velocity in open_notes |
|
if start_tick != end_tick] |
|
notes_to_keep = [ |
|
(start_tick, velocity) |
|
for start_tick, velocity in open_notes |
|
if start_tick == end_tick] |
|
|
|
for start_tick, velocity in notes_to_close: |
|
start_time = self.__tick_to_time[start_tick] |
|
end_time = self.__tick_to_time[end_tick] |
|
|
|
note = Note(velocity, event.note, start_time, |
|
end_time) |
|
|
|
|
|
program = current_instrument[event.channel] |
|
|
|
|
|
|
|
instrument = __get_instrument( |
|
                                program, event.channel, track_idx, True)
|
|
|
instrument.notes.append(note) |
|
|
|
if len(notes_to_close) > 0 and len(notes_to_keep) > 0: |
|
|
|
|
|
last_note_on[key] = notes_to_keep |
|
else: |
|
|
|
del last_note_on[key] |
|
|
|
elif event.type == 'pitchwheel': |
|
|
|
bend = PitchBend(event.pitch, |
|
self.__tick_to_time[event.time]) |
|
|
|
program = current_instrument[event.channel] |
|
|
|
|
|
instrument = __get_instrument( |
|
                        program, event.channel, track_idx, False)
|
|
|
instrument.pitch_bends.append(bend) |
|
|
|
elif event.type == 'control_change': |
|
control_change = ControlChange( |
|
event.control, event.value, |
|
self.__tick_to_time[event.time]) |
|
|
|
program = current_instrument[event.channel] |
|
|
|
|
|
instrument = __get_instrument( |
|
                        program, event.channel, track_idx, False)
|
|
|
instrument.control_changes.append(control_change) |
|
|
|
self.instruments = [i for i in instrument_map.values()] |
|
|
|
def get_tempo_changes(self): |
|
"""Return arrays of tempo changes in quarter notes-per-minute and their |
|
times. |
|
|
|
Returns |
|
------- |
|
tempo_change_times : np.ndarray |
|
Times, in seconds, where the tempo changes. |
|
tempi : np.ndarray |
|
What the tempo is, in quarter notes-per-minute, at each point in |
|
time in ``tempo_change_times``. |
|
|
|
""" |
|
|
|
|
|
tempo_change_times = np.zeros(len(self._tick_scales)) |
|
tempi = np.zeros(len(self._tick_scales)) |
|
for n, (tick, tick_scale) in enumerate(self._tick_scales): |
|
|
|
tempo_change_times[n] = self.tick_to_time(tick) |
|
|
|
tempi[n] = 60.0/(tick_scale*self.resolution) |
|
return tempo_change_times, tempi |
|
|
|
def get_end_time(self): |
|
"""Returns the time of the end of the MIDI object (time of the last |
|
event in all instruments/meta-events). |
|
|
|
Returns |
|
------- |
|
end_time : float |
|
Time, in seconds, where this MIDI file ends. |
|
|
|
""" |
|
|
|
meta_events = [self.time_signature_changes, self.key_signature_changes, |
|
self.lyrics, self.text_events] |
|
times = ([i.get_end_time() for i in self.instruments] + |
|
[e.time for m in meta_events for e in m] + |
|
self.get_tempo_changes()[0].tolist()) |
|
|
|
if len(times) == 0: |
|
return 0. |
|
else: |
|
return max(times) |
|
|
|
def estimate_tempi(self): |
|
"""Return an empirical estimate of tempos and each tempo's probability. |
|
Based on "Automatic Extraction of Tempo and Beat from Expressive |
|
Performance", Dixon 2001. |
|
|
|
Returns |
|
------- |
|
tempos : np.ndarray |
|
Array of estimated tempos, in beats per minute. |
|
probabilities : np.ndarray |
|
Array of the probabilities of each tempo estimate. |
|
|
|
""" |
|
|
|
onsets = self.get_onsets() |
|
|
|
ioi = np.diff(onsets) |
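
        # Drop intervals shorter than 50 ms or longer than 2 s, which are
        # unlikely to correspond to the beat period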
|
|
|
|
|
ioi = ioi[ioi > .05] |
|
ioi = ioi[ioi < 2] |
|
|
|
for n in range(ioi.shape[0]): |
|
while ioi[n] < .2: |
|
ioi[n] *= 2 |
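
        # Cluster the intervals: each interval either joins the nearest
        # existing cluster (within 25 ms) or starts a new one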
|
|
|
clusters = np.array([]) |
|
|
|
cluster_counts = np.array([]) |
|
for interval in ioi: |
|
|
|
if (np.abs(clusters - interval) < .025).any(): |
|
                k = np.argmin(np.abs(clusters - interval))
|
|
|
clusters[k] = (cluster_counts[k]*clusters[k] + |
|
interval)/(cluster_counts[k] + 1) |
|
|
|
cluster_counts[k] += 1 |
|
|
|
else: |
|
clusters = np.append(clusters, interval) |
|
cluster_counts = np.append(cluster_counts, 1.) |
|
|
|
cluster_sort = np.argsort(cluster_counts)[::-1] |
|
clusters = clusters[cluster_sort] |
|
cluster_counts = cluster_counts[cluster_sort] |
|
|
|
cluster_counts /= cluster_counts.sum() |
|
return 60./clusters, cluster_counts |
|
|
|
def estimate_tempo(self): |
|
"""Returns the best tempo estimate from |
|
:func:`pretty_midi.PrettyMIDI.estimate_tempi()`, for convenience. |
|
|
|
Returns |
|
------- |
|
tempo : float |
|
Estimated tempo, in bpm |
|
|
|
""" |
|
tempi = self.estimate_tempi()[0] |
|
if tempi.size == 0: |
|
raise ValueError("Can't provide a global tempo estimate when there" |
|
" are fewer than two notes.") |
|
return tempi[0] |
|
|
|
def get_beats(self, start_time=0.): |
|
"""Returns a list of beat locations, according to MIDI tempo changes. |
|
For compound meters (any whose numerator is a multiple of 3 greater |
|
than 3), this method returns every third denominator note (for 6/8 |
|
or 6/16 time, for example, it will return every third 8th note or |
|
16th note, respectively). For all other meters, this method returns |
|
every denominator note (every quarter note for 3/4 or 4/4 time, for |
|
example). |
|
|
|
Parameters |
|
---------- |
|
start_time : float |
|
Location of the first beat, in seconds. |
|
|
|
Returns |
|
------- |
|
beats : np.ndarray |
|
Beat locations, in seconds. |
|
|
|
""" |
|
|
|
tempo_change_times, tempi = self.get_tempo_changes() |
|
|
|
beats = [start_time] |
|
|
|
tempo_idx = 0 |
|
|
|
while (tempo_idx < tempo_change_times.shape[0] - 1 and |
|
beats[-1] > tempo_change_times[tempo_idx + 1]): |
|
tempo_idx += 1 |
|
|
|
self.time_signature_changes.sort(key=lambda ts: ts.time) |
|
|
|
ts_idx = 0 |
|
|
|
while (ts_idx < len(self.time_signature_changes) - 1 and |
|
beats[-1] >= self.time_signature_changes[ts_idx + 1].time): |
|
ts_idx += 1 |
|
|
|
def get_current_bpm(): |
|
            ''' Convenience function which computes the current BPM based on
            the current tempo change and time signature events '''
|
|
|
if self.time_signature_changes: |
|
return qpm_to_bpm( |
|
tempi[tempo_idx], |
|
self.time_signature_changes[ts_idx].numerator, |
|
self.time_signature_changes[ts_idx].denominator) |
|
|
|
else: |
|
return tempi[tempo_idx] |
|
|
|
def gt_or_close(a, b): |
|
''' Returns True if a > b or a is close to b ''' |
|
return a > b or np.isclose(a, b) |
|
|
|
|
|
end_time = self.get_end_time() |
|
|
|
while beats[-1] < end_time: |
|
|
|
bpm = get_current_bpm() |
|
|
|
next_beat = beats[-1] + 60.0/bpm |
|
|
|
if (tempo_idx < tempo_change_times.shape[0] - 1 and |
|
next_beat > tempo_change_times[tempo_idx + 1]): |
|
|
|
next_beat = beats[-1] |
|
|
|
beat_remaining = 1.0 |
|
|
|
|
|
while (tempo_idx < tempo_change_times.shape[0] - 1 and |
|
next_beat + beat_remaining*60.0/bpm >= |
|
tempo_change_times[tempo_idx + 1]): |
|
|
|
overshot_ratio = (tempo_change_times[tempo_idx + 1] - |
|
next_beat)/(60.0/bpm) |
|
|
|
next_beat += overshot_ratio*60.0/bpm |
|
|
|
beat_remaining -= overshot_ratio |
|
|
|
tempo_idx = tempo_idx + 1 |
|
|
|
bpm = get_current_bpm() |
|
|
|
next_beat += beat_remaining*60./bpm |
|
|
|
if self.time_signature_changes and ts_idx == 0: |
|
current_ts_time = self.time_signature_changes[ts_idx].time |
|
if (current_ts_time > beats[-1] and |
|
gt_or_close(next_beat, current_ts_time)): |
|
|
|
next_beat = current_ts_time |
|
|
|
|
|
if ts_idx < len(self.time_signature_changes) - 1: |
|
|
|
next_ts_time = self.time_signature_changes[ts_idx + 1].time |
|
if gt_or_close(next_beat, next_ts_time): |
|
|
|
next_beat = next_ts_time |
|
|
|
ts_idx += 1 |
|
|
|
bpm = get_current_bpm() |
|
beats.append(next_beat) |
|
|
|
beats = np.array(beats[:-1]) |
|
return beats |
|
|
|
def estimate_beat_start(self, candidates=10, tolerance=.025): |
|
"""Estimate the location of the first beat based on which of the first |
|
few onsets results in the best correlation with the onset spike train. |
|
|
|
Parameters |
|
---------- |
|
candidates : int |
|
Number of candidate onsets to try. |
|
tolerance : float |
|
            Tolerance, in seconds: a beat counts as matching an onset when
            it falls within this distance of it.
|
|
|
Returns |
|
------- |
|
beat_start : float |
|
The offset which is chosen as the beat start location. |
|
""" |
|
|
|
note_list = [n for i in self.instruments for n in i.notes] |
|
if not note_list: |
|
raise ValueError( |
|
"Can't estimate beat start when there are no notes.") |
|
note_list.sort(key=lambda note: note.start) |
|
|
|
beat_candidates = [] |
|
|
|
start_times = [] |
|
onset_index = 0 |
|
|
|
while (len(beat_candidates) <= candidates and |
|
len(beat_candidates) <= len(note_list) and |
|
onset_index < len(note_list)): |
|
|
|
if onset_index == 0 or np.abs(note_list[onset_index - 1].start - |
|
note_list[onset_index].start) > .001: |
|
beat_candidates.append( |
|
self.get_beats(note_list[onset_index].start)) |
|
start_times.append(note_list[onset_index].start) |
|
onset_index += 1 |
|
|
|
onset_scores = np.zeros(len(beat_candidates)) |
|
|
|
fs = 1000 |
|
onset_signal = np.zeros(int(fs*(self.get_end_time() + 1))) |
|
for note in note_list: |
|
onset_signal[int(note.start*fs)] += note.velocity |
|
for n, beats in enumerate(beat_candidates): |
|
|
|
beat_signal = np.zeros(int(fs*(self.get_end_time() + 1))) |
|
for beat in np.append(0, beats): |
|
if beat - tolerance < 0: |
|
beat_window = np.ones( |
|
int(fs*2*tolerance + (beat - tolerance)*fs)) |
|
beat_signal[:int((beat + tolerance)*fs)] = beat_window |
|
else: |
|
beat_start = int((beat - tolerance)*fs) |
|
beat_end = beat_start + int(fs*tolerance*2) |
|
beat_window = np.ones(int(fs*tolerance*2)) |
|
beat_signal[beat_start:beat_end] = beat_window |
|
|
|
onset_scores[n] = np.dot(beat_signal, onset_signal)/beats.shape[0] |
|
|
|
return start_times[np.argmax(onset_scores)] |
|
|
|
def get_downbeats(self, start_time=0.): |
|
"""Return a list of downbeat locations, according to MIDI tempo changes |
|
and time signature change events. |
|
|
|
Parameters |
|
---------- |
|
start_time : float |
|
Location of the first downbeat, in seconds. |
|
|
|
Returns |
|
------- |
|
downbeats : np.ndarray |
|
Downbeat locations, in seconds. |
|
|
|
""" |
|
|
|
beats = self.get_beats(start_time) |
|
|
|
time_signatures = copy.deepcopy(self.time_signature_changes) |
|
|
|
|
|
|
|
if not time_signatures or time_signatures[0].time > start_time: |
|
time_signatures.insert(0, TimeSignature(4, 4, start_time)) |
|
|
|
def index(array, value, default): |
|
""" Returns the first index of a value in an array, or `default` if |
|
the value doesn't appear in the array.""" |
|
idx = np.flatnonzero(np.isclose(array, value)) |
|
if idx.size > 0: |
|
return idx[0] |
|
else: |
|
return default |
|
|
|
downbeats = [] |
|
end_beat_idx = 0 |
|
|
|
for start_ts, end_ts in zip(time_signatures[:-1], time_signatures[1:]): |
|
|
|
start_beat_idx = index(beats, start_ts.time, 0) |
|
|
|
end_beat_idx = index(beats, end_ts.time, start_beat_idx) |
|
|
|
|
|
if start_ts.numerator % 3 == 0 and start_ts.numerator != 3: |
|
downbeats.append(beats[ |
|
start_beat_idx:end_beat_idx:(start_ts.numerator // 3)]) |
|
else: |
|
downbeats.append(beats[ |
|
start_beat_idx:end_beat_idx:start_ts.numerator]) |
|
|
|
final_ts = time_signatures[-1] |
|
start_beat_idx = index(beats, final_ts.time, end_beat_idx) |
|
if final_ts.numerator % 3 == 0 and final_ts.numerator != 3: |
|
downbeats.append(beats[start_beat_idx::(final_ts.numerator // 3)]) |
|
else: |
|
downbeats.append(beats[start_beat_idx::final_ts.numerator]) |
|
|
|
downbeats = np.concatenate(downbeats) |
|
|
|
return downbeats[downbeats >= start_time] |
|
|
|
def get_onsets(self): |
|
"""Return a sorted list of the times of all onsets of all notes from |
|
all instruments. May have duplicate entries. |
|
|
|
Returns |
|
------- |
|
onsets : np.ndarray |
|
Onset locations, in seconds. |
|
|
|
""" |
|
onsets = np.array([]) |
|
|
|
for instrument in self.instruments: |
|
onsets = np.append(onsets, instrument.get_onsets()) |
|
|
|
return np.sort(onsets) |
|
|
|
    def get_piano_roll(self, fs=100, times=None, pedal_threshold=64,
                       onset=False):
|
"""Compute a piano roll matrix of the MIDI data. |
|
|
|
Parameters |
|
---------- |
|
fs : int |
|
Sampling frequency of the columns, i.e. each column is spaced apart |
|
by ``1./fs`` seconds. |
|
times : np.ndarray |
|
Times of the start of each column in the piano roll. |
|
Default ``None`` which is ``np.arange(0, get_end_time(), 1./fs)``. |
|
        pedal_threshold : int
            A sustain pedal (control change 64) message with a value below
            this threshold is treated as pedal-off; while the pedal is held,
            notes are elongated in the piano roll.
            If ``None``, CC 64 messages are ignored.
            Default is 64.
        onset : bool
            If ``True``, also compute and return an onset roll, which is
            nonzero only at the frames where notes begin.
            Default is ``False``.
|
|
|
Returns |
|
------- |
|
        piano_roll : np.ndarray, shape=(128,times.shape[0])
            Piano roll of MIDI data, flattened across instruments.
        onset_roll : np.ndarray, shape=(128,times.shape[0])
            Onset locations of notes, flattened across instruments.
            Only returned when ``onset=True``.
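
        Examples
        --------
        A sketch, assuming ``pm`` is an existing :class:`PrettyMIDI`
        instance:

        >>> piano_roll = pm.get_piano_roll(fs=100)
        >>> piano_roll, onset_roll = pm.get_piano_roll(fs=100, onset=True)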
|
|
|
""" |
|
|
|
|
|
if len(self.instruments) == 0: |
|
return np.zeros((128, 0)) |
|
|
|
|
|
piano_rolls = [] |
|
onset_rolls = [] |
|
for i in self.instruments: |
|
if onset: |
|
                piano_roll, onset_roll = i.get_piano_roll(
                    fs=fs, times=times, pedal_threshold=pedal_threshold,
                    onset=onset)
|
onset_rolls.append(onset_roll) |
|
else: |
|
                piano_roll = i.get_piano_roll(
                    fs=fs, times=times, pedal_threshold=pedal_threshold,
                    onset=onset)
|
piano_rolls.append(piano_roll) |
|
|
|
|
|
piano_roll = np.zeros((128, np.max([p.shape[1] for p in piano_rolls]))) |
|
if onset: |
|
onset_roll = np.zeros((128, np.max([p.shape[1] for p in onset_rolls]))) |
|
|
|
for roll in piano_rolls: |
|
piano_roll[:, :roll.shape[1]] += roll |
|
if onset: |
|
for roll in onset_rolls: |
|
onset_roll[:, :roll.shape[1]] += roll |
|
onset_roll = np.clip(onset_roll, 0, 127) |
|
if onset: |
|
return piano_roll, onset_roll |
|
else: |
|
return piano_roll |
|
|
|
def get_intervals_and_pitches(self): |
|
        """Return the start/end intervals and frequencies of all notes across
        all instruments, sorted by note start time.

        Returns
        -------
        intervals : np.ndarray, shape=(n_notes, 2)
            Start and end time, in seconds, of each note.
        pitches : np.ndarray, shape=(n_notes,)
            Frequency of each note, in Hz.

        """
        notes = [n for i in self.instruments for n in i.notes]
|
notes = sorted(notes, key=lambda n: n.start) |
|
intervals = np.array([[n.start, n.end] for n in notes]) |
|
pitches = np.array([note_number_to_hz(n.pitch) for n in notes]) |
|
return intervals, pitches |
|
|
|
def get_pitch_class_histogram(self, use_duration=False, |
|
use_velocity=False, normalize=True): |
|
"""Computes the histogram of pitch classes. |
|
|
|
Parameters |
|
---------- |
|
use_duration : bool |
|
Weight frequency by note duration. |
|
use_velocity : bool |
|
Weight frequency by note velocity. |
|
normalize : bool |
|
Normalizes the histogram such that the sum of bin values is 1. |
|
|
|
Returns |
|
------- |
|
histogram : np.ndarray, shape=(12,) |
|
Histogram of pitch classes given all tracks, optionally weighted |
|
by their durations or velocities. |
|
""" |
|
|
|
histogram = sum([ |
|
i.get_pitch_class_histogram(use_duration, use_velocity) |
|
for i in self.instruments], np.zeros(12)) |
|
|
|
|
|
if normalize: |
|
histogram /= (histogram.sum() + (histogram.sum() == 0)) |
|
|
|
return histogram |
|
|
|
def get_pitch_class_transition_matrix(self, normalize=False, |
|
time_thresh=0.05): |
|
"""Computes the total pitch class transition matrix of all instruments. |
|
Transitions are added whenever the end of a note is within |
|
``time_thresh`` from the start of any other note. |
|
|
|
Parameters |
|
---------- |
|
normalize : bool |
|
            Normalize the transition matrix so that its entries sum to 1.
|
time_thresh : float |
|
            Maximum time, in seconds, between the end of one note and the
            start of another for a transition to be added.
|
|
|
Returns |
|
------- |
|
pitch_class_transition_matrix : np.ndarray, shape=(12,12) |
|
Pitch class transition matrix. |
|
""" |
|
|
|
pc_trans_mat = sum( |
|
[i.get_pitch_class_transition_matrix(normalize, time_thresh) |
|
for i in self.instruments], np.zeros((12, 12))) |
|
|
|
|
|
if normalize: |
|
pc_trans_mat /= (pc_trans_mat.sum() + (pc_trans_mat.sum() == 0)) |
|
|
|
return pc_trans_mat |
|
|
|
def get_chroma(self, fs=100, times=None, pedal_threshold=64): |
|
"""Get the MIDI data as a sequence of chroma vectors. |
|
|
|
Parameters |
|
---------- |
|
fs : int |
|
Sampling frequency of the columns, i.e. each column is spaced apart |
|
by ``1./fs`` seconds. |
|
times : np.ndarray |
|
Times of the start of each column in the piano roll. |
|
Default ``None`` which is ``np.arange(0, get_end_time(), 1./fs)``. |
|
        pedal_threshold : int
            A sustain pedal (control change 64) message with a value below
            this threshold is treated as pedal-off; while the pedal is held,
            notes are elongated in the piano roll.
            If ``None``, CC 64 messages are ignored.
            Default is 64.
|
|
|
Returns |
|
------- |
|
        chroma : np.ndarray, shape=(12,times.shape[0])
            Chromagram of MIDI data, flattened across instruments.
|
|
|
""" |
|
|
|
piano_roll = self.get_piano_roll(fs=fs, times=times, |
|
pedal_threshold=pedal_threshold) |
|
|
|
chroma_matrix = np.zeros((12, piano_roll.shape[1])) |
|
for note in range(12): |
|
chroma_matrix[note, :] = np.sum(piano_roll[note::12], axis=0) |
|
return chroma_matrix |
|
|
|
def synthesize(self, fs=44100, wave=np.sin): |
|
"""Synthesize the pattern using some waveshape. Ignores drum track. |
|
|
|
Parameters |
|
---------- |
|
fs : int |
|
Sampling rate of the synthesized audio signal. |
|
wave : function |
|
Function which returns a periodic waveform, |
|
e.g. ``np.sin``, ``scipy.signal.square``, etc. |
|
|
|
Returns |
|
------- |
|
synthesized : np.ndarray |
|
Waveform of the MIDI data, synthesized at ``fs``. |
|
|
|
""" |
|
|
|
if len(self.instruments) == 0: |
|
return np.array([]) |
|
|
|
waveforms = [i.synthesize(fs=fs, wave=wave) for i in self.instruments] |
|
|
|
synthesized = np.zeros(np.max([w.shape[0] for w in waveforms])) |
|
|
|
for waveform in waveforms: |
|
synthesized[:waveform.shape[0]] += waveform |
|
|
|
synthesized /= np.abs(synthesized).max() |
|
return synthesized |
|
|
|
def fluidsynth(self, fs=None, synthesizer=None, sfid=0, sf2_path=None): |
|
"""Synthesize using fluidsynth. |
|
|
|
Parameters |
|
---------- |
|
fs : int |
|
Sampling rate to synthesize at. |
|
Default ``None``, which takes the sampling rate from ``synthesizer``, or |
|
uses ``pretty_midi.fluidsynth.DEFAULT_SAMPLE_RATE`` = 44100 if a synthesizer |
|
needs to be created. |
|
synthesizer : fluidsynth.Synth or str |
|
fluidsynth.Synth instance to use or a string with the path to a .sf2 file. |
|
Default ``None``, which creates a new instance using the TimGM6mb.sf2 file |
|
included with ``pretty_midi``. |
|
sfid : int |
|
Soundfont ID to use if an instance of fluidsynth.Synth is provided. |
|
Default ``0``, which uses the first soundfont. |
|
sf2_path : str |
|
Path to a .sf2 file. |
|
Default ``None``, which uses the TimGM6mb.sf2 file included with |
|
``pretty_midi``. |
|
            .. deprecated:: 0.2.11
                Use ``synthesizer`` instead.
|
|
|
Returns |
|
------- |
|
synthesized : np.ndarray |
|
Waveform of the MIDI data, synthesized at ``fs``. |
|
|
|
""" |
|
|
|
if sf2_path is not None: |
|
warn("The parameter 'sf2_path' is deprecated, please use 'synthesizer' instead.", |
|
DeprecationWarning, 2) |
|
if synthesizer is not None: |
|
raise ValueError("sf2_path and synthesizer cannot both be supplied.") |
|
else: |
|
synthesizer = sf2_path |
|
|
|
|
|
|
|
if len(self.instruments) == 0 or all(len(i.notes) == 0 |
|
for i in self.instruments): |
|
return np.array([]) |
|
|
|
|
|
synthesizer, sfid, delete_synthesizer = get_fluidsynth_instance(synthesizer, sfid, fs) |
|
|
|
|
|
waveforms = [i.fluidsynth(synthesizer=synthesizer, sfid=sfid) |
|
for i in self.instruments] |
|
|
|
|
|
if delete_synthesizer: |
|
synthesizer.delete() |
|
|
|
|
|
synthesized = np.zeros(np.max([w.shape[0] for w in waveforms])) |
|
|
|
for waveform in waveforms: |
|
synthesized[:waveform.shape[0]] += waveform |
|
|
|
synthesized /= np.abs(synthesized).max() |
|
return synthesized |
|
|
|
def tick_to_time(self, tick): |
|
"""Converts from an absolute tick to time in seconds using |
|
``self.__tick_to_time``. |
|
|
|
Parameters |
|
---------- |
|
tick : int |
|
Absolute tick to convert. |
|
|
|
Returns |
|
------- |
|
time : float |
|
Time in seconds of tick. |
|
|
|
""" |
|
|
|
if tick >= MAX_TICK: |
|
raise IndexError('Supplied tick is too large.') |
|
|
|
if tick >= len(self.__tick_to_time): |
|
self._update_tick_to_time(tick) |
|
|
|
if not isinstance(tick, int): |
|
warnings.warn('tick should be an int.') |
|
|
|
return self.__tick_to_time[int(tick)] |
|
|
|
def time_to_tick(self, time): |
|
"""Converts from a time in seconds to absolute tick using |
|
``self._tick_scales``. |
|
|
|
Parameters |
|
---------- |
|
time : float |
|
Time, in seconds. |
|
|
|
Returns |
|
------- |
|
tick : int |
|
Absolute tick corresponding to the supplied time. |
|
|
|
""" |
|
|
|
tick = np.searchsorted(self.__tick_to_time, time, side="left") |
|
|
|
if tick == len(self.__tick_to_time): |
|
|
|
tick -= 1 |
|
|
|
_, final_tick_scale = self._tick_scales[-1] |
|
tick += (time - self.__tick_to_time[tick])/final_tick_scale |
|
|
|
return int(round(tick)) |
|
|
|
if tick and (math.fabs(time - self.__tick_to_time[tick - 1]) < |
|
math.fabs(time - self.__tick_to_time[tick])): |
|
|
|
return tick - 1 |
|
else: |
|
return tick |
|
|
|
def adjust_times(self, original_times, new_times): |
|
"""Adjusts the timing of the events in the MIDI object. |
|
The parameters ``original_times`` and ``new_times`` define a mapping, |
|
so that if an event originally occurs at time ``original_times[n]``, it |
|
will be moved so that it occurs at ``new_times[n]``. If events don't |
|
occur exactly on a time in ``original_times``, their timing will be |
|
linearly interpolated. |
|
|
|
Parameters |
|
---------- |
|
original_times : np.ndarray |
|
Times to map from. |
|
new_times : np.ndarray |
|
New times to map to. |
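
        Examples
        --------
        A sketch: linearly compress the first ten seconds of ``pm`` (an
        existing :class:`PrettyMIDI` instance) into five seconds.

        >>> pm.adjust_times([0., 10.], [0., 5.])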
|
|
|
""" |
|
|
|
|
|
original_downbeats = self.get_downbeats() |
|
|
|
|
|
original_size = len(original_times) |
|
original_times, unique_idx = np.unique(original_times, |
|
return_index=True) |
|
if ((unique_idx.size != original_size) or |
|
any(unique_idx != np.arange(unique_idx.size))): |
|
warnings.warn('original_times must be strictly increasing; ' |
|
'automatically enforcing this.') |
|
new_times = np.asarray(new_times)[unique_idx] |
|
if not np.all(np.diff(new_times) >= 0): |
|
warnings.warn('new_times must be monotonic; ' |
|
'automatically enforcing this.') |
|
new_times = np.maximum.accumulate(new_times) |
|
|
|
for instrument in self.instruments: |
|
instrument.notes = [copy.deepcopy(note) |
|
for note in instrument.notes |
|
if note.start >= original_times[0] and |
|
note.end <= original_times[-1]] |
|
|
|
note_ons = np.array([note.start for instrument in self.instruments |
|
for note in instrument.notes]) |
|
adjusted_note_ons = np.interp(note_ons, original_times, new_times) |
|
|
|
note_offs = np.array([note.end for instrument in self.instruments |
|
for note in instrument.notes]) |
|
adjusted_note_offs = np.interp(note_offs, original_times, new_times) |
|
|
|
for n, note in enumerate([note for instrument in self.instruments |
|
for note in instrument.notes]): |
|
note.start = (adjusted_note_ons[n] > 0)*adjusted_note_ons[n] |
|
note.end = (adjusted_note_offs[n] > 0)*adjusted_note_offs[n] |
|
|
|
|
|
self.remove_invalid_notes() |
|
|
|
def adjust_events(event_getter): |
|
""" This function calls event_getter with each instrument as the |
|
sole argument and adjusts the events which are returned.""" |
|
|
|
for instrument in self.instruments: |
|
event_getter(instrument).sort(key=lambda e: e.time) |
|
|
|
event_times = np.array( |
|
[event.time for instrument in self.instruments |
|
for event in event_getter(instrument)]) |
|
adjusted_event_times = np.interp( |
|
event_times, original_times, new_times) |
|
for n, event in enumerate([event for instrument in self.instruments |
|
for event in event_getter(instrument)]): |
|
event.time = adjusted_event_times[n] |
|
for instrument in self.instruments: |
|
|
|
|
|
valid_events = [event for event in event_getter(instrument) |
|
if event.time == new_times[0]] |
|
if valid_events: |
|
valid_events = valid_events[-1:] |
|
|
|
valid_events.extend( |
|
event for event in event_getter(instrument) |
|
if event.time > new_times[0] and |
|
event.time < new_times[-1]) |
|
event_getter(instrument)[:] = valid_events |
|
|
|
|
|
adjust_events(lambda i: i.pitch_bends) |
|
adjust_events(lambda i: i.control_changes) |
|
|
|
def adjust_meta(events): |
|
""" This function adjusts the timing of the track-level meta-events |
|
in the provided list""" |
|
|
|
events.sort(key=lambda e: e.time) |
|
|
|
event_times = np.array([event.time for event in events]) |
|
adjusted_event_times = np.interp( |
|
event_times, original_times, new_times) |
|
for event, adjusted_event_time in zip(events, |
|
adjusted_event_times): |
|
event.time = adjusted_event_time |
|
|
|
valid_events = [event for event in events |
|
if event.time == new_times[0]] |
|
if valid_events: |
|
valid_events = valid_events[-1:] |
|
|
|
valid_events.extend( |
|
event for event in events |
|
if event.time > new_times[0] and event.time < new_times[-1]) |
|
events[:] = valid_events |
|
|
|
|
|
adjust_meta(self.key_signature_changes) |
|
|
|
adjust_meta(self.lyrics) |
|
|
|
adjust_meta(self.text_events) |
|
|
|
|
|
original_downbeats = original_downbeats[ |
|
original_downbeats >= original_times[0]] |
|
|
|
adjusted_downbeats = np.interp( |
|
original_downbeats, original_times, new_times) |
|
|
|
adjust_meta(self.time_signature_changes) |
|
|
|
if adjusted_downbeats.size > 0: |
|
|
|
|
|
ts_changes_before_downbeat = [ |
|
t for t in self.time_signature_changes |
|
if t.time <= adjusted_downbeats[0]] |
|
if ts_changes_before_downbeat: |
|
ts_changes_before_downbeat[-1].time = adjusted_downbeats[0] |
|
|
|
|
|
self.time_signature_changes = [ |
|
t for t in self.time_signature_changes |
|
if t.time >= adjusted_downbeats[0]] |
|
else: |
|
|
|
self.time_signature_changes.insert( |
|
0, TimeSignature(4, 4, adjusted_downbeats[0])) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
self._update_tick_to_time(self.time_to_tick(original_times[-1])) |
|
original_times = [self.__tick_to_time[self.time_to_tick(time)] |
|
for time in original_times] |
|
|
|
tempo_change_times, tempo_changes = self.get_tempo_changes() |
|
|
|
|
|
|
|
non_repeats = [0] + [n for n in range(1, len(new_times)) |
|
if new_times[n - 1] != new_times[n] and |
|
original_times[n - 1] != original_times[n]] |
|
new_times = [new_times[n] for n in non_repeats] |
|
original_times = [original_times[n] for n in non_repeats] |
|
|
|
|
|
speed_scales = np.diff(original_times)/np.diff(new_times) |
|
|
|
tempo_idx = 0 |
|
while (tempo_idx + 1 < len(tempo_changes) and |
|
original_times[0] >= tempo_change_times[tempo_idx + 1]): |
|
tempo_idx += 1 |
|
|
|
new_tempo_change_times, new_tempo_changes = [], [] |
|
for start_time, end_time, speed_scale in zip( |
|
original_times[:-1], original_times[1:], speed_scales): |
|
|
|
new_tempo_change_times.append(start_time) |
|
new_tempo_changes.append(tempo_changes[tempo_idx]*speed_scale) |
|
|
|
while (tempo_idx + 1 < len(tempo_changes) and |
|
start_time <= tempo_change_times[tempo_idx + 1] and |
|
end_time > tempo_change_times[tempo_idx + 1]): |
|
tempo_idx += 1 |
|
new_tempo_change_times.append(tempo_change_times[tempo_idx]) |
|
new_tempo_changes.append(tempo_changes[tempo_idx]*speed_scale) |
|
|
|
new_tempo_change_times = np.interp( |
|
new_tempo_change_times, original_times, new_times) |
|
|
|
|
|
|
|
|
|
if new_tempo_change_times[0] == 0: |
|
last_tick = 0 |
|
new_tempo_change_times = new_tempo_change_times[1:] |
|
last_tick_scale = 60.0/(new_tempo_changes[0]*self.resolution) |
|
new_tempo_changes = new_tempo_changes[1:] |
|
else: |
|
last_tick, last_tick_scale = 0, 60.0/(120.0*self.resolution) |
|
self._tick_scales = [(last_tick, last_tick_scale)] |
|
|
|
|
|
previous_time = 0. |
|
for time, tempo in zip(new_tempo_change_times, new_tempo_changes): |
|
|
|
|
|
tick = last_tick + (time - previous_time)/last_tick_scale |
|
|
|
tick_scale = 60.0/(tempo*self.resolution) |
|
|
|
if tick_scale != last_tick_scale: |
|
|
|
self._tick_scales.append((int(round(tick)), tick_scale)) |
|
|
|
previous_time = time |
|
last_tick, last_tick_scale = tick, tick_scale |
|
|
|
self._update_tick_to_time(self._tick_scales[-1][0] + 1) |
|
|
|
def remove_invalid_notes(self): |
|
"""Removes any notes whose end time is before or at their start time. |
|
|
|
""" |
|
|
|
for instrument in self.instruments: |
|
instrument.remove_invalid_notes() |
|
|
|
def write(self, filename): |
|
"""Write the MIDI data out to a .mid file. |
|
|
|
Parameters |
|
---------- |
|
filename : str or file |
|
Path or file to write .mid file to. |
|
|
|
""" |
|
|
|
def event_compare(event1, event2): |
|
"""Compares two events for sorting. |
|
|
|
Events are sorted by tick time ascending. Events with the same tick |
|
            time are sorted by event type. Some events are sorted by
|
additional values. For example, Note On events are sorted by pitch |
|
then velocity, ensuring that a Note Off (Note On with velocity 0) |
|
will never follow a Note On with the same pitch. |
|
|
|
Parameters |
|
---------- |
|
event1, event2 : mido.Message |
|
Two events to be compared. |
|
""" |
|
|
|
|
|
|
|
|
|
|
|
secondary_sort = { |
|
'set_tempo': lambda e: (1 * 256 * 256), |
|
'time_signature': lambda e: (2 * 256 * 256), |
|
'key_signature': lambda e: (3 * 256 * 256), |
|
'lyrics': lambda e: (4 * 256 * 256), |
|
                'text': lambda e: (5 * 256 * 256),
|
'program_change': lambda e: (6 * 256 * 256), |
|
'pitchwheel': lambda e: ((7 * 256 * 256) + e.pitch), |
|
'control_change': lambda e: ( |
|
(8 * 256 * 256) + (e.control * 256) + e.value), |
|
'note_off': lambda e: ((9 * 256 * 256) + (e.note * 256)), |
|
'note_on': lambda e: ( |
|
(10 * 256 * 256) + (e.note * 256) + e.velocity), |
|
'end_of_track': lambda e: (11 * 256 * 256) |
|
} |
|
|
|
|
|
|
|
if (event1.time == event2.time and |
|
event1.type in secondary_sort and |
|
event2.type in secondary_sort): |
|
return (secondary_sort[event1.type](event1) - |
|
secondary_sort[event2.type](event2)) |
|
|
|
return event1.time - event2.time |
|
|
|
|
|
mid = mido.MidiFile(ticks_per_beat=self.resolution, charset=self._charset) |
|
|
|
timing_track = mido.MidiTrack() |
|
|
|
add_ts = True |
|
if self.time_signature_changes: |
|
add_ts = min([ts.time for ts in self.time_signature_changes]) > 0.0 |
|
if add_ts: |
|
|
|
timing_track.append(mido.MetaMessage( |
|
'time_signature', time=0, numerator=4, denominator=4)) |
|
|
|
|
|
for (tick, tick_scale) in self._tick_scales: |
|
timing_track.append(mido.MetaMessage( |
|
'set_tempo', time=tick, |
|
|
|
tempo=int(6e7/(60./(tick_scale*self.resolution))))) |
|
|
|
for ts in self.time_signature_changes: |
|
timing_track.append(mido.MetaMessage( |
|
'time_signature', time=self.time_to_tick(ts.time), |
|
numerator=ts.numerator, denominator=ts.denominator)) |
|
|
|
|
|
|
|
key_number_to_mido_key_name = [ |
|
'C', 'Db', 'D', 'Eb', 'E', 'F', 'F#', 'G', 'Ab', 'A', 'Bb', 'B', |
|
'Cm', 'C#m', 'Dm', 'D#m', 'Em', 'Fm', 'F#m', 'Gm', 'G#m', 'Am', |
|
'Bbm', 'Bm'] |
|
for ks in self.key_signature_changes: |
|
timing_track.append(mido.MetaMessage( |
|
'key_signature', time=self.time_to_tick(ks.time), |
|
key=key_number_to_mido_key_name[ks.key_number])) |
|
|
|
for l in self.lyrics: |
|
timing_track.append(mido.MetaMessage( |
|
'lyrics', time=self.time_to_tick(l.time), text=l.text)) |
|
|
|
for l in self.text_events: |
|
timing_track.append(mido.MetaMessage( |
|
'text', time=self.time_to_tick(l.time), text=l.text)) |
|
|
|
timing_track.sort(key=functools.cmp_to_key(event_compare)) |
|
|
|
timing_track.append(mido.MetaMessage( |
|
'end_of_track', time=timing_track[-1].time + 1)) |
|
mid.tracks.append(timing_track) |
|
|
|
|
|
channels = list(range(16)) |
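
        # Don't assign the drum channel (channel 9) to melodic instruments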
|
|
|
channels.remove(9) |
|
for n, instrument in enumerate(self.instruments): |
|
|
|
track = mido.MidiTrack() |
|
|
|
if instrument.name: |
|
track.append(mido.MetaMessage( |
|
'track_name', time=0, name=instrument.name)) |
|
|
|
if instrument.is_drum: |
|
channel = 9 |
|
|
|
else: |
|
channel = channels[n % len(channels)] |
|
|
|
track.append(mido.Message( |
|
'program_change', time=0, program=instrument.program, |
|
channel=channel)) |
|
|
|
for note in instrument.notes: |
|
|
|
track.append(mido.Message( |
|
'note_on', time=self.time_to_tick(note.start), |
|
channel=channel, note=note.pitch, velocity=note.velocity)) |
|
|
|
track.append(mido.Message( |
|
'note_on', time=self.time_to_tick(note.end), |
|
channel=channel, note=note.pitch, velocity=0)) |
|
|
|
for bend in instrument.pitch_bends: |
|
track.append(mido.Message( |
|
'pitchwheel', time=self.time_to_tick(bend.time), |
|
channel=channel, pitch=bend.pitch)) |
|
|
|
for control_change in instrument.control_changes: |
|
track.append(mido.Message( |
|
'control_change', |
|
time=self.time_to_tick(control_change.time), |
|
channel=channel, control=control_change.number, |
|
value=control_change.value)) |
|
|
|
track = sorted(track, key=functools.cmp_to_key(event_compare)) |
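
            # If there's a note-off (note-on with velocity 0) at the same
            # tick and pitch as a note-on, put the note-off first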
|
|
|
|
|
|
|
for n, (event1, event2) in enumerate(zip(track[:-1], track[1:])): |
|
if (event1.time == event2.time and |
|
event1.type == 'note_on' and |
|
event2.type == 'note_on' and |
|
event1.note == event2.note and |
|
event1.velocity != 0 and |
|
event2.velocity == 0): |
|
track[n] = event2 |
|
track[n + 1] = event1 |
|
|
|
track.append(mido.MetaMessage( |
|
'end_of_track', time=track[-1].time + 1)) |
|
|
|
mid.tracks.append(track) |
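
        # Convert event times from absolute ticks back to relative ticks
        # before saving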
|
|
|
for track in mid.tracks: |
|
tick = 0 |
|
for event in track: |
|
event.time -= tick |
|
tick += event.time |
|
|
|
        if (isinstance(filename, six.string_types) or
                isinstance(filename, pathlib.PurePath)):
|
|
|
mid.save(filename=filename) |
|
else: |
|
|
|
mid.save(file=filename) |
|
|
|
|
|
    def get_midi_data(self):
        """Return the MIDI data as a bytestring in Standard MIDI File format.

        This is equivalent to :func:`PrettyMIDI.write`, except that the data
        is returned as ``bytes`` rather than written to a file.

        Returns
        -------
        midi_data : bytes
            Bytestring containing the MIDI data.

        """
        # Render the MIDI data into an in-memory buffer instead of a file on
        # disk; write() accepts any file-like object.
        buffer = io.BytesIO()
        self.write(buffer)
        return buffer.getvalue()
|
|