import collections
#import datetime
#import glob
import numpy as np
#import pathlib
import pandas as pd
import pretty_midi
import seaborn as sns

from matplotlib import pyplot as plt
from typing import Optional

import tensorflow as tf

import keras

from tensorflow.keras.utils import custom_object_scope

import streamlit as st

from midi2audio import FluidSynth
import tempfile
import os
import base64

def midi_to_notes(midi_file: str) -> pd.DataFrame:
  pm = pretty_midi.PrettyMIDI(midi_file)
  instrument = pm.instruments[0]
  notes = collections.defaultdict(list)

  sorted_notes = sorted(instrument.notes, key=lambda note: note.start)
  prev_start = sorted_notes[0].start

  for note in sorted_notes:
    start = note.start
    end = note.end
    notes['pitch'].append(note.pitch)
    notes['start'].append(start)
    notes['end'].append(end)
    notes['step'].append(start - prev_start)
    notes['duration'].append(end - start)
    prev_start = start

  return pd.DataFrame({name: np.array(value) for name, value in notes.items()})

def notes_to_midi(
  notes: pd.DataFrame,
  out_file: str,
  instrument_name: str,
  velocity: int = 100,
) -> pretty_midi.PrettyMIDI:
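  """Write a notes DataFrame back to a single-instrument MIDI file, rebuilding absolute times from step/duration."""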

  pm = pretty_midi.PrettyMIDI()
  instrument = pretty_midi.Instrument(
      program=pretty_midi.instrument_name_to_program(
          instrument_name))

  prev_start = 0
  for i, note in notes.iterrows():
    start = float(prev_start + note['step'])
    end = float(start + note['duration'])
    note = pretty_midi.Note(
        velocity=velocity,
        pitch=int(note['pitch']),
        start=start,
        end=end,
    )
    instrument.notes.append(note)
    prev_start = start

  pm.instruments.append(instrument)
  pm.write(out_file)
  return pm

def plot_roll(notes: pd.DataFrame, count: Optional[int] = None):
  if count:
    title = f'First {count} notes'
  else:
    title = 'Whole track'
    count = len(notes['pitch'])
  plt.figure(figsize=(20, 4))
  plot_pitch = np.stack([notes['pitch'], notes['pitch']], axis=0)
  plot_start_stop = np.stack([notes['start'], notes['end']], axis=0)
  plt.plot(
      plot_start_stop[:, :count], plot_pitch[:, :count], color="b", marker=".")
  plt.xlabel('Time [s]')
  plt.ylabel('Pitch')
  _ = plt.title(title)
  
def plot_distributions(notes: pd.DataFrame, drop_percentile=2.5):
  plt.figure(figsize=[15, 5])
  plt.subplot(1, 3, 1)
  sns.histplot(notes, x="pitch", bins=20)

  plt.subplot(1, 3, 2)
  max_step = np.percentile(notes['step'], 100 - drop_percentile)
  sns.histplot(notes, x="step", bins=np.linspace(0, max_step, 21))

  plt.subplot(1, 3, 3)
  max_duration = np.percentile(notes['duration'], 100 - drop_percentile)
  sns.histplot(notes, x="duration", bins=np.linspace(0, max_duration, 21))

def predict_next_note(
    notes: np.ndarray,
    model: tf.keras.Model,
    temperature: float = 1.0) -> tuple[int, float, float]:
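  """Sample the next (pitch, step, duration) from the model given a sequence of input notes."""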

  assert temperature > 0

  inputs = tf.expand_dims(notes, 0)

  predictions = model.predict(inputs)
  pitch_logits = predictions['pitch']
  step = predictions['step']
  duration = predictions['duration']

  pitch_logits /= temperature
  pitch = tf.random.categorical(pitch_logits, num_samples=1)
  pitch = tf.squeeze(pitch, axis=-1)
  duration = tf.squeeze(duration, axis=-1)
  step = tf.squeeze(step, axis=-1)

  step = tf.maximum(0, step)
  duration = tf.maximum(0, duration)

  return int(pitch), float(step), float(duration)

def mse_with_positive_pressure(y_true: tf.Tensor, y_pred: tf.Tensor):
  mse = (y_true - y_pred) ** 2
  positive_pressure = 10 * tf.maximum(-y_pred, 0.0)
  return tf.reduce_mean(mse + positive_pressure)

def main():
    seed = 42
    tf.random.set_seed(seed)
    np.random.seed(seed)

    st.title('GENERADOR DE MELODIAS CON RNN LSTM')

    
    # File paths
    #sample_file = 'Preludes 2 Through Major keys 39.mid'
    out_file = 'output.mid'
    uploaded_file = st.file_uploader("Sube un archivo MIDI")
    
    model = ''
    pesos = ''
    
    option = st.selectbox(
        "Elige con qué modelo entrenar",
        ("Maestro", "Lakh"))

    
    
    if uploaded_file is not None and option is not None:

        if option == "Maestro":
            model = "mi_modelo_music.h5"
            pesos = "mi_pesos_music.h5"
        else:
            model = "mi_modelo03_music.h5"
            pesos = "mi_pesos03_music.h5"
        
        st.subheader("Archivo cargado:")
        st.write(uploaded_file.name)

        st.subheader("Modelo elegido:")
        st.write(option)
        
        # Save the uploaded file to disk so pretty_midi can read it
        with open(uploaded_file.name, 'wb') as f:
            f.write(uploaded_file.getbuffer())
            
        sample_file = uploaded_file.name
        
        # Load the model and its weights
        with custom_object_scope({'mse_with_positive_pressure': mse_with_positive_pressure}):
            model = keras.models.load_model(model)
    
        model.load_weights(pesos)
        
        # Read the uploaded MIDI and build the seed sequence for generation
        pm = pretty_midi.PrettyMIDI(sample_file)
        instrument_name = pretty_midi.program_to_instrument_name(pm.instruments[0].program)
        raw_notes = midi_to_notes(sample_file)
        key_order = ['pitch', 'step', 'duration']
        seq_length = 25
        vocab_size = 128
        temperature = 2.0
        num_predictions = 120
        sample_notes = np.stack([raw_notes[key] for key in key_order], axis=1)
        input_notes = (sample_notes[:seq_length] / np.array([vocab_size, 1, 1]))
        generated_notes = []
        prev_start = 0
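        # Autoregressive generation: predict one note at a time, slide the
        # seq_length-note window forward, and accumulate absolute start/end times.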
        for _ in range(num_predictions):
            pitch, step, duration = predict_next_note(input_notes, model, temperature)
            start = prev_start + step
            end = start + duration
            input_note = (pitch, step, duration)
            generated_notes.append((*input_note, start, end))
            input_notes = np.delete(input_notes, 0, axis=0)
            input_notes = np.append(input_notes, np.expand_dims(input_note, 0), axis=0)
            prev_start = start
    
        generated_notes = pd.DataFrame(
            generated_notes, columns=(*key_order, 'start', 'end'))
    
        notes_to_midi(
            generated_notes, out_file=out_file, instrument_name=instrument_name)
    
        # Streamlit output: offer the generated MIDI for download
        st.title("Generador de notas musicales")
    
        with open(out_file, 'rb') as f:
            archivo_midi = f.read()
        
        st.download_button(
            label="Descargar MIDI",
            data=archivo_midi,
            file_name=out_file,  # name of the file that will be downloaded
            mime='audio/midi'
        )
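
        # Optional sketch (not part of the original flow): render the generated MIDI to
        # WAV for in-browser playback with the already-imported midi2audio/FluidSynth.
        # 'FluidR3_GM.sf2' is an assumed soundfont path; this requires a local
        # FluidSynth install and the path may need to be adjusted or the block removed.
        soundfont = 'FluidR3_GM.sf2'
        if os.path.exists(soundfont):
            wav_path = os.path.join(tempfile.gettempdir(), 'output.wav')
            FluidSynth(soundfont).midi_to_audio(out_file, wav_path)
            with open(wav_path, 'rb') as wav:
                st.audio(wav.read(), format='audio/wav')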

if __name__ == "__main__":
    main()