patchbanks committed
Commit 8df70dd · verified · 1 Parent(s): e32d429

Upload 18 files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ sf2_kits/drum_breaks.sf2 filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,43 @@
+ Model License and Usage Terms
+ -----------------------------
+
+ Copyright © 2024 Patchbanks. All rights reserved.
+
+ The outputs generated by this model are granted royalty-free for personal use
+ and commercial entertainment purposes, including but not limited to songs, albums,
+ videos, or other forms of digital media released by an individual. However, these
+ outputs may not be used, resold, or licensed for commercial production, which
+ includes, but is not limited to, profiting from sound libraries, audio and MIDI
+ sample packs, stock media, subscription platforms, or generative AI platforms.
+ Additionally, the collection of generated output data for the purpose of training
+ AI models or machine learning systems is strictly prohibited.
+
+ The model weights, along with any derivatives or modifications, are also restricted
+ from commercial use and may not be sold, licensed, deployed on cloud servers,
+ or otherwise distributed for commercial purposes.
+
+
+ Model Architecture License
+ ---------------------------
+
+ MIT License
+
+ Copyright (c) 2022 Andrej Karpathy
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
app.py ADDED
@@ -0,0 +1,286 @@
+ import os
+ import pickle
+ import torch
+ import random
+ import subprocess
+ import re
+ import pretty_midi
+ import gradio as gr
+ from contextlib import nullcontext
+ from model import GPTConfig, GPT
+ from pedalboard import Pedalboard, Reverb, Compressor, Gain, Limiter
+ from pedalboard.io import AudioFile
+ import spaces
+
+ in_space = os.getenv("SYSTEM") == "spaces"
+
+ temp_dir = 'temp'
+ os.makedirs(temp_dir, exist_ok=True)
+
+ init_from = 'resume'
+ out_dir = 'checkpoints'
+ ckpt_load = 'model.pt'
+
+ start = "000000000000\n"
+ num_samples = 1
+ max_new_tokens = 768
+
+ seed = random.randint(1, 100000)
+ torch.manual_seed(seed)
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
+ dtype = 'bfloat16' if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else 'float16'
+ compile = False
+ exec(open('configurator.py').read())
+
+ torch.backends.cuda.matmul.allow_tf32 = True
+ torch.backends.cudnn.allow_tf32 = True
+ device_type = 'cuda' if 'cuda' in device else 'cpu'
+ ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
+ ctx = nullcontext() if device_type == 'cpu' else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
+
+ if init_from == 'resume':
+     ckpt_path = os.path.join(out_dir, ckpt_load)
+     checkpoint = torch.load(ckpt_path, map_location=device, weights_only=True)
+     gptconf = GPTConfig(**checkpoint['model_args'])
+     model = GPT(gptconf)
+     state_dict = checkpoint['model']
+     unwanted_prefix = '_orig_mod.'
+     for k, v in list(state_dict.items()):
+         if k.startswith(unwanted_prefix):
+             state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)
+     model.load_state_dict(state_dict)
+ elif init_from.startswith('gpt2'):
+     model = GPT.from_pretrained(init_from, dict(dropout=0.0))
+
+ model.eval()
+ model.to(device)
+ if compile:
+     model = torch.compile(model)
+
+ tokenizer = re.compile(r'000000000000|\d{2}|\n')
+
+ meta_path = os.path.join('data', checkpoint['config']['dataset'], 'meta.pkl')
+ with open(meta_path, 'rb') as f:
+     meta = pickle.load(f)
+ stoi = meta.get('stoi', None)
+ itos = meta.get('itos', None)
+
+ def encode(text):
+     matches = tokenizer.findall(text)
+     return [stoi[c] for c in matches]
+
+ def decode(encoded):
+     return ''.join([itos[i] for i in encoded])
+
+ def clear_midi(dir):
+     for file in os.listdir(dir):
+         if file.endswith('.mid'):
+             os.remove(os.path.join(dir, file))
+
+ clear_midi(temp_dir)
+
+ def generate_midi(temperature, top_k):
+     start_ids = encode(start)
+     x = torch.tensor(start_ids, dtype=torch.long, device=device)[None, ...]
+
+     midi_events = []
+     seq_count = 0
+
+     with torch.no_grad():
+         for _ in range(num_samples):
+             sequence = []
+             y = model.generate(x, max_new_tokens, temperature=temperature, top_k=top_k)
+             tkn_seq = decode(y[0].tolist())
+             lines = tkn_seq.splitlines()
+
+             for event in lines:
+                 if event.startswith(start.strip()):
+                     if sequence:
+                         midi_events.append(sequence)
+                         sequence = []
+                     seq_count += 1
+                 elif event.strip() == "":
+                     continue
+                 else:
+                     try:
+                         p = int(event[0:2])
+                         v = int(event[2:4])
+                         s = int(event[4:8])
+                         e = int(event[8:12])
+                     except ValueError:
+                         p, v, s, e = 0, 0, 0, 0
+                     sequence.append({'file_name': f'nanompc_{seq_count:02d}', 'pitch': p, 'velocity': v, 'start': s, 'end': e})
+
+             if sequence:
+                 midi_events.append(sequence)
+
+     # keep only events that fit within one 4-bar loop (1536 ticks at 96 ppq)
+     round_bars = []
+
+     for sequence in midi_events:
+         filtered_sequence = []
+         for event in sequence:
+             if event['start'] < 1536 and event['end'] <= 1536:
+                 filtered_sequence.append(event)
+         if filtered_sequence:
+             round_bars.append(filtered_sequence)
+
+     midi_events = round_bars
+
+     # de-duplicate notes of the same pitch that start within 12 ticks of each other
+     for track in midi_events:
+         track.sort(key=lambda x: x['start'])
+         unique_notes = []
+
+         for note in track:
+             if not any(abs(note['start'] - n['start']) < 12 and note['pitch'] == n['pitch'] for n in unique_notes):
+                 unique_notes.append(note)
+
+         track[:] = unique_notes
+
+     return midi_events
+
+ def write_single_midi(midi_events, bpm):
+     midi_data = pretty_midi.PrettyMIDI(initial_tempo=bpm, resolution=96)
+     midi_data.time_signature_changes.append(pretty_midi.containers.TimeSignature(4, 4, 0))
+     instrument = pretty_midi.Instrument(0)
+     midi_data.instruments.append(instrument)
+
+     for event in midi_events[0]:
+         pitch = event['pitch']
+         velocity = event['velocity']
+         start = midi_data.tick_to_time(event['start'])
+         end = midi_data.tick_to_time(event['end'])
+         note = pretty_midi.Note(pitch=pitch, velocity=velocity, start=start, end=end)
+         instrument.notes.append(note)
+
+     midi_path = os.path.join(temp_dir, 'output.mid')
+     midi_data.write(midi_path)
+     print(f"Generated: {midi_path}")
+
+ def render_wav(midi_file, uploaded_sf2=None, output_level='2.0'):
+     sf2_dir = 'sf2_kits'
+     audio_format = 's16'
+     sample_rate = '44100'
+     gain = str(output_level)
+
+     if uploaded_sf2:
+         sf2_file = uploaded_sf2
+     else:
+         sf2_files = [f for f in os.listdir(sf2_dir) if f.endswith('.sf2')]
+         if not sf2_files:
+             raise ValueError("No SoundFont (.sf2) file found in directory.")
+         sf2_file = os.path.join(sf2_dir, random.choice(sf2_files))
+
+     output_wav = os.path.join(temp_dir, 'output.wav')
+
+     with open(os.devnull, 'w') as devnull:
+         command = [
+             'fluidsynth', '-ni', sf2_file, midi_file, '-F', output_wav, '-r', sample_rate,
+             '-o', f'audio.file.format={audio_format}', '-g', gain
+         ]
+         subprocess.call(command, stdout=devnull, stderr=devnull)
+
+     return output_wav
+
+ def generate_and_return_files(bpm, temperature, top_k, uploaded_sf2=None, output_level='2.0'):
+     midi_events = generate_midi(temperature, top_k)
+     if not midi_events:
+         # match the two outputs wired to the click handler below
+         return None, None
+
+     write_single_midi(midi_events, bpm)
+
+     midi_file = os.path.join(temp_dir, 'output.mid')
+     wav_raw = render_wav(midi_file, uploaded_sf2, output_level)
+     wav_fx = os.path.join(temp_dir, 'output_fx.wav')
+
+     sfx_settings = [
+         {
+             'board': Pedalboard([
+                 Reverb(room_size=0.01, wet_level=random.uniform(0.005, 0.01), dry_level=0.75, width=1.0),
+                 Compressor(threshold_db=-3.0, ratio=8.0, attack_ms=0.0, release_ms=300.0),
+             ])
+         }
+     ]
+
+     for setting in sfx_settings:
+         board = setting['board']
+
+         with AudioFile(wav_raw) as f:
+             with AudioFile(wav_fx, 'w', f.samplerate, f.num_channels) as o:
+                 while f.tell() < f.frames:
+                     chunk = f.read(int(f.samplerate))
+                     effected = board(chunk, f.samplerate, reset=False)
+                     o.write(effected)
+
+     return midi_file, wav_fx
+
+ custom_css = """
+ #container {
+     max-width: 1200px !important;
+     margin: 0 auto !important;
+ }
+ #generate-btn {
+     font-size: 18px;
+     color: white;
+     padding: 10px 20px;
+     border: none;
+     border-radius: 5px;
+     cursor: pointer;
+     background: linear-gradient(90deg, hsla(268, 90%, 70%, 1) 0%, hsla(260, 72%, 74%, 1) 50%, hsla(247, 73%, 65%, 1) 100%);
+     transition: background 1s ease;
+ }
+ #generate-btn:hover {
+     color: white;
+     background: linear-gradient(90deg, hsla(268, 90%, 62%, 1) 0%, hsla(260, 70%, 70%, 1) 50%, hsla(247, 73%, 55%, 1) 100%);
+ }
+ #container .prose {
+     text-align: center !important;
+ }
+ #container h1 {
+     font-weight: bold;
+     font-size: 40px;
+     margin: 0px;
+ }
+ #container p {
+     font-size: 18px;
+     text-align: center;
+ }
+
+ """
+
+ with gr.Blocks(
+     css=custom_css,
+     theme=gr.themes.Default(
+         font=[gr.themes.GoogleFont("Roboto"), "sans-serif"],
+         primary_hue="violet",
+         secondary_hue="violet"
+     )
+ ) as iface:
+     with gr.Column(elem_id="container"):
+         gr.Markdown("<h1>Neural Breaks</h1>")
+         gr.Markdown("<p>Neural Breaks is a generative MIDI model trained on dynamic transcriptions of funk and soul drum breaks.</p>")
+
+         bpm = gr.Slider(minimum=50, maximum=200, step=1, value=100, label="BPM")
+         temperature = gr.Slider(minimum=0.1, maximum=2.0, step=0.1, value=1.0, label="Temperature")
+         top_k = gr.Slider(minimum=4, maximum=16, step=1, value=8, label="Top-k")
+         output_level = gr.Slider(minimum=0, maximum=3, step=0.10, value=2.0, label="Output Gain")
+         generate_button = gr.Button("Generate", elem_id="generate-btn")
+         midi_file = gr.File(label="MIDI Output")
+         audio_file = gr.Audio(label="Audio Output", type="filepath")
+         soundfont = gr.File(label="Optional: Upload SoundFont (preset=0, bank=0)")
+
+         generate_button.click(
+             fn=generate_and_return_files,
+             inputs=[bpm, temperature, top_k, soundfont, output_level],
+             outputs=[midi_file, audio_file]
+         )
+
+         gr.Markdown("<p style='font-size: 16px;'>Developed by <a href='https://www.patchbanks.com/' target='_blank'><strong>Patchbanks</strong></a></p>")
+
+ iface.launch(share=True)
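
For reference, here is a minimal sketch of the 12-character event encoding that generate_midi above slices apart: two digits of pitch, two of velocity, four of start tick, and four of end tick, with "000000000000" as the sequence delimiter used as the prompt. The example values are made up; pitch 36 is the kick drum in the General MIDI percussion map.

# Hypothetical event line in the model's token stream: ppvvsssseeee
# pitch (2 digits), velocity (2 digits), start tick (4), end tick (4)
event = "366100960192"

pitch = int(event[0:2])       # 36 (kick drum in the GM percussion map)
velocity = int(event[2:4])    # 61 (the 2-digit field caps velocity at 99)
start_tick = int(event[4:8])  # 96, one quarter note at the 96 ticks-per-beat resolution used above
end_tick = int(event[8:12])   # 192

print(pitch, velocity, start_tick, end_tick)  # 36 61 96 192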
checkpoints/.DS_Store ADDED
Binary file (6.15 kB)
checkpoints/model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc9998fce7cc7b59f949df3ec0dacd12fcc0baf15c8cb9e5f0fa62922e45030d
+ size 24327014
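
The three lines above are a Git LFS pointer, not the checkpoint itself: Git stores this small text stub and fetches the ~24 MB weights from LFS storage on `git lfs pull`. An illustrative way to inspect such a pointer before the real file has been pulled:

# Illustrative: read the key/value fields of a Git LFS pointer file.
# Before `git lfs pull`, checkpoints/model.pt is plain text like the stub above.
with open('checkpoints/model.pt') as f:
    fields = dict(line.split(' ', 1) for line in f.read().splitlines())

print(fields['oid'])   # sha256:dc9998fc...
print(fields['size'])  # 24327014 (bytes)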
config/eval_gpt2.py ADDED
@@ -0,0 +1,8 @@
+ # evaluate the base gpt2
+ # n_layer=12, n_head=12, n_embd=768
+ # 124M parameters
+ batch_size = 8
+ eval_iters = 500 # use more iterations to get good estimate
+ eval_only = True
+ wandb_log = False
+ init_from = 'gpt2'
config/eval_gpt2_large.py ADDED
@@ -0,0 +1,8 @@
+ # evaluate the base gpt2-large
+ # n_layer=36, n_head=20, n_embd=1280
+ # 774M parameters
+ batch_size = 8
+ eval_iters = 500 # use more iterations to get good estimate
+ eval_only = True
+ wandb_log = False
+ init_from = 'gpt2-large'
config/eval_gpt2_medium.py ADDED
@@ -0,0 +1,8 @@
+ # evaluate the base gpt2-medium
+ # n_layer=24, n_head=16, n_embd=1024
+ # 350M parameters
+ batch_size = 8
+ eval_iters = 500 # use more iterations to get good estimate
+ eval_only = True
+ wandb_log = False
+ init_from = 'gpt2-medium'
config/eval_gpt2_xl.py ADDED
@@ -0,0 +1,8 @@
+ # evaluate the base gpt2-xl
+ # n_layer=48, n_head=25, n_embd=1600
+ # 1558M parameters
+ batch_size = 8
+ eval_iters = 500 # use more iterations to get good estimate
+ eval_only = True
+ wandb_log = False
+ init_from = 'gpt2-xl'
config/finetune_shakespeare.py ADDED
@@ -0,0 +1,25 @@
+ import time
+
+ out_dir = 'out-shakespeare'
+ eval_interval = 5
+ eval_iters = 40
+ wandb_log = False # feel free to turn on
+ wandb_project = 'shakespeare'
+ wandb_run_name = 'ft-' + str(time.time())
+
+ dataset = 'shakespeare'
+ init_from = 'gpt2-xl' # this is the largest GPT-2 model
+
+ # only save checkpoints if the validation loss improves
+ always_save_checkpoint = False
+
+ # the number of examples per iter:
+ # 1 batch_size * 32 grad_accum * 1024 tokens = 32,768 tokens/iter
+ # shakespeare has 301,966 tokens, so 1 epoch ~= 9.2 iters
+ batch_size = 1
+ gradient_accumulation_steps = 32
+ max_iters = 20
+
+ # finetune at constant LR
+ learning_rate = 3e-5
+ decay_lr = False
config/train_gpt2.py ADDED
@@ -0,0 +1,25 @@
+ # config for training GPT-2 (124M) down to very nice loss of ~2.85 on 1 node of 8X A100 40GB
+ # launch as the following (e.g. in a screen session) and wait ~5 days:
+ # $ torchrun --standalone --nproc_per_node=8 train.py config/train_gpt2.py
+
+ wandb_log = True
+ wandb_project = 'owt'
+ wandb_run_name = 'gpt2-124M'
+
+ # these make the total batch size be ~0.5M
+ # 12 batch size * 1024 block size * 5 gradaccum * 8 GPUs = 491,520
+ batch_size = 12
+ block_size = 1024
+ gradient_accumulation_steps = 5 * 8
+
+ # this makes total number of tokens be 300B
+ max_iters = 600000
+ lr_decay_iters = 600000
+
+ # eval stuff
+ eval_interval = 1000
+ eval_iters = 200
+ log_interval = 10
+
+ # weight decay
+ weight_decay = 1e-1
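
The batch-size arithmetic in the comments above is easy to verify; a quick sanity check:

# Sanity-check the tokens-per-iteration arithmetic in config/train_gpt2.py.
batch_size = 12
block_size = 1024
grad_accum = 5 * 8  # 5 accumulation steps on each of 8 GPUs

tokens_per_iter = batch_size * block_size * grad_accum
print(tokens_per_iter)            # 491520, i.e. ~0.5M tokens per iteration
print(tokens_per_iter * 600000)   # 294912000000, ~300B tokens over max_iters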
config/train_shakespeare_char.py ADDED
@@ -0,0 +1,37 @@
+ # train a miniature character-level shakespeare model
+ # good for debugging and playing on macbooks and such
+
+ out_dir = 'out-shakespeare-char'
+ eval_interval = 250 # keep frequent because we'll overfit
+ eval_iters = 200
+ log_interval = 10 # don't print too too often
+
+ # we expect to overfit on this small dataset, so only save when val improves
+ always_save_checkpoint = False
+
+ wandb_log = False # override via command line if you like
+ wandb_project = 'shakespeare-char'
+ wandb_run_name = 'mini-gpt'
+
+ dataset = 'shakespeare_char'
+ gradient_accumulation_steps = 1
+ batch_size = 64
+ block_size = 256 # context of up to 256 previous characters
+
+ # baby GPT model :)
+ n_layer = 6
+ n_head = 6
+ n_embd = 384
+ dropout = 0.2
+
+ learning_rate = 1e-3 # with baby networks can afford to go a bit higher
+ max_iters = 5000
+ lr_decay_iters = 5000 # make equal to max_iters usually
+ min_lr = 1e-4 # learning_rate / 10 usually
+ beta2 = 0.99 # make a bit bigger because number of tokens per iter is small
+
+ warmup_iters = 100 # not super necessary potentially
+
+ # on macbook also add
+ # device = 'cpu' # run on cpu only
+ # compile = False # do not torch compile the model
configurator.py ADDED
@@ -0,0 +1,57 @@
+ """
+ Poor Man's Configurator. Probably a terrible idea. Example usage:
+ $ python train.py config/override_file.py --batch_size=32
+ this will first run config/override_file.py, then override batch_size to 32
+
+ The code in this file will be run as follows from e.g. train.py:
+ >>> exec(open('configurator.py').read())
+
+ So it's not a Python module, it's just shuttling this code away from train.py.
+ The code in this script then overrides the globals().
+
+ I know people are not going to love this, I just really dislike configuration
+ complexity and having to prepend config. to every single variable. If someone
+ comes up with a better simple Python solution I am all ears.
+ """
+
+ import sys
+ from ast import literal_eval
+ import argparse
+
+ parser = argparse.ArgumentParser(description="nanoMPC")
+ parser.add_argument("--bpm", type=int, default=90, help="Beats per minute")
+ parser.add_argument("--num_samples", type=int, default=1, help="Number of samples")
+ args, unknown_args = parser.parse_known_args()  # capture unknown args for the configurator
+
+ bpm = args.bpm
+ num_samples = args.num_samples
+
+ for arg in unknown_args:
+     if arg.startswith('--'):
+         print(f"Skipping command-line argument: {arg}")
+         continue
+
+     if '=' not in arg:
+         config_file = arg
+         print(f"Overriding config with {config_file}:")
+         with open(config_file) as f:
+             print(f.read())
+         exec(open(config_file).read())
+     else:
+         key, val = arg.split('=')
+         key = key[2:] if key.startswith('--') else key  # strip a leading '--' if present
+
+         if key in globals():
+             if key in ['bpm', 'num_samples']:
+                 continue
+             try:
+                 attempt = literal_eval(val)
+             except (SyntaxError, ValueError):
+                 attempt = val
+             assert type(attempt) == type(globals()[key])
+             print(f"Overriding: {key} = {attempt}")
+             globals()[key] = attempt
+         else:
+             raise ValueError(f"Unknown config key: {key}")
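
Because configurator.py is exec'd into the caller's namespace rather than imported, every assignment it makes rebinds the caller's globals directly. A minimal standalone sketch of that mechanism, using a hypothetical override.py containing the single line `batch_size = 32`:

# Standalone sketch of the exec-based override pattern (file name is hypothetical).
batch_size = 12  # default, defined before the exec

with open('override.py') as f:
    exec(f.read())  # runs in this namespace, so batch_size is rebound

print(batch_size)  # -> 32 if override.py assigned it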
data/neural_breaks/meta.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:20108664c20f9b9e9393eb222b8d5f33b947c2c448e2c06f33e4f6fc557f0fbb
+ size 1184
model.py ADDED
@@ -0,0 +1,301 @@
+ import inspect
+ import math
+ from dataclasses import dataclass
+
+ import torch
+ import torch.nn as nn
+ from torch.nn import functional as F
+
+
+ class RMSNorm(torch.nn.Module):
+     def __init__(self, dim: int, eps: float):
+         super().__init__()
+         self.eps = eps
+         self.weight = nn.Parameter(torch.ones(dim))
+
+     def _norm(self, x):
+         return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
+
+     def forward(self, x):
+         output = self._norm(x.float()).type_as(x)
+         return output * self.weight
+
+
+ class CausalSelfAttention(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         assert config.n_embd % config.n_head == 0
+         self.config = config  # store the config object
+
+         self.n_head = config.n_head
+         self.n_embd = config.n_embd
+         self.dropout = config.dropout
+
+         self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
+         self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
+         self.attn_dropout = nn.Dropout(config.dropout)
+         self.resid_dropout = nn.Dropout(config.dropout)
+
+         # learned relative position bias (only applied on the manual attention path below)
+         self.rel_attn_bias = nn.Embedding(config.block_size * 2 - 1, config.n_head)
+
+         if not hasattr(torch.nn.functional, 'scaled_dot_product_attention'):
+             # causal mask for the manual attention path; the flash path enforces
+             # causality itself via is_causal=True
+             self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
+                                         .view(1, 1, config.block_size, config.block_size))
+
+     def forward(self, x):
+         B, T, C = x.size()
+         q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
+         q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
+         k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
+         v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
+
+         if hasattr(torch.nn.functional, 'scaled_dot_product_attention'):
+             attn_logits = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=self.dropout if self.training else 0, is_causal=True)
+         else:
+             attn_logits = (q @ k.transpose(-2, -1)) / math.sqrt(C // self.n_head)
+             max_rpe = self.config.block_size // 2
+             rpe_matrix = self.generate_rpe(T, max_rpe).to(x.device)
+             # (T, T, n_head) -> (1, n_head, T, T) so it broadcasts over the batch dimension
+             rpe_embeddings = self.rel_attn_bias(rpe_matrix).permute(2, 0, 1).unsqueeze(0)
+             attn_logits = attn_logits + rpe_embeddings
+             attn_logits = attn_logits.masked_fill(self.bias[:, :, :T, :T] == 0, float('-inf'))
+             attn_logits = F.softmax(attn_logits, dim=-1)
+             attn_logits = self.attn_dropout(attn_logits)
+             attn_logits = attn_logits @ v
+
+         y = attn_logits.transpose(1, 2).contiguous().view(B, T, C)
+         y = self.resid_dropout(self.c_proj(y))
+         return y
+
+     def generate_rpe(self, length, max_rpe):
+         range_vec = torch.arange(length)
+         range_mat = range_vec.unsqueeze(0) - range_vec.unsqueeze(1)
+         range_mat_clipped = torch.clamp(range_mat, -max_rpe, max_rpe)
+         final_mat = range_mat_clipped + max_rpe
+         return final_mat
+
+
+ class MLP(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias)
+         self.gelu = nn.GELU()
+         self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias)
+         self.dropout = nn.Dropout(config.dropout)
+
+     def forward(self, x):
+         x = self.c_fc(x)
+         x = self.gelu(x)
+         x = self.c_proj(x)
+         x = self.dropout(x)
+         return x
+
+
+ class Block(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         self.ln_1 = RMSNorm(config.n_embd, eps=1e-5)
+         self.attn = CausalSelfAttention(config)
+         self.ln_2 = RMSNorm(config.n_embd, eps=1e-5)
+         self.mlp = MLP(config)
+
+     def forward(self, x):
+         x = x + self.attn(self.ln_1(x))
+         x = x + self.mlp(self.ln_2(x))
+         return x
+
+
+ @dataclass
+ class GPTConfig:
+     block_size: int = 1024
+     vocab_size: int = 50304
+     n_layer: int = 12
+     n_head: int = 12
+     n_embd: int = 768
+     dropout: float = 0.0
+     bias: bool = True
+
+
+ class GPT(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         assert config.vocab_size is not None
+         assert config.block_size is not None
+         self.config = config
+
+         self.transformer = nn.ModuleDict(dict(
+             wte = nn.Embedding(config.vocab_size, config.n_embd),
+             wpe = nn.Embedding(config.block_size, config.n_embd),
+             drop = nn.Dropout(config.dropout),
+             h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
+             ln_f = RMSNorm(config.n_embd, eps=1e-5),
+         ))
+         self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+         self.transformer.wte.weight = self.lm_head.weight  # weight tying
+
+         self.apply(self._init_weights)
+         for pn, p in self.named_parameters():
+             if pn.endswith('c_proj.weight'):
+                 torch.nn.init.normal_(p, mean=0.0, std=0.02/math.sqrt(2 * config.n_layer))
+
+         #print("number of parameters: %.2fM" % (self.get_num_params()/1e6,))
+
+     def get_num_params(self, non_embedding=True):
+         n_params = sum(p.numel() for p in self.parameters())
+         if non_embedding:
+             n_params -= self.transformer.wpe.weight.numel()
+         return n_params
+
+     def _init_weights(self, module):
+         if isinstance(module, nn.Linear):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+             if module.bias is not None:
+                 torch.nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+
+     def forward(self, idx, targets=None, noise_pct=0.1):
+         device = idx.device
+         b, t = idx.size()
+         assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
+         pos = torch.arange(0, t, dtype=torch.long, device=device)
+
+         tok_emb = self.transformer.wte(idx)
+         pos_emb = self.transformer.wpe(pos)
+
+         # add gaussian noise to the token embeddings, scaled to noise_pct of their std
+         # (note: with the default of 0.1, noise is also injected at inference time)
+         if noise_pct > 0.0:
+             noise_std = torch.std(tok_emb) * noise_pct
+             noise = torch.randn_like(tok_emb) * noise_std
+             tok_emb = tok_emb + noise
+
+         x = self.transformer.drop(tok_emb + pos_emb)
+         for block in self.transformer.h:
+             x = block(x)
+         x = self.transformer.ln_f(x)
+
+         if targets is not None:
+             logits = self.lm_head(x)
+             loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
+         else:
+             # inference-time mini-optimization: only forward the lm_head on the final position
+             logits = self.lm_head(x[:, [-1], :])
+             loss = None
+
+         return logits, loss
+
+     def crop_block_size(self, block_size):
+         assert block_size <= self.config.block_size
+         self.config.block_size = block_size
+         self.transformer.wpe.weight = nn.Parameter(self.transformer.wpe.weight[:block_size])
+         for block in self.transformer.h:
+             if hasattr(block.attn, 'bias'):
+                 block.attn.bias = block.attn.bias[:, :, :block_size, :block_size]
+
+     @classmethod
+     def from_pretrained(cls, model_type, override_args=None):
+         assert model_type in {'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'}
+         override_args = override_args or {}
+         assert all(k == 'dropout' for k in override_args)
+         from transformers import GPT2LMHeadModel
+         print("loading weights from pretrained gpt: %s" % model_type)
+
+         config_args = {
+             'gpt2':        dict(n_layer=12, n_head=12, n_embd=768),
+             'gpt2-medium': dict(n_layer=24, n_head=16, n_embd=1024),
+             'gpt2-large':  dict(n_layer=36, n_head=20, n_embd=1280),
+             'gpt2-xl':     dict(n_layer=48, n_head=25, n_embd=1600),
+         }[model_type]
+         print("forcing vocab_size=50257, block_size=1024, bias=True")
+         config_args['vocab_size'] = 50257
+         config_args['block_size'] = 1024
+         config_args['bias'] = True
+
+         if 'dropout' in override_args:
+             print(f"overriding dropout rate to {override_args['dropout']}")
+             config_args['dropout'] = override_args['dropout']
+
+         config = GPTConfig(**config_args)
+         model = GPT(config)
+         sd = model.state_dict()
+         sd_keys = sd.keys()
+         sd_keys = [k for k in sd_keys if not k.endswith('.attn.bias')]
+
+         model_hf = GPT2LMHeadModel.from_pretrained(model_type)
+         sd_hf = model_hf.state_dict()
+
+         sd_keys_hf = sd_hf.keys()
+         sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.masked_bias')]
+         sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.bias')]
+         transposed = ['attn.c_attn.weight', 'attn.c_proj.weight', 'mlp.c_fc.weight', 'mlp.c_proj.weight']
+
+         assert len(sd_keys_hf) == len(sd_keys), f"mismatched keys: {len(sd_keys_hf)} != {len(sd_keys)}"
+         for k in sd_keys_hf:
+             if any(k.endswith(w) for w in transposed):
+                 # the openai checkpoints use Conv1D weights, which need to be transposed
+                 assert sd_hf[k].shape[::-1] == sd[k].shape
+                 with torch.no_grad():
+                     sd[k].copy_(sd_hf[k].t())
+             else:
+                 assert sd_hf[k].shape == sd[k].shape
+                 with torch.no_grad():
+                     sd[k].copy_(sd_hf[k])
+
+         return model
+
+     def configure_optimizers(self, weight_decay, learning_rate, betas, device_type):
+         param_dict = {pn: p for pn, p in self.named_parameters()}
+         param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad}
+
+         # 2D+ parameters (matmuls, embeddings) get weight decay; 1D (biases, norms) do not
+         decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]
+         nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]
+         optim_groups = [
+             {'params': decay_params, 'weight_decay': weight_decay},
+             {'params': nodecay_params, 'weight_decay': 0.0}
+         ]
+         num_decay_params = sum(p.numel() for p in decay_params)
+         num_nodecay_params = sum(p.numel() for p in nodecay_params)
+         print(f"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters")
+         print(f"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters")
+
+         fused_available = 'fused' in inspect.signature(torch.optim.AdamW).parameters
+         use_fused = fused_available and device_type == 'cuda'
+         extra_args = dict(fused=True) if use_fused else dict()
+         optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args)
+         print(f"using fused AdamW: {use_fused}")
+
+         return optimizer
+
+     def estimate_mfu(self, fwdbwd_per_iter, dt):
+         """ estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS """
+         N = self.get_num_params()
+         cfg = self.config
+         L, H, Q, T = cfg.n_layer, cfg.n_head, cfg.n_embd//cfg.n_head, cfg.block_size
+         flops_per_token = 6*N + 12*L*H*Q*T
+         flops_per_fwdbwd = flops_per_token * T
+         flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter
+
+         flops_achieved = flops_per_iter * (1.0/dt)
+         flops_promised = 312e12  # A100 bfloat16 peak
+         mfu = flops_achieved / flops_promised
+         return mfu
+
+     @torch.no_grad()
+     def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
+         for _ in range(max_new_tokens):
+             idx_cond = idx if idx.size(1) <= self.config.block_size else idx[:, -self.config.block_size:]
+             logits, _ = self(idx_cond)
+             logits = logits[:, -1, :] / temperature
+             if top_k is not None:
+                 v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
+                 logits[logits < v[:, [-1]]] = -float('Inf')
+             probs = F.softmax(logits, dim=-1)
+             idx_next = torch.multinomial(probs, num_samples=1)
+
+             if idx_next.item() == 0:  # stop token
+                 break
+
+             idx = torch.cat((idx, idx_next), dim=1)
+
+         return idx
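
To make generate_rpe concrete, here is a small worked example (illustrative values, length=4 and max_rpe=2): each entry [i, j] starts as the relative offset j - i, is clipped to ±max_rpe, and is shifted into [0, 2*max_rpe] so it can index the rel_attn_bias embedding table.

import torch

# Mirrors CausalSelfAttention.generate_rpe with length=4, max_rpe=2.
range_vec = torch.arange(4)
range_mat = range_vec.unsqueeze(0) - range_vec.unsqueeze(1)  # entry [i, j] = j - i
indices = torch.clamp(range_mat, -2, 2) + 2  # clip, then shift into [0, 4]

print(indices)
# tensor([[2, 3, 4, 4],
#         [1, 2, 3, 4],
#         [0, 1, 2, 3],
#         [0, 0, 1, 2]])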
packages.txt ADDED
@@ -0,0 +1 @@
+ fluidsynth
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ pretty_midi==0.2.10
+ pedalboard==0.9.3
+ torch
+ gradio
sf2_kits/.DS_Store ADDED
Binary file (6.15 kB)
sf2_kits/drum_breaks.sf2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4fc32e717f84b48194933c9b929d3727306dede46589b76af9c2b1077b4221dd
+ size 31717174