sanchit-gandhi (HF staff) committed
Commit 3c6a178
1 Parent(s): 183d1f6

End of training

Files changed (5)
  1. README.md +6 -4
  2. all_results.json +16 -0
  3. eval_results.json +10 -0
  4. train_results.json +9 -0
  5. trainer_state.json +388 -0
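
The five files above can be fetched programmatically at exactly this revision. A minimal sketch using `huggingface_hub` — the repo id below is an assumption inferred from the author and model name on this page, and the short commit hash may need to be replaced with the full one:

```python
import json
from huggingface_hub import hf_hub_download

# Assumed repo id, inferred from the author and model name in the README diff -- adjust as needed.
REPO_ID = "sanchit-gandhi/black-eyed-peas-v1-crafted-variable-prompt-8-epochs-text-only-piano-prompts"

# Pin to this commit so the numbers match the diffs below; use the full 40-character
# hash if your hub version does not resolve the short form.
path = hf_hub_download(REPO_ID, filename="all_results.json", revision="3c6a178")

with open(path) as f:
    results = json.load(f)

print(results["eval_loss"], results["eval_text_cosine_sim"], results["eval_audio_cosine_sim"])
```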
README.md CHANGED
@@ -2,6 +2,8 @@
  license: cc-by-nc-4.0
  library_name: peft
  tags:
+ - text-to-audio
+ - sweet-dreambooths/black-eyed-peas-v1-piano-prompts
  - generated_from_trainer
  base_model: facebook/musicgen-melody-large
  model-index:
@@ -16,11 +18,11 @@ should probably proofread and complete it, then remove this comment. -->
  [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/sanchit-gandhi/black-eyed-peas/runs/c636i67k)
  # black-eyed-peas-v1-crafted-variable-prompt-8-epochs-text-only-piano-prompts

- This model is a fine-tuned version of [facebook/musicgen-melody-large](https://huggingface.co/facebook/musicgen-melody-large) on an unknown dataset.
+ This model is a fine-tuned version of [facebook/musicgen-melody-large](https://huggingface.co/facebook/musicgen-melody-large) on the SWEET-DREAMBOOTHS/BLACK-EYED-PEAS-V1-PIANO-PROMPTS - DEFAULT dataset.
  It achieves the following results on the evaluation set:
- - Loss: 3.0955
- - Text Cosine Sim: 0.3840
- - Audio Cosine Sim: 0.5573
+ - Loss: 3.0989
+ - Text Cosine Sim: 0.3982
+ - Audio Cosine Sim: 0.5996

  ## Model description

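Given `library_name: peft`, the new `text-to-audio` tag, and the `facebook/musicgen-melody-large` base model recorded above, loading the adapter for inference would plausibly look like the sketch below. The adapter repo id is a placeholder inferred from the run name, and the prompt and generation settings are illustrative only:

```python
from peft import PeftModel
from transformers import AutoProcessor, MusicgenMelodyForConditionalGeneration

BASE_ID = "facebook/musicgen-melody-large"
# Placeholder: point this at the repo that holds the PEFT weights from this commit.
ADAPTER_ID = "sanchit-gandhi/black-eyed-peas-v1-crafted-variable-prompt-8-epochs-text-only-piano-prompts"

processor = AutoProcessor.from_pretrained(BASE_ID)
base = MusicgenMelodyForConditionalGeneration.from_pretrained(BASE_ID)
sampling_rate = base.config.audio_encoder.sampling_rate  # 32 kHz for MusicGen Melody

# Attach the fine-tuned adapter on top of the frozen base model.
model = PeftModel.from_pretrained(base, ADAPTER_ID)

# Text-only conditioning, matching the "text-only-piano-prompts" setup in the run name.
inputs = processor(text=["gentle solo piano ballad"], padding=True, return_tensors="pt")
audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3.0, max_new_tokens=256)
print(audio_values.shape, sampling_rate)
```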
all_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "epoch": 7.917525773195877,
+   "eval_audio_cosine_sim": 0.5995970368385315,
+   "eval_loss": 3.09893536567688,
+   "eval_runtime": 1924.8031,
+   "eval_samples": 12,
+   "eval_samples_per_second": 0.006,
+   "eval_steps_per_second": 0.006,
+   "eval_text_cosine_sim": 0.39815211296081543,
+   "total_flos": 962957869218000.0,
+   "train_loss": 8.715420136849085,
+   "train_runtime": 7523.7142,
+   "train_samples": 97,
+   "train_samples_per_second": 0.103,
+   "train_steps_per_second": 0.006
+ }
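
The throughput fields above are consistent with the sample counts and runtimes recorded in the same files. A rough sanity check, assuming `all_results.json` has been downloaded locally and taking `num_train_epochs: 8` and `global_step: 48` from `trainer_state.json` below:

```python
import json

with open("all_results.json") as f:
    r = json.load(f)

NUM_EPOCHS = 8    # "num_train_epochs" in trainer_state.json
GLOBAL_STEP = 48  # "global_step" in trainer_state.json

# 97 * 8 / 7523.7142 ~= 0.103  -> matches "train_samples_per_second"
print(r["train_samples"] * NUM_EPOCHS / r["train_runtime"])
# 48 / 7523.7142 ~= 0.006      -> matches "train_steps_per_second" to the logged precision
print(GLOBAL_STEP / r["train_runtime"])
# 12 / 1924.8031 ~= 0.006      -> matches "eval_samples_per_second"
print(r["eval_samples"] / r["eval_runtime"])
```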
eval_results.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "epoch": 7.917525773195877,
+   "eval_audio_cosine_sim": 0.5995970368385315,
+   "eval_loss": 3.09893536567688,
+   "eval_runtime": 1924.8031,
+   "eval_samples": 12,
+   "eval_samples_per_second": 0.006,
+   "eval_steps_per_second": 0.006,
+   "eval_text_cosine_sim": 0.39815211296081543
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 7.917525773195877,
+   "total_flos": 962957869218000.0,
+   "train_loss": 8.715420136849085,
+   "train_runtime": 7523.7142,
+   "train_samples": 97,
+   "train_samples_per_second": 0.103,
+   "train_steps_per_second": 0.006
+ }
trainer_state.json ADDED
@@ -0,0 +1,388 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 7.917525773195877,
+   "eval_steps": 30,
+   "global_step": 48,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.16494845360824742,
+       "grad_norm": 1.6045022010803223,
+       "learning_rate": 2.9375e-05,
+       "loss": 10.1016,
+       "step": 1
+     },
+     {
+       "epoch": 0.32989690721649484,
+       "grad_norm": 1.7116177082061768,
+       "learning_rate": 2.875e-05,
+       "loss": 10.2357,
+       "step": 2
+     },
+     {
+       "epoch": 0.4948453608247423,
+       "grad_norm": 1.7441893815994263,
+       "learning_rate": 2.8125e-05,
+       "loss": 10.0531,
+       "step": 3
+     },
+     {
+       "epoch": 0.6597938144329897,
+       "grad_norm": 2.1694791316986084,
+       "learning_rate": 2.75e-05,
+       "loss": 10.2016,
+       "step": 4
+     },
+     {
+       "epoch": 0.8247422680412371,
+       "grad_norm": 2.1092519760131836,
+       "learning_rate": 2.6875000000000003e-05,
+       "loss": 10.0665,
+       "step": 5
+     },
+     {
+       "epoch": 0.9896907216494846,
+       "grad_norm": 2.16532301902771,
+       "learning_rate": 2.625e-05,
+       "loss": 9.8524,
+       "step": 6
+     },
+     {
+       "epoch": 1.1546391752577319,
+       "grad_norm": 2.3102128505706787,
+       "learning_rate": 2.5625e-05,
+       "loss": 9.9192,
+       "step": 7
+     },
+     {
+       "epoch": 1.3195876288659794,
+       "grad_norm": 2.627959966659546,
+       "learning_rate": 2.5e-05,
+       "loss": 9.8474,
+       "step": 8
+     },
+     {
+       "epoch": 1.4845360824742269,
+       "grad_norm": 3.065120220184326,
+       "learning_rate": 2.4375e-05,
+       "loss": 9.8735,
+       "step": 9
+     },
+     {
+       "epoch": 1.6494845360824741,
+       "grad_norm": 2.6623458862304688,
+       "learning_rate": 2.3749999999999998e-05,
+       "loss": 9.6075,
+       "step": 10
+     },
+     {
+       "epoch": 1.8144329896907216,
+       "grad_norm": 2.8638663291931152,
+       "learning_rate": 2.3125000000000003e-05,
+       "loss": 9.6006,
+       "step": 11
+     },
+     {
+       "epoch": 1.9793814432989691,
+       "grad_norm": 3.157634735107422,
+       "learning_rate": 2.25e-05,
+       "loss": 9.511,
+       "step": 12
+     },
+     {
+       "epoch": 2.1443298969072164,
+       "grad_norm": 3.286606788635254,
+       "learning_rate": 2.1875e-05,
+       "loss": 9.4582,
+       "step": 13
+     },
+     {
+       "epoch": 2.3092783505154637,
+       "grad_norm": 3.352224588394165,
+       "learning_rate": 2.125e-05,
+       "loss": 9.3268,
+       "step": 14
+     },
+     {
+       "epoch": 2.4742268041237114,
+       "grad_norm": 3.678314208984375,
+       "learning_rate": 2.0625e-05,
+       "loss": 9.3043,
+       "step": 15
+     },
+     {
+       "epoch": 2.6391752577319587,
+       "grad_norm": 3.6437971591949463,
+       "learning_rate": 1.9999999999999998e-05,
+       "loss": 9.1926,
+       "step": 16
+     },
+     {
+       "epoch": 2.804123711340206,
+       "grad_norm": 3.841749429702759,
+       "learning_rate": 1.9375e-05,
+       "loss": 9.0797,
+       "step": 17
+     },
+     {
+       "epoch": 2.9690721649484537,
+       "grad_norm": 3.7387771606445312,
+       "learning_rate": 1.8750000000000002e-05,
+       "loss": 9.0155,
+       "step": 18
+     },
+     {
+       "epoch": 3.134020618556701,
+       "grad_norm": 3.5350756645202637,
+       "learning_rate": 1.8125e-05,
+       "loss": 8.8982,
+       "step": 19
+     },
+     {
+       "epoch": 3.2989690721649483,
+       "grad_norm": 3.9751718044281006,
+       "learning_rate": 1.7500000000000002e-05,
+       "loss": 8.8391,
+       "step": 20
+     },
+     {
+       "epoch": 3.463917525773196,
+       "grad_norm": 3.5453691482543945,
+       "learning_rate": 1.6875e-05,
+       "loss": 8.8007,
+       "step": 21
+     },
+     {
+       "epoch": 3.6288659793814433,
+       "grad_norm": 3.744359254837036,
+       "learning_rate": 1.625e-05,
+       "loss": 8.6382,
+       "step": 22
+     },
+     {
+       "epoch": 3.7938144329896906,
+       "grad_norm": 4.300820350646973,
+       "learning_rate": 1.5625e-05,
+       "loss": 8.5496,
+       "step": 23
+     },
+     {
+       "epoch": 3.9587628865979383,
+       "grad_norm": 3.571364164352417,
+       "learning_rate": 1.5e-05,
+       "loss": 8.4785,
+       "step": 24
+     },
+     {
+       "epoch": 4.123711340206185,
+       "grad_norm": 3.5201902389526367,
+       "learning_rate": 1.4375e-05,
+       "loss": 8.4425,
+       "step": 25
+     },
+     {
+       "epoch": 4.288659793814433,
+       "grad_norm": 3.3802483081817627,
+       "learning_rate": 1.375e-05,
+       "loss": 8.4132,
+       "step": 26
+     },
+     {
+       "epoch": 4.453608247422681,
+       "grad_norm": 3.3173177242279053,
+       "learning_rate": 1.3125e-05,
+       "loss": 8.3052,
+       "step": 27
+     },
+     {
+       "epoch": 4.618556701030927,
+       "grad_norm": 3.4614617824554443,
+       "learning_rate": 1.25e-05,
+       "loss": 8.1881,
+       "step": 28
+     },
+     {
+       "epoch": 4.783505154639175,
+       "grad_norm": 3.216718912124634,
+       "learning_rate": 1.1874999999999999e-05,
+       "loss": 8.2378,
+       "step": 29
+     },
+     {
+       "epoch": 4.948453608247423,
+       "grad_norm": 3.36505126953125,
+       "learning_rate": 1.125e-05,
+       "loss": 8.1809,
+       "step": 30
+     },
+     {
+       "epoch": 4.948453608247423,
+       "eval_audio_cosine_sim": 0.557277500629425,
+       "eval_loss": 3.095508575439453,
+       "eval_runtime": 2121.8578,
+       "eval_samples_per_second": 0.006,
+       "eval_steps_per_second": 0.006,
+       "eval_text_cosine_sim": 0.3839966356754303,
+       "step": 30
+     },
+     {
+       "epoch": 5.11340206185567,
+       "grad_norm": 3.3731958866119385,
+       "learning_rate": 1.0625e-05,
+       "loss": 8.1047,
+       "step": 31
+     },
+     {
+       "epoch": 5.278350515463917,
+       "grad_norm": 3.572460651397705,
+       "learning_rate": 9.999999999999999e-06,
+       "loss": 7.9906,
+       "step": 32
+     },
+     {
+       "epoch": 5.443298969072165,
+       "grad_norm": 3.343137502670288,
+       "learning_rate": 9.375000000000001e-06,
+       "loss": 8.1973,
+       "step": 33
+     },
+     {
+       "epoch": 5.608247422680412,
+       "grad_norm": 3.5458569526672363,
+       "learning_rate": 8.750000000000001e-06,
+       "loss": 8.0155,
+       "step": 34
+     },
+     {
+       "epoch": 5.77319587628866,
+       "grad_norm": 3.403402328491211,
+       "learning_rate": 8.125e-06,
+       "loss": 8.0047,
+       "step": 35
+     },
+     {
+       "epoch": 5.938144329896907,
+       "grad_norm": 3.5528311729431152,
+       "learning_rate": 7.5e-06,
+       "loss": 7.8875,
+       "step": 36
+     },
+     {
+       "epoch": 6.103092783505154,
+       "grad_norm": 3.659574270248413,
+       "learning_rate": 6.875e-06,
+       "loss": 7.82,
+       "step": 37
+     },
+     {
+       "epoch": 6.268041237113402,
+       "grad_norm": 3.419759511947632,
+       "learning_rate": 6.25e-06,
+       "loss": 7.8027,
+       "step": 38
+     },
+     {
+       "epoch": 6.43298969072165,
+       "grad_norm": 3.7508034706115723,
+       "learning_rate": 5.625e-06,
+       "loss": 7.8398,
+       "step": 39
+     },
+     {
+       "epoch": 6.597938144329897,
+       "grad_norm": 3.735914945602417,
+       "learning_rate": 4.9999999999999996e-06,
+       "loss": 7.8924,
+       "step": 40
+     },
+     {
+       "epoch": 6.762886597938144,
+       "grad_norm": 3.593177318572998,
+       "learning_rate": 4.3750000000000005e-06,
+       "loss": 7.9886,
+       "step": 41
+     },
+     {
+       "epoch": 6.927835051546392,
+       "grad_norm": 3.5156137943267822,
+       "learning_rate": 3.75e-06,
+       "loss": 7.8703,
+       "step": 42
+     },
+     {
+       "epoch": 7.092783505154639,
+       "grad_norm": 3.6164469718933105,
+       "learning_rate": 3.125e-06,
+       "loss": 7.7555,
+       "step": 43
+     },
+     {
+       "epoch": 7.257731958762887,
+       "grad_norm": 3.5708608627319336,
+       "learning_rate": 2.4999999999999998e-06,
+       "loss": 7.7007,
+       "step": 44
+     },
+     {
+       "epoch": 7.422680412371134,
+       "grad_norm": 3.396042585372925,
+       "learning_rate": 1.875e-06,
+       "loss": 7.9848,
+       "step": 45
+     },
+     {
+       "epoch": 7.587628865979381,
+       "grad_norm": 3.5203356742858887,
+       "learning_rate": 1.2499999999999999e-06,
+       "loss": 7.85,
+       "step": 46
+     },
+     {
+       "epoch": 7.752577319587629,
+       "grad_norm": 3.814443349838257,
+       "learning_rate": 6.249999999999999e-07,
+       "loss": 7.566,
+       "step": 47
+     },
+     {
+       "epoch": 7.917525773195877,
+       "grad_norm": 3.4914655685424805,
+       "learning_rate": 0.0,
+       "loss": 7.8495,
+       "step": 48
+     },
+     {
+       "epoch": 7.917525773195877,
+       "step": 48,
+       "total_flos": 962957869218000.0,
+       "train_loss": 8.715420136849085,
+       "train_runtime": 7523.7142,
+       "train_samples_per_second": 0.103,
+       "train_steps_per_second": 0.006
+     }
+   ],
+   "logging_steps": 1.0,
+   "max_steps": 48,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 8,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": false,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 962957869218000.0,
+   "train_batch_size": 1,
+   "trial_name": null,
+   "trial_params": null
+ }
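
The logged schedule above is a plain linear decay, and the fractional epochs advance in fixed increments. The sketch below reproduces both, assuming a peak learning rate of 3e-5 and an effective batch size of 16; these are inferred from the logged values (step 1 logs 2.9375e-05 = 3e-5 * 47/48, and the epoch advances by 16/97 per step) rather than stated anywhere in this commit:

```python
MAX_STEPS = 48        # "max_steps" / "global_step" above
PEAK_LR = 3e-5        # inferred: step 1 logs 2.9375e-05 = 3e-5 * 47/48
TRAIN_SAMPLES = 97    # "train_samples" in train_results.json
EFFECTIVE_BATCH = 16  # inferred from the per-step epoch increment of 16/97

for step in (1, 24, 30, 48):
    lr = PEAK_LR * (MAX_STEPS - step) / MAX_STEPS   # linear decay to 0 at the final step
    epoch = step * EFFECTIVE_BATCH / TRAIN_SAMPLES  # e.g. step 48 -> 7.9175...
    print(f"step {step:2d}  lr {lr:.6e}  epoch {epoch:.6f}")
```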