sanchit-gandhi committed on
Commit 88b2eb1
1 Parent(s): c169ac1

End of training

Files changed (5)
  1. README.md +6 -4
  2. all_results.json +16 -0
  3. eval_results.json +10 -0
  4. train_results.json +9 -0
  5. trainer_state.json +744 -0
README.md CHANGED
@@ -2,6 +2,8 @@
  license: cc-by-nc-4.0
  library_name: peft
  tags:
+ - text-to-audio
+ - sweet-dreambooths/black-eyed-peas-v1-piano-prompts
  - generated_from_trainer
  base_model: facebook/musicgen-melody-large
  model-index:
@@ -16,11 +18,11 @@ should probably proofread and complete it, then remove this comment. -->
  [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/sanchit-gandhi/black-eyed-peas/runs/cglfpwuh)
  # black-eyed-peas-v1-crafted-variable-prompt-16-epochs-piano-prompts

- This model is a fine-tuned version of [facebook/musicgen-melody-large](https://huggingface.co/facebook/musicgen-melody-large) on an unknown dataset.
+ This model is a fine-tuned version of [facebook/musicgen-melody-large](https://huggingface.co/facebook/musicgen-melody-large) on the SWEET-DREAMBOOTHS/BLACK-EYED-PEAS-V1-PIANO-PROMPTS - DEFAULT dataset.
  It achieves the following results on the evaluation set:
- - Loss: 3.6277
- - Text Cosine Sim: 0.2459
- - Audio Cosine Sim: 0.5883
+ - Loss: 3.6438
+ - Text Cosine Sim: 0.2460
+ - Audio Cosine Sim: 0.5781

  ## Model description

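Since the card lists `library_name: peft` with `facebook/musicgen-melody-large` as the base model, the weights in this repo are an adapter rather than a full checkpoint. A minimal usage sketch, assuming the adapter follows the standard PEFT layout; the `adapter_id` below is a placeholder for this repository's Hub id, and the prompt and generation settings are illustrative, not taken from this training run:

```python
from peft import PeftModel
from transformers import AutoProcessor, MusicgenMelodyForConditionalGeneration

base_id = "facebook/musicgen-melody-large"
adapter_id = "<adapter-repo-id>"  # placeholder: the Hub id of this fine-tuned adapter

processor = AutoProcessor.from_pretrained(base_id)
model = MusicgenMelodyForConditionalGeneration.from_pretrained(base_id)
sampling_rate = model.config.audio_encoder.sampling_rate  # 32 kHz for the MusicGen audio codec

# Attach the LoRA/PEFT adapter weights on top of the frozen base model.
model = PeftModel.from_pretrained(model, adapter_id)

# Text-to-audio generation with a prompt in the style the adapter was trained on.
inputs = processor(text=["gentle solo piano ballad"], padding=True, return_tensors="pt")
audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3.0, max_new_tokens=256)
```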
all_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+ "epoch": 15.835051546391753,
+ "eval_audio_cosine_sim": 0.5780665278434753,
+ "eval_loss": 3.6437976360321045,
+ "eval_runtime": 1037.2655,
+ "eval_samples": 12,
+ "eval_samples_per_second": 0.012,
+ "eval_steps_per_second": 0.012,
+ "eval_text_cosine_sim": 0.246041402220726,
+ "total_flos": 1925884422732936.0,
+ "train_loss": 7.460742597778638,
+ "train_runtime": 13364.4334,
+ "train_samples": 97,
+ "train_samples_per_second": 0.116,
+ "train_steps_per_second": 0.007
+ }
eval_results.json ADDED
@@ -0,0 +1,10 @@
+ {
+ "epoch": 15.835051546391753,
+ "eval_audio_cosine_sim": 0.5780665278434753,
+ "eval_loss": 3.6437976360321045,
+ "eval_runtime": 1037.2655,
+ "eval_samples": 12,
+ "eval_samples_per_second": 0.012,
+ "eval_steps_per_second": 0.012,
+ "eval_text_cosine_sim": 0.246041402220726
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 15.835051546391753,
+ "total_flos": 1925884422732936.0,
+ "train_loss": 7.460742597778638,
+ "train_runtime": 13364.4334,
+ "train_samples": 97,
+ "train_samples_per_second": 0.116,
+ "train_steps_per_second": 0.007
+ }
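The summary files added above are plain JSON with the keys shown, so the reported metrics can be checked programmatically; a small sketch, assuming local copies of the files:

```python
import json

# Load the aggregated training and evaluation summaries added in this commit.
with open("train_results.json") as f:
    train_results = json.load(f)
with open("eval_results.json") as f:
    eval_results = json.load(f)

print(f"train_loss: {train_results['train_loss']:.4f} "
      f"over {train_results['train_runtime'] / 3600:.1f} h")
print(f"eval_loss: {eval_results['eval_loss']:.4f}, "
      f"text sim: {eval_results['eval_text_cosine_sim']:.4f}, "
      f"audio sim: {eval_results['eval_audio_cosine_sim']:.4f}")
```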
trainer_state.json ADDED
@@ -0,0 +1,744 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 15.835051546391753,
+ "eval_steps": 30,
+ "global_step": 96,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.16494845360824742,
+ "grad_norm": 1.603933334350586,
+ "learning_rate": 2.9687500000000003e-05,
+ "loss": 10.1011,
+ "step": 1
+ },
+ {
+ "epoch": 0.32989690721649484,
+ "grad_norm": 1.7179864645004272,
+ "learning_rate": 2.9375e-05,
+ "loss": 10.2338,
+ "step": 2
+ },
+ {
+ "epoch": 0.4948453608247423,
+ "grad_norm": 1.748449444770813,
+ "learning_rate": 2.90625e-05,
+ "loss": 10.0554,
+ "step": 3
+ },
+ {
+ "epoch": 0.6597938144329897,
+ "grad_norm": 2.172945976257324,
+ "learning_rate": 2.875e-05,
+ "loss": 10.1999,
+ "step": 4
+ },
+ {
+ "epoch": 0.8247422680412371,
+ "grad_norm": 2.121206045150757,
+ "learning_rate": 2.84375e-05,
+ "loss": 10.0634,
+ "step": 5
+ },
+ {
+ "epoch": 0.9896907216494846,
+ "grad_norm": 2.179741144180298,
+ "learning_rate": 2.8125e-05,
+ "loss": 9.8454,
+ "step": 6
+ },
+ {
+ "epoch": 1.1546391752577319,
+ "grad_norm": 2.3345258235931396,
+ "learning_rate": 2.7812500000000002e-05,
+ "loss": 9.9095,
+ "step": 7
+ },
+ {
+ "epoch": 1.3195876288659794,
+ "grad_norm": 2.664315938949585,
+ "learning_rate": 2.75e-05,
+ "loss": 9.8296,
+ "step": 8
+ },
+ {
+ "epoch": 1.4845360824742269,
+ "grad_norm": 3.1426305770874023,
+ "learning_rate": 2.71875e-05,
+ "loss": 9.8446,
+ "step": 9
+ },
+ {
+ "epoch": 1.6494845360824741,
+ "grad_norm": 2.7230398654937744,
+ "learning_rate": 2.6875000000000003e-05,
+ "loss": 9.5769,
+ "step": 10
+ },
+ {
+ "epoch": 1.8144329896907216,
+ "grad_norm": 2.9412689208984375,
+ "learning_rate": 2.65625e-05,
+ "loss": 9.5563,
+ "step": 11
+ },
+ {
+ "epoch": 1.9793814432989691,
+ "grad_norm": 3.2456820011138916,
+ "learning_rate": 2.625e-05,
+ "loss": 9.4496,
+ "step": 12
+ },
+ {
+ "epoch": 2.1443298969072164,
+ "grad_norm": 3.3948400020599365,
+ "learning_rate": 2.59375e-05,
+ "loss": 9.3822,
+ "step": 13
+ },
+ {
+ "epoch": 2.3092783505154637,
+ "grad_norm": 3.4787559509277344,
+ "learning_rate": 2.5625e-05,
+ "loss": 9.2314,
+ "step": 14
+ },
+ {
+ "epoch": 2.4742268041237114,
+ "grad_norm": 3.8148350715637207,
+ "learning_rate": 2.5312500000000002e-05,
+ "loss": 9.1825,
+ "step": 15
+ },
+ {
+ "epoch": 2.6391752577319587,
+ "grad_norm": 3.7718141078948975,
+ "learning_rate": 2.5e-05,
+ "loss": 9.0502,
+ "step": 16
+ },
+ {
+ "epoch": 2.804123711340206,
+ "grad_norm": 3.9114766120910645,
+ "learning_rate": 2.46875e-05,
+ "loss": 8.9091,
+ "step": 17
+ },
+ {
+ "epoch": 2.9690721649484537,
+ "grad_norm": 3.791461706161499,
+ "learning_rate": 2.4375e-05,
+ "loss": 8.825,
+ "step": 18
+ },
+ {
+ "epoch": 3.134020618556701,
+ "grad_norm": 3.532548666000366,
+ "learning_rate": 2.4062500000000002e-05,
+ "loss": 8.6953,
+ "step": 19
+ },
+ {
+ "epoch": 3.2989690721649483,
+ "grad_norm": 3.9233362674713135,
+ "learning_rate": 2.3749999999999998e-05,
+ "loss": 8.5862,
+ "step": 20
+ },
+ {
+ "epoch": 3.463917525773196,
+ "grad_norm": 3.315626621246338,
+ "learning_rate": 2.34375e-05,
+ "loss": 8.5552,
+ "step": 21
+ },
+ {
+ "epoch": 3.6288659793814433,
+ "grad_norm": 3.417571783065796,
+ "learning_rate": 2.3125000000000003e-05,
+ "loss": 8.358,
+ "step": 22
+ },
+ {
+ "epoch": 3.7938144329896906,
+ "grad_norm": 3.810925006866455,
+ "learning_rate": 2.28125e-05,
+ "loss": 8.2089,
+ "step": 23
+ },
+ {
+ "epoch": 3.9587628865979383,
+ "grad_norm": 3.319178819656372,
+ "learning_rate": 2.25e-05,
+ "loss": 8.1787,
+ "step": 24
+ },
+ {
+ "epoch": 4.123711340206185,
+ "grad_norm": 3.318840742111206,
+ "learning_rate": 2.21875e-05,
+ "loss": 8.1272,
+ "step": 25
+ },
+ {
+ "epoch": 4.288659793814433,
+ "grad_norm": 3.684760332107544,
+ "learning_rate": 2.1875e-05,
+ "loss": 8.1127,
+ "step": 26
+ },
+ {
+ "epoch": 4.453608247422681,
+ "grad_norm": 3.7684097290039062,
+ "learning_rate": 2.15625e-05,
+ "loss": 7.9922,
+ "step": 27
+ },
+ {
+ "epoch": 4.618556701030927,
+ "grad_norm": 4.493322849273682,
+ "learning_rate": 2.125e-05,
+ "loss": 7.8609,
+ "step": 28
+ },
+ {
+ "epoch": 4.783505154639175,
+ "grad_norm": 4.545900821685791,
+ "learning_rate": 2.09375e-05,
+ "loss": 7.9192,
+ "step": 29
+ },
+ {
+ "epoch": 4.948453608247423,
+ "grad_norm": 3.999005079269409,
+ "learning_rate": 2.0625e-05,
+ "loss": 7.7976,
+ "step": 30
+ },
+ {
+ "epoch": 4.948453608247423,
+ "eval_audio_cosine_sim": 0.5581808090209961,
+ "eval_loss": 3.100252151489258,
+ "eval_runtime": 2102.121,
+ "eval_samples_per_second": 0.006,
+ "eval_steps_per_second": 0.006,
+ "eval_text_cosine_sim": 0.3910459578037262,
+ "step": 30
+ },
+ {
+ "epoch": 5.11340206185567,
+ "grad_norm": 4.612445831298828,
+ "learning_rate": 2.0312500000000002e-05,
+ "loss": 7.7548,
+ "step": 31
+ },
+ {
+ "epoch": 5.278350515463917,
+ "grad_norm": 4.341012001037598,
+ "learning_rate": 1.9999999999999998e-05,
+ "loss": 7.5716,
+ "step": 32
+ },
+ {
+ "epoch": 5.443298969072165,
+ "grad_norm": 4.00157356262207,
+ "learning_rate": 1.96875e-05,
+ "loss": 7.8051,
+ "step": 33
+ },
+ {
+ "epoch": 5.608247422680412,
+ "grad_norm": 3.777099847793579,
+ "learning_rate": 1.9375e-05,
+ "loss": 7.5696,
+ "step": 34
+ },
+ {
+ "epoch": 5.77319587628866,
+ "grad_norm": 3.256072998046875,
+ "learning_rate": 1.90625e-05,
+ "loss": 7.5285,
+ "step": 35
+ },
+ {
+ "epoch": 5.938144329896907,
+ "grad_norm": 3.026923179626465,
+ "learning_rate": 1.8750000000000002e-05,
+ "loss": 7.3408,
+ "step": 36
+ },
+ {
+ "epoch": 6.103092783505154,
+ "grad_norm": 2.9930453300476074,
+ "learning_rate": 1.84375e-05,
+ "loss": 7.2398,
+ "step": 37
+ },
+ {
+ "epoch": 6.268041237113402,
+ "grad_norm": 2.6270177364349365,
+ "learning_rate": 1.8125e-05,
+ "loss": 7.2282,
+ "step": 38
+ },
+ {
+ "epoch": 6.43298969072165,
+ "grad_norm": 2.583390474319458,
+ "learning_rate": 1.78125e-05,
+ "loss": 7.2528,
+ "step": 39
+ },
+ {
+ "epoch": 6.597938144329897,
+ "grad_norm": 2.5908498764038086,
+ "learning_rate": 1.7500000000000002e-05,
+ "loss": 7.2968,
+ "step": 40
+ },
+ {
+ "epoch": 6.762886597938144,
+ "grad_norm": 2.725625991821289,
+ "learning_rate": 1.7187499999999998e-05,
+ "loss": 7.4103,
+ "step": 41
+ },
+ {
+ "epoch": 6.927835051546392,
+ "grad_norm": 2.520019769668579,
+ "learning_rate": 1.6875e-05,
+ "loss": 7.2359,
+ "step": 42
+ },
+ {
+ "epoch": 7.092783505154639,
+ "grad_norm": 2.3079137802124023,
+ "learning_rate": 1.6562500000000003e-05,
+ "loss": 7.0846,
+ "step": 43
+ },
+ {
+ "epoch": 7.257731958762887,
+ "grad_norm": 2.5834431648254395,
+ "learning_rate": 1.625e-05,
+ "loss": 6.9889,
+ "step": 44
+ },
+ {
+ "epoch": 7.422680412371134,
+ "grad_norm": 2.618986129760742,
+ "learning_rate": 1.59375e-05,
+ "loss": 7.2963,
+ "step": 45
+ },
+ {
+ "epoch": 7.587628865979381,
+ "grad_norm": 2.395085573196411,
+ "learning_rate": 1.5625e-05,
+ "loss": 7.1191,
+ "step": 46
+ },
+ {
+ "epoch": 7.752577319587629,
+ "grad_norm": 2.3549487590789795,
+ "learning_rate": 1.53125e-05,
+ "loss": 6.7482,
+ "step": 47
+ },
+ {
+ "epoch": 7.917525773195877,
+ "grad_norm": 2.349393606185913,
+ "learning_rate": 1.5e-05,
+ "loss": 7.0581,
+ "step": 48
+ },
+ {
+ "epoch": 8.082474226804123,
+ "grad_norm": 2.2347733974456787,
+ "learning_rate": 1.46875e-05,
+ "loss": 6.8346,
+ "step": 49
+ },
+ {
+ "epoch": 8.24742268041237,
+ "grad_norm": 2.182095527648926,
+ "learning_rate": 1.4375e-05,
+ "loss": 7.0005,
+ "step": 50
+ },
+ {
+ "epoch": 8.412371134020619,
+ "grad_norm": 2.0510122776031494,
+ "learning_rate": 1.40625e-05,
+ "loss": 6.8881,
+ "step": 51
+ },
+ {
+ "epoch": 8.577319587628866,
+ "grad_norm": 1.9954293966293335,
+ "learning_rate": 1.375e-05,
+ "loss": 6.7836,
+ "step": 52
+ },
+ {
+ "epoch": 8.742268041237114,
+ "grad_norm": 1.9961941242218018,
+ "learning_rate": 1.3437500000000001e-05,
+ "loss": 6.6969,
+ "step": 53
+ },
+ {
+ "epoch": 8.907216494845361,
+ "grad_norm": 2.0308048725128174,
+ "learning_rate": 1.3125e-05,
+ "loss": 6.9004,
+ "step": 54
+ },
+ {
+ "epoch": 9.072164948453608,
+ "grad_norm": 1.9732003211975098,
+ "learning_rate": 1.28125e-05,
+ "loss": 6.7652,
+ "step": 55
+ },
+ {
+ "epoch": 9.237113402061855,
+ "grad_norm": 1.6928937435150146,
+ "learning_rate": 1.25e-05,
+ "loss": 6.6789,
+ "step": 56
+ },
+ {
+ "epoch": 9.402061855670103,
+ "grad_norm": 1.9639475345611572,
+ "learning_rate": 1.21875e-05,
+ "loss": 6.6207,
+ "step": 57
+ },
+ {
+ "epoch": 9.56701030927835,
+ "grad_norm": 1.6721774339675903,
+ "learning_rate": 1.1874999999999999e-05,
+ "loss": 6.7687,
+ "step": 58
+ },
+ {
+ "epoch": 9.731958762886597,
+ "grad_norm": 1.8212580680847168,
+ "learning_rate": 1.1562500000000002e-05,
+ "loss": 6.5534,
+ "step": 59
+ },
+ {
+ "epoch": 9.896907216494846,
+ "grad_norm": 1.6735063791275024,
+ "learning_rate": 1.125e-05,
+ "loss": 6.6807,
+ "step": 60
+ },
+ {
+ "epoch": 9.896907216494846,
+ "eval_audio_cosine_sim": 0.6627817749977112,
+ "eval_loss": 3.2311527729034424,
+ "eval_runtime": 2160.5029,
+ "eval_samples_per_second": 0.006,
+ "eval_steps_per_second": 0.006,
+ "eval_text_cosine_sim": 0.35343077778816223,
+ "step": 60
+ },
+ {
+ "epoch": 10.061855670103093,
+ "grad_norm": 1.6333427429199219,
+ "learning_rate": 1.09375e-05,
+ "loss": 6.6808,
+ "step": 61
+ },
+ {
+ "epoch": 10.22680412371134,
+ "grad_norm": 1.5876134634017944,
+ "learning_rate": 1.0625e-05,
+ "loss": 6.6629,
+ "step": 62
+ },
+ {
+ "epoch": 10.391752577319588,
+ "grad_norm": 1.4894704818725586,
+ "learning_rate": 1.03125e-05,
+ "loss": 6.6252,
+ "step": 63
+ },
+ {
+ "epoch": 10.556701030927835,
+ "grad_norm": 1.5303210020065308,
+ "learning_rate": 9.999999999999999e-06,
+ "loss": 6.4798,
+ "step": 64
+ },
+ {
+ "epoch": 10.721649484536082,
+ "grad_norm": 1.3529256582260132,
+ "learning_rate": 9.6875e-06,
+ "loss": 6.5828,
+ "step": 65
+ },
+ {
+ "epoch": 10.88659793814433,
+ "grad_norm": 1.4958022832870483,
+ "learning_rate": 9.375000000000001e-06,
+ "loss": 6.5528,
+ "step": 66
+ },
+ {
+ "epoch": 11.051546391752577,
+ "grad_norm": 1.6871830224990845,
+ "learning_rate": 9.0625e-06,
+ "loss": 6.3689,
+ "step": 67
+ },
+ {
+ "epoch": 11.216494845360824,
+ "grad_norm": 1.5430212020874023,
+ "learning_rate": 8.750000000000001e-06,
+ "loss": 6.3102,
+ "step": 68
+ },
+ {
+ "epoch": 11.381443298969073,
+ "grad_norm": 1.4388532638549805,
+ "learning_rate": 8.4375e-06,
+ "loss": 6.4015,
+ "step": 69
+ },
+ {
+ "epoch": 11.54639175257732,
+ "grad_norm": 1.7323001623153687,
+ "learning_rate": 8.125e-06,
+ "loss": 6.7011,
+ "step": 70
+ },
+ {
+ "epoch": 11.711340206185566,
+ "grad_norm": 1.4382604360580444,
+ "learning_rate": 7.8125e-06,
+ "loss": 6.3432,
+ "step": 71
+ },
+ {
+ "epoch": 11.876288659793815,
+ "grad_norm": 1.323933720588684,
+ "learning_rate": 7.5e-06,
+ "loss": 6.5288,
+ "step": 72
+ },
+ {
+ "epoch": 12.041237113402062,
+ "grad_norm": 1.3559449911117554,
+ "learning_rate": 7.1875e-06,
+ "loss": 6.557,
+ "step": 73
+ },
+ {
+ "epoch": 12.206185567010309,
+ "grad_norm": 1.1633719205856323,
+ "learning_rate": 6.875e-06,
+ "loss": 6.386,
+ "step": 74
+ },
+ {
+ "epoch": 12.371134020618557,
+ "grad_norm": 1.5071109533309937,
+ "learning_rate": 6.5625e-06,
+ "loss": 6.5438,
+ "step": 75
+ },
+ {
+ "epoch": 12.536082474226804,
+ "grad_norm": 1.1435602903366089,
+ "learning_rate": 6.25e-06,
+ "loss": 6.4093,
+ "step": 76
+ },
+ {
+ "epoch": 12.70103092783505,
+ "grad_norm": 1.4174741506576538,
+ "learning_rate": 5.9374999999999995e-06,
+ "loss": 6.3887,
+ "step": 77
+ },
+ {
+ "epoch": 12.8659793814433,
+ "grad_norm": 1.423195481300354,
+ "learning_rate": 5.625e-06,
+ "loss": 6.5358,
+ "step": 78
+ },
+ {
+ "epoch": 13.030927835051546,
+ "grad_norm": 1.2214951515197754,
+ "learning_rate": 5.3125e-06,
+ "loss": 6.4307,
+ "step": 79
+ },
+ {
+ "epoch": 13.195876288659793,
+ "grad_norm": 1.1089880466461182,
+ "learning_rate": 4.9999999999999996e-06,
+ "loss": 6.4179,
+ "step": 80
+ },
+ {
+ "epoch": 13.360824742268042,
+ "grad_norm": 1.0945030450820923,
+ "learning_rate": 4.6875000000000004e-06,
+ "loss": 6.3149,
+ "step": 81
+ },
+ {
+ "epoch": 13.525773195876289,
+ "grad_norm": 1.4375858306884766,
+ "learning_rate": 4.3750000000000005e-06,
+ "loss": 6.6049,
+ "step": 82
+ },
+ {
+ "epoch": 13.690721649484535,
+ "grad_norm": 1.2144274711608887,
+ "learning_rate": 4.0625e-06,
+ "loss": 6.3501,
+ "step": 83
+ },
+ {
+ "epoch": 13.855670103092784,
+ "grad_norm": 1.091561198234558,
+ "learning_rate": 3.75e-06,
+ "loss": 6.4059,
+ "step": 84
+ },
+ {
+ "epoch": 14.02061855670103,
+ "grad_norm": 1.4113916158676147,
+ "learning_rate": 3.4375e-06,
+ "loss": 6.1656,
+ "step": 85
+ },
+ {
+ "epoch": 14.185567010309278,
+ "grad_norm": 1.275417685508728,
+ "learning_rate": 3.125e-06,
+ "loss": 6.2095,
+ "step": 86
+ },
+ {
+ "epoch": 14.350515463917526,
+ "grad_norm": 1.1587275266647339,
+ "learning_rate": 2.8125e-06,
+ "loss": 6.4885,
+ "step": 87
+ },
+ {
+ "epoch": 14.515463917525773,
+ "grad_norm": 1.0685796737670898,
+ "learning_rate": 2.4999999999999998e-06,
+ "loss": 6.3991,
+ "step": 88
+ },
+ {
+ "epoch": 14.68041237113402,
+ "grad_norm": 1.102617859840393,
+ "learning_rate": 2.1875000000000002e-06,
+ "loss": 6.4392,
+ "step": 89
+ },
+ {
+ "epoch": 14.845360824742269,
+ "grad_norm": 1.0771472454071045,
+ "learning_rate": 1.875e-06,
+ "loss": 6.4444,
+ "step": 90
+ },
+ {
+ "epoch": 14.845360824742269,
+ "eval_audio_cosine_sim": 0.5883382558822632,
+ "eval_loss": 3.6276748180389404,
+ "eval_runtime": 1072.6246,
+ "eval_samples_per_second": 0.011,
+ "eval_steps_per_second": 0.011,
+ "eval_text_cosine_sim": 0.24592441320419312,
+ "step": 90
+ },
+ {
+ "epoch": 15.010309278350515,
+ "grad_norm": 1.2423425912857056,
+ "learning_rate": 1.5625e-06,
+ "loss": 6.2888,
+ "step": 91
+ },
+ {
+ "epoch": 15.175257731958762,
+ "grad_norm": 1.1462149620056152,
+ "learning_rate": 1.2499999999999999e-06,
+ "loss": 6.4798,
+ "step": 92
+ },
+ {
+ "epoch": 15.34020618556701,
+ "grad_norm": 1.1183279752731323,
+ "learning_rate": 9.375e-07,
+ "loss": 6.254,
+ "step": 93
+ },
+ {
+ "epoch": 15.505154639175258,
+ "grad_norm": 1.1249028444290161,
+ "learning_rate": 6.249999999999999e-07,
+ "loss": 6.3222,
+ "step": 94
+ },
+ {
+ "epoch": 15.670103092783505,
+ "grad_norm": 1.2026952505111694,
+ "learning_rate": 3.1249999999999997e-07,
+ "loss": 6.2651,
+ "step": 95
+ },
+ {
+ "epoch": 15.835051546391753,
+ "grad_norm": 1.283206820487976,
+ "learning_rate": 0.0,
+ "loss": 6.553,
+ "step": 96
+ },
+ {
+ "epoch": 15.835051546391753,
+ "step": 96,
+ "total_flos": 1925884422732936.0,
+ "train_loss": 7.460742597778638,
+ "train_runtime": 13364.4334,
+ "train_samples_per_second": 0.116,
+ "train_steps_per_second": 0.007
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 96,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 16,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": false,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1925884422732936.0,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+ }
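The per-step log above is stored under `log_history`, mixing training entries (which carry `loss`) with evaluation entries (which carry `eval_loss`). A minimal sketch for pulling out the loss curve and the intermediate eval points, assuming a local copy of `trainer_state.json`:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Training-loss entries have a "loss" key; evaluation entries have "eval_loss" instead.
train_curve = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_points = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"{len(train_curve)} logged training steps, final loss {train_curve[-1][1]}")
for step, loss in eval_points:
    print(f"eval @ step {step}: loss {loss:.4f}")
```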