{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 7.917525773195877,
"eval_steps": 30,
"global_step": 48,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.16494845360824742,
"grad_norm": 1.603933334350586,
"learning_rate": 2.9375e-05,
"loss": 10.1011,
"step": 1
},
{
"epoch": 0.32989690721649484,
"grad_norm": 1.7179864645004272,
"learning_rate": 2.875e-05,
"loss": 10.2338,
"step": 2
},
{
"epoch": 0.4948453608247423,
"grad_norm": 1.7488847970962524,
"learning_rate": 2.8125e-05,
"loss": 10.0559,
"step": 3
},
{
"epoch": 0.6597938144329897,
"grad_norm": 2.1688830852508545,
"learning_rate": 2.75e-05,
"loss": 10.2016,
"step": 4
},
{
"epoch": 0.8247422680412371,
"grad_norm": 2.111985206604004,
"learning_rate": 2.6875000000000003e-05,
"loss": 10.0667,
"step": 5
},
{
"epoch": 0.9896907216494846,
"grad_norm": 2.164363145828247,
"learning_rate": 2.625e-05,
"loss": 9.8519,
"step": 6
},
{
"epoch": 1.1546391752577319,
"grad_norm": 2.308406114578247,
"learning_rate": 2.5625e-05,
"loss": 9.92,
"step": 7
},
{
"epoch": 1.3195876288659794,
"grad_norm": 2.625577688217163,
"learning_rate": 2.5e-05,
"loss": 9.8473,
"step": 8
},
{
"epoch": 1.4845360824742269,
"grad_norm": 3.0818679332733154,
"learning_rate": 2.4375e-05,
"loss": 9.8717,
"step": 9
},
{
"epoch": 1.6494845360824741,
"grad_norm": 2.6675806045532227,
"learning_rate": 2.3749999999999998e-05,
"loss": 9.6087,
"step": 10
},
{
"epoch": 1.8144329896907216,
"grad_norm": 2.8696348667144775,
"learning_rate": 2.3125000000000003e-05,
"loss": 9.6005,
"step": 11
},
{
"epoch": 1.9793814432989691,
"grad_norm": 3.16064453125,
"learning_rate": 2.25e-05,
"loss": 9.5111,
"step": 12
},
{
"epoch": 2.1443298969072164,
"grad_norm": 3.2917752265930176,
"learning_rate": 2.1875e-05,
"loss": 9.4589,
"step": 13
},
{
"epoch": 2.3092783505154637,
"grad_norm": 3.362398624420166,
"learning_rate": 2.125e-05,
"loss": 9.3264,
"step": 14
},
{
"epoch": 2.4742268041237114,
"grad_norm": 3.687838315963745,
"learning_rate": 2.0625e-05,
"loss": 9.3032,
"step": 15
},
{
"epoch": 2.6391752577319587,
"grad_norm": 3.6583521366119385,
"learning_rate": 1.9999999999999998e-05,
"loss": 9.1917,
"step": 16
},
{
"epoch": 2.804123711340206,
"grad_norm": 3.846595048904419,
"learning_rate": 1.9375e-05,
"loss": 9.0754,
"step": 17
},
{
"epoch": 2.9690721649484537,
"grad_norm": 3.7431483268737793,
"learning_rate": 1.8750000000000002e-05,
"loss": 9.0118,
"step": 18
},
{
"epoch": 3.134020618556701,
"grad_norm": 3.54080867767334,
"learning_rate": 1.8125e-05,
"loss": 8.8953,
"step": 19
},
{
"epoch": 3.2989690721649483,
"grad_norm": 3.989246368408203,
"learning_rate": 1.7500000000000002e-05,
"loss": 8.8344,
"step": 20
},
{
"epoch": 3.463917525773196,
"grad_norm": 3.540311574935913,
"learning_rate": 1.6875e-05,
"loss": 8.7965,
"step": 21
},
{
"epoch": 3.6288659793814433,
"grad_norm": 3.735725164413452,
"learning_rate": 1.625e-05,
"loss": 8.6344,
"step": 22
},
{
"epoch": 3.7938144329896906,
"grad_norm": 4.285547733306885,
"learning_rate": 1.5625e-05,
"loss": 8.544,
"step": 23
},
{
"epoch": 3.9587628865979383,
"grad_norm": 3.555814027786255,
"learning_rate": 1.5e-05,
"loss": 8.4752,
"step": 24
},
{
"epoch": 4.123711340206185,
"grad_norm": 3.49817156791687,
"learning_rate": 1.4375e-05,
"loss": 8.4403,
"step": 25
},
{
"epoch": 4.288659793814433,
"grad_norm": 3.3520140647888184,
"learning_rate": 1.375e-05,
"loss": 8.4122,
"step": 26
},
{
"epoch": 4.453608247422681,
"grad_norm": 3.296865224838257,
"learning_rate": 1.3125e-05,
"loss": 8.3044,
"step": 27
},
{
"epoch": 4.618556701030927,
"grad_norm": 3.4501516819000244,
"learning_rate": 1.25e-05,
"loss": 8.1882,
"step": 28
},
{
"epoch": 4.783505154639175,
"grad_norm": 3.2213199138641357,
"learning_rate": 1.1874999999999999e-05,
"loss": 8.2403,
"step": 29
},
{
"epoch": 4.948453608247423,
"grad_norm": 3.34763765335083,
"learning_rate": 1.125e-05,
"loss": 8.1814,
"step": 30
},
{
"epoch": 4.948453608247423,
"eval_audio_cosine_sim": 0.5797536969184875,
"eval_loss": 3.095458745956421,
"eval_runtime": 2074.7641,
"eval_samples_per_second": 0.006,
"eval_steps_per_second": 0.006,
"eval_text_cosine_sim": 0.38171055912971497,
"step": 30
},
{
"epoch": 5.11340206185567,
"grad_norm": 3.406193256378174,
"learning_rate": 1.0625e-05,
"loss": 8.109,
"step": 31
},
{
"epoch": 5.278350515463917,
"grad_norm": 3.601793050765991,
"learning_rate": 9.999999999999999e-06,
"loss": 7.9923,
"step": 32
},
{
"epoch": 5.443298969072165,
"grad_norm": 3.392193555831909,
"learning_rate": 9.375000000000001e-06,
"loss": 8.202,
"step": 33
},
{
"epoch": 5.608247422680412,
"grad_norm": 3.5988409519195557,
"learning_rate": 8.750000000000001e-06,
"loss": 8.0204,
"step": 34
},
{
"epoch": 5.77319587628866,
"grad_norm": 3.4323904514312744,
"learning_rate": 8.125e-06,
"loss": 8.008,
"step": 35
},
{
"epoch": 5.938144329896907,
"grad_norm": 3.5866267681121826,
"learning_rate": 7.5e-06,
"loss": 7.8909,
"step": 36
},
{
"epoch": 6.103092783505154,
"grad_norm": 3.682654619216919,
"learning_rate": 6.875e-06,
"loss": 7.8228,
"step": 37
},
{
"epoch": 6.268041237113402,
"grad_norm": 3.4344913959503174,
"learning_rate": 6.25e-06,
"loss": 7.8066,
"step": 38
},
{
"epoch": 6.43298969072165,
"grad_norm": 3.7810468673706055,
"learning_rate": 5.625e-06,
"loss": 7.8461,
"step": 39
},
{
"epoch": 6.597938144329897,
"grad_norm": 3.7566187381744385,
"learning_rate": 4.9999999999999996e-06,
"loss": 7.8968,
"step": 40
},
{
"epoch": 6.762886597938144,
"grad_norm": 3.617682695388794,
"learning_rate": 4.3750000000000005e-06,
"loss": 7.9953,
"step": 41
},
{
"epoch": 6.927835051546392,
"grad_norm": 3.5307486057281494,
"learning_rate": 3.75e-06,
"loss": 7.8764,
"step": 42
},
{
"epoch": 7.092783505154639,
"grad_norm": 3.624812364578247,
"learning_rate": 3.125e-06,
"loss": 7.7608,
"step": 43
},
{
"epoch": 7.257731958762887,
"grad_norm": 3.573174476623535,
"learning_rate": 2.4999999999999998e-06,
"loss": 7.705,
"step": 44
},
{
"epoch": 7.422680412371134,
"grad_norm": 3.3969686031341553,
"learning_rate": 1.875e-06,
"loss": 7.9902,
"step": 45
},
{
"epoch": 7.587628865979381,
"grad_norm": 3.5248804092407227,
"learning_rate": 1.2499999999999999e-06,
"loss": 7.8566,
"step": 46
},
{
"epoch": 7.752577319587629,
"grad_norm": 3.8090217113494873,
"learning_rate": 6.249999999999999e-07,
"loss": 7.5691,
"step": 47
},
{
"epoch": 7.917525773195877,
"grad_norm": 3.4885289669036865,
"learning_rate": 0.0,
"loss": 7.8552,
"step": 48
},
{
"epoch": 7.917525773195877,
"step": 48,
"total_flos": 962957869218000.0,
"train_loss": 8.716416617234549,
"train_runtime": 7439.5895,
"train_samples_per_second": 0.104,
"train_steps_per_second": 0.006
}
],
"logging_steps": 1.0,
"max_steps": 48,
"num_input_tokens_seen": 0,
"num_train_epochs": 8,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 962957869218000.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}