{
  "best_metric": 0.8276850886339938,
  "best_model_checkpoint": "./runs/checkpoint-29920",
  "epoch": 2.0,
  "global_step": 29920,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 1.218154152691866e-05,
      "loss": 0.2013,
      "step": 500
    },
    {
      "epoch": 0.07,
      "learning_rate": 1.2078899619883618e-05,
      "loss": 0.1722,
      "step": 1000
    },
    {
      "epoch": 0.1,
      "learning_rate": 1.1976257712848578e-05,
      "loss": 0.1653,
      "step": 1500
    },
    {
      "epoch": 0.13,
      "learning_rate": 1.1873615805813536e-05,
      "loss": 0.1488,
      "step": 2000
    },
    {
      "epoch": 0.17,
      "learning_rate": 1.1770973898778496e-05,
      "loss": 0.136,
      "step": 2500
    },
    {
      "epoch": 0.2,
      "learning_rate": 1.1668331991743454e-05,
      "loss": 0.1522,
      "step": 3000
    },
    {
      "epoch": 0.23,
      "learning_rate": 1.1565690084708414e-05,
      "loss": 0.1457,
      "step": 3500
    },
    {
      "epoch": 0.27,
      "learning_rate": 1.1463048177673372e-05,
      "loss": 0.1337,
      "step": 4000
    },
    {
      "epoch": 0.3,
      "learning_rate": 1.1360406270638331e-05,
      "loss": 0.1346,
      "step": 4500
    },
    {
      "epoch": 0.33,
      "learning_rate": 1.1257764363603291e-05,
      "loss": 0.1354,
      "step": 5000
    },
    {
      "epoch": 0.37,
      "learning_rate": 1.115512245656825e-05,
      "loss": 0.1442,
      "step": 5500
    },
    {
      "epoch": 0.4,
      "learning_rate": 1.1052480549533209e-05,
      "loss": 0.126,
      "step": 6000
    },
    {
      "epoch": 0.43,
      "learning_rate": 1.0949838642498167e-05,
      "loss": 0.1274,
      "step": 6500
    },
    {
      "epoch": 0.47,
      "learning_rate": 1.0847196735463127e-05,
      "loss": 0.1225,
      "step": 7000
    },
    {
      "epoch": 0.5,
      "learning_rate": 1.0744554828428085e-05,
      "loss": 0.1301,
      "step": 7500
    },
    {
      "epoch": 0.53,
      "learning_rate": 1.0641912921393045e-05,
      "loss": 0.1357,
      "step": 8000
    },
    {
      "epoch": 0.57,
      "learning_rate": 1.0539271014358003e-05,
      "loss": 0.1272,
      "step": 8500
    },
    {
      "epoch": 0.6,
      "learning_rate": 1.0436629107322963e-05,
      "loss": 0.1353,
      "step": 9000
    },
    {
      "epoch": 0.64,
      "learning_rate": 1.0333987200287922e-05,
      "loss": 0.1361,
      "step": 9500
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.023134529325288e-05,
      "loss": 0.1303,
      "step": 10000
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.012870338621784e-05,
      "loss": 0.1294,
      "step": 10500
    },
    {
      "epoch": 0.74,
      "learning_rate": 1.00260614791828e-05,
      "loss": 0.1131,
      "step": 11000
    },
    {
      "epoch": 0.77,
      "learning_rate": 9.923419572147758e-06,
      "loss": 0.1072,
      "step": 11500
    },
    {
      "epoch": 0.8,
      "learning_rate": 9.820777665112718e-06,
      "loss": 0.1222,
      "step": 12000
    },
    {
      "epoch": 0.84,
      "learning_rate": 9.718135758077678e-06,
      "loss": 0.1221,
      "step": 12500
    },
    {
      "epoch": 0.87,
      "learning_rate": 9.615493851042636e-06,
      "loss": 0.1196,
      "step": 13000
    },
    {
      "epoch": 0.9,
      "learning_rate": 9.512851944007596e-06,
      "loss": 0.1191,
      "step": 13500
    },
    {
      "epoch": 0.94,
      "learning_rate": 9.410210036972554e-06,
      "loss": 0.1185,
      "step": 14000
    },
    {
      "epoch": 0.97,
      "learning_rate": 9.307568129937513e-06,
      "loss": 0.1265,
      "step": 14500
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9647807886095305,
      "eval_f1": 0.8202175303902751,
      "eval_loss": 0.13746094703674316,
      "eval_precision": 0.8026546456298522,
      "eval_recall": 0.8385661957090529,
      "eval_runtime": 509.4329,
      "eval_samples_per_second": 78.309,
      "step": 14960
    },
    {
      "epoch": 1.0,
      "learning_rate": 9.204926222902473e-06,
      "loss": 0.1162,
      "step": 15000
    },
    {
      "epoch": 1.04,
      "learning_rate": 9.102284315867431e-06,
      "loss": 0.0916,
      "step": 15500
    },
    {
      "epoch": 1.07,
      "learning_rate": 8.999642408832391e-06,
      "loss": 0.0878,
      "step": 16000
    },
    {
      "epoch": 1.1,
      "learning_rate": 8.897000501797349e-06,
      "loss": 0.0929,
      "step": 16500
    },
    {
      "epoch": 1.14,
      "learning_rate": 8.794358594762309e-06,
      "loss": 0.1018,
      "step": 17000
    },
    {
      "epoch": 1.17,
      "learning_rate": 8.691716687727267e-06,
      "loss": 0.098,
      "step": 17500
    },
    {
      "epoch": 1.2,
      "learning_rate": 8.589074780692227e-06,
      "loss": 0.0895,
      "step": 18000
    },
    {
      "epoch": 1.24,
      "learning_rate": 8.486432873657185e-06,
      "loss": 0.0824,
      "step": 18500
    },
    {
      "epoch": 1.27,
      "learning_rate": 8.383790966622145e-06,
      "loss": 0.0758,
      "step": 19000
    },
    {
      "epoch": 1.3,
      "learning_rate": 8.281149059587103e-06,
      "loss": 0.088,
      "step": 19500
    },
    {
      "epoch": 1.34,
      "learning_rate": 8.178507152552062e-06,
      "loss": 0.0975,
      "step": 20000
    },
    {
      "epoch": 1.37,
      "learning_rate": 8.075865245517022e-06,
      "loss": 0.0917,
      "step": 20500
    },
    {
      "epoch": 1.4,
      "learning_rate": 7.97322333848198e-06,
      "loss": 0.0952,
      "step": 21000
    },
    {
      "epoch": 1.44,
      "learning_rate": 7.87058143144694e-06,
      "loss": 0.0963,
      "step": 21500
    },
    {
      "epoch": 1.47,
      "learning_rate": 7.767939524411898e-06,
      "loss": 0.092,
      "step": 22000
    },
    {
      "epoch": 1.5,
      "learning_rate": 7.665297617376858e-06,
      "loss": 0.1029,
      "step": 22500
    },
    {
      "epoch": 1.54,
      "learning_rate": 7.562655710341816e-06,
      "loss": 0.0938,
      "step": 23000
    },
    {
      "epoch": 1.57,
      "learning_rate": 7.460013803306776e-06,
      "loss": 0.0995,
      "step": 23500
    },
    {
      "epoch": 1.6,
      "learning_rate": 7.357371896271735e-06,
      "loss": 0.0875,
      "step": 24000
    },
    {
      "epoch": 1.64,
      "learning_rate": 7.254729989236694e-06,
      "loss": 0.1,
      "step": 24500
    },
    {
      "epoch": 1.67,
      "learning_rate": 7.152088082201653e-06,
      "loss": 0.0841,
      "step": 25000
    },
    {
      "epoch": 1.7,
      "learning_rate": 7.049446175166612e-06,
      "loss": 0.09,
      "step": 25500
    },
    {
      "epoch": 1.74,
      "learning_rate": 6.946804268131571e-06,
      "loss": 0.0797,
      "step": 26000
    },
    {
      "epoch": 1.77,
      "learning_rate": 6.84416236109653e-06,
      "loss": 0.0888,
      "step": 26500
    },
    {
      "epoch": 1.8,
      "learning_rate": 6.74152045406149e-06,
      "loss": 0.0804,
      "step": 27000
    },
    {
      "epoch": 1.84,
      "learning_rate": 6.638878547026448e-06,
      "loss": 0.0989,
      "step": 27500
    },
    {
      "epoch": 1.87,
      "learning_rate": 6.536236639991408e-06,
      "loss": 0.0879,
      "step": 28000
    },
    {
      "epoch": 1.91,
      "learning_rate": 6.433594732956366e-06,
      "loss": 0.0955,
      "step": 28500
    },
    {
      "epoch": 1.94,
      "learning_rate": 6.330952825921326e-06,
      "loss": 0.0928,
      "step": 29000
    },
    {
      "epoch": 1.97,
      "learning_rate": 6.228310918886284e-06,
      "loss": 0.0852,
      "step": 29500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9668613541222771,
      "eval_f1": 0.8276850886339938,
      "eval_loss": 0.13479416072368622,
      "eval_precision": 0.8246753246753247,
      "eval_recall": 0.8307169021454736,
      "eval_runtime": 509.3337,
      "eval_samples_per_second": 78.324,
      "step": 29920
    }
  ],
  "max_steps": 59840,
  "num_train_epochs": 4,
  "total_flos": 7.964185065787392e+16,
  "trial_name": null,
  "trial_params": null
}