{
  "best_metric": 0.45803794264793396,
  "best_model_checkpoint": "output/checkpoint-150",
  "epoch": 0.9224907249573849,
  "eval_steps": 50,
  "global_step": 1150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 0.0,
      "loss": 1.6453,
      "step": 1
    },
    {
      "epoch": 0.04,
      "learning_rate": 9.777777777777779e-06,
      "loss": 1.3216,
      "step": 50
    },
    {
      "epoch": 0.04,
      "eval_accuracy": 0.8057142857142857,
      "eval_loss": 0.6264824271202087,
      "eval_runtime": 28.8159,
      "eval_samples_per_second": 6.073,
      "eval_steps_per_second": 1.527,
      "step": 50
    },
    {
      "epoch": 0.08,
      "learning_rate": 2.088888888888889e-05,
      "loss": 0.918,
      "step": 100
    },
    {
      "epoch": 0.08,
      "eval_accuracy": 0.7885714285714286,
      "eval_loss": 0.49068325757980347,
      "eval_runtime": 26.1642,
      "eval_samples_per_second": 6.689,
      "eval_steps_per_second": 1.682,
      "step": 100
    },
    {
      "epoch": 0.12,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 0.7219,
      "step": 150
    },
    {
      "epoch": 0.12,
      "eval_accuracy": 0.7942857142857143,
      "eval_loss": 0.45803794264793396,
      "eval_runtime": 26.1308,
      "eval_samples_per_second": 6.697,
      "eval_steps_per_second": 1.684,
      "step": 150
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.311111111111111e-05,
      "loss": 0.6619,
      "step": 200
    },
    {
      "epoch": 0.16,
      "eval_accuracy": 0.8285714285714286,
      "eval_loss": 0.4676758646965027,
      "eval_runtime": 26.1149,
      "eval_samples_per_second": 6.701,
      "eval_steps_per_second": 1.685,
      "step": 200
    },
    {
      "epoch": 0.2,
      "learning_rate": 4.906953966699315e-05,
      "loss": 0.633,
      "step": 250
    },
    {
      "epoch": 0.2,
      "eval_accuracy": 0.8114285714285714,
      "eval_loss": 0.4800652265548706,
      "eval_runtime": 26.1161,
      "eval_samples_per_second": 6.701,
      "eval_steps_per_second": 1.685,
      "step": 250
    },
    {
      "epoch": 0.24,
      "learning_rate": 4.66209598432909e-05,
      "loss": 0.6147,
      "step": 300
    },
    {
      "epoch": 0.24,
      "eval_accuracy": 0.7657142857142857,
      "eval_loss": 0.5112562775611877,
      "eval_runtime": 26.1127,
      "eval_samples_per_second": 6.702,
      "eval_steps_per_second": 1.685,
      "step": 300
    },
    {
      "epoch": 0.28,
      "learning_rate": 4.4172380019588636e-05,
      "loss": 0.6355,
      "step": 350
    },
    {
      "epoch": 0.28,
      "eval_accuracy": 0.7828571428571428,
      "eval_loss": 0.5210281610488892,
      "eval_runtime": 26.1048,
      "eval_samples_per_second": 6.704,
      "eval_steps_per_second": 1.686,
      "step": 350
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.1723800195886386e-05,
      "loss": 0.593,
      "step": 400
    },
    {
      "epoch": 0.32,
      "eval_accuracy": 0.7885714285714286,
      "eval_loss": 0.5098910927772522,
      "eval_runtime": 26.1072,
      "eval_samples_per_second": 6.703,
      "eval_steps_per_second": 1.685,
      "step": 400
    },
    {
      "epoch": 0.36,
      "learning_rate": 3.927522037218414e-05,
      "loss": 0.6047,
      "step": 450
    },
    {
      "epoch": 0.36,
      "eval_accuracy": 0.7314285714285714,
      "eval_loss": 0.5602952837944031,
      "eval_runtime": 26.1116,
      "eval_samples_per_second": 6.702,
      "eval_steps_per_second": 1.685,
      "step": 450
    },
    {
      "epoch": 0.4,
      "learning_rate": 3.682664054848188e-05,
      "loss": 0.5661,
      "step": 500
    },
    {
      "epoch": 0.4,
      "eval_accuracy": 0.7542857142857143,
      "eval_loss": 0.5670996904373169,
      "eval_runtime": 26.1104,
      "eval_samples_per_second": 6.702,
      "eval_steps_per_second": 1.685,
      "step": 500
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.437806072477963e-05,
      "loss": 0.6111,
      "step": 550
    },
    {
      "epoch": 0.44,
      "eval_accuracy": 0.7714285714285715,
      "eval_loss": 0.5074692368507385,
      "eval_runtime": 26.1037,
      "eval_samples_per_second": 6.704,
      "eval_steps_per_second": 1.686,
      "step": 550
    },
    {
      "epoch": 0.48,
      "learning_rate": 3.1929480901077375e-05,
      "loss": 0.5788,
      "step": 600
    },
    {
      "epoch": 0.48,
      "eval_accuracy": 0.7485714285714286,
      "eval_loss": 0.6720603108406067,
      "eval_runtime": 26.1036,
      "eval_samples_per_second": 6.704,
      "eval_steps_per_second": 1.686,
      "step": 600
    },
    {
      "epoch": 0.52,
      "learning_rate": 2.9480901077375122e-05,
      "loss": 0.562,
      "step": 650
    },
    {
      "epoch": 0.52,
      "eval_accuracy": 0.7942857142857143,
      "eval_loss": 0.5063876509666443,
      "eval_runtime": 26.1086,
      "eval_samples_per_second": 6.703,
      "eval_steps_per_second": 1.685,
      "step": 650
    },
    {
      "epoch": 0.56,
      "learning_rate": 2.7032321253672872e-05,
      "loss": 0.5472,
      "step": 700
    },
    {
      "epoch": 0.56,
      "eval_accuracy": 0.7371428571428571,
      "eval_loss": 0.5650284886360168,
      "eval_runtime": 26.1065,
      "eval_samples_per_second": 6.703,
      "eval_steps_per_second": 1.685,
      "step": 700
    },
    {
      "epoch": 0.6,
      "learning_rate": 2.458374142997062e-05,
      "loss": 0.551,
      "step": 750
    },
    {
      "epoch": 0.6,
      "eval_accuracy": 0.8,
      "eval_loss": 0.5052895545959473,
      "eval_runtime": 26.1094,
      "eval_samples_per_second": 6.703,
      "eval_steps_per_second": 1.685,
      "step": 750
    },
    {
      "epoch": 0.64,
      "learning_rate": 2.2135161606268366e-05,
      "loss": 0.5259,
      "step": 800
    },
    {
      "epoch": 0.64,
      "eval_accuracy": 0.7485714285714286,
      "eval_loss": 0.5384859442710876,
      "eval_runtime": 26.0891,
      "eval_samples_per_second": 6.708,
      "eval_steps_per_second": 1.687,
      "step": 800
    },
    {
      "epoch": 0.68,
      "learning_rate": 1.9686581782566114e-05,
      "loss": 0.5231,
      "step": 850
    },
    {
      "epoch": 0.68,
      "eval_accuracy": 0.7371428571428571,
      "eval_loss": 0.5821840763092041,
      "eval_runtime": 26.0996,
      "eval_samples_per_second": 6.705,
      "eval_steps_per_second": 1.686,
      "step": 850
    },
    {
      "epoch": 0.72,
      "learning_rate": 1.7238001958863857e-05,
      "loss": 0.4987,
      "step": 900
    },
    {
      "epoch": 0.72,
      "eval_accuracy": 0.7142857142857143,
      "eval_loss": 0.5697786211967468,
      "eval_runtime": 26.0884,
      "eval_samples_per_second": 6.708,
      "eval_steps_per_second": 1.687,
      "step": 900
    },
    {
      "epoch": 0.76,
      "learning_rate": 1.4789422135161608e-05,
      "loss": 0.5019,
      "step": 950
    },
    {
      "epoch": 0.76,
      "eval_accuracy": 0.7657142857142857,
      "eval_loss": 0.5529425740242004,
      "eval_runtime": 26.1154,
      "eval_samples_per_second": 6.701,
      "eval_steps_per_second": 1.685,
      "step": 950
    },
    {
      "epoch": 0.8,
      "learning_rate": 1.2340842311459353e-05,
      "loss": 0.515,
      "step": 1000
    },
    {
      "epoch": 0.8,
      "eval_accuracy": 0.7314285714285714,
      "eval_loss": 0.571620762348175,
      "eval_runtime": 26.0898,
      "eval_samples_per_second": 6.708,
      "eval_steps_per_second": 1.686,
      "step": 1000
    },
    {
      "epoch": 0.84,
      "learning_rate": 9.8922624877571e-06,
      "loss": 0.5154,
      "step": 1050
    },
    {
      "epoch": 0.84,
      "eval_accuracy": 0.7257142857142858,
      "eval_loss": 0.5566055774688721,
      "eval_runtime": 26.1014,
      "eval_samples_per_second": 6.705,
      "eval_steps_per_second": 1.686,
      "step": 1050
    },
    {
      "epoch": 0.88,
      "learning_rate": 7.443682664054849e-06,
      "loss": 0.5066,
      "step": 1100
    },
    {
      "epoch": 0.88,
      "eval_accuracy": 0.6971428571428572,
      "eval_loss": 0.5895078778266907,
      "eval_runtime": 26.0898,
      "eval_samples_per_second": 6.708,
      "eval_steps_per_second": 1.686,
      "step": 1100
    },
    {
      "epoch": 0.92,
      "learning_rate": 4.995102840352596e-06,
      "loss": 0.5173,
      "step": 1150
    },
    {
      "epoch": 0.92,
      "eval_accuracy": 0.6914285714285714,
      "eval_loss": 0.5952877402305603,
      "eval_runtime": 26.0937,
      "eval_samples_per_second": 6.707,
      "eval_steps_per_second": 1.686,
      "step": 1150
    }
  ],
  "logging_steps": 50,
  "max_steps": 1246,
  "num_train_epochs": 1,
  "save_steps": 50,
  "total_flos": 4.267635343876915e+17,
  "trial_name": null,
  "trial_params": null
}