{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.8585209003215435,
  "eval_steps": 500,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "learning_rate": 4.9986331433523156e-05,
      "loss": 0.6891,
      "step": 2
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.994534068046937e-05,
      "loss": 0.5774,
      "step": 4
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.9877072563625285e-05,
      "loss": 0.5359,
      "step": 6
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.978160173317438e-05,
      "loss": 0.541,
      "step": 8
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.965903258506806e-05,
      "loss": 0.5038,
      "step": 10
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.9509499146870236e-05,
      "loss": 0.5397,
      "step": 12
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.933316493120015e-05,
      "loss": 0.5097,
      "step": 14
    },
    {
      "epoch": 0.41,
      "learning_rate": 4.913022275693372e-05,
      "loss": 0.4829,
      "step": 16
    },
    {
      "epoch": 0.46,
      "learning_rate": 4.8900894538358944e-05,
      "loss": 0.503,
      "step": 18
    },
    {
      "epoch": 0.51,
      "learning_rate": 4.864543104251587e-05,
      "loss": 0.535,
      "step": 20
    },
    {
      "epoch": 0.57,
      "learning_rate": 4.8364111614986527e-05,
      "loss": 0.4685,
      "step": 22
    },
    {
      "epoch": 0.62,
      "learning_rate": 4.805724387443462e-05,
      "loss": 0.5223,
      "step": 24
    },
    {
      "epoch": 0.67,
      "learning_rate": 4.7725163376229064e-05,
      "loss": 0.5033,
      "step": 26
    },
    {
      "epoch": 0.72,
      "learning_rate": 4.736823324551909e-05,
      "loss": 0.4991,
      "step": 28
    },
    {
      "epoch": 0.77,
      "learning_rate": 4.698684378016222e-05,
      "loss": 0.5225,
      "step": 30
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.6581412023939354e-05,
      "loss": 0.4711,
      "step": 32
    },
    {
      "epoch": 0.87,
      "learning_rate": 4.6152381310523387e-05,
      "loss": 0.5054,
      "step": 34
    },
    {
      "epoch": 0.93,
      "learning_rate": 4.5700220778700504e-05,
      "loss": 0.5237,
      "step": 36
    },
    {
      "epoch": 0.98,
      "learning_rate": 4.522542485937369e-05,
      "loss": 0.4992,
      "step": 38
    },
    {
      "epoch": 1.03,
      "learning_rate": 4.4728512734909844e-05,
      "loss": 0.3735,
      "step": 40
    },
    {
      "epoch": 1.08,
      "learning_rate": 4.421002777142148e-05,
      "loss": 0.2735,
      "step": 42
    },
    {
      "epoch": 1.13,
      "learning_rate": 4.367053692460385e-05,
      "loss": 0.2565,
      "step": 44
    },
    {
      "epoch": 1.18,
      "learning_rate": 4.311063011977723e-05,
      "loss": 0.2926,
      "step": 46
    },
    {
      "epoch": 1.23,
      "learning_rate": 4.2530919606812216e-05,
      "loss": 0.281,
      "step": 48
    },
    {
      "epoch": 1.29,
      "learning_rate": 4.193203929064353e-05,
      "loss": 0.2619,
      "step": 50
    },
    {
      "epoch": 1.34,
      "learning_rate": 4.131464403810422e-05,
      "loss": 0.2456,
      "step": 52
    },
    {
      "epoch": 1.39,
      "learning_rate": 4.067940896183843e-05,
      "loss": 0.2442,
      "step": 54
    },
    {
      "epoch": 1.44,
      "learning_rate": 4.002702868207563e-05,
      "loss": 0.2625,
      "step": 56
    },
    {
      "epoch": 1.49,
      "learning_rate": 3.935821656707359e-05,
      "loss": 0.2616,
      "step": 58
    },
    {
      "epoch": 1.54,
      "learning_rate": 3.867370395306068e-05,
      "loss": 0.2545,
      "step": 60
    },
    {
      "epoch": 1.59,
      "learning_rate": 3.797423934453038e-05,
      "loss": 0.2728,
      "step": 62
    },
    {
      "epoch": 1.65,
      "learning_rate": 3.726058759576271e-05,
      "loss": 0.234,
      "step": 64
    },
    {
      "epoch": 1.7,
      "learning_rate": 3.65335290744672e-05,
      "loss": 0.2356,
      "step": 66
    },
    {
      "epoch": 1.75,
      "learning_rate": 3.579385880846232e-05,
      "loss": 0.2369,
      "step": 68
    },
    {
      "epoch": 1.8,
      "learning_rate": 3.504238561632424e-05,
      "loss": 0.236,
      "step": 70
    },
    {
      "epoch": 1.85,
      "learning_rate": 3.427993122295552e-05,
      "loss": 0.2607,
      "step": 72
    },
    {
      "epoch": 1.9,
      "learning_rate": 3.350732936104108e-05,
      "loss": 0.2637,
      "step": 74
    },
    {
      "epoch": 1.95,
      "learning_rate": 3.272542485937369e-05,
      "loss": 0.2338,
      "step": 76
    },
    {
      "epoch": 2.01,
      "learning_rate": 3.1935072719046115e-05,
      "loss": 0.2489,
      "step": 78
    },
    {
      "epoch": 2.06,
      "learning_rate": 3.1137137178519985e-05,
      "loss": 0.1337,
      "step": 80
    },
    {
      "epoch": 2.11,
      "learning_rate": 3.0332490768593675e-05,
      "loss": 0.1188,
      "step": 82
    },
    {
      "epoch": 2.16,
      "learning_rate": 2.952201335830275e-05,
      "loss": 0.1067,
      "step": 84
    },
    {
      "epoch": 2.21,
      "learning_rate": 2.870659119279605e-05,
      "loss": 0.1131,
      "step": 86
    },
    {
      "epoch": 2.26,
      "learning_rate": 2.788711592423966e-05,
      "loss": 0.1308,
      "step": 88
    },
    {
      "epoch": 2.32,
      "learning_rate": 2.7064483636808313e-05,
      "loss": 0.1236,
      "step": 90
    },
    {
      "epoch": 2.37,
      "learning_rate": 2.623959386683056e-05,
      "loss": 0.1079,
      "step": 92
    },
    {
      "epoch": 2.42,
      "learning_rate": 2.5413348619158967e-05,
      "loss": 0.1102,
      "step": 94
    },
    {
      "epoch": 2.47,
      "learning_rate": 2.458665138084104e-05,
      "loss": 0.1218,
      "step": 96
    },
    {
      "epoch": 2.52,
      "learning_rate": 2.3760406133169443e-05,
      "loss": 0.1148,
      "step": 98
    },
    {
      "epoch": 2.57,
      "learning_rate": 2.2935516363191693e-05,
      "loss": 0.1109,
      "step": 100
    },
    {
      "epoch": 2.62,
      "learning_rate": 2.2112884075760347e-05,
      "loss": 0.1088,
      "step": 102
    },
    {
      "epoch": 2.68,
      "learning_rate": 2.1293408807203947e-05,
      "loss": 0.1109,
      "step": 104
    },
    {
      "epoch": 2.73,
      "learning_rate": 2.047798664169726e-05,
      "loss": 0.0939,
      "step": 106
    },
    {
      "epoch": 2.78,
      "learning_rate": 1.9667509231406334e-05,
      "loss": 0.1179,
      "step": 108
    },
    {
      "epoch": 2.83,
      "learning_rate": 1.8862862821480025e-05,
      "loss": 0.095,
      "step": 110
    },
    {
      "epoch": 2.88,
      "learning_rate": 1.806492728095389e-05,
      "loss": 0.115,
      "step": 112
    },
    {
      "epoch": 2.93,
      "learning_rate": 1.7274575140626318e-05,
      "loss": 0.1219,
      "step": 114
    },
    {
      "epoch": 2.98,
      "learning_rate": 1.6492670638958924e-05,
      "loss": 0.1153,
      "step": 116
    },
    {
      "epoch": 3.04,
      "learning_rate": 1.5720068777044476e-05,
      "loss": 0.0693,
      "step": 118
    },
    {
      "epoch": 3.09,
      "learning_rate": 1.495761438367577e-05,
      "loss": 0.0535,
      "step": 120
    },
    {
      "epoch": 3.14,
      "learning_rate": 1.4206141191537682e-05,
      "loss": 0.0452,
      "step": 122
    },
    {
      "epoch": 3.19,
      "learning_rate": 1.346647092553281e-05,
      "loss": 0.0444,
      "step": 124
    },
    {
      "epoch": 3.24,
      "learning_rate": 1.2739412404237306e-05,
      "loss": 0.0364,
      "step": 126
    },
    {
      "epoch": 3.29,
      "learning_rate": 1.202576065546963e-05,
      "loss": 0.0591,
      "step": 128
    },
    {
      "epoch": 3.34,
      "learning_rate": 1.1326296046939333e-05,
      "loss": 0.0466,
      "step": 130
    },
    {
      "epoch": 3.4,
      "learning_rate": 1.064178343292641e-05,
      "loss": 0.0558,
      "step": 132
    },
    {
      "epoch": 3.45,
      "learning_rate": 9.972971317924374e-06,
      "loss": 0.0533,
      "step": 134
    },
    {
      "epoch": 3.5,
      "learning_rate": 9.320591038161574e-06,
      "loss": 0.0549,
      "step": 136
    },
    {
      "epoch": 3.55,
      "learning_rate": 8.685355961895784e-06,
      "loss": 0.0446,
      "step": 138
    },
    {
      "epoch": 3.6,
      "learning_rate": 8.067960709356478e-06,
      "loss": 0.0622,
      "step": 140
    },
    {
      "epoch": 3.65,
      "learning_rate": 7.469080393187786e-06,
      "loss": 0.0494,
      "step": 142
    },
    {
      "epoch": 3.7,
      "learning_rate": 6.889369880222776e-06,
      "loss": 0.0391,
      "step": 144
    },
    {
      "epoch": 3.76,
      "learning_rate": 6.329463075396161e-06,
      "loss": 0.0399,
      "step": 146
    },
    {
      "epoch": 3.81,
      "learning_rate": 5.78997222857853e-06,
      "loss": 0.0509,
      "step": 148
    },
    {
      "epoch": 3.86,
      "learning_rate": 5.271487265090163e-06,
      "loss": 0.0451,
      "step": 150
    }
  ],
  "logging_steps": 2,
  "max_steps": 190,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 25,
  "total_flos": 2.0183987688434565e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}