{
  "best_metric": 0.010238240472972393,
  "best_model_checkpoint": "./results/checkpoint-14000",
  "epoch": 0.8173273395995097,
  "global_step": 14000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 1.9708097378714462e-05,
      "loss": 0.014,
      "step": 500
    },
    {
      "epoch": 0.03,
      "eval_accuracy": 0.9974897107329461,
      "eval_loss": 0.014051680453121662,
      "eval_runtime": 272.9865,
      "eval_samples_per_second": 125.497,
      "eval_steps_per_second": 15.689,
      "step": 500
    },
    {
      "epoch": 0.06,
      "learning_rate": 1.9416194757428922e-05,
      "loss": 0.0068,
      "step": 1000
    },
    {
      "epoch": 0.06,
      "eval_accuracy": 0.9936658980122012,
      "eval_loss": 0.03880644589662552,
      "eval_runtime": 271.4393,
      "eval_samples_per_second": 126.212,
      "eval_steps_per_second": 15.779,
      "step": 1000
    },
    {
      "epoch": 0.09,
      "learning_rate": 1.9124292136143383e-05,
      "loss": 0.0065,
      "step": 1500
    },
    {
      "epoch": 0.09,
      "eval_accuracy": 0.9963221343296652,
      "eval_loss": 0.034014396369457245,
      "eval_runtime": 271.2782,
      "eval_samples_per_second": 126.287,
      "eval_steps_per_second": 15.788,
      "step": 1500
    },
    {
      "epoch": 0.12,
      "learning_rate": 1.8832389514857846e-05,
      "loss": 0.0088,
      "step": 2000
    },
    {
      "epoch": 0.12,
      "eval_accuracy": 0.9975189001430281,
      "eval_loss": 0.01912350207567215,
      "eval_runtime": 271.5387,
      "eval_samples_per_second": 126.166,
      "eval_steps_per_second": 15.773,
      "step": 2000
    },
    {
      "epoch": 0.15,
      "learning_rate": 1.8540486893572303e-05,
      "loss": 0.0081,
      "step": 2500
    },
    {
      "epoch": 0.15,
      "eval_accuracy": 0.9977816048337663,
      "eval_loss": 0.01667490415275097,
      "eval_runtime": 271.3282,
      "eval_samples_per_second": 126.264,
      "eval_steps_per_second": 15.785,
      "step": 2500
    },
    {
      "epoch": 0.18,
      "learning_rate": 1.8248584272286767e-05,
      "loss": 0.0142,
      "step": 3000
    },
    {
      "epoch": 0.18,
      "eval_accuracy": 0.9969351119413876,
      "eval_loss": 0.017350222915410995,
      "eval_runtime": 271.4826,
      "eval_samples_per_second": 126.192,
      "eval_steps_per_second": 15.776,
      "step": 3000
    },
    {
      "epoch": 0.2,
      "learning_rate": 1.7956681651001227e-05,
      "loss": 0.0066,
      "step": 3500
    },
    {
      "epoch": 0.2,
      "eval_accuracy": 0.9967891648909776,
      "eval_loss": 0.025246594101190567,
      "eval_runtime": 271.289,
      "eval_samples_per_second": 126.282,
      "eval_steps_per_second": 15.788,
      "step": 3500
    },
    {
      "epoch": 0.23,
      "learning_rate": 1.7664779029715688e-05,
      "loss": 0.0078,
      "step": 4000
    },
    {
      "epoch": 0.23,
      "eval_accuracy": 0.9965848390204034,
      "eval_loss": 0.02241995558142662,
      "eval_runtime": 271.2726,
      "eval_samples_per_second": 126.29,
      "eval_steps_per_second": 15.789,
      "step": 4000
    },
    {
      "epoch": 0.26,
      "learning_rate": 1.7372876408430148e-05,
      "loss": 0.0062,
      "step": 4500
    },
    {
      "epoch": 0.26,
      "eval_accuracy": 0.9968183543010596,
      "eval_loss": 0.021609827876091003,
      "eval_runtime": 271.3057,
      "eval_samples_per_second": 126.275,
      "eval_steps_per_second": 15.787,
      "step": 4500
    },
    {
      "epoch": 0.29,
      "learning_rate": 1.7080973787144612e-05,
      "loss": 0.0047,
      "step": 5000
    },
    {
      "epoch": 0.29,
      "eval_accuracy": 0.9971978166321258,
      "eval_loss": 0.01993330754339695,
      "eval_runtime": 271.1898,
      "eval_samples_per_second": 126.328,
      "eval_steps_per_second": 15.793,
      "step": 5000
    },
    {
      "epoch": 0.32,
      "learning_rate": 1.678907116585907e-05,
      "loss": 0.0035,
      "step": 5500
    },
    {
      "epoch": 0.32,
      "eval_accuracy": 0.9968767331212236,
      "eval_loss": 0.025054221972823143,
      "eval_runtime": 272.0376,
      "eval_samples_per_second": 125.935,
      "eval_steps_per_second": 15.744,
      "step": 5500
    },
    {
      "epoch": 0.35,
      "learning_rate": 1.6497168544573532e-05,
      "loss": 0.0147,
      "step": 6000
    },
    {
      "epoch": 0.35,
      "eval_accuracy": 0.997343763682536,
      "eval_loss": 0.017792223021388054,
      "eval_runtime": 271.3522,
      "eval_samples_per_second": 126.253,
      "eval_steps_per_second": 15.784,
      "step": 6000
    },
    {
      "epoch": 0.38,
      "learning_rate": 1.6205265923287993e-05,
      "loss": 0.0093,
      "step": 6500
    },
    {
      "epoch": 0.38,
      "eval_accuracy": 0.9971102484018798,
      "eval_loss": 0.019385505467653275,
      "eval_runtime": 270.7817,
      "eval_samples_per_second": 126.519,
      "eval_steps_per_second": 15.817,
      "step": 6500
    },
    {
      "epoch": 0.41,
      "learning_rate": 1.5913363302002453e-05,
      "loss": 0.0084,
      "step": 7000
    },
    {
      "epoch": 0.41,
      "eval_accuracy": 0.9974897107329461,
      "eval_loss": 0.01369693223387003,
      "eval_runtime": 270.7077,
      "eval_samples_per_second": 126.553,
      "eval_steps_per_second": 15.821,
      "step": 7000
    },
    {
      "epoch": 0.44,
      "learning_rate": 1.5621460680716913e-05,
      "loss": 0.0135,
      "step": 7500
    },
    {
      "epoch": 0.44,
      "eval_accuracy": 0.9967307860708136,
      "eval_loss": 0.023163480684161186,
      "eval_runtime": 270.9905,
      "eval_samples_per_second": 126.421,
      "eval_steps_per_second": 15.805,
      "step": 7500
    },
    {
      "epoch": 0.47,
      "learning_rate": 1.5329558059431374e-05,
      "loss": 0.0131,
      "step": 8000
    },
    {
      "epoch": 0.47,
      "eval_accuracy": 0.9974605213228641,
      "eval_loss": 0.015544239431619644,
      "eval_runtime": 270.8012,
      "eval_samples_per_second": 126.51,
      "eval_steps_per_second": 15.816,
      "step": 8000
    },
    {
      "epoch": 0.5,
      "learning_rate": 1.5037655438145836e-05,
      "loss": 0.0196,
      "step": 8500
    },
    {
      "epoch": 0.5,
      "eval_accuracy": 0.9965556496103214,
      "eval_loss": 0.01619912125170231,
      "eval_runtime": 272.0032,
      "eval_samples_per_second": 125.951,
      "eval_steps_per_second": 15.746,
      "step": 8500
    },
    {
      "epoch": 0.53,
      "learning_rate": 1.4745752816860298e-05,
      "loss": 0.0207,
      "step": 9000
    },
    {
      "epoch": 0.53,
      "eval_accuracy": 0.9978691730640123,
      "eval_loss": 0.011031306348741055,
      "eval_runtime": 270.862,
      "eval_samples_per_second": 126.481,
      "eval_steps_per_second": 15.812,
      "step": 9000
    },
    {
      "epoch": 0.55,
      "learning_rate": 1.4453850195574756e-05,
      "loss": 0.0172,
      "step": 9500
    },
    {
      "epoch": 0.55,
      "eval_accuracy": 0.9969059225313056,
      "eval_loss": 0.013371887616813183,
      "eval_runtime": 270.9823,
      "eval_samples_per_second": 126.425,
      "eval_steps_per_second": 15.805,
      "step": 9500
    },
    {
      "epoch": 0.58,
      "learning_rate": 1.4161947574289218e-05,
      "loss": 0.0147,
      "step": 10000
    },
    {
      "epoch": 0.58,
      "eval_accuracy": 0.9956799673078607,
      "eval_loss": 0.018292322754859924,
      "eval_runtime": 271.0579,
      "eval_samples_per_second": 126.39,
      "eval_steps_per_second": 15.801,
      "step": 10000
    },
    {
      "epoch": 0.61,
      "learning_rate": 1.3870044953003679e-05,
      "loss": 0.0115,
      "step": 10500
    },
    {
      "epoch": 0.61,
      "eval_accuracy": 0.9976356577833562,
      "eval_loss": 0.013687117025256157,
      "eval_runtime": 270.8313,
      "eval_samples_per_second": 126.496,
      "eval_steps_per_second": 15.814,
      "step": 10500
    },
    {
      "epoch": 0.64,
      "learning_rate": 1.357814233171814e-05,
      "loss": 0.0058,
      "step": 11000
    },
    {
      "epoch": 0.64,
      "eval_accuracy": 0.9978399836539303,
      "eval_loss": 0.01474874746054411,
      "eval_runtime": 270.6785,
      "eval_samples_per_second": 126.567,
      "eval_steps_per_second": 15.823,
      "step": 11000
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.32862397104326e-05,
      "loss": 0.0127,
      "step": 11500
    },
    {
      "epoch": 0.67,
      "eval_accuracy": 0.9975189001430281,
      "eval_loss": 0.014400546438992023,
      "eval_runtime": 271.0019,
      "eval_samples_per_second": 126.416,
      "eval_steps_per_second": 15.804,
      "step": 11500
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.2994337089147061e-05,
      "loss": 0.0158,
      "step": 12000
    },
    {
      "epoch": 0.7,
      "eval_accuracy": 0.9978399836539303,
      "eval_loss": 0.011529939249157906,
      "eval_runtime": 271.0144,
      "eval_samples_per_second": 126.41,
      "eval_steps_per_second": 15.804,
      "step": 12000
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.2702434467861522e-05,
      "loss": 0.0149,
      "step": 12500
    },
    {
      "epoch": 0.73,
      "eval_accuracy": 0.9978983624740944,
      "eval_loss": 0.011057616211473942,
      "eval_runtime": 270.832,
      "eval_samples_per_second": 126.495,
      "eval_steps_per_second": 15.814,
      "step": 12500
    },
    {
      "epoch": 0.76,
      "learning_rate": 1.2410531846575984e-05,
      "loss": 0.0112,
      "step": 13000
    },
    {
      "epoch": 0.76,
      "eval_accuracy": 0.9975772789631921,
      "eval_loss": 0.013288214802742004,
      "eval_runtime": 270.8227,
      "eval_samples_per_second": 126.5,
      "eval_steps_per_second": 15.815,
      "step": 13000
    },
    {
      "epoch": 0.79,
      "learning_rate": 1.2118629225290444e-05,
      "loss": 0.0163,
      "step": 13500
    },
    {
      "epoch": 0.79,
      "eval_accuracy": 0.997372953092618,
      "eval_loss": 0.012437746860086918,
      "eval_runtime": 270.7754,
      "eval_samples_per_second": 126.522,
      "eval_steps_per_second": 15.818,
      "step": 13500
    },
    {
      "epoch": 0.82,
      "learning_rate": 1.1826726604004906e-05,
      "loss": 0.0107,
      "step": 14000
    },
    {
      "epoch": 0.82,
      "eval_accuracy": 0.9979859307043405,
      "eval_loss": 0.010238240472972393,
      "eval_runtime": 271.1151,
      "eval_samples_per_second": 126.363,
      "eval_steps_per_second": 15.798,
      "step": 14000
    }
  ],
  "max_steps": 34258,
  "num_train_epochs": 2,
  "total_flos": 1.4836348649472e+16,
  "trial_name": null,
  "trial_params": null
}