{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 9.824561403508772,
  "eval_steps": 500,
  "global_step": 420,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.23391812865497075,
      "grad_norm": 11.1328125,
      "learning_rate": 0.00019972037971811802,
      "loss": 1.1875,
      "step": 10
    },
    {
      "epoch": 0.4678362573099415,
      "grad_norm": 9.84375,
      "learning_rate": 0.00019888308262251285,
      "loss": 1.0775,
      "step": 20
    },
    {
      "epoch": 0.7017543859649122,
      "grad_norm": 13.78125,
      "learning_rate": 0.00019749279121818235,
      "loss": 0.9793,
      "step": 30
    },
    {
      "epoch": 0.935672514619883,
      "grad_norm": 11.5859375,
      "learning_rate": 0.0001955572805786141,
      "loss": 0.9431,
      "step": 40
    },
    {
      "epoch": 1.1695906432748537,
      "grad_norm": 10.28125,
      "learning_rate": 0.00019308737486442045,
      "loss": 0.9035,
      "step": 50
    },
    {
      "epoch": 1.4035087719298245,
      "grad_norm": 10.578125,
      "learning_rate": 0.0001900968867902419,
      "loss": 0.9277,
      "step": 60
    },
    {
      "epoch": 1.6374269005847952,
      "grad_norm": 9.734375,
      "learning_rate": 0.00018660254037844388,
      "loss": 0.8933,
      "step": 70
    },
    {
      "epoch": 1.871345029239766,
      "grad_norm": 11.3984375,
      "learning_rate": 0.0001826238774315995,
      "loss": 0.8609,
      "step": 80
    },
    {
      "epoch": 2.1052631578947367,
      "grad_norm": 11.046875,
      "learning_rate": 0.000178183148246803,
      "loss": 0.8948,
      "step": 90
    },
    {
      "epoch": 2.3391812865497075,
      "grad_norm": 12.171875,
      "learning_rate": 0.00017330518718298264,
      "loss": 0.816,
      "step": 100
    },
    {
      "epoch": 2.573099415204678,
      "grad_norm": 12.5390625,
      "learning_rate": 0.00016801727377709194,
      "loss": 0.799,
      "step": 110
    },
    {
      "epoch": 2.807017543859649,
      "grad_norm": 11.2890625,
      "learning_rate": 0.00016234898018587337,
      "loss": 0.8065,
      "step": 120
    },
    {
      "epoch": 3.0409356725146197,
      "grad_norm": 13.59375,
      "learning_rate": 0.0001563320058063622,
      "loss": 0.83,
      "step": 130
    },
    {
      "epoch": 3.2748538011695905,
      "grad_norm": 11.03125,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.8086,
      "step": 140
    },
    {
      "epoch": 3.5087719298245617,
      "grad_norm": 14.796875,
      "learning_rate": 0.00014338837391175582,
      "loss": 0.7818,
      "step": 150
    },
    {
      "epoch": 3.742690058479532,
      "grad_norm": 11.8359375,
      "learning_rate": 0.00013653410243663952,
      "loss": 0.8136,
      "step": 160
    },
    {
      "epoch": 3.976608187134503,
      "grad_norm": 12.0859375,
      "learning_rate": 0.00012947551744109043,
      "loss": 0.7879,
      "step": 170
    },
    {
      "epoch": 4.2105263157894735,
      "grad_norm": 13.9765625,
      "learning_rate": 0.00012225209339563145,
      "loss": 0.7898,
      "step": 180
    },
    {
      "epoch": 4.444444444444445,
      "grad_norm": 13.9921875,
      "learning_rate": 0.00011490422661761744,
      "loss": 0.7845,
      "step": 190
    },
    {
      "epoch": 4.678362573099415,
      "grad_norm": 12.3203125,
      "learning_rate": 0.00010747300935864243,
      "loss": 0.8112,
      "step": 200
    },
    {
      "epoch": 4.912280701754386,
      "grad_norm": 12.375,
      "learning_rate": 0.0001,
      "loss": 0.7769,
      "step": 210
    },
    {
      "epoch": 5.146198830409356,
      "grad_norm": 11.671875,
      "learning_rate": 9.252699064135758e-05,
      "loss": 0.7792,
      "step": 220
    },
    {
      "epoch": 5.380116959064328,
      "grad_norm": 11.34375,
      "learning_rate": 8.509577338238255e-05,
      "loss": 0.7844,
      "step": 230
    },
    {
      "epoch": 5.614035087719298,
      "grad_norm": 11.78125,
      "learning_rate": 7.774790660436858e-05,
      "loss": 0.7935,
      "step": 240
    },
    {
      "epoch": 5.847953216374269,
      "grad_norm": 14.96875,
      "learning_rate": 7.052448255890957e-05,
      "loss": 0.7794,
      "step": 250
    },
    {
      "epoch": 6.081871345029239,
      "grad_norm": 12.1171875,
      "learning_rate": 6.34658975633605e-05,
      "loss": 0.7809,
      "step": 260
    },
    {
      "epoch": 6.315789473684211,
      "grad_norm": 11.609375,
      "learning_rate": 5.6611626088244194e-05,
      "loss": 0.7735,
      "step": 270
    },
    {
      "epoch": 6.549707602339181,
      "grad_norm": 14.2109375,
      "learning_rate": 5.000000000000002e-05,
      "loss": 0.7914,
      "step": 280
    },
    {
      "epoch": 6.783625730994152,
      "grad_norm": 11.5234375,
      "learning_rate": 4.3667994193637796e-05,
      "loss": 0.7835,
      "step": 290
    },
    {
      "epoch": 7.017543859649122,
      "grad_norm": 12.3046875,
      "learning_rate": 3.7651019814126654e-05,
      "loss": 0.766,
      "step": 300
    },
    {
      "epoch": 7.251461988304094,
      "grad_norm": 10.6875,
      "learning_rate": 3.198272622290804e-05,
      "loss": 0.7917,
      "step": 310
    },
    {
      "epoch": 7.485380116959064,
      "grad_norm": 11.7734375,
      "learning_rate": 2.669481281701739e-05,
      "loss": 0.7678,
      "step": 320
    },
    {
      "epoch": 7.719298245614035,
      "grad_norm": 11.2734375,
      "learning_rate": 2.181685175319702e-05,
      "loss": 0.7525,
      "step": 330
    },
    {
      "epoch": 7.953216374269006,
      "grad_norm": 12.1640625,
      "learning_rate": 1.7376122568400532e-05,
      "loss": 0.7753,
      "step": 340
    },
    {
      "epoch": 8.187134502923977,
      "grad_norm": 10.2578125,
      "learning_rate": 1.339745962155613e-05,
      "loss": 0.7736,
      "step": 350
    },
    {
      "epoch": 8.421052631578947,
      "grad_norm": 10.484375,
      "learning_rate": 9.903113209758096e-06,
      "loss": 0.7531,
      "step": 360
    },
    {
      "epoch": 8.654970760233919,
      "grad_norm": 10.328125,
      "learning_rate": 6.9126251355795864e-06,
      "loss": 0.7793,
      "step": 370
    },
    {
      "epoch": 8.88888888888889,
      "grad_norm": 13.0546875,
      "learning_rate": 4.442719421385922e-06,
      "loss": 0.7737,
      "step": 380
    },
    {
      "epoch": 9.12280701754386,
      "grad_norm": 10.5625,
      "learning_rate": 2.5072087818176382e-06,
      "loss": 0.7774,
      "step": 390
    },
    {
      "epoch": 9.35672514619883,
      "grad_norm": 10.4453125,
      "learning_rate": 1.1169173774871478e-06,
      "loss": 0.763,
      "step": 400
    },
    {
      "epoch": 9.590643274853802,
      "grad_norm": 11.421875,
      "learning_rate": 2.7962028188198706e-07,
      "loss": 0.76,
      "step": 410
    },
    {
      "epoch": 9.824561403508772,
      "grad_norm": 10.5390625,
      "learning_rate": 0.0,
      "loss": 0.7646,
      "step": 420
    },
    {
      "epoch": 9.824561403508772,
      "step": 420,
      "total_flos": 2.74199344054272e+16,
      "train_loss": 0.8223134313310896,
      "train_runtime": 362.0016,
      "train_samples_per_second": 4.724,
      "train_steps_per_second": 1.16
    }
  ],
  "logging_steps": 10,
  "max_steps": 420,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 2.74199344054272e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}