{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 9.0,
  "eval_steps": 500,
  "global_step": 288,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.32,
      "grad_norm": 0.9092470407485962,
      "learning_rate": 0.0001,
      "loss": 1.0176,
      "step": 10
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.5211939811706543,
      "learning_rate": 9.968335515358916e-05,
      "loss": 0.5103,
      "step": 20
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.5675566792488098,
      "learning_rate": 9.873743117270691e-05,
      "loss": 0.3179,
      "step": 30
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9217229190183123,
      "eval_loss": 0.3320535123348236,
      "eval_runtime": 22.1298,
      "eval_samples_per_second": 17.397,
      "eval_steps_per_second": 2.214,
      "step": 32
    },
    {
      "epoch": 1.256,
      "grad_norm": 0.3762319087982178,
      "learning_rate": 9.717420893549902e-05,
      "loss": 0.3371,
      "step": 40
    },
    {
      "epoch": 1.576,
      "grad_norm": 0.3366687595844269,
      "learning_rate": 9.501348789257373e-05,
      "loss": 0.2708,
      "step": 50
    },
    {
      "epoch": 1.896,
      "grad_norm": 0.3178953528404236,
      "learning_rate": 9.2282635291242e-05,
      "loss": 0.206,
      "step": 60
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9407841523692979,
      "eval_loss": 0.24253705143928528,
      "eval_runtime": 22.201,
      "eval_samples_per_second": 17.342,
      "eval_steps_per_second": 2.207,
      "step": 64
    },
    {
      "epoch": 2.192,
      "grad_norm": 0.40763169527053833,
      "learning_rate": 8.90162395476046e-05,
      "loss": 0.2057,
      "step": 70
    },
    {
      "epoch": 2.512,
      "grad_norm": 0.5000198483467102,
      "learning_rate": 8.525567215680398e-05,
      "loss": 0.2043,
      "step": 80
    },
    {
      "epoch": 2.832,
      "grad_norm": 0.4025309681892395,
      "learning_rate": 8.104856369019524e-05,
      "loss": 0.1447,
      "step": 90
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9488746080774451,
      "eval_loss": 0.21092161536216736,
      "eval_runtime": 22.1138,
      "eval_samples_per_second": 17.41,
      "eval_steps_per_second": 2.216,
      "step": 96
    },
    {
      "epoch": 3.128,
      "grad_norm": 0.45369309186935425,
      "learning_rate": 7.644820051634812e-05,
      "loss": 0.1352,
      "step": 100
    },
    {
      "epoch": 3.448,
      "grad_norm": 0.48971518874168396,
      "learning_rate": 7.15128498868873e-05,
      "loss": 0.1437,
      "step": 110
    },
    {
      "epoch": 3.768,
      "grad_norm": 0.44901880621910095,
      "learning_rate": 6.630502193549474e-05,
      "loss": 0.1067,
      "step": 120
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9527208148072254,
      "eval_loss": 0.20618033409118652,
      "eval_runtime": 22.1302,
      "eval_samples_per_second": 17.397,
      "eval_steps_per_second": 2.214,
      "step": 128
    },
    {
      "epoch": 4.064,
      "grad_norm": 0.5145038962364197,
      "learning_rate": 6.0890677937442574e-05,
      "loss": 0.0916,
      "step": 130
    },
    {
      "epoch": 4.384,
      "grad_norm": 0.47537460923194885,
      "learning_rate": 5.5338394857677945e-05,
      "loss": 0.1133,
      "step": 140
    },
    {
      "epoch": 4.704,
      "grad_norm": 0.5391659736633301,
      "learning_rate": 4.971849676912172e-05,
      "loss": 0.0792,
      "step": 150
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.45107302069664,
      "learning_rate": 4.410216414245771e-05,
      "loss": 0.0612,
      "step": 160
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9539307285858623,
      "eval_loss": 0.2128402590751648,
      "eval_runtime": 22.0764,
      "eval_samples_per_second": 17.439,
      "eval_steps_per_second": 2.22,
      "step": 160
    },
    {
      "epoch": 5.32,
      "grad_norm": 0.42281967401504517,
      "learning_rate": 3.856053228896442e-05,
      "loss": 0.091,
      "step": 170
    },
    {
      "epoch": 5.64,
      "grad_norm": 0.4220280945301056,
      "learning_rate": 3.316379037532644e-05,
      "loss": 0.0605,
      "step": 180
    },
    {
      "epoch": 5.96,
      "grad_norm": 0.42120489478111267,
      "learning_rate": 2.798029242211828e-05,
      "loss": 0.0491,
      "step": 190
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9549008431419458,
      "eval_loss": 0.2168993204832077,
      "eval_runtime": 22.0945,
      "eval_samples_per_second": 17.425,
      "eval_steps_per_second": 2.218,
      "step": 192
    },
    {
      "epoch": 6.256,
      "grad_norm": 0.5049369931221008,
      "learning_rate": 2.3075691545870558e-05,
      "loss": 0.0648,
      "step": 200
    },
    {
      "epoch": 6.576,
      "grad_norm": 0.30912792682647705,
      "learning_rate": 1.8512108410229878e-05,
      "loss": 0.0431,
      "step": 210
    },
    {
      "epoch": 6.896,
      "grad_norm": 0.3820810616016388,
      "learning_rate": 1.434734441843899e-05,
      "loss": 0.0378,
      "step": 220
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9584459654324948,
      "eval_loss": 0.2166266143321991,
      "eval_runtime": 22.0992,
      "eval_samples_per_second": 17.421,
      "eval_steps_per_second": 2.217,
      "step": 224
    },
    {
      "epoch": 7.192,
      "grad_norm": 0.37598758935928345,
      "learning_rate": 1.063414961267859e-05,
      "loss": 0.0383,
      "step": 230
    },
    {
      "epoch": 7.5120000000000005,
      "grad_norm": 0.4298418164253235,
      "learning_rate": 7.41955455290726e-06,
      "loss": 0.0415,
      "step": 240
    },
    {
      "epoch": 7.832,
      "grad_norm": 0.2122349590063095,
      "learning_rate": 4.744274637483936e-06,
      "loss": 0.0294,
      "step": 250
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.958849415172974,
      "eval_loss": 0.22243177890777588,
      "eval_runtime": 22.0378,
      "eval_samples_per_second": 17.47,
      "eval_steps_per_second": 2.223,
      "step": 256
    },
    {
      "epoch": 8.128,
      "grad_norm": 0.3120996654033661,
      "learning_rate": 2.6421944103256657e-06,
      "loss": 0.0265,
      "step": 260
    },
    {
      "epoch": 8.448,
      "grad_norm": 0.21991710364818573,
      "learning_rate": 1.1399383862592927e-06,
      "loss": 0.0371,
      "step": 270
    },
    {
      "epoch": 8.768,
      "grad_norm": 0.2127743661403656,
      "learning_rate": 2.5653383040524227e-07,
      "loss": 0.0215,
      "step": 280
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.9587168946755322,
      "eval_loss": 0.22601914405822754,
      "eval_runtime": 22.0034,
      "eval_samples_per_second": 17.497,
      "eval_steps_per_second": 2.227,
      "step": 288
    },
    {
      "epoch": 9.0,
      "step": 288,
      "total_flos": 3.408776235570954e+17,
      "train_loss": 0.1565158217627969,
      "train_runtime": 2068.3583,
      "train_samples_per_second": 4.351,
      "train_steps_per_second": 0.139
    }
  ],
  "logging_steps": 10,
  "max_steps": 288,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.408776235570954e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}