|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 25070,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "learning_rate": 2.9401675309134427e-05,
      "loss": 0.7213,
      "step": 500
    },
    {
      "epoch": 0.08,
      "learning_rate": 2.8803350618268846e-05,
      "loss": 0.463,
      "step": 1000
    },
    {
      "epoch": 0.12,
      "learning_rate": 2.8205025927403272e-05,
      "loss": 0.4433,
      "step": 1500
    },
    {
      "epoch": 0.16,
      "learning_rate": 2.7606701236537695e-05,
      "loss": 0.3953,
      "step": 2000
    },
    {
      "epoch": 0.2,
      "learning_rate": 2.700837654567212e-05,
      "loss": 0.2915,
      "step": 2500
    },
    {
      "epoch": 0.24,
      "learning_rate": 2.641005185480654e-05,
      "loss": 0.3485,
      "step": 3000
    },
    {
      "epoch": 0.28,
      "learning_rate": 2.5811727163940966e-05,
      "loss": 0.3159,
      "step": 3500
    },
    {
      "epoch": 0.32,
      "learning_rate": 2.521340247307539e-05,
      "loss": 0.2976,
      "step": 4000
    },
    {
      "epoch": 0.36,
      "learning_rate": 2.4615077782209815e-05,
      "loss": 0.3095,
      "step": 4500
    },
    {
      "epoch": 0.4,
      "learning_rate": 2.4016753091344235e-05,
      "loss": 0.2794,
      "step": 5000
    },
    {
      "epoch": 0.44,
      "learning_rate": 2.341842840047866e-05,
      "loss": 0.2936,
      "step": 5500
    },
    {
      "epoch": 0.48,
      "learning_rate": 2.2820103709613083e-05,
      "loss": 0.2691,
      "step": 6000
    },
    {
      "epoch": 0.52,
      "learning_rate": 2.222177901874751e-05,
      "loss": 0.2911,
      "step": 6500
    },
    {
      "epoch": 0.56,
      "learning_rate": 2.162345432788193e-05,
      "loss": 0.3109,
      "step": 7000
    },
    {
      "epoch": 0.6,
      "learning_rate": 2.1025129637016355e-05,
      "loss": 0.3147,
      "step": 7500
    },
    {
      "epoch": 0.64,
      "learning_rate": 2.042680494615078e-05,
      "loss": 0.2418,
      "step": 8000
    },
    {
      "epoch": 0.68,
      "learning_rate": 1.98284802552852e-05,
      "loss": 0.2512,
      "step": 8500
    },
    {
      "epoch": 0.72,
      "learning_rate": 1.9230155564419626e-05,
      "loss": 0.2615,
      "step": 9000
    },
    {
      "epoch": 0.76,
      "learning_rate": 1.863183087355405e-05,
      "loss": 0.2867,
      "step": 9500
    },
    {
      "epoch": 0.8,
      "learning_rate": 1.8033506182688475e-05,
      "loss": 0.2778,
      "step": 10000
    },
    {
      "epoch": 0.84,
      "learning_rate": 1.7435181491822894e-05,
      "loss": 0.2556,
      "step": 10500
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.683685680095732e-05,
      "loss": 0.2778,
      "step": 11000
    },
    {
      "epoch": 0.92,
      "learning_rate": 1.6238532110091743e-05,
      "loss": 0.2175,
      "step": 11500
    },
    {
      "epoch": 0.96,
      "learning_rate": 1.564020741922617e-05,
      "loss": 0.2036,
      "step": 12000
    },
    {
      "epoch": 1.0,
      "learning_rate": 1.504188272836059e-05,
      "loss": 0.2023,
      "step": 12500
    },
    {
      "epoch": 1.04,
      "learning_rate": 1.4443558037495015e-05,
      "loss": 0.1519,
      "step": 13000
    },
    {
      "epoch": 1.08,
      "learning_rate": 1.3845233346629439e-05,
      "loss": 0.1213,
      "step": 13500
    },
    {
      "epoch": 1.12,
      "learning_rate": 1.3246908655763862e-05,
      "loss": 0.1656,
      "step": 14000
    },
    {
      "epoch": 1.16,
      "learning_rate": 1.2648583964898285e-05,
      "loss": 0.1239,
      "step": 14500
    },
    {
      "epoch": 1.2,
      "learning_rate": 1.2050259274032709e-05,
      "loss": 0.1242,
      "step": 15000
    },
    {
      "epoch": 1.24,
      "learning_rate": 1.1451934583167132e-05,
      "loss": 0.0917,
      "step": 15500
    },
    {
      "epoch": 1.28,
      "learning_rate": 1.0853609892301556e-05,
      "loss": 0.0922,
      "step": 16000
    },
    {
      "epoch": 1.32,
      "learning_rate": 1.0255285201435979e-05,
      "loss": 0.1306,
      "step": 16500
    },
    {
      "epoch": 1.36,
      "learning_rate": 9.656960510570403e-06,
      "loss": 0.0843,
      "step": 17000
    },
    {
      "epoch": 1.4,
      "learning_rate": 9.058635819704826e-06,
      "loss": 0.1032,
      "step": 17500
    },
    {
      "epoch": 1.44,
      "learning_rate": 8.46031112883925e-06,
      "loss": 0.1202,
      "step": 18000
    },
    {
      "epoch": 1.48,
      "learning_rate": 7.861986437973673e-06,
      "loss": 0.1296,
      "step": 18500
    },
    {
      "epoch": 1.52,
      "learning_rate": 7.263661747108097e-06,
      "loss": 0.1354,
      "step": 19000
    },
    {
      "epoch": 1.56,
      "learning_rate": 6.665337056242521e-06,
      "loss": 0.1039,
      "step": 19500
    },
    {
      "epoch": 1.6,
      "learning_rate": 6.0670123653769444e-06,
      "loss": 0.0892,
      "step": 20000
    },
    {
      "epoch": 1.64,
      "learning_rate": 5.468687674511368e-06,
      "loss": 0.096,
      "step": 20500
    },
    {
      "epoch": 1.68,
      "learning_rate": 4.8703629836457915e-06,
      "loss": 0.1045,
      "step": 21000
    },
    {
      "epoch": 1.72,
      "learning_rate": 4.272038292780215e-06,
      "loss": 0.0965,
      "step": 21500
    },
    {
      "epoch": 1.76,
      "learning_rate": 3.673713601914639e-06,
      "loss": 0.0805,
      "step": 22000
    },
    {
      "epoch": 1.79,
      "learning_rate": 3.0753889110490626e-06,
      "loss": 0.1027,
      "step": 22500
    },
    {
      "epoch": 1.83,
      "learning_rate": 2.4770642201834866e-06,
      "loss": 0.1287,
      "step": 23000
    },
    {
      "epoch": 1.87,
      "learning_rate": 1.87873952931791e-06,
      "loss": 0.0844,
      "step": 23500
    },
    {
      "epoch": 1.91,
      "learning_rate": 1.2804148384523335e-06,
      "loss": 0.0861,
      "step": 24000
    },
    {
      "epoch": 1.95,
      "learning_rate": 6.820901475867571e-07,
      "loss": 0.1154,
      "step": 24500
    },
    {
      "epoch": 1.99,
      "learning_rate": 8.37654567211807e-08,
      "loss": 0.0838,
      "step": 25000
    },
    {
      "epoch": 2.0,
      "step": 25070,
      "total_flos": 9.312702234262733e+16,
      "train_loss": 0.2107836102126935,
      "train_runtime": 30875.5596,
      "train_samples_per_second": 3.248,
      "train_steps_per_second": 0.812
    }
  ],
  "logging_steps": 500,
  "max_steps": 25070,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "total_flos": 9.312702234262733e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
|
|