{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 7900,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.31645569620253167,
      "grad_norm": 16.740053176879883,
      "learning_rate": 2.8101265822784812e-05,
      "loss": 1.7983,
      "step": 500
    },
    {
      "epoch": 0.6329113924050633,
      "grad_norm": 18.301847457885742,
      "learning_rate": 2.620253164556962e-05,
      "loss": 1.1858,
      "step": 1000
    },
    {
      "epoch": 0.9493670886075949,
      "grad_norm": 19.28789710998535,
      "learning_rate": 2.430379746835443e-05,
      "loss": 1.0645,
      "step": 1500
    },
    {
      "epoch": 1.2658227848101267,
      "grad_norm": 27.41464614868164,
      "learning_rate": 2.240506329113924e-05,
      "loss": 0.7401,
      "step": 2000
    },
    {
      "epoch": 1.5822784810126582,
      "grad_norm": 22.102096557617188,
      "learning_rate": 2.050632911392405e-05,
      "loss": 0.7248,
      "step": 2500
    },
    {
      "epoch": 1.8987341772151898,
      "grad_norm": 29.267433166503906,
      "learning_rate": 1.860759493670886e-05,
      "loss": 0.7217,
      "step": 3000
    },
    {
      "epoch": 2.2151898734177213,
      "grad_norm": 14.63181209564209,
      "learning_rate": 1.670886075949367e-05,
      "loss": 0.4984,
      "step": 3500
    },
    {
      "epoch": 2.5316455696202533,
      "grad_norm": 15.814390182495117,
      "learning_rate": 1.4810126582278482e-05,
      "loss": 0.4188,
      "step": 4000
    },
    {
      "epoch": 2.848101265822785,
      "grad_norm": 14.612494468688965,
      "learning_rate": 1.2911392405063291e-05,
      "loss": 0.4127,
      "step": 4500
    },
    {
      "epoch": 3.1645569620253164,
      "grad_norm": 39.51222229003906,
      "learning_rate": 1.1012658227848103e-05,
      "loss": 0.3182,
      "step": 5000
    },
    {
      "epoch": 3.481012658227848,
      "grad_norm": 13.681534767150879,
      "learning_rate": 9.113924050632912e-06,
      "loss": 0.2268,
      "step": 5500
    },
    {
      "epoch": 3.7974683544303796,
      "grad_norm": 11.261092185974121,
      "learning_rate": 7.215189873417722e-06,
      "loss": 0.2346,
      "step": 6000
    },
    {
      "epoch": 4.113924050632911,
      "grad_norm": 19.033397674560547,
      "learning_rate": 5.3164556962025316e-06,
      "loss": 0.1939,
      "step": 6500
    },
    {
      "epoch": 4.430379746835443,
      "grad_norm": 25.222740173339844,
      "learning_rate": 3.4177215189873417e-06,
      "loss": 0.1312,
      "step": 7000
    },
    {
      "epoch": 4.746835443037975,
      "grad_norm": 19.743125915527344,
      "learning_rate": 1.518987341772152e-06,
      "loss": 0.1167,
      "step": 7500
    },
    {
      "epoch": 5.0,
      "step": 7900,
      "total_flos": 1.85781994039296e+16,
      "train_loss": 0.5625110177148747,
      "train_runtime": 892.8635,
      "train_samples_per_second": 106.175,
      "train_steps_per_second": 8.848
    }
  ],
  "logging_steps": 500,
  "max_steps": 7900,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.85781994039296e+16,
  "train_batch_size": 12,
  "trial_name": null,
  "trial_params": null
}