|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.989010989010989,
  "eval_steps": 500,
  "global_step": 408,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07326007326007326,
      "grad_norm": 2.0240988731384277,
      "learning_rate": 1.2195121951219513e-05,
      "loss": 1.493,
      "step": 10
    },
    {
      "epoch": 0.14652014652014653,
      "grad_norm": 1.0480456352233887,
      "learning_rate": 2.4390243902439026e-05,
      "loss": 1.1235,
      "step": 20
    },
    {
      "epoch": 0.21978021978021978,
      "grad_norm": 0.3642168939113617,
      "learning_rate": 3.6585365853658535e-05,
      "loss": 1.1219,
      "step": 30
    },
    {
      "epoch": 0.29304029304029305,
      "grad_norm": 0.4966081380844116,
      "learning_rate": 4.878048780487805e-05,
      "loss": 1.1664,
      "step": 40
    },
    {
      "epoch": 0.3663003663003663,
      "grad_norm": 0.42003196477890015,
      "learning_rate": 4.992584373844853e-05,
      "loss": 1.0602,
      "step": 50
    },
    {
      "epoch": 0.43956043956043955,
      "grad_norm": 0.6992456316947937,
      "learning_rate": 4.9670065861258264e-05,
      "loss": 1.0122,
      "step": 60
    },
    {
      "epoch": 0.5128205128205128,
      "grad_norm": 1.130461573600769,
      "learning_rate": 4.9233623530708587e-05,
      "loss": 1.1042,
      "step": 70
    },
    {
      "epoch": 0.5860805860805861,
      "grad_norm": 1.0719728469848633,
      "learning_rate": 4.861971291236772e-05,
      "loss": 0.9888,
      "step": 80
    },
    {
      "epoch": 0.6593406593406593,
      "grad_norm": 0.9262090921401978,
      "learning_rate": 4.7832829812079794e-05,
      "loss": 1.0949,
      "step": 90
    },
    {
      "epoch": 0.7326007326007326,
      "grad_norm": 0.842754602432251,
      "learning_rate": 4.687873675216522e-05,
      "loss": 1.0847,
      "step": 100
    },
    {
      "epoch": 0.8058608058608059,
      "grad_norm": 0.7069523930549622,
      "learning_rate": 4.576442077117073e-05,
      "loss": 1.0386,
      "step": 110
    },
    {
      "epoch": 0.8791208791208791,
      "grad_norm": 1.297459602355957,
      "learning_rate": 4.4498042256211164e-05,
      "loss": 1.0071,
      "step": 120
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 3.424927234649658,
      "learning_rate": 4.308887518261507e-05,
      "loss": 0.9977,
      "step": 130
    },
    {
      "epoch": 1.0256410256410255,
      "grad_norm": 1.1368341445922852,
      "learning_rate": 4.154723919851291e-05,
      "loss": 1.0333,
      "step": 140
    },
    {
      "epoch": 1.098901098901099,
      "grad_norm": 1.1726893186569214,
      "learning_rate": 3.9884424051727556e-05,
      "loss": 0.8241,
      "step": 150
    },
    {
      "epoch": 1.1721611721611722,
      "grad_norm": 0.8981992602348328,
      "learning_rate": 3.8112606912406044e-05,
      "loss": 0.7771,
      "step": 160
    },
    {
      "epoch": 1.2454212454212454,
      "grad_norm": 1.5086628198623657,
      "learning_rate": 3.624476319685771e-05,
      "loss": 0.7341,
      "step": 170
    },
    {
      "epoch": 1.3186813186813187,
      "grad_norm": 0.9521022439002991,
      "learning_rate": 3.429457154565565e-05,
      "loss": 0.7059,
      "step": 180
    },
    {
      "epoch": 1.3919413919413919,
      "grad_norm": 2.737773895263672,
      "learning_rate": 3.227631365186836e-05,
      "loss": 0.7403,
      "step": 190
    },
    {
      "epoch": 1.4652014652014653,
      "grad_norm": 0.8319964408874512,
      "learning_rate": 3.020476967300212e-05,
      "loss": 0.7738,
      "step": 200
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 1.0483458042144775,
      "learning_rate": 2.8095109992575823e-05,
      "loss": 0.6995,
      "step": 210
    },
    {
      "epoch": 1.6117216117216118,
      "grad_norm": 1.0540846586227417,
      "learning_rate": 2.5962784123982843e-05,
      "loss": 0.7363,
      "step": 220
    },
    {
      "epoch": 1.684981684981685,
      "grad_norm": 1.6841723918914795,
      "learning_rate": 2.3823407570221813e-05,
      "loss": 0.7735,
      "step": 230
    },
    {
      "epoch": 1.7582417582417582,
      "grad_norm": 1.5593695640563965,
      "learning_rate": 2.1692647468048236e-05,
      "loss": 0.6975,
      "step": 240
    },
    {
      "epoch": 1.8315018315018317,
      "grad_norm": 0.8219186067581177,
      "learning_rate": 1.9586107854000326e-05,
      "loss": 0.7239,
      "step": 250
    },
    {
      "epoch": 1.9047619047619047,
      "grad_norm": 1.9310203790664673,
      "learning_rate": 1.7519215392522025e-05,
      "loss": 0.7285,
      "step": 260
    },
    {
      "epoch": 1.978021978021978,
      "grad_norm": 0.888729989528656,
      "learning_rate": 1.5507106403021894e-05,
      "loss": 0.7415,
      "step": 270
    },
    {
      "epoch": 2.051282051282051,
      "grad_norm": 0.5562293529510498,
      "learning_rate": 1.3564516013194023e-05,
      "loss": 0.5019,
      "step": 280
    },
    {
      "epoch": 2.1245421245421245,
      "grad_norm": 1.5220221281051636,
      "learning_rate": 1.1705670250356415e-05,
      "loss": 0.5581,
      "step": 290
    },
    {
      "epoch": 2.197802197802198,
      "grad_norm": 1.251187801361084,
      "learning_rate": 9.944181861046186e-06,
      "loss": 0.495,
      "step": 300
    },
    {
      "epoch": 2.271062271062271,
      "grad_norm": 0.9048457145690918,
      "learning_rate": 8.292950621808022e-06,
      "loss": 0.5017,
      "step": 310
    },
    {
      "epoch": 2.3443223443223444,
      "grad_norm": 0.9046669602394104,
      "learning_rate": 6.764068871222826e-06,
      "loss": 0.5482,
      "step": 320
    },
    {
      "epoch": 2.4175824175824174,
      "grad_norm": 1.1838064193725586,
      "learning_rate": 5.368732954986388e-06,
      "loss": 0.4744,
      "step": 330
    },
    {
      "epoch": 2.490842490842491,
      "grad_norm": 0.6814486384391785,
      "learning_rate": 4.117161232546024e-06,
      "loss": 0.5586,
      "step": 340
    },
    {
      "epoch": 2.564102564102564,
      "grad_norm": 0.7199404239654541,
      "learning_rate": 3.0185192457509894e-06,
      "loss": 0.4924,
      "step": 350
    },
    {
      "epoch": 2.6373626373626373,
      "grad_norm": 1.0055203437805176,
      "learning_rate": 2.0808525975233805e-06,
      "loss": 0.4985,
      "step": 360
    },
    {
      "epoch": 2.7106227106227108,
      "grad_norm": 0.7133710384368896,
      "learning_rate": 1.311028032094369e-06,
      "loss": 0.4771,
      "step": 370
    },
    {
      "epoch": 2.7838827838827838,
      "grad_norm": 1.280009388923645,
      "learning_rate": 7.146831482883115e-07,
      "loss": 0.4619,
      "step": 380
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 1.0090081691741943,
      "learning_rate": 2.961851141157046e-07,
      "loss": 0.5103,
      "step": 390
    },
    {
      "epoch": 2.9304029304029307,
      "grad_norm": 0.7528389096260071,
      "learning_rate": 5.85986850174608e-08,
      "loss": 0.5113,
      "step": 400
    },
    {
      "epoch": 2.989010989010989,
      "step": 408,
      "total_flos": 3.5468284402335744e+16,
      "train_loss": 0.7816439504716911,
      "train_runtime": 3976.7208,
      "train_samples_per_second": 0.823,
      "train_steps_per_second": 0.103
    }
  ],
  "logging_steps": 10,
  "max_steps": 408,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.5468284402335744e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|