{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.32,
  "eval_steps": 500,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "grad_norm": 0.8396336436271667,
      "learning_rate": 0.00019987329060020616,
      "loss": 2.8923,
      "step": 5
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.7428969740867615,
      "learning_rate": 0.00019949348350626456,
      "loss": 2.0944,
      "step": 10
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.5937550067901611,
      "learning_rate": 0.00019886154122075343,
      "loss": 2.0323,
      "step": 15
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.4460415542125702,
      "learning_rate": 0.00019797906520422677,
      "loss": 1.9436,
      "step": 20
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.4390580952167511,
      "learning_rate": 0.00019684829181681234,
      "loss": 2.0292,
      "step": 25
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.43469828367233276,
      "learning_rate": 0.00019547208665085457,
      "loss": 1.9654,
      "step": 30
    },
    {
      "epoch": 0.11,
      "grad_norm": 0.48699721693992615,
      "learning_rate": 0.0001938539372689649,
      "loss": 1.9041,
      "step": 35
    },
    {
      "epoch": 0.13,
      "grad_norm": 0.5205313563346863,
      "learning_rate": 0.00019199794436588243,
      "loss": 1.8992,
      "step": 40
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.5224802494049072,
      "learning_rate": 0.00018990881137654258,
      "loss": 1.8393,
      "step": 45
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.5004997849464417,
      "learning_rate": 0.0001875918325566888,
      "loss": 1.615,
      "step": 50
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.5422343611717224,
      "learning_rate": 0.00018505287956623297,
      "loss": 1.9873,
      "step": 55
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.47483357787132263,
      "learning_rate": 0.00018229838658936564,
      "loss": 1.9264,
      "step": 60
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.4191422164440155,
      "learning_rate": 0.00017933533402912354,
      "loss": 1.828,
      "step": 65
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.41457584500312805,
      "learning_rate": 0.00017617123081773591,
      "loss": 1.8083,
      "step": 70
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.524440348148346,
      "learning_rate": 0.00017281409538757883,
      "loss": 1.8268,
      "step": 75
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.5396173000335693,
      "learning_rate": 0.00016927243535095997,
      "loss": 1.7587,
      "step": 80
    },
    {
      "epoch": 0.27,
      "grad_norm": 0.5633752942085266,
      "learning_rate": 0.0001655552259402295,
      "loss": 1.7939,
      "step": 85
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.5931205153465271,
      "learning_rate": 0.00016167188726285434,
      "loss": 1.848,
      "step": 90
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.677586019039154,
      "learning_rate": 0.00015763226042909455,
      "loss": 1.7407,
      "step": 95
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.5012004971504211,
      "learning_rate": 0.0001534465826127801,
      "loss": 1.8559,
      "step": 100
    }
  ],
  "logging_steps": 5,
  "max_steps": 312,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 4.552629939339264e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}