{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.24509803921568626,
  "eval_steps": 9,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0024509803921568627,
      "eval_loss": 12.454689025878906,
      "eval_runtime": 5.6213,
      "eval_samples_per_second": 122.392,
      "eval_steps_per_second": 15.299,
      "step": 1
    },
    {
      "epoch": 0.007352941176470588,
      "grad_norm": 0.008854148909449577,
      "learning_rate": 3e-05,
      "loss": 12.4564,
      "step": 3
    },
    {
      "epoch": 0.014705882352941176,
      "grad_norm": 0.009888158179819584,
      "learning_rate": 6e-05,
      "loss": 12.4569,
      "step": 6
    },
    {
      "epoch": 0.022058823529411766,
      "grad_norm": 0.007828481495380402,
      "learning_rate": 9e-05,
      "loss": 12.4527,
      "step": 9
    },
    {
      "epoch": 0.022058823529411766,
      "eval_loss": 12.454665184020996,
      "eval_runtime": 5.6464,
      "eval_samples_per_second": 121.848,
      "eval_steps_per_second": 15.231,
      "step": 9
    },
    {
      "epoch": 0.029411764705882353,
      "grad_norm": 0.0062802694737911224,
      "learning_rate": 9.987820251299122e-05,
      "loss": 12.459,
      "step": 12
    },
    {
      "epoch": 0.03676470588235294,
      "grad_norm": 0.008460123091936111,
      "learning_rate": 9.924038765061042e-05,
      "loss": 12.4535,
      "step": 15
    },
    {
      "epoch": 0.04411764705882353,
      "grad_norm": 0.008190251886844635,
      "learning_rate": 9.806308479691595e-05,
      "loss": 12.4544,
      "step": 18
    },
    {
      "epoch": 0.04411764705882353,
      "eval_loss": 12.454602241516113,
      "eval_runtime": 5.6416,
      "eval_samples_per_second": 121.952,
      "eval_steps_per_second": 15.244,
      "step": 18
    },
    {
      "epoch": 0.051470588235294115,
      "grad_norm": 0.008424337953329086,
      "learning_rate": 9.635919272833938e-05,
      "loss": 12.4507,
      "step": 21
    },
    {
      "epoch": 0.058823529411764705,
      "grad_norm": 0.009385749697685242,
      "learning_rate": 9.414737964294636e-05,
      "loss": 12.457,
      "step": 24
    },
    {
      "epoch": 0.0661764705882353,
      "grad_norm": 0.0088015366345644,
      "learning_rate": 9.145187862775209e-05,
      "loss": 12.4589,
      "step": 27
    },
    {
      "epoch": 0.0661764705882353,
      "eval_loss": 12.454548835754395,
      "eval_runtime": 5.6402,
      "eval_samples_per_second": 121.982,
      "eval_steps_per_second": 15.248,
      "step": 27
    },
    {
      "epoch": 0.07352941176470588,
      "grad_norm": 0.009232423268258572,
      "learning_rate": 8.83022221559489e-05,
      "loss": 12.4546,
      "step": 30
    },
    {
      "epoch": 0.08088235294117647,
      "grad_norm": 0.008932845667004585,
      "learning_rate": 8.473291852294987e-05,
      "loss": 12.4569,
      "step": 33
    },
    {
      "epoch": 0.08823529411764706,
      "grad_norm": 0.008380930870771408,
      "learning_rate": 8.07830737662829e-05,
      "loss": 12.4534,
      "step": 36
    },
    {
      "epoch": 0.08823529411764706,
      "eval_loss": 12.454479217529297,
      "eval_runtime": 5.6394,
      "eval_samples_per_second": 122.0,
      "eval_steps_per_second": 15.25,
      "step": 36
    },
    {
      "epoch": 0.09558823529411764,
      "grad_norm": 0.007541842758655548,
      "learning_rate": 7.649596321166024e-05,
      "loss": 12.4577,
      "step": 39
    },
    {
      "epoch": 0.10294117647058823,
      "grad_norm": 0.009072437882423401,
      "learning_rate": 7.191855733945387e-05,
      "loss": 12.4512,
      "step": 42
    },
    {
      "epoch": 0.11029411764705882,
      "grad_norm": 0.00889171939343214,
      "learning_rate": 6.710100716628344e-05,
      "loss": 12.4593,
      "step": 45
    },
    {
      "epoch": 0.11029411764705882,
      "eval_loss": 12.454415321350098,
      "eval_runtime": 5.6364,
      "eval_samples_per_second": 122.063,
      "eval_steps_per_second": 15.258,
      "step": 45
    },
    {
      "epoch": 0.11764705882352941,
      "grad_norm": 0.011012999340891838,
      "learning_rate": 6.209609477998338e-05,
      "loss": 12.4561,
      "step": 48
    },
    {
      "epoch": 0.125,
      "grad_norm": 0.00962892733514309,
      "learning_rate": 5.695865504800327e-05,
      "loss": 12.4539,
      "step": 51
    },
    {
      "epoch": 0.1323529411764706,
      "grad_norm": 0.00907512754201889,
      "learning_rate": 5.174497483512506e-05,
      "loss": 12.4555,
      "step": 54
    },
    {
      "epoch": 0.1323529411764706,
      "eval_loss": 12.454337120056152,
      "eval_runtime": 5.6351,
      "eval_samples_per_second": 122.091,
      "eval_steps_per_second": 15.261,
      "step": 54
    },
    {
      "epoch": 0.13970588235294118,
      "grad_norm": 0.009578227065503597,
      "learning_rate": 4.6512176312793736e-05,
      "loss": 12.456,
      "step": 57
    },
    {
      "epoch": 0.14705882352941177,
      "grad_norm": 0.009201650507748127,
      "learning_rate": 4.131759111665349e-05,
      "loss": 12.4541,
      "step": 60
    },
    {
      "epoch": 0.15441176470588236,
      "grad_norm": 0.01162680983543396,
      "learning_rate": 3.6218132209150045e-05,
      "loss": 12.4568,
      "step": 63
    },
    {
      "epoch": 0.15441176470588236,
      "eval_loss": 12.454294204711914,
      "eval_runtime": 5.6362,
      "eval_samples_per_second": 122.067,
      "eval_steps_per_second": 15.258,
      "step": 63
    },
    {
      "epoch": 0.16176470588235295,
      "grad_norm": 0.009980561211705208,
      "learning_rate": 3.12696703292044e-05,
      "loss": 12.4532,
      "step": 66
    },
    {
      "epoch": 0.16911764705882354,
      "grad_norm": 0.009362083859741688,
      "learning_rate": 2.6526421860705473e-05,
      "loss": 12.4559,
      "step": 69
    },
    {
      "epoch": 0.17647058823529413,
      "grad_norm": 0.012317374348640442,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 12.4575,
      "step": 72
    },
    {
      "epoch": 0.17647058823529413,
      "eval_loss": 12.454255104064941,
      "eval_runtime": 5.6442,
      "eval_samples_per_second": 121.896,
      "eval_steps_per_second": 15.237,
      "step": 72
    },
    {
      "epoch": 0.18382352941176472,
      "grad_norm": 0.009673560969531536,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 12.4546,
      "step": 75
    },
    {
      "epoch": 0.19117647058823528,
      "grad_norm": 0.009031685069203377,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 12.4564,
      "step": 78
    },
    {
      "epoch": 0.19852941176470587,
      "grad_norm": 0.012030497193336487,
      "learning_rate": 1.0599462319663905e-05,
      "loss": 12.4545,
      "step": 81
    },
    {
      "epoch": 0.19852941176470587,
      "eval_loss": 12.454232215881348,
      "eval_runtime": 5.6389,
      "eval_samples_per_second": 122.01,
      "eval_steps_per_second": 15.251,
      "step": 81
    },
    {
      "epoch": 0.20588235294117646,
      "grad_norm": 0.013549219816923141,
      "learning_rate": 7.597595192178702e-06,
      "loss": 12.455,
      "step": 84
    },
    {
      "epoch": 0.21323529411764705,
      "grad_norm": 0.011278319172561169,
      "learning_rate": 5.060297685041659e-06,
      "loss": 12.4558,
      "step": 87
    },
    {
      "epoch": 0.22058823529411764,
      "grad_norm": 0.009173925966024399,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 12.4548,
      "step": 90
    },
    {
      "epoch": 0.22058823529411764,
      "eval_loss": 12.4542236328125,
      "eval_runtime": 5.6365,
      "eval_samples_per_second": 122.062,
      "eval_steps_per_second": 15.258,
      "step": 90
    },
    {
      "epoch": 0.22794117647058823,
      "grad_norm": 0.010029138065874577,
      "learning_rate": 1.4852136862001764e-06,
      "loss": 12.4577,
      "step": 93
    },
    {
      "epoch": 0.23529411764705882,
      "grad_norm": 0.009600088000297546,
      "learning_rate": 4.865965629214819e-07,
      "loss": 12.4516,
      "step": 96
    },
    {
      "epoch": 0.2426470588235294,
      "grad_norm": 0.01012326404452324,
      "learning_rate": 3.04586490452119e-08,
      "loss": 12.4586,
      "step": 99
    },
    {
      "epoch": 0.2426470588235294,
      "eval_loss": 12.45422077178955,
      "eval_runtime": 5.6384,
      "eval_samples_per_second": 122.021,
      "eval_steps_per_second": 15.253,
      "step": 99
    }
  ],
  "logging_steps": 3,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 9,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 123312537600.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|