|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 1.0,
  "global_step": 32,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0625,
      "grad_norm": 125.37348937988281,
      "learning_rate": 4.8437500000000005e-05,
      "loss": 1.8261,
      "step": 1
    },
    {
      "epoch": 0.0625,
      "eval_loss": 1.7808233499526978,
      "eval_runtime": 101.8647,
      "eval_samples_per_second": 1.473,
      "eval_steps_per_second": 0.029,
      "step": 1
    },
    {
      "epoch": 0.125,
      "grad_norm": 251.37063598632812,
      "learning_rate": 4.6875e-05,
      "loss": 1.7935,
      "step": 2
    },
    {
      "epoch": 0.125,
      "eval_loss": 1.623749852180481,
      "eval_runtime": 94.4381,
      "eval_samples_per_second": 1.588,
      "eval_steps_per_second": 0.032,
      "step": 2
    },
    {
      "epoch": 0.1875,
      "grad_norm": 117.00857543945312,
      "learning_rate": 4.5312500000000004e-05,
      "loss": 2.2498,
      "step": 3
    },
    {
      "epoch": 0.1875,
      "eval_loss": 1.741827130317688,
      "eval_runtime": 70.4069,
      "eval_samples_per_second": 2.13,
      "eval_steps_per_second": 0.043,
      "step": 3
    },
    {
      "epoch": 0.25,
      "grad_norm": 102.61508178710938,
      "learning_rate": 4.375e-05,
      "loss": 1.9536,
      "step": 4
    },
    {
      "epoch": 0.25,
      "eval_loss": 1.7777817249298096,
      "eval_runtime": 72.2698,
      "eval_samples_per_second": 2.076,
      "eval_steps_per_second": 0.042,
      "step": 4
    },
    {
      "epoch": 0.3125,
      "grad_norm": 73.9283676147461,
      "learning_rate": 4.21875e-05,
      "loss": 2.1207,
      "step": 5
    },
    {
      "epoch": 0.3125,
      "eval_loss": 1.7881625890731812,
      "eval_runtime": 70.2676,
      "eval_samples_per_second": 2.135,
      "eval_steps_per_second": 0.043,
      "step": 5
    },
    {
      "epoch": 0.375,
      "grad_norm": 82.38343811035156,
      "learning_rate": 4.0625000000000005e-05,
      "loss": 2.1623,
      "step": 6
    },
    {
      "epoch": 0.375,
      "eval_loss": 1.8577982187271118,
      "eval_runtime": 72.0998,
      "eval_samples_per_second": 2.08,
      "eval_steps_per_second": 0.042,
      "step": 6
    },
    {
      "epoch": 0.4375,
      "grad_norm": 57.1233024597168,
      "learning_rate": 3.90625e-05,
      "loss": 2.0764,
      "step": 7
    },
    {
      "epoch": 0.4375,
      "eval_loss": 1.8867578506469727,
      "eval_runtime": 88.6197,
      "eval_samples_per_second": 1.693,
      "eval_steps_per_second": 0.034,
      "step": 7
    },
    {
      "epoch": 0.5,
      "grad_norm": 56.74623107910156,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 2.206,
      "step": 8
    },
    {
      "epoch": 0.5,
      "eval_loss": 1.8570754528045654,
      "eval_runtime": 68.277,
      "eval_samples_per_second": 2.197,
      "eval_steps_per_second": 0.044,
      "step": 8
    },
    {
      "epoch": 0.5625,
      "grad_norm": 55.427146911621094,
      "learning_rate": 3.59375e-05,
      "loss": 2.2791,
      "step": 9
    },
    {
      "epoch": 0.5625,
      "eval_loss": 1.8133898973464966,
      "eval_runtime": 67.77,
      "eval_samples_per_second": 2.213,
      "eval_steps_per_second": 0.044,
      "step": 9
    },
    {
      "epoch": 0.625,
      "grad_norm": 48.58964157104492,
      "learning_rate": 3.4375e-05,
      "loss": 2.2276,
      "step": 10
    },
    {
      "epoch": 0.625,
      "eval_loss": 1.759128451347351,
      "eval_runtime": 66.1185,
      "eval_samples_per_second": 2.269,
      "eval_steps_per_second": 0.045,
      "step": 10
    },
    {
      "epoch": 0.6875,
      "grad_norm": 58.89248275756836,
      "learning_rate": 3.2812500000000005e-05,
      "loss": 1.826,
      "step": 11
    },
    {
      "epoch": 0.6875,
      "eval_loss": 1.6688445806503296,
      "eval_runtime": 67.5679,
      "eval_samples_per_second": 2.22,
      "eval_steps_per_second": 0.044,
      "step": 11
    },
    {
      "epoch": 0.75,
      "grad_norm": 46.2577018737793,
      "learning_rate": 3.125e-05,
      "loss": 1.668,
      "step": 12
    },
    {
      "epoch": 0.75,
      "eval_loss": 1.5710270404815674,
      "eval_runtime": 67.265,
      "eval_samples_per_second": 2.23,
      "eval_steps_per_second": 0.045,
      "step": 12
    },
    {
      "epoch": 0.8125,
      "grad_norm": 42.970054626464844,
      "learning_rate": 2.96875e-05,
      "loss": 1.8039,
      "step": 13
    },
    {
      "epoch": 0.8125,
      "eval_loss": 1.527113914489746,
      "eval_runtime": 66.977,
      "eval_samples_per_second": 2.24,
      "eval_steps_per_second": 0.045,
      "step": 13
    },
    {
      "epoch": 0.875,
      "grad_norm": 48.52293395996094,
      "learning_rate": 2.8125000000000003e-05,
      "loss": 1.7761,
      "step": 14
    },
    {
      "epoch": 0.875,
      "eval_loss": 1.497421383857727,
      "eval_runtime": 68.3346,
      "eval_samples_per_second": 2.195,
      "eval_steps_per_second": 0.044,
      "step": 14
    },
    {
      "epoch": 0.9375,
      "grad_norm": 43.09657287597656,
      "learning_rate": 2.6562500000000002e-05,
      "loss": 1.5512,
      "step": 15
    },
    {
      "epoch": 0.9375,
      "eval_loss": 1.479906439781189,
      "eval_runtime": 67.7193,
      "eval_samples_per_second": 2.215,
      "eval_steps_per_second": 0.044,
      "step": 15
    },
    {
      "epoch": 1.0,
      "grad_norm": 49.768165588378906,
      "learning_rate": 2.5e-05,
      "loss": 1.0115,
      "step": 16
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.4686092138290405,
      "eval_runtime": 65.0137,
      "eval_samples_per_second": 2.307,
      "eval_steps_per_second": 0.046,
      "step": 16
    },
    {
      "epoch": 1.0625,
      "grad_norm": 24.204669952392578,
      "learning_rate": 2.34375e-05,
      "loss": 0.4346,
      "step": 17
    },
    {
      "epoch": 1.0625,
      "eval_loss": 1.4616279602050781,
      "eval_runtime": 66.0842,
      "eval_samples_per_second": 2.27,
      "eval_steps_per_second": 0.045,
      "step": 17
    },
    {
      "epoch": 1.125,
      "grad_norm": 18.588682174682617,
      "learning_rate": 2.1875e-05,
      "loss": 0.3421,
      "step": 18
    },
    {
      "epoch": 1.125,
      "eval_loss": 1.4559637308120728,
      "eval_runtime": 67.549,
      "eval_samples_per_second": 2.221,
      "eval_steps_per_second": 0.044,
      "step": 18
    },
    {
      "epoch": 1.1875,
      "grad_norm": 20.04482078552246,
      "learning_rate": 2.0312500000000002e-05,
      "loss": 0.3134,
      "step": 19
    },
    {
      "epoch": 1.1875,
      "eval_loss": 1.4582462310791016,
      "eval_runtime": 65.9148,
      "eval_samples_per_second": 2.276,
      "eval_steps_per_second": 0.046,
      "step": 19
    },
    {
      "epoch": 1.25,
      "grad_norm": 23.909494400024414,
      "learning_rate": 1.8750000000000002e-05,
      "loss": 0.2237,
      "step": 20
    },
    {
      "epoch": 1.25,
      "eval_loss": 1.467896819114685,
      "eval_runtime": 66.108,
      "eval_samples_per_second": 2.269,
      "eval_steps_per_second": 0.045,
      "step": 20
    },
    {
      "epoch": 1.3125,
      "grad_norm": 33.90362548828125,
      "learning_rate": 1.71875e-05,
      "loss": 0.6518,
      "step": 21
    },
    {
      "epoch": 1.3125,
      "eval_loss": 1.4688968658447266,
      "eval_runtime": 69.8941,
      "eval_samples_per_second": 2.146,
      "eval_steps_per_second": 0.043,
      "step": 21
    },
    {
      "epoch": 1.375,
      "grad_norm": 30.035762786865234,
      "learning_rate": 1.5625e-05,
      "loss": 0.3559,
      "step": 22
    },
    {
      "epoch": 1.375,
      "eval_loss": 1.4729983806610107,
      "eval_runtime": 67.5966,
      "eval_samples_per_second": 2.219,
      "eval_steps_per_second": 0.044,
      "step": 22
    },
    {
      "epoch": 1.4375,
      "grad_norm": 28.968109130859375,
      "learning_rate": 1.4062500000000001e-05,
      "loss": 0.38,
      "step": 23
    },
    {
      "epoch": 1.4375,
      "eval_loss": 1.4652482271194458,
      "eval_runtime": 68.3514,
      "eval_samples_per_second": 2.195,
      "eval_steps_per_second": 0.044,
      "step": 23
    },
    {
      "epoch": 1.5,
      "grad_norm": 29.879552841186523,
      "learning_rate": 1.25e-05,
      "loss": 0.3887,
      "step": 24
    },
    {
      "epoch": 1.5,
      "eval_loss": 1.4595154523849487,
      "eval_runtime": 66.69,
      "eval_samples_per_second": 2.249,
      "eval_steps_per_second": 0.045,
      "step": 24
    },
    {
      "epoch": 1.5625,
      "grad_norm": 28.710205078125,
      "learning_rate": 1.09375e-05,
      "loss": 0.3202,
      "step": 25
    },
    {
      "epoch": 1.5625,
      "eval_loss": 1.4468400478363037,
      "eval_runtime": 68.3647,
      "eval_samples_per_second": 2.194,
      "eval_steps_per_second": 0.044,
      "step": 25
    },
    {
      "epoch": 1.625,
      "grad_norm": 27.124183654785156,
      "learning_rate": 9.375000000000001e-06,
      "loss": 0.2525,
      "step": 26
    },
    {
      "epoch": 1.625,
      "eval_loss": 1.438510775566101,
      "eval_runtime": 65.3338,
      "eval_samples_per_second": 2.296,
      "eval_steps_per_second": 0.046,
      "step": 26
    },
    {
      "epoch": 1.6875,
      "grad_norm": 31.146799087524414,
      "learning_rate": 7.8125e-06,
      "loss": 0.5361,
      "step": 27
    },
    {
      "epoch": 1.6875,
      "eval_loss": 1.4363871812820435,
      "eval_runtime": 79.7494,
      "eval_samples_per_second": 1.881,
      "eval_steps_per_second": 0.038,
      "step": 27
    },
    {
      "epoch": 1.75,
      "grad_norm": 30.060962677001953,
      "learning_rate": 6.25e-06,
      "loss": 0.3427,
      "step": 28
    },
    {
      "epoch": 1.75,
      "eval_loss": 1.435764193534851,
      "eval_runtime": 89.795,
      "eval_samples_per_second": 1.67,
      "eval_steps_per_second": 0.033,
      "step": 28
    },
    {
      "epoch": 1.8125,
      "grad_norm": 28.790109634399414,
      "learning_rate": 4.6875000000000004e-06,
      "loss": 0.2529,
      "step": 29
    },
    {
      "epoch": 1.8125,
      "eval_loss": 1.4345921277999878,
      "eval_runtime": 67.6708,
      "eval_samples_per_second": 2.217,
      "eval_steps_per_second": 0.044,
      "step": 29
    },
    {
      "epoch": 1.875,
      "grad_norm": 26.701284408569336,
      "learning_rate": 3.125e-06,
      "loss": 0.1857,
      "step": 30
    },
    {
      "epoch": 1.875,
      "eval_loss": 1.4300572872161865,
      "eval_runtime": 68.0502,
      "eval_samples_per_second": 2.204,
      "eval_steps_per_second": 0.044,
      "step": 30
    },
    {
      "epoch": 1.9375,
      "grad_norm": 32.15977096557617,
      "learning_rate": 1.5625e-06,
      "loss": 0.3143,
      "step": 31
    },
    {
      "epoch": 1.9375,
      "eval_loss": 1.424769639968872,
      "eval_runtime": 66.6233,
      "eval_samples_per_second": 2.251,
      "eval_steps_per_second": 0.045,
      "step": 31
    },
    {
      "epoch": 2.0,
      "grad_norm": 24.524354934692383,
      "learning_rate": 0.0,
      "loss": 0.1912,
      "step": 32
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.4227830171585083,
      "eval_runtime": 68.629,
      "eval_samples_per_second": 2.186,
      "eval_steps_per_second": 0.044,
      "step": 32
    },
    {
      "epoch": 2.0,
      "step": 32,
      "total_flos": 115984993944204.0,
      "train_loss": 1.1255573146045208,
      "train_runtime": 4694.1264,
      "train_samples_per_second": 0.425,
      "train_steps_per_second": 0.007
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 32,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 115984993944204.0,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}