{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.08458646616541353,
  "eval_steps": 5,
  "global_step": 45,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0018796992481203006,
      "eval_loss": 2.188689947128296,
      "eval_runtime": 15.8759,
      "eval_samples_per_second": 28.219,
      "eval_steps_per_second": 7.055,
      "step": 1
    },
    {
      "epoch": 0.005639097744360902,
      "grad_norm": 2.3131868839263916,
      "learning_rate": 3e-05,
      "loss": 2.1981,
      "step": 3
    },
    {
      "epoch": 0.009398496240601503,
      "eval_loss": 2.0904104709625244,
      "eval_runtime": 15.9911,
      "eval_samples_per_second": 28.016,
      "eval_steps_per_second": 7.004,
      "step": 5
    },
    {
      "epoch": 0.011278195488721804,
      "grad_norm": 1.1980903148651123,
      "learning_rate": 6e-05,
      "loss": 2.1063,
      "step": 6
    },
    {
      "epoch": 0.016917293233082706,
      "grad_norm": 1.9292231798171997,
      "learning_rate": 9e-05,
      "loss": 2.0794,
      "step": 9
    },
    {
      "epoch": 0.018796992481203006,
      "eval_loss": 1.9134695529937744,
      "eval_runtime": 16.0172,
      "eval_samples_per_second": 27.97,
      "eval_steps_per_second": 6.992,
      "step": 10
    },
    {
      "epoch": 0.022556390977443608,
      "grad_norm": 1.6164668798446655,
      "learning_rate": 9.938441702975689e-05,
      "loss": 1.9332,
      "step": 12
    },
    {
      "epoch": 0.02819548872180451,
      "grad_norm": 1.1573011875152588,
      "learning_rate": 9.619397662556435e-05,
      "loss": 1.7655,
      "step": 15
    },
    {
      "epoch": 0.02819548872180451,
      "eval_loss": 1.7978755235671997,
      "eval_runtime": 16.0385,
      "eval_samples_per_second": 27.933,
      "eval_steps_per_second": 6.983,
      "step": 15
    },
    {
      "epoch": 0.03383458646616541,
      "grad_norm": 1.463262677192688,
      "learning_rate": 9.045084971874738e-05,
      "loss": 1.7978,
      "step": 18
    },
    {
      "epoch": 0.03759398496240601,
      "eval_loss": 1.7174512147903442,
      "eval_runtime": 16.0423,
      "eval_samples_per_second": 27.926,
      "eval_steps_per_second": 6.982,
      "step": 20
    },
    {
      "epoch": 0.039473684210526314,
      "grad_norm": 1.2767959833145142,
      "learning_rate": 8.247240241650918e-05,
      "loss": 1.724,
      "step": 21
    },
    {
      "epoch": 0.045112781954887216,
      "grad_norm": 1.445420265197754,
      "learning_rate": 7.269952498697734e-05,
      "loss": 1.7438,
      "step": 24
    },
    {
      "epoch": 0.046992481203007516,
      "eval_loss": 1.6554962396621704,
      "eval_runtime": 16.0491,
      "eval_samples_per_second": 27.914,
      "eval_steps_per_second": 6.979,
      "step": 25
    },
    {
      "epoch": 0.05075187969924812,
      "grad_norm": 1.6344633102416992,
      "learning_rate": 6.167226819279528e-05,
      "loss": 1.609,
      "step": 27
    },
    {
      "epoch": 0.05639097744360902,
      "grad_norm": 1.6545953750610352,
      "learning_rate": 5e-05,
      "loss": 1.5911,
      "step": 30
    },
    {
      "epoch": 0.05639097744360902,
      "eval_loss": 1.6130125522613525,
      "eval_runtime": 16.0529,
      "eval_samples_per_second": 27.908,
      "eval_steps_per_second": 6.977,
      "step": 30
    },
    {
      "epoch": 0.06203007518796992,
      "grad_norm": 1.566185712814331,
      "learning_rate": 3.832773180720475e-05,
      "loss": 1.5506,
      "step": 33
    },
    {
      "epoch": 0.06578947368421052,
      "eval_loss": 1.5798027515411377,
      "eval_runtime": 16.0818,
      "eval_samples_per_second": 27.858,
      "eval_steps_per_second": 6.964,
      "step": 35
    },
    {
      "epoch": 0.06766917293233082,
      "grad_norm": 1.518865704536438,
      "learning_rate": 2.7300475013022663e-05,
      "loss": 1.5894,
      "step": 36
    },
    {
      "epoch": 0.07330827067669173,
      "grad_norm": 1.5283524990081787,
      "learning_rate": 1.7527597583490822e-05,
      "loss": 1.592,
      "step": 39
    },
    {
      "epoch": 0.07518796992481203,
      "eval_loss": 1.5614439249038696,
      "eval_runtime": 16.0674,
      "eval_samples_per_second": 27.883,
      "eval_steps_per_second": 6.971,
      "step": 40
    },
    {
      "epoch": 0.07894736842105263,
      "grad_norm": 1.3438414335250854,
      "learning_rate": 9.549150281252633e-06,
      "loss": 1.6063,
      "step": 42
    },
    {
      "epoch": 0.08458646616541353,
      "grad_norm": 1.4435805082321167,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 1.5254,
      "step": 45
    },
    {
      "epoch": 0.08458646616541353,
      "eval_loss": 1.552406907081604,
      "eval_runtime": 16.0663,
      "eval_samples_per_second": 27.885,
      "eval_steps_per_second": 6.971,
      "step": 45
    }
  ],
  "logging_steps": 3,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 8940733763420160.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}