{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.5241605241605242,
"eval_steps": 10,
"global_step": 20,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02620802620802621,
"grad_norm": 0.688605546951294,
"learning_rate": 5e-05,
"loss": 1.9615,
"step": 1
},
{
"epoch": 0.02620802620802621,
"eval_loss": 1.9379749298095703,
"eval_runtime": 6.8873,
"eval_samples_per_second": 37.315,
"eval_steps_per_second": 9.438,
"step": 1
},
{
"epoch": 0.05241605241605242,
"grad_norm": 0.7276808023452759,
"learning_rate": 0.0001,
"loss": 1.9459,
"step": 2
},
{
"epoch": 0.07862407862407862,
"grad_norm": 0.7033953666687012,
"learning_rate": 9.981987442712633e-05,
"loss": 1.9077,
"step": 3
},
{
"epoch": 0.10483210483210484,
"grad_norm": 0.719717264175415,
"learning_rate": 9.928079551738543e-05,
"loss": 1.9109,
"step": 4
},
{
"epoch": 0.13104013104013104,
"grad_norm": 0.6515451669692993,
"learning_rate": 9.838664734667495e-05,
"loss": 1.8254,
"step": 5
},
{
"epoch": 0.15724815724815724,
"grad_norm": 0.6293128728866577,
"learning_rate": 9.714387227305422e-05,
"loss": 1.7722,
"step": 6
},
{
"epoch": 0.18345618345618345,
"grad_norm": 0.5616139769554138,
"learning_rate": 9.55614245194068e-05,
"loss": 1.7974,
"step": 7
},
{
"epoch": 0.20966420966420968,
"grad_norm": 0.6011109948158264,
"learning_rate": 9.365070565805941e-05,
"loss": 1.7045,
"step": 8
},
{
"epoch": 0.23587223587223588,
"grad_norm": 0.6134763956069946,
"learning_rate": 9.142548246219212e-05,
"loss": 1.7578,
"step": 9
},
{
"epoch": 0.2620802620802621,
"grad_norm": 0.6061597466468811,
"learning_rate": 8.890178771592199e-05,
"loss": 1.7537,
"step": 10
},
{
"epoch": 0.2620802620802621,
"eval_loss": 1.690554141998291,
"eval_runtime": 6.9618,
"eval_samples_per_second": 36.916,
"eval_steps_per_second": 9.337,
"step": 10
},
{
"epoch": 0.2882882882882883,
"grad_norm": 0.5569241642951965,
"learning_rate": 8.609780469772623e-05,
"loss": 1.6436,
"step": 11
},
{
"epoch": 0.3144963144963145,
"grad_norm": 0.5158886313438416,
"learning_rate": 8.303373616950408e-05,
"loss": 1.6523,
"step": 12
},
{
"epoch": 0.3407043407043407,
"grad_norm": 0.4968787729740143,
"learning_rate": 7.973165881521434e-05,
"loss": 1.6673,
"step": 13
},
{
"epoch": 0.3669123669123669,
"grad_norm": 0.49128782749176025,
"learning_rate": 7.621536417786159e-05,
"loss": 1.6519,
"step": 14
},
{
"epoch": 0.3931203931203931,
"grad_norm": 0.493300199508667,
"learning_rate": 7.251018724088367e-05,
"loss": 1.624,
"step": 15
},
{
"epoch": 0.41932841932841936,
"grad_norm": 0.5251982808113098,
"learning_rate": 6.864282388901544e-05,
"loss": 1.6108,
"step": 16
},
{
"epoch": 0.44553644553644556,
"grad_norm": 0.4884173274040222,
"learning_rate": 6.464113856382752e-05,
"loss": 1.6367,
"step": 17
},
{
"epoch": 0.47174447174447176,
"grad_norm": 0.5205239653587341,
"learning_rate": 6.0533963499786314e-05,
"loss": 1.6079,
"step": 18
},
{
"epoch": 0.49795249795249796,
"grad_norm": 0.5160295963287354,
"learning_rate": 5.6350890987343944e-05,
"loss": 1.6298,
"step": 19
},
{
"epoch": 0.5241605241605242,
"grad_norm": 0.46167778968811035,
"learning_rate": 5.212206015980742e-05,
"loss": 1.7101,
"step": 20
},
{
"epoch": 0.5241605241605242,
"eval_loss": 1.6339422464370728,
"eval_runtime": 6.9719,
"eval_samples_per_second": 36.862,
"eval_steps_per_second": 9.323,
"step": 20
}
],
"logging_steps": 1,
"max_steps": 39,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 10,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 6.275680826713702e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}