{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9936305732484076,
  "eval_steps": 500,
  "global_step": 78,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012738853503184714,
      "grad_norm": 9.419306860000189,
      "learning_rate": 2.5e-06,
      "loss": 1.2082,
      "step": 1
    },
    {
      "epoch": 0.06369426751592357,
      "grad_norm": 11.154561988265748,
      "learning_rate": 1.25e-05,
      "loss": 1.1572,
      "step": 5
    },
    {
      "epoch": 0.12738853503184713,
      "grad_norm": 0.9580718367338527,
      "learning_rate": 1.9959742939952393e-05,
      "loss": 0.926,
      "step": 10
    },
    {
      "epoch": 0.1910828025477707,
      "grad_norm": 0.6962618582606526,
      "learning_rate": 1.9510565162951538e-05,
      "loss": 0.8462,
      "step": 15
    },
    {
      "epoch": 0.25477707006369427,
      "grad_norm": 0.652360226118771,
      "learning_rate": 1.8584487936018663e-05,
      "loss": 0.8207,
      "step": 20
    },
    {
      "epoch": 0.3184713375796178,
      "grad_norm": 0.6036498850889884,
      "learning_rate": 1.7227948638273918e-05,
      "loss": 0.8328,
      "step": 25
    },
    {
      "epoch": 0.3821656050955414,
      "grad_norm": 0.6106437850436714,
      "learning_rate": 1.5508969814521026e-05,
      "loss": 0.8184,
      "step": 30
    },
    {
      "epoch": 0.445859872611465,
      "grad_norm": 0.6112027883233256,
      "learning_rate": 1.3513748240813429e-05,
      "loss": 0.829,
      "step": 35
    },
    {
      "epoch": 0.5095541401273885,
      "grad_norm": 0.5896867860825263,
      "learning_rate": 1.1342332658176556e-05,
      "loss": 0.8223,
      "step": 40
    },
    {
      "epoch": 0.5732484076433121,
      "grad_norm": 0.6019677830367037,
      "learning_rate": 9.103606910965666e-06,
      "loss": 0.7841,
      "step": 45
    },
    {
      "epoch": 0.6369426751592356,
      "grad_norm": 0.5830221008800777,
      "learning_rate": 6.909830056250527e-06,
      "loss": 0.8322,
      "step": 50
    },
    {
      "epoch": 0.7006369426751592,
      "grad_norm": 0.5839501242867685,
      "learning_rate": 4.87100722594094e-06,
      "loss": 0.8069,
      "step": 55
    },
    {
      "epoch": 0.7643312101910829,
      "grad_norm": 0.5957554055545055,
      "learning_rate": 3.089373510131354e-06,
      "loss": 0.7856,
      "step": 60
    },
    {
      "epoch": 0.8280254777070064,
      "grad_norm": 0.5870067617046513,
      "learning_rate": 1.6542674627869738e-06,
      "loss": 0.8141,
      "step": 65
    },
    {
      "epoch": 0.89171974522293,
      "grad_norm": 0.557541376858347,
      "learning_rate": 6.37651293602628e-07,
      "loss": 0.7947,
      "step": 70
    },
    {
      "epoch": 0.9554140127388535,
      "grad_norm": 0.5998913867246424,
      "learning_rate": 9.0502382320653e-08,
      "loss": 0.8347,
      "step": 75
    },
    {
      "epoch": 0.9936305732484076,
      "eval_loss": 0.8263773322105408,
      "eval_runtime": 9.6507,
      "eval_samples_per_second": 51.81,
      "eval_steps_per_second": 1.658,
      "step": 78
    },
    {
      "epoch": 0.9936305732484076,
      "step": 78,
      "total_flos": 11234023833600.0,
      "train_loss": 0.8476498554914426,
      "train_runtime": 656.6949,
      "train_samples_per_second": 15.228,
      "train_steps_per_second": 0.119
    }
  ],
  "logging_steps": 5,
  "max_steps": 78,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 11234023833600.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}