{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.1446540880503147,
  "eval_steps": 500,
  "global_step": 1000,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_loss": 0.4282287061214447,
      "eval_runtime": 3.2319,
      "eval_samples_per_second": 959.178,
      "eval_steps_per_second": 20.112,
      "step": 318
    },
    {
      "epoch": 1.57,
      "grad_norm": 0.861090898513794,
      "learning_rate": 1.371069182389937e-05,
      "loss": 0.6575,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.1691545695066452,
      "eval_runtime": 3.2816,
      "eval_samples_per_second": 944.667,
      "eval_steps_per_second": 19.808,
      "step": 636
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.09601032733917236,
      "eval_runtime": 3.3082,
      "eval_samples_per_second": 937.058,
      "eval_steps_per_second": 19.648,
      "step": 954
    },
    {
      "epoch": 3.14,
      "grad_norm": 0.8819959163665771,
      "learning_rate": 7.421383647798742e-06,
      "loss": 0.1958,
      "step": 1000
    }
  ],
  "logging_steps": 500,
  "max_steps": 1590,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 260941334653608.0,
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.31661862639116345,
    "num_train_epochs": 9,
    "temperature": 2
  }
}
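
A trainer state file like the one above can be inspected programmatically. The minimal sketch below assumes the file is saved under the conventional checkpoint name trainer_state.json (an assumption, not taken from this page) and uses only the Python standard library to list the logged training and evaluation losses from log_history.

# Minimal sketch: reading a Hugging Face trainer state file.
# The path "trainer_state.json" is an assumption; adjust it to the actual
# checkpoint location.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes evaluation records (carrying "eval_loss") with
# training records (carrying "loss"); print each with its epoch and step.
for record in state["log_history"]:
    if "eval_loss" in record:
        print(f"epoch {record['epoch']:.2f}  step {record['step']:>4}  eval_loss {record['eval_loss']:.4f}")
    elif "loss" in record:
        print(f"epoch {record['epoch']:.2f}  step {record['step']:>4}  train_loss {record['loss']:.4f}")

# Hyperparameter-search trial parameters recorded for this run.
print(state["trial_params"])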