{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.039619651347068144,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 3.846153846153847e-06,
"loss": 2.7796,
"theoretical_loss": 3.3518567762894107,
"tokens_seen": 2969305088
},
{
"epoch": 0.0,
"learning_rate": 3.846153846153846e-05,
"loss": 2.4642,
"theoretical_loss": 3.3517550749686795,
"tokens_seen": 2970484736
},
{
"epoch": 0.01,
"learning_rate": 7.692307692307693e-05,
"loss": 1.9063,
"theoretical_loss": 3.351642134115535,
"tokens_seen": 2971795456
},
{
"epoch": 0.01,
"learning_rate": 9.983987189751803e-05,
"loss": 1.4437,
"theoretical_loss": 3.351529257004948,
"tokens_seen": 2973106176
},
{
"epoch": 0.02,
"learning_rate": 9.943955164131305e-05,
"loss": 1.273,
"theoretical_loss": 3.3514164435728655,
"tokens_seen": 2974416896
},
{
"epoch": 0.02,
"learning_rate": 9.90392313851081e-05,
"loss": 1.2074,
"theoretical_loss": 3.3513036937553267,
"tokens_seen": 2975727616
},
{
"epoch": 0.02,
"learning_rate": 9.863891112890312e-05,
"loss": 1.2079,
"theoretical_loss": 3.3511910074884628,
"tokens_seen": 2977038336
},
{
"epoch": 0.02,
"objective/train/docs_used": 997783,
"objective/train/instantaneous_batch_size": 8,
"objective/train/instantaneous_microbatch_size": 8192,
"objective/train/original_loss": 0.9412798285484314,
"objective/train/theoretical_loss": 3.3511628458440637,
"objective/train/tokens_used": 28650976,
"theoretical_loss": 3.3511628458440637,
"tokens_seen": 2977366016
},
{
"epoch": 0.03,
"learning_rate": 9.823859087269817e-05,
"loss": 1.2032,
"theoretical_loss": 3.3510783847084977,
"tokens_seen": 2978349056
},
{
"epoch": 0.03,
"learning_rate": 9.78382706164932e-05,
"loss": 1.2178,
"theoretical_loss": 3.350965825351748,
"tokens_seen": 2979659776
},
{
"epoch": 0.04,
"learning_rate": 9.743795036028824e-05,
"loss": 1.189,
"theoretical_loss": 3.3508533293546208,
"tokens_seen": 2980970496
},
{
"epoch": 0.04,
"learning_rate": 9.703763010408326e-05,
"loss": 1.2086,
"theoretical_loss": 3.3507408966536154,
"tokens_seen": 2982281216
}
],
"max_steps": 2524,
"num_train_epochs": 9223372036854775807,
"total_flos": 6689076019200000.0,
"trial_name": null,
"trial_params": null
}