{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 7.861635220125786,
  "eval_steps": 500,
  "global_step": 2500,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6061290322580645,
      "eval_loss": 0.2067662924528122,
      "eval_runtime": 1.83,
      "eval_samples_per_second": 1693.985,
      "eval_steps_per_second": 35.519,
      "step": 318
    },
    {
      "epoch": 1.57,
      "learning_rate": 1.650593990216632e-05,
      "loss": 0.3335,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8312903225806452,
      "eval_loss": 0.09916964918375015,
      "eval_runtime": 2.1477,
      "eval_samples_per_second": 1443.399,
      "eval_steps_per_second": 30.265,
      "step": 636
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8816129032258064,
      "eval_loss": 0.06577017158269882,
      "eval_runtime": 1.8521,
      "eval_samples_per_second": 1673.812,
      "eval_steps_per_second": 35.096,
      "step": 954
    },
    {
      "epoch": 3.14,
      "learning_rate": 1.3011879804332637e-05,
      "loss": 0.1145,
      "step": 1000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8987096774193548,
      "eval_loss": 0.04967508092522621,
      "eval_runtime": 1.8807,
      "eval_samples_per_second": 1648.287,
      "eval_steps_per_second": 34.561,
      "step": 1272
    },
    {
      "epoch": 4.72,
      "learning_rate": 9.517819706498952e-06,
      "loss": 0.0721,
      "step": 1500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9074193548387097,
      "eval_loss": 0.04028124362230301,
      "eval_runtime": 1.8504,
      "eval_samples_per_second": 1675.341,
      "eval_steps_per_second": 35.128,
      "step": 1590
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9187096774193548,
      "eval_loss": 0.03542502969503403,
      "eval_runtime": 1.8593,
      "eval_samples_per_second": 1667.283,
      "eval_steps_per_second": 34.959,
      "step": 1908
    },
    {
      "epoch": 6.29,
      "learning_rate": 6.02375960866527e-06,
      "loss": 0.0569,
      "step": 2000
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9206451612903226,
      "eval_loss": 0.03249173238873482,
      "eval_runtime": 2.3133,
      "eval_samples_per_second": 1340.088,
      "eval_steps_per_second": 28.099,
      "step": 2226
    },
    {
      "epoch": 7.86,
      "learning_rate": 2.5296995108315863e-06,
      "loss": 0.0496,
      "step": 2500
    }
  ],
  "logging_steps": 500,
  "max_steps": 2862,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9,
  "save_steps": 500,
  "total_flos": 650483488638228.0,
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.08109385800901192,
    "num_train_epochs": 9,
    "temperature": 8
  }
}
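
This is the trainer_state.json that the Hugging Face Transformers Trainer writes alongside each checkpoint, here at global step 2500 of a hyperparameter-search trial (the alpha and temperature entries in trial_params suggest a knowledge-distillation setup, though that is an inference from the field names). A minimal sketch of reading the per-epoch evaluation metrics out of the log above, assuming the file is available locally as trainer_state.json:

import json

# Load the Trainer state shown above (the path is an assumption; the
# Trainer saves this file inside each checkpoint directory).
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes two kinds of entries: evaluation records (eval_* keys,
# logged once per epoch here) and training-loss records ("loss", logged
# every logging_steps=500 optimizer steps).
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f"epoch {entry['epoch']:.0f}: "
              f"eval_accuracy={entry['eval_accuracy']:.4f}, "
              f"eval_loss={entry['eval_loss']:.4f}")

Run against this state it would print one line per completed epoch, showing eval_accuracy rising from about 0.606 after epoch 1 to about 0.921 after epoch 7 while eval_loss falls.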