{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.0,
"grad_norm": 1.3465218544006348,
"learning_rate": 9.846798312018146e-05,
"loss": 2.7328,
"step": 100
},
{
"epoch": 2.0,
"grad_norm": 0.7282734513282776,
"learning_rate": 9.230035528339211e-05,
"loss": 1.6124,
"step": 200
},
{
"epoch": 3.0,
"grad_norm": 0.8552231788635254,
"learning_rate": 8.199207394755893e-05,
"loss": 1.2943,
"step": 300
},
{
"epoch": 4.0,
"grad_norm": 1.293209195137024,
"learning_rate": 6.855218551185255e-05,
"loss": 1.0132,
"step": 400
},
{
"epoch": 5.0,
"grad_norm": 2.836729049682617,
"learning_rate": 5.32962798975689e-05,
"loss": 0.7495,
"step": 500
},
{
"epoch": 6.0,
"grad_norm": 1.0586963891983032,
"learning_rate": 3.7717711440378694e-05,
"loss": 0.561,
"step": 600
},
{
"epoch": 7.0,
"grad_norm": 1.7043125629425049,
"learning_rate": 2.334141896314057e-05,
"loss": 0.4335,
"step": 700
},
{
"epoch": 8.0,
"grad_norm": 1.2614474296569824,
"learning_rate": 1.1574654139046171e-05,
"loss": 0.362,
"step": 800
},
{
"epoch": 9.0,
"grad_norm": 1.796047329902649,
"learning_rate": 3.569229892949133e-06,
"loss": 0.3259,
"step": 900
},
{
"epoch": 10.0,
"grad_norm": 0.8333213925361633,
"learning_rate": 1.0877292712792585e-07,
"loss": 0.3123,
"step": 1000
}
],
"logging_steps": 500,
"max_steps": 1000,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1919269665792000.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}