{
  "best_metric": 4.576775972964242e-05,
  "best_model_checkpoint": "llava-1.5-7b-sroie/checkpoint-1400",
  "epoch": 11.2,
  "eval_steps": 100,
  "global_step": 1400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.8,
      "grad_norm": 0.2929452955722809,
      "learning_rate": 1.3776e-05,
      "loss": 0.572,
      "step": 100
    },
    {
      "epoch": 0.8,
      "eval_loss": 0.03328470513224602,
      "eval_runtime": 60.4001,
      "eval_samples_per_second": 2.086,
      "eval_steps_per_second": 2.086,
      "step": 100
    },
    {
      "epoch": 1.6,
      "grad_norm": 0.01092437468469143,
      "learning_rate": 1.3552e-05,
      "loss": 0.0046,
      "step": 200
    },
    {
      "epoch": 1.6,
      "eval_loss": 0.0008874661289155483,
      "eval_runtime": 61.352,
      "eval_samples_per_second": 2.054,
      "eval_steps_per_second": 2.054,
      "step": 200
    },
    {
      "epoch": 2.4,
      "grad_norm": 0.005791354924440384,
      "learning_rate": 1.3327999999999998e-05,
      "loss": 0.0007,
      "step": 300
    },
    {
      "epoch": 2.4,
      "eval_loss": 0.0004409343237057328,
      "eval_runtime": 61.1557,
      "eval_samples_per_second": 2.06,
      "eval_steps_per_second": 2.06,
      "step": 300
    },
    {
      "epoch": 3.2,
      "grad_norm": 0.002793485065922141,
      "learning_rate": 1.3104e-05,
      "loss": 0.0004,
      "step": 400
    },
    {
      "epoch": 3.2,
      "eval_loss": 0.00029093530611135066,
      "eval_runtime": 60.6893,
      "eval_samples_per_second": 2.076,
      "eval_steps_per_second": 2.076,
      "step": 400
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.0015204106457531452,
      "learning_rate": 1.288e-05,
      "loss": 0.0002,
      "step": 500
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.0002039903192780912,
      "eval_runtime": 61.3698,
      "eval_samples_per_second": 2.053,
      "eval_steps_per_second": 2.053,
      "step": 500
    },
    {
      "epoch": 4.8,
      "grad_norm": 0.0009014272363856435,
      "learning_rate": 1.2656e-05,
      "loss": 0.0002,
      "step": 600
    },
    {
      "epoch": 4.8,
      "eval_loss": 0.00016169117589015514,
      "eval_runtime": 60.6043,
      "eval_samples_per_second": 2.079,
      "eval_steps_per_second": 2.079,
      "step": 600
    },
    {
      "epoch": 5.6,
      "grad_norm": 0.0006670297589153051,
      "learning_rate": 1.2432e-05,
      "loss": 0.0001,
      "step": 700
    },
    {
      "epoch": 5.6,
      "eval_loss": 0.0001274394162464887,
      "eval_runtime": 60.5564,
      "eval_samples_per_second": 2.081,
      "eval_steps_per_second": 2.081,
      "step": 700
    },
    {
      "epoch": 6.4,
      "grad_norm": 0.00041761380271054804,
      "learning_rate": 1.2207999999999999e-05,
      "loss": 0.0001,
      "step": 800
    },
    {
      "epoch": 6.4,
      "eval_loss": 0.000104410013591405,
      "eval_runtime": 60.3212,
      "eval_samples_per_second": 2.089,
      "eval_steps_per_second": 2.089,
      "step": 800
    },
    {
      "epoch": 7.2,
      "grad_norm": 0.0005114951636642218,
      "learning_rate": 1.1983999999999999e-05,
      "loss": 0.0001,
      "step": 900
    },
    {
      "epoch": 7.2,
      "eval_loss": 8.86739362613298e-05,
      "eval_runtime": 60.7862,
      "eval_samples_per_second": 2.073,
      "eval_steps_per_second": 2.073,
      "step": 900
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.00042668674723245203,
      "learning_rate": 1.176e-05,
      "loss": 0.0001,
      "step": 1000
    },
    {
      "epoch": 8.0,
      "eval_loss": 7.54967040847987e-05,
      "eval_runtime": 60.5531,
      "eval_samples_per_second": 2.081,
      "eval_steps_per_second": 2.081,
      "step": 1000
    },
    {
      "epoch": 8.8,
      "grad_norm": 0.0005093810614198446,
      "learning_rate": 1.1536e-05,
      "loss": 0.0001,
      "step": 1100
    },
    {
      "epoch": 8.8,
      "eval_loss": 6.56623815302737e-05,
      "eval_runtime": 60.7572,
      "eval_samples_per_second": 2.074,
      "eval_steps_per_second": 2.074,
      "step": 1100
    },
    {
      "epoch": 9.6,
      "grad_norm": 0.00028865510830655694,
      "learning_rate": 1.1312000000000001e-05,
      "loss": 0.0001,
      "step": 1200
    },
    {
      "epoch": 9.6,
      "eval_loss": 5.7752622524276376e-05,
      "eval_runtime": 61.9729,
      "eval_samples_per_second": 2.033,
      "eval_steps_per_second": 2.033,
      "step": 1200
    },
    {
      "epoch": 10.4,
      "grad_norm": 0.00028205872513353825,
      "learning_rate": 1.1088e-05,
      "loss": 0.0001,
      "step": 1300
    },
    {
      "epoch": 10.4,
      "eval_loss": 5.1226033974671736e-05,
      "eval_runtime": 60.9193,
      "eval_samples_per_second": 2.068,
      "eval_steps_per_second": 2.068,
      "step": 1300
    },
    {
      "epoch": 11.2,
      "grad_norm": 0.00026295348652638495,
      "learning_rate": 1.0864e-05,
      "loss": 0.0,
      "step": 1400
    },
    {
      "epoch": 11.2,
      "eval_loss": 4.576775972964242e-05,
      "eval_runtime": 61.4909,
      "eval_samples_per_second": 2.049,
      "eval_steps_per_second": 2.049,
      "step": 1400
    }
  ],
  "logging_steps": 100,
  "max_steps": 6250,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 100,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.025909547008e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}