{
  "best_metric": 0.9142857142857143,
  "best_model_checkpoint": "videomae-base-finetuned-ucf101-subset/checkpoint-148",
  "epoch": 3.22972972972973,
  "eval_steps": 500,
  "global_step": 148,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "grad_norm": 5.877252101898193,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 2.3334,
      "step": 10
    },
    {
      "epoch": 0.14,
      "grad_norm": 6.845406532287598,
      "learning_rate": 4.81203007518797e-05,
      "loss": 2.2303,
      "step": 20
    },
    {
      "epoch": 0.2,
      "grad_norm": 8.416325569152832,
      "learning_rate": 4.43609022556391e-05,
      "loss": 2.1155,
      "step": 30
    },
    {
      "epoch": 0.26,
      "eval_accuracy": 0.6,
      "eval_loss": 1.7389822006225586,
      "eval_runtime": 7.8855,
      "eval_samples_per_second": 8.877,
      "eval_steps_per_second": 1.141,
      "step": 38
    },
    {
      "epoch": 1.01,
      "grad_norm": 10.510369300842285,
      "learning_rate": 4.0601503759398494e-05,
      "loss": 1.9057,
      "step": 40
    },
    {
      "epoch": 1.08,
      "grad_norm": 12.991025924682617,
      "learning_rate": 3.6842105263157895e-05,
      "loss": 1.4769,
      "step": 50
    },
    {
      "epoch": 1.15,
      "grad_norm": 10.709514617919922,
      "learning_rate": 3.3082706766917295e-05,
      "loss": 1.1806,
      "step": 60
    },
    {
      "epoch": 1.22,
      "grad_norm": 6.943473815917969,
      "learning_rate": 2.9323308270676693e-05,
      "loss": 0.8482,
      "step": 70
    },
    {
      "epoch": 1.26,
      "eval_accuracy": 0.6857142857142857,
      "eval_loss": 0.954380452632904,
      "eval_runtime": 7.9006,
      "eval_samples_per_second": 8.86,
      "eval_steps_per_second": 1.139,
      "step": 76
    },
    {
      "epoch": 2.03,
      "grad_norm": 4.553641319274902,
      "learning_rate": 2.556390977443609e-05,
      "loss": 0.8814,
      "step": 80
    },
    {
      "epoch": 2.09,
      "grad_norm": 6.816486835479736,
      "learning_rate": 2.1804511278195487e-05,
      "loss": 0.6541,
      "step": 90
    },
    {
      "epoch": 2.16,
      "grad_norm": 14.017226219177246,
      "learning_rate": 1.8045112781954888e-05,
      "loss": 0.5383,
      "step": 100
    },
    {
      "epoch": 2.23,
      "grad_norm": 6.129795074462891,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 0.4371,
      "step": 110
    },
    {
      "epoch": 2.26,
      "eval_accuracy": 0.8857142857142857,
      "eval_loss": 0.4790053367614746,
      "eval_runtime": 7.8047,
      "eval_samples_per_second": 8.969,
      "eval_steps_per_second": 1.153,
      "step": 114
    },
    {
      "epoch": 3.04,
      "grad_norm": 4.292072772979736,
      "learning_rate": 1.0526315789473684e-05,
      "loss": 0.3274,
      "step": 120
    },
    {
      "epoch": 3.11,
      "grad_norm": 3.278257369995117,
      "learning_rate": 6.766917293233083e-06,
      "loss": 0.3116,
      "step": 130
    },
    {
      "epoch": 3.18,
      "grad_norm": 6.118241310119629,
      "learning_rate": 3.007518796992481e-06,
      "loss": 0.2803,
      "step": 140
    },
    {
      "epoch": 3.23,
      "eval_accuracy": 0.9142857142857143,
      "eval_loss": 0.3659238815307617,
      "eval_runtime": 7.3677,
      "eval_samples_per_second": 9.501,
      "eval_steps_per_second": 1.222,
      "step": 148
    },
    {
      "epoch": 3.23,
      "step": 148,
      "total_flos": 1.460491890402263e+18,
      "train_loss": 1.0632461502745345,
      "train_runtime": 248.2758,
      "train_samples_per_second": 4.769,
      "train_steps_per_second": 0.596
    },
    {
      "epoch": 3.23,
      "eval_accuracy": 0.8774193548387097,
      "eval_loss": 0.45682066679000854,
      "eval_runtime": 16.891,
      "eval_samples_per_second": 9.176,
      "eval_steps_per_second": 1.184,
      "step": 148
    },
    {
      "epoch": 3.23,
      "eval_accuracy": 0.8774193548387097,
      "eval_loss": 0.45682066679000854,
      "eval_runtime": 15.7238,
      "eval_samples_per_second": 9.858,
      "eval_steps_per_second": 1.272,
      "step": 148
    }
  ],
  "logging_steps": 10,
  "max_steps": 148,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 500,
  "total_flos": 1.460491890402263e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}