{
  "best_metric": 2.0116002559661865,
  "best_model_checkpoint": "./output/clip-finetuned-csu-p14-336-e2l57-l/checkpoint-5000",
  "epoch": 1.1974944731024317,
  "eval_steps": 500,
  "global_step": 6500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09211495946941783,
      "grad_norm": 0.00766215892508626,
      "learning_rate": 4.769712601326456e-05,
      "loss": 0.7207,
      "step": 500
    },
    {
      "epoch": 0.09211495946941783,
      "eval_loss": 2.078455924987793,
      "eval_runtime": 73.0449,
      "eval_samples_per_second": 16.524,
      "eval_steps_per_second": 2.067,
      "step": 500
    },
    {
      "epoch": 0.18422991893883567,
      "grad_norm": 0.016227997839450836,
      "learning_rate": 4.539425202652911e-05,
      "loss": 0.702,
      "step": 1000
    },
    {
      "epoch": 0.18422991893883567,
      "eval_loss": 2.0786073207855225,
      "eval_runtime": 73.7466,
      "eval_samples_per_second": 16.367,
      "eval_steps_per_second": 2.048,
      "step": 1000
    },
    {
      "epoch": 0.2763448784082535,
      "grad_norm": 3.326040029525757,
      "learning_rate": 4.309137803979367e-05,
      "loss": 0.7058,
      "step": 1500
    },
    {
      "epoch": 0.2763448784082535,
      "eval_loss": 2.0656001567840576,
      "eval_runtime": 75.4832,
      "eval_samples_per_second": 15.99,
      "eval_steps_per_second": 2.0,
      "step": 1500
    },
    {
      "epoch": 0.36845983787767134,
      "grad_norm": 5.95470666885376,
      "learning_rate": 4.078850405305822e-05,
      "loss": 0.7117,
      "step": 2000
    },
    {
      "epoch": 0.36845983787767134,
      "eval_loss": 2.113447427749634,
      "eval_runtime": 75.7404,
      "eval_samples_per_second": 15.936,
      "eval_steps_per_second": 1.994,
      "step": 2000
    },
    {
      "epoch": 0.46057479734708917,
      "grad_norm": 1.3789223432540894,
      "learning_rate": 3.848563006632277e-05,
      "loss": 0.7163,
      "step": 2500
    },
    {
      "epoch": 0.46057479734708917,
      "eval_loss": 2.0306718349456787,
      "eval_runtime": 75.7822,
      "eval_samples_per_second": 15.927,
      "eval_steps_per_second": 1.993,
      "step": 2500
    },
    {
      "epoch": 0.552689756816507,
      "grad_norm": 17.15974998474121,
      "learning_rate": 3.6182756079587326e-05,
      "loss": 0.6794,
      "step": 3000
    },
    {
      "epoch": 0.552689756816507,
      "eval_loss": 2.613356590270996,
      "eval_runtime": 75.7313,
      "eval_samples_per_second": 15.938,
      "eval_steps_per_second": 1.994,
      "step": 3000
    },
    {
      "epoch": 0.6448047162859248,
      "grad_norm": 6.376380443572998,
      "learning_rate": 3.3879882092851885e-05,
      "loss": 0.6818,
      "step": 3500
    },
    {
      "epoch": 0.6448047162859248,
      "eval_loss": 2.0862319469451904,
      "eval_runtime": 75.9967,
      "eval_samples_per_second": 15.882,
      "eval_steps_per_second": 1.987,
      "step": 3500
    },
    {
      "epoch": 0.7369196757553427,
      "grad_norm": 4.5618391036987305,
      "learning_rate": 3.157700810611644e-05,
      "loss": 0.6693,
      "step": 4000
    },
    {
      "epoch": 0.7369196757553427,
      "eval_loss": 2.0294671058654785,
      "eval_runtime": 74.4695,
      "eval_samples_per_second": 16.208,
      "eval_steps_per_second": 2.028,
      "step": 4000
    },
    {
      "epoch": 0.8290346352247605,
      "grad_norm": 4.356328964233398,
      "learning_rate": 2.927413411938099e-05,
      "loss": 0.6798,
      "step": 4500
    },
    {
      "epoch": 0.8290346352247605,
      "eval_loss": 2.1398818492889404,
      "eval_runtime": 76.5616,
      "eval_samples_per_second": 15.765,
      "eval_steps_per_second": 1.972,
      "step": 4500
    },
    {
      "epoch": 0.9211495946941783,
      "grad_norm": 1.5961418151855469,
      "learning_rate": 2.6971260132645544e-05,
      "loss": 0.6915,
      "step": 5000
    },
    {
      "epoch": 0.9211495946941783,
      "eval_loss": 2.0116002559661865,
      "eval_runtime": 75.2588,
      "eval_samples_per_second": 16.038,
      "eval_steps_per_second": 2.006,
      "step": 5000
    },
    {
      "epoch": 1.013264554163596,
      "grad_norm": 0.672991931438446,
      "learning_rate": 2.4668386145910096e-05,
      "loss": 0.6623,
      "step": 5500
    },
    {
      "epoch": 1.013264554163596,
      "eval_loss": 2.1612467765808105,
      "eval_runtime": 75.2829,
      "eval_samples_per_second": 16.033,
      "eval_steps_per_second": 2.006,
      "step": 5500
    },
    {
      "epoch": 1.105379513633014,
      "grad_norm": 5.576838493347168,
      "learning_rate": 2.2365512159174652e-05,
      "loss": 0.6803,
      "step": 6000
    },
    {
      "epoch": 1.105379513633014,
      "eval_loss": 2.0493156909942627,
      "eval_runtime": 74.734,
      "eval_samples_per_second": 16.151,
      "eval_steps_per_second": 2.02,
      "step": 6000
    },
    {
      "epoch": 1.1974944731024317,
      "grad_norm": 1.1342592239379883,
      "learning_rate": 2.0062638172439205e-05,
      "loss": 0.6729,
      "step": 6500
    },
    {
      "epoch": 1.1974944731024317,
      "eval_loss": 2.051811933517456,
      "eval_runtime": 74.1516,
      "eval_samples_per_second": 16.277,
      "eval_steps_per_second": 2.036,
      "step": 6500
    }
  ],
  "logging_steps": 500,
  "max_steps": 10856,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2339148799892520.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}