{
  "best_metric": 0.530552089214325,
  "best_model_checkpoint": "./output/clip-finetuned-csu-p14-336-e4l57-l/checkpoint-13000",
  "epoch": 2.3949889462048635,
  "eval_steps": 500,
  "global_step": 13000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09211495946941783,
      "grad_norm": 3.0293657779693604,
      "learning_rate": 4.884856300663227e-07,
      "loss": 0.271,
      "step": 500
    },
    {
      "epoch": 0.09211495946941783,
      "eval_loss": 1.069305419921875,
      "eval_runtime": 75.1409,
      "eval_samples_per_second": 16.063,
      "eval_steps_per_second": 2.01,
      "step": 500
    },
    {
      "epoch": 0.18422991893883567,
      "grad_norm": 7.3040852546691895,
      "learning_rate": 4.769712601326456e-07,
      "loss": 0.2493,
      "step": 1000
    },
    {
      "epoch": 0.18422991893883567,
      "eval_loss": 0.9427077770233154,
      "eval_runtime": 74.8926,
      "eval_samples_per_second": 16.116,
      "eval_steps_per_second": 2.016,
      "step": 1000
    },
    {
      "epoch": 0.2763448784082535,
      "grad_norm": 0.020736657083034515,
      "learning_rate": 4.654568901989683e-07,
      "loss": 0.2348,
      "step": 1500
    },
    {
      "epoch": 0.2763448784082535,
      "eval_loss": 0.8726764917373657,
      "eval_runtime": 76.6621,
      "eval_samples_per_second": 15.744,
      "eval_steps_per_second": 1.97,
      "step": 1500
    },
    {
      "epoch": 0.36845983787767134,
      "grad_norm": 5.3738909628009424e-05,
      "learning_rate": 4.5394252026529107e-07,
      "loss": 0.1552,
      "step": 2000
    },
    {
      "epoch": 0.36845983787767134,
      "eval_loss": 0.8325753808021545,
      "eval_runtime": 76.6127,
      "eval_samples_per_second": 15.755,
      "eval_steps_per_second": 1.971,
      "step": 2000
    },
    {
      "epoch": 0.46057479734708917,
      "grad_norm": 404.510009765625,
      "learning_rate": 4.4242815033161386e-07,
      "loss": 0.1753,
      "step": 2500
    },
    {
      "epoch": 0.46057479734708917,
      "eval_loss": 0.7550467848777771,
      "eval_runtime": 76.8024,
      "eval_samples_per_second": 15.716,
      "eval_steps_per_second": 1.966,
      "step": 2500
    },
    {
      "epoch": 0.552689756816507,
      "grad_norm": 370.7579650878906,
      "learning_rate": 4.3091378039793665e-07,
      "loss": 0.1659,
      "step": 3000
    },
    {
      "epoch": 0.552689756816507,
      "eval_loss": 0.719167172908783,
      "eval_runtime": 76.6327,
      "eval_samples_per_second": 15.75,
      "eval_steps_per_second": 1.97,
      "step": 3000
    },
    {
      "epoch": 0.6448047162859248,
      "grad_norm": 24.97766876220703,
      "learning_rate": 4.193994104642594e-07,
      "loss": 0.105,
      "step": 3500
    },
    {
      "epoch": 0.6448047162859248,
      "eval_loss": 0.7117515802383423,
      "eval_runtime": 77.1286,
      "eval_samples_per_second": 15.649,
      "eval_steps_per_second": 1.958,
      "step": 3500
    },
    {
      "epoch": 0.7369196757553427,
      "grad_norm": 305.62060546875,
      "learning_rate": 4.0788504053058217e-07,
      "loss": 0.1336,
      "step": 4000
    },
    {
      "epoch": 0.7369196757553427,
      "eval_loss": 0.6952972412109375,
      "eval_runtime": 76.7253,
      "eval_samples_per_second": 15.731,
      "eval_steps_per_second": 1.968,
      "step": 4000
    },
    {
      "epoch": 0.8290346352247605,
      "grad_norm": 506.12237548828125,
      "learning_rate": 3.9637067059690496e-07,
      "loss": 0.1154,
      "step": 4500
    },
    {
      "epoch": 0.8290346352247605,
      "eval_loss": 0.6744509935379028,
      "eval_runtime": 76.9493,
      "eval_samples_per_second": 15.686,
      "eval_steps_per_second": 1.962,
      "step": 4500
    },
    {
      "epoch": 0.9211495946941783,
      "grad_norm": 8.823301322991028e-05,
      "learning_rate": 3.8485630066322774e-07,
      "loss": 0.108,
      "step": 5000
    },
    {
      "epoch": 0.9211495946941783,
      "eval_loss": 0.6559990048408508,
      "eval_runtime": 75.2391,
      "eval_samples_per_second": 16.042,
      "eval_steps_per_second": 2.007,
      "step": 5000
    },
    {
      "epoch": 1.013264554163596,
      "grad_norm": 0.0008560372516512871,
      "learning_rate": 3.733419307295504e-07,
      "loss": 0.1106,
      "step": 5500
    },
    {
      "epoch": 1.013264554163596,
      "eval_loss": 0.6367496848106384,
      "eval_runtime": 77.0031,
      "eval_samples_per_second": 15.675,
      "eval_steps_per_second": 1.961,
      "step": 5500
    },
    {
      "epoch": 1.105379513633014,
      "grad_norm": 0.04510360211133957,
      "learning_rate": 3.618275607958732e-07,
      "loss": 0.0591,
      "step": 6000
    },
    {
      "epoch": 1.105379513633014,
      "eval_loss": 0.6259325742721558,
      "eval_runtime": 75.2787,
      "eval_samples_per_second": 16.034,
      "eval_steps_per_second": 2.006,
      "step": 6000
    },
    {
      "epoch": 1.1974944731024317,
      "grad_norm": 0.0005158882704563439,
      "learning_rate": 3.50313190862196e-07,
      "loss": 0.0745,
      "step": 6500
    },
    {
      "epoch": 1.1974944731024317,
      "eval_loss": 0.6209793090820312,
      "eval_runtime": 77.1989,
      "eval_samples_per_second": 15.635,
      "eval_steps_per_second": 1.956,
      "step": 6500
    },
    {
      "epoch": 1.2896094325718497,
      "grad_norm": 3.5883346072296263e-07,
      "learning_rate": 3.387988209285188e-07,
      "loss": 0.0502,
      "step": 7000
    },
    {
      "epoch": 1.2896094325718497,
      "eval_loss": 0.6133300065994263,
      "eval_runtime": 75.1116,
      "eval_samples_per_second": 16.069,
      "eval_steps_per_second": 2.01,
      "step": 7000
    },
    {
      "epoch": 1.3817243920412676,
      "grad_norm": 21.66597557067871,
      "learning_rate": 3.272844509948415e-07,
      "loss": 0.079,
      "step": 7500
    },
    {
      "epoch": 1.3817243920412676,
      "eval_loss": 0.6006803512573242,
      "eval_runtime": 75.0086,
      "eval_samples_per_second": 16.091,
      "eval_steps_per_second": 2.013,
      "step": 7500
    },
    {
      "epoch": 1.4738393515106853,
      "grad_norm": 7.992870814632624e-05,
      "learning_rate": 3.157700810611643e-07,
      "loss": 0.0776,
      "step": 8000
    },
    {
      "epoch": 1.4738393515106853,
      "eval_loss": 0.5866427421569824,
      "eval_runtime": 75.6702,
      "eval_samples_per_second": 15.951,
      "eval_steps_per_second": 1.996,
      "step": 8000
    },
    {
      "epoch": 1.565954310980103,
      "grad_norm": 0.0010381735628470778,
      "learning_rate": 3.042557111274871e-07,
      "loss": 0.0492,
      "step": 8500
    },
    {
      "epoch": 1.565954310980103,
      "eval_loss": 0.5679212212562561,
      "eval_runtime": 75.0643,
      "eval_samples_per_second": 16.08,
      "eval_steps_per_second": 2.012,
      "step": 8500
    },
    {
      "epoch": 1.658069270449521,
      "grad_norm": 1.733377956725235e-07,
      "learning_rate": 2.927413411938099e-07,
      "loss": 0.0794,
      "step": 9000
    },
    {
      "epoch": 1.658069270449521,
      "eval_loss": 0.5761673450469971,
      "eval_runtime": 76.2738,
      "eval_samples_per_second": 15.825,
      "eval_steps_per_second": 1.98,
      "step": 9000
    },
    {
      "epoch": 1.750184229918939,
      "grad_norm": 3.1063287359023306e-11,
      "learning_rate": 2.812269712601326e-07,
      "loss": 0.0677,
      "step": 9500
    },
    {
      "epoch": 1.750184229918939,
      "eval_loss": 0.5565799474716187,
      "eval_runtime": 75.6897,
      "eval_samples_per_second": 15.947,
      "eval_steps_per_second": 1.995,
      "step": 9500
    },
    {
      "epoch": 1.8422991893883567,
      "grad_norm": 0.00010032750287791714,
      "learning_rate": 2.697126013264554e-07,
      "loss": 0.0566,
      "step": 10000
    },
    {
      "epoch": 1.8422991893883567,
      "eval_loss": 0.5482199192047119,
      "eval_runtime": 75.0029,
      "eval_samples_per_second": 16.093,
      "eval_steps_per_second": 2.013,
      "step": 10000
    },
    {
      "epoch": 1.9344141488577744,
      "grad_norm": 5.553330595375883e-08,
      "learning_rate": 2.581982313927782e-07,
      "loss": 0.0828,
      "step": 10500
    },
    {
      "epoch": 1.9344141488577744,
      "eval_loss": 0.5500391125679016,
      "eval_runtime": 76.2612,
      "eval_samples_per_second": 15.827,
      "eval_steps_per_second": 1.98,
      "step": 10500
    },
    {
      "epoch": 2.026529108327192,
      "grad_norm": 1.5537947319899104e-06,
      "learning_rate": 2.466838614591009e-07,
      "loss": 0.0573,
      "step": 11000
    },
    {
      "epoch": 2.026529108327192,
      "eval_loss": 0.5341849327087402,
      "eval_runtime": 75.6131,
      "eval_samples_per_second": 15.963,
      "eval_steps_per_second": 1.997,
      "step": 11000
    },
    {
      "epoch": 2.1186440677966103,
      "grad_norm": 36.10622024536133,
      "learning_rate": 2.3516949152542374e-07,
      "loss": 0.0401,
      "step": 11500
    },
    {
      "epoch": 2.1186440677966103,
      "eval_loss": 0.5351412296295166,
      "eval_runtime": 74.8873,
      "eval_samples_per_second": 16.118,
      "eval_steps_per_second": 2.016,
      "step": 11500
    },
    {
      "epoch": 2.210759027266028,
      "grad_norm": 0.49580878019332886,
      "learning_rate": 2.236551215917465e-07,
      "loss": 0.0152,
      "step": 12000
    },
    {
      "epoch": 2.210759027266028,
      "eval_loss": 0.5349487662315369,
      "eval_runtime": 76.3109,
      "eval_samples_per_second": 15.817,
      "eval_steps_per_second": 1.979,
      "step": 12000
    },
    {
      "epoch": 2.3028739867354457,
      "grad_norm": 1.7469781637191772,
      "learning_rate": 2.1214075165806928e-07,
      "loss": 0.0638,
      "step": 12500
    },
    {
      "epoch": 2.3028739867354457,
      "eval_loss": 0.531768798828125,
      "eval_runtime": 75.5712,
      "eval_samples_per_second": 15.972,
      "eval_steps_per_second": 1.998,
      "step": 12500
    },
    {
      "epoch": 2.3949889462048635,
      "grad_norm": 0.0006845183088444173,
      "learning_rate": 2.0062638172439202e-07,
      "loss": 0.0488,
      "step": 13000
    },
    {
      "epoch": 2.3949889462048635,
      "eval_loss": 0.530552089214325,
      "eval_runtime": 75.6479,
      "eval_samples_per_second": 15.955,
      "eval_steps_per_second": 1.996,
      "step": 13000
    }
  ],
  "logging_steps": 500,
  "max_steps": 21712,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4677578083082520.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}