{
  "best_metric": 0.5308398008346558,
  "best_model_checkpoint": "./output/clip-finetuned-csu-p14-336-e3l57-l/checkpoint-16000",
  "epoch": 2.9476787030213707,
  "eval_steps": 500,
  "global_step": 16000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09211495946941783,
      "grad_norm": 2.6780805587768555,
      "learning_rate": 4.84647506755097e-07,
      "loss": 0.2705,
      "step": 500
    },
    {
      "epoch": 0.09211495946941783,
      "eval_loss": 1.0680681467056274,
      "eval_runtime": 74.5723,
      "eval_samples_per_second": 16.186,
      "eval_steps_per_second": 2.025,
      "step": 500
    },
    {
      "epoch": 0.18422991893883567,
      "grad_norm": 8.301130294799805,
      "learning_rate": 4.69295013510194e-07,
      "loss": 0.2545,
      "step": 1000
    },
    {
      "epoch": 0.18422991893883567,
      "eval_loss": 0.9443553686141968,
      "eval_runtime": 74.9643,
      "eval_samples_per_second": 16.101,
      "eval_steps_per_second": 2.014,
      "step": 1000
    },
    {
      "epoch": 0.2763448784082535,
      "grad_norm": 0.018898434937000275,
      "learning_rate": 4.5394252026529107e-07,
      "loss": 0.234,
      "step": 1500
    },
    {
      "epoch": 0.2763448784082535,
      "eval_loss": 0.876915693283081,
      "eval_runtime": 75.6401,
      "eval_samples_per_second": 15.957,
      "eval_steps_per_second": 1.996,
      "step": 1500
    },
    {
      "epoch": 0.36845983787767134,
      "grad_norm": 0.00010984255641233176,
      "learning_rate": 4.385900270203881e-07,
      "loss": 0.1539,
      "step": 2000
    },
    {
      "epoch": 0.36845983787767134,
      "eval_loss": 0.8415330648422241,
      "eval_runtime": 76.9959,
      "eval_samples_per_second": 15.676,
      "eval_steps_per_second": 1.961,
      "step": 2000
    },
    {
      "epoch": 0.46057479734708917,
      "grad_norm": 395.890625,
      "learning_rate": 4.232375337754851e-07,
      "loss": 0.1766,
      "step": 2500
    },
    {
      "epoch": 0.46057479734708917,
      "eval_loss": 0.7660450339317322,
      "eval_runtime": 76.733,
      "eval_samples_per_second": 15.73,
      "eval_steps_per_second": 1.968,
      "step": 2500
    },
    {
      "epoch": 0.552689756816507,
      "grad_norm": 363.6924743652344,
      "learning_rate": 4.0788504053058217e-07,
      "loss": 0.1679,
      "step": 3000
    },
    {
      "epoch": 0.552689756816507,
      "eval_loss": 0.7269155383110046,
      "eval_runtime": 76.7928,
      "eval_samples_per_second": 15.718,
      "eval_steps_per_second": 1.966,
      "step": 3000
    },
    {
      "epoch": 0.6448047162859248,
      "grad_norm": 28.914026260375977,
      "learning_rate": 3.925325472856792e-07,
      "loss": 0.1104,
      "step": 3500
    },
    {
      "epoch": 0.6448047162859248,
      "eval_loss": 0.7097663283348083,
      "eval_runtime": 76.6897,
      "eval_samples_per_second": 15.739,
      "eval_steps_per_second": 1.969,
      "step": 3500
    },
    {
      "epoch": 0.7369196757553427,
      "grad_norm": 116.85651397705078,
      "learning_rate": 3.771800540407762e-07,
      "loss": 0.1367,
      "step": 4000
    },
    {
      "epoch": 0.7369196757553427,
      "eval_loss": 0.6968969106674194,
      "eval_runtime": 77.1872,
      "eval_samples_per_second": 15.637,
      "eval_steps_per_second": 1.956,
      "step": 4000
    },
    {
      "epoch": 0.8290346352247605,
      "grad_norm": 391.0343322753906,
      "learning_rate": 3.618275607958732e-07,
      "loss": 0.1129,
      "step": 4500
    },
    {
      "epoch": 0.8290346352247605,
      "eval_loss": 0.6776800751686096,
      "eval_runtime": 75.0351,
      "eval_samples_per_second": 16.086,
      "eval_steps_per_second": 2.012,
      "step": 4500
    },
    {
      "epoch": 0.9211495946941783,
      "grad_norm": 2.0514346033451147e-05,
      "learning_rate": 3.464750675509703e-07,
      "loss": 0.1125,
      "step": 5000
    },
    {
      "epoch": 0.9211495946941783,
      "eval_loss": 0.6658498048782349,
      "eval_runtime": 77.111,
      "eval_samples_per_second": 15.653,
      "eval_steps_per_second": 1.958,
      "step": 5000
    },
    {
      "epoch": 1.013264554163596,
      "grad_norm": 0.000568103976547718,
      "learning_rate": 3.311225743060673e-07,
      "loss": 0.1071,
      "step": 5500
    },
    {
      "epoch": 1.013264554163596,
      "eval_loss": 0.6465296149253845,
      "eval_runtime": 75.2951,
      "eval_samples_per_second": 16.03,
      "eval_steps_per_second": 2.005,
      "step": 5500
    },
    {
      "epoch": 1.105379513633014,
      "grad_norm": 0.02698369137942791,
      "learning_rate": 3.157700810611643e-07,
      "loss": 0.0553,
      "step": 6000
    },
    {
      "epoch": 1.105379513633014,
      "eval_loss": 0.6357071995735168,
      "eval_runtime": 76.6726,
      "eval_samples_per_second": 15.742,
      "eval_steps_per_second": 1.969,
      "step": 6000
    },
    {
      "epoch": 1.1974944731024317,
      "grad_norm": 0.0005627564387395978,
      "learning_rate": 3.0041758781626137e-07,
      "loss": 0.0729,
      "step": 6500
    },
    {
      "epoch": 1.1974944731024317,
      "eval_loss": 0.628384530544281,
      "eval_runtime": 75.6565,
      "eval_samples_per_second": 15.954,
      "eval_steps_per_second": 1.996,
      "step": 6500
    },
    {
      "epoch": 1.2896094325718497,
      "grad_norm": 4.7338733111246256e-07,
      "learning_rate": 2.8506509457135833e-07,
      "loss": 0.0476,
      "step": 7000
    },
    {
      "epoch": 1.2896094325718497,
      "eval_loss": 0.6259528994560242,
      "eval_runtime": 75.2674,
      "eval_samples_per_second": 16.036,
      "eval_steps_per_second": 2.006,
      "step": 7000
    },
    {
      "epoch": 1.3817243920412676,
      "grad_norm": 28.43704605102539,
      "learning_rate": 2.697126013264554e-07,
      "loss": 0.0756,
      "step": 7500
    },
    {
      "epoch": 1.3817243920412676,
      "eval_loss": 0.6102285981178284,
      "eval_runtime": 76.1062,
      "eval_samples_per_second": 15.859,
      "eval_steps_per_second": 1.984,
      "step": 7500
    },
    {
      "epoch": 1.4738393515106853,
      "grad_norm": 0.0002958567056339234,
      "learning_rate": 2.5436010808155247e-07,
      "loss": 0.0797,
      "step": 8000
    },
    {
      "epoch": 1.4738393515106853,
      "eval_loss": 0.6023213267326355,
      "eval_runtime": 75.1644,
      "eval_samples_per_second": 16.058,
      "eval_steps_per_second": 2.009,
      "step": 8000
    },
    {
      "epoch": 1.565954310980103,
      "grad_norm": 0.0021068258211016655,
      "learning_rate": 2.390076148366495e-07,
      "loss": 0.0536,
      "step": 8500
    },
    {
      "epoch": 1.565954310980103,
      "eval_loss": 0.5879408121109009,
      "eval_runtime": 76.0196,
      "eval_samples_per_second": 15.877,
      "eval_steps_per_second": 1.986,
      "step": 8500
    },
    {
      "epoch": 1.658069270449521,
      "grad_norm": 4.1152270569000393e-07,
      "learning_rate": 2.236551215917465e-07,
      "loss": 0.0784,
      "step": 9000
    },
    {
      "epoch": 1.658069270449521,
      "eval_loss": 0.5879626274108887,
      "eval_runtime": 75.6453,
      "eval_samples_per_second": 15.956,
      "eval_steps_per_second": 1.996,
      "step": 9000
    },
    {
      "epoch": 1.750184229918939,
      "grad_norm": 2.4213447913368213e-10,
      "learning_rate": 2.083026283468435e-07,
      "loss": 0.0703,
      "step": 9500
    },
    {
      "epoch": 1.750184229918939,
      "eval_loss": 0.5665237307548523,
      "eval_runtime": 74.9732,
      "eval_samples_per_second": 16.099,
      "eval_steps_per_second": 2.014,
      "step": 9500
    },
    {
      "epoch": 1.8422991893883567,
      "grad_norm": 5.074160799267702e-05,
      "learning_rate": 1.9295013510194055e-07,
      "loss": 0.0551,
      "step": 10000
    },
    {
      "epoch": 1.8422991893883567,
      "eval_loss": 0.5670996308326721,
      "eval_runtime": 75.902,
      "eval_samples_per_second": 15.902,
      "eval_steps_per_second": 1.989,
      "step": 10000
    },
    {
      "epoch": 1.9344141488577744,
      "grad_norm": 6.560769350016926e-08,
      "learning_rate": 1.7759764185703757e-07,
      "loss": 0.0852,
      "step": 10500
    },
    {
      "epoch": 1.9344141488577744,
      "eval_loss": 0.5694836974143982,
      "eval_runtime": 76.048,
      "eval_samples_per_second": 15.872,
      "eval_steps_per_second": 1.986,
      "step": 10500
    },
    {
      "epoch": 2.026529108327192,
      "grad_norm": 4.498223916016286e-06,
      "learning_rate": 1.6224514861213458e-07,
      "loss": 0.0546,
      "step": 11000
    },
    {
      "epoch": 2.026529108327192,
      "eval_loss": 0.555758535861969,
      "eval_runtime": 74.9986,
      "eval_samples_per_second": 16.094,
      "eval_steps_per_second": 2.013,
      "step": 11000
    },
    {
      "epoch": 2.1186440677966103,
      "grad_norm": 273.0765380859375,
      "learning_rate": 1.4689265536723165e-07,
      "loss": 0.0369,
      "step": 11500
    },
    {
      "epoch": 2.1186440677966103,
      "eval_loss": 0.5532959699630737,
      "eval_runtime": 74.8225,
      "eval_samples_per_second": 16.132,
      "eval_steps_per_second": 2.018,
      "step": 11500
    },
    {
      "epoch": 2.210759027266028,
      "grad_norm": 0.9755889773368835,
      "learning_rate": 1.3154016212232866e-07,
      "loss": 0.0205,
      "step": 12000
    },
    {
      "epoch": 2.210759027266028,
      "eval_loss": 0.5498348474502563,
      "eval_runtime": 76.4506,
      "eval_samples_per_second": 15.788,
      "eval_steps_per_second": 1.975,
      "step": 12000
    },
    {
      "epoch": 2.3028739867354457,
      "grad_norm": 2.7215068340301514,
      "learning_rate": 1.1618766887742569e-07,
      "loss": 0.0673,
      "step": 12500
    },
    {
      "epoch": 2.3028739867354457,
      "eval_loss": 0.5445749759674072,
      "eval_runtime": 74.9944,
      "eval_samples_per_second": 16.095,
      "eval_steps_per_second": 2.013,
      "step": 12500
    },
    {
      "epoch": 2.3949889462048635,
      "grad_norm": 0.0005020984681323171,
      "learning_rate": 1.0083517563252273e-07,
      "loss": 0.0509,
      "step": 13000
    },
    {
      "epoch": 2.3949889462048635,
      "eval_loss": 0.5434286594390869,
      "eval_runtime": 75.0908,
      "eval_samples_per_second": 16.074,
      "eval_steps_per_second": 2.011,
      "step": 13000
    },
    {
      "epoch": 2.4871039056742816,
      "grad_norm": 0.00025747253675945103,
      "learning_rate": 8.548268238761974e-08,
      "loss": 0.0447,
      "step": 13500
    },
    {
      "epoch": 2.4871039056742816,
      "eval_loss": 0.540378749370575,
      "eval_runtime": 76.2972,
      "eval_samples_per_second": 15.82,
      "eval_steps_per_second": 1.979,
      "step": 13500
    },
    {
      "epoch": 2.5792188651436994,
      "grad_norm": 0.01660293899476528,
      "learning_rate": 7.013018914271677e-08,
      "loss": 0.0246,
      "step": 14000
    },
    {
      "epoch": 2.5792188651436994,
      "eval_loss": 0.5360240936279297,
      "eval_runtime": 74.959,
      "eval_samples_per_second": 16.102,
      "eval_steps_per_second": 2.014,
      "step": 14000
    },
    {
      "epoch": 2.671333824613117,
      "grad_norm": 4.063962055766979e-09,
      "learning_rate": 5.47776958978138e-08,
      "loss": 0.0395,
      "step": 14500
    },
    {
      "epoch": 2.671333824613117,
      "eval_loss": 0.533521294593811,
      "eval_runtime": 75.1953,
      "eval_samples_per_second": 16.052,
      "eval_steps_per_second": 2.008,
      "step": 14500
    },
    {
      "epoch": 2.7634487840825352,
      "grad_norm": 8.483572173645371e-07,
      "learning_rate": 3.942520265291083e-08,
      "loss": 0.0436,
      "step": 15000
    },
    {
      "epoch": 2.7634487840825352,
      "eval_loss": 0.5332146286964417,
      "eval_runtime": 76.4165,
      "eval_samples_per_second": 15.795,
      "eval_steps_per_second": 1.976,
      "step": 15000
    },
    {
      "epoch": 2.855563743551953,
      "grad_norm": 5.3192995147499644e-11,
      "learning_rate": 2.407270940800786e-08,
      "loss": 0.0398,
      "step": 15500
    },
    {
      "epoch": 2.855563743551953,
      "eval_loss": 0.5320385098457336,
      "eval_runtime": 74.9122,
      "eval_samples_per_second": 16.112,
      "eval_steps_per_second": 2.016,
      "step": 15500
    },
    {
      "epoch": 2.9476787030213707,
      "grad_norm": 3.756736993789673,
      "learning_rate": 8.720216163104888e-09,
      "loss": 0.0427,
      "step": 16000
    },
    {
      "epoch": 2.9476787030213707,
      "eval_loss": 0.5308398008346558,
      "eval_runtime": 75.4523,
      "eval_samples_per_second": 15.997,
      "eval_steps_per_second": 2.001,
      "step": 16000
    }
  ],
  "logging_steps": 500,
  "max_steps": 16284,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5756853136862520.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}