{
  "best_metric": 0.534845769405365,
  "best_model_checkpoint": "./vit-epsilon-1e-7/checkpoint-4173",
  "epoch": 23.0,
  "eval_steps": 500,
  "global_step": 7383,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 26.270666122436523,
      "learning_rate": 1.8349682631275247e-05,
      "loss": 1.765,
      "step": 321
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.691747572815534,
      "eval_f1": 0.6531054572377663,
      "eval_loss": 0.9570140838623047,
      "eval_precision": 0.6486719468870186,
      "eval_recall": 0.691747572815534,
      "eval_runtime": 22.6395,
      "eval_samples_per_second": 127.388,
      "eval_steps_per_second": 15.946,
      "step": 321
    },
    {
      "epoch": 2.0,
      "grad_norm": 23.715213775634766,
      "learning_rate": 3.6872475476053084e-05,
      "loss": 1.1815,
      "step": 642
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.7558945908460472,
      "eval_f1": 0.7314126926737505,
      "eval_loss": 0.6948704123497009,
      "eval_precision": 0.7314749711745714,
      "eval_recall": 0.7558945908460472,
      "eval_runtime": 22.8506,
      "eval_samples_per_second": 126.211,
      "eval_steps_per_second": 15.798,
      "step": 642
    },
    {
      "epoch": 3.0,
      "grad_norm": 5.459173202514648,
      "learning_rate": 5.539526832083093e-05,
      "loss": 1.0605,
      "step": 963
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.771497919556172,
      "eval_f1": 0.752980330371167,
      "eval_loss": 0.6212790012359619,
      "eval_precision": 0.7648636119210427,
      "eval_recall": 0.771497919556172,
      "eval_runtime": 22.9013,
      "eval_samples_per_second": 125.932,
      "eval_steps_per_second": 15.763,
      "step": 963
    },
    {
      "epoch": 4.0,
      "grad_norm": 23.622180938720703,
      "learning_rate": 7.391806116560877e-05,
      "loss": 1.0147,
      "step": 1284
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7732316227461858,
      "eval_f1": 0.773441625340043,
      "eval_loss": 0.5767605304718018,
      "eval_precision": 0.7806580682757648,
      "eval_recall": 0.7732316227461858,
      "eval_runtime": 22.9855,
      "eval_samples_per_second": 125.47,
      "eval_steps_per_second": 15.706,
      "step": 1284
    },
    {
      "epoch": 5.0,
      "grad_norm": 9.584253311157227,
      "learning_rate": 9.244085401038661e-05,
      "loss": 0.93,
      "step": 1605
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.7586685159500693,
      "eval_f1": 0.7661547242155936,
      "eval_loss": 0.657228946685791,
      "eval_precision": 0.7939813909551887,
      "eval_recall": 0.7586685159500693,
      "eval_runtime": 22.8929,
      "eval_samples_per_second": 125.978,
      "eval_steps_per_second": 15.769,
      "step": 1605
    },
    {
      "epoch": 6.0,
      "grad_norm": 13.967341423034668,
      "learning_rate": 9.984553168214161e-05,
      "loss": 0.9793,
      "step": 1926
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.7701109570041609,
      "eval_f1": 0.7742181002637638,
      "eval_loss": 0.61651611328125,
      "eval_precision": 0.7940357585259091,
      "eval_recall": 0.7701109570041609,
      "eval_runtime": 22.9308,
      "eval_samples_per_second": 125.77,
      "eval_steps_per_second": 15.743,
      "step": 1926
    },
    {
      "epoch": 7.0,
      "grad_norm": 12.87781810760498,
      "learning_rate": 9.889061131437471e-05,
      "loss": 0.8662,
      "step": 2247
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.723994452149792,
      "eval_f1": 0.745594196112459,
      "eval_loss": 0.6534591913223267,
      "eval_precision": 0.8097504651538117,
      "eval_recall": 0.723994452149792,
      "eval_runtime": 24.6973,
      "eval_samples_per_second": 116.774,
      "eval_steps_per_second": 14.617,
      "step": 2247
    },
    {
      "epoch": 8.0,
      "grad_norm": 3.272010326385498,
      "learning_rate": 9.707265436104638e-05,
      "loss": 0.7767,
      "step": 2568
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.7565880721220527,
      "eval_f1": 0.773329540133459,
      "eval_loss": 0.5812659859657288,
      "eval_precision": 0.8124371295597024,
      "eval_recall": 0.7565880721220527,
      "eval_runtime": 24.6374,
      "eval_samples_per_second": 117.058,
      "eval_steps_per_second": 14.653,
      "step": 2568
    },
    {
      "epoch": 9.0,
      "grad_norm": 12.090767860412598,
      "learning_rate": 9.44253127296151e-05,
      "loss": 0.7572,
      "step": 2889
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.8144937586685159,
      "eval_f1": 0.811409191547864,
      "eval_loss": 0.5385076403617859,
      "eval_precision": 0.8131138362731425,
      "eval_recall": 0.8144937586685159,
      "eval_runtime": 23.005,
      "eval_samples_per_second": 125.364,
      "eval_steps_per_second": 15.692,
      "step": 2889
    },
    {
      "epoch": 10.0,
      "grad_norm": 9.961050987243652,
      "learning_rate": 9.099523058358976e-05,
      "loss": 0.7003,
      "step": 3210
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.8027045769764216,
      "eval_f1": 0.809293314931955,
      "eval_loss": 0.5354512929916382,
      "eval_precision": 0.8276170557646749,
      "eval_recall": 0.8027045769764216,
      "eval_runtime": 23.6688,
      "eval_samples_per_second": 121.848,
      "eval_steps_per_second": 15.252,
      "step": 3210
    },
    {
      "epoch": 11.0,
      "grad_norm": 25.105077743530273,
      "learning_rate": 8.684284338417735e-05,
      "loss": 0.6316,
      "step": 3531
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.765256588072122,
      "eval_f1": 0.7816439176174772,
      "eval_loss": 0.6284907460212708,
      "eval_precision": 0.8322254365589742,
      "eval_recall": 0.765256588072122,
      "eval_runtime": 23.5967,
      "eval_samples_per_second": 122.22,
      "eval_steps_per_second": 15.299,
      "step": 3531
    },
    {
      "epoch": 12.0,
      "grad_norm": 10.844632148742676,
      "learning_rate": 8.204131306302357e-05,
      "loss": 0.5723,
      "step": 3852
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.8016643550624133,
      "eval_f1": 0.8104537685542633,
      "eval_loss": 0.5774895548820496,
      "eval_precision": 0.8279128953158427,
      "eval_recall": 0.8016643550624133,
      "eval_runtime": 23.4985,
      "eval_samples_per_second": 122.731,
      "eval_steps_per_second": 15.363,
      "step": 3852
    },
    {
      "epoch": 13.0,
      "grad_norm": 11.590338706970215,
      "learning_rate": 7.667523896413962e-05,
      "loss": 0.4717,
      "step": 4173
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.8349514563106796,
      "eval_f1": 0.8349682338681513,
      "eval_loss": 0.534845769405365,
      "eval_precision": 0.8390873801645569,
      "eval_recall": 0.8349514563106796,
      "eval_runtime": 23.9213,
      "eval_samples_per_second": 120.562,
      "eval_steps_per_second": 15.091,
      "step": 4173
    },
    {
      "epoch": 14.0,
      "grad_norm": 3.6801199913024902,
      "learning_rate": 7.083916726724684e-05,
      "loss": 0.4472,
      "step": 4494
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.8238557558945908,
      "eval_f1": 0.8298970188071358,
      "eval_loss": 0.5469394326210022,
      "eval_precision": 0.844241279806222,
      "eval_recall": 0.8238557558945908,
      "eval_runtime": 23.6076,
      "eval_samples_per_second": 122.164,
      "eval_steps_per_second": 15.292,
      "step": 4494
    },
    {
      "epoch": 15.0,
      "grad_norm": 2.446948528289795,
      "learning_rate": 6.46557084486047e-05,
      "loss": 0.3764,
      "step": 4815
    },
    {
      "epoch": 15.0,
      "eval_accuracy": 0.8290568654646324,
      "eval_f1": 0.8355171455268711,
      "eval_loss": 0.5737930536270142,
      "eval_precision": 0.8501341497772392,
      "eval_recall": 0.8290568654646324,
      "eval_runtime": 23.7441,
      "eval_samples_per_second": 121.462,
      "eval_steps_per_second": 15.204,
      "step": 4815
    },
    {
      "epoch": 16.0,
      "grad_norm": 6.043246269226074,
      "learning_rate": 5.8195220793532045e-05,
      "loss": 0.3346,
      "step": 5136
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.8436199722607489,
      "eval_f1": 0.8461452975648701,
      "eval_loss": 0.5367853045463562,
      "eval_precision": 0.851244402940535,
      "eval_recall": 0.8436199722607489,
      "eval_runtime": 23.9285,
      "eval_samples_per_second": 120.526,
      "eval_steps_per_second": 15.087,
      "step": 5136
    },
    {
      "epoch": 17.0,
      "grad_norm": 15.190231323242188,
      "learning_rate": 5.15903395270923e-05,
      "loss": 0.2826,
      "step": 5457
    },
    {
      "epoch": 17.0,
      "eval_accuracy": 0.8474341192787794,
      "eval_f1": 0.8467916215502757,
      "eval_loss": 0.5510370135307312,
      "eval_precision": 0.8489075342171026,
      "eval_recall": 0.8474341192787794,
      "eval_runtime": 23.9505,
      "eval_samples_per_second": 120.415,
      "eval_steps_per_second": 15.073,
      "step": 5457
    },
    {
      "epoch": 18.0,
      "grad_norm": 24.944795608520508,
      "learning_rate": 4.4978023449711664e-05,
      "loss": 0.2659,
      "step": 5778
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.8547156726768377,
      "eval_f1": 0.8549199048922214,
      "eval_loss": 0.546658456325531,
      "eval_precision": 0.8559700691467993,
      "eval_recall": 0.8547156726768377,
      "eval_runtime": 23.4765,
      "eval_samples_per_second": 122.846,
      "eval_steps_per_second": 15.377,
      "step": 5778
    },
    {
      "epoch": 19.0,
      "grad_norm": 0.11274675279855728,
      "learning_rate": 3.843351058509752e-05,
      "loss": 0.2545,
      "step": 6099
    },
    {
      "epoch": 19.0,
      "eval_accuracy": 0.8432732316227461,
      "eval_f1": 0.8487015901360936,
      "eval_loss": 0.6155611872673035,
      "eval_precision": 0.8617407571979913,
      "eval_recall": 0.8432732316227461,
      "eval_runtime": 22.4381,
      "eval_samples_per_second": 128.532,
      "eval_steps_per_second": 16.089,
      "step": 6099
    },
    {
      "epoch": 20.0,
      "grad_norm": 0.5206512808799744,
      "learning_rate": 3.209279054309716e-05,
      "loss": 0.2123,
      "step": 6420
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.8429264909847434,
      "eval_f1": 0.8426622560864744,
      "eval_loss": 0.6871052384376526,
      "eval_precision": 0.8498556529498513,
      "eval_recall": 0.8429264909847434,
      "eval_runtime": 22.4536,
      "eval_samples_per_second": 128.443,
      "eval_steps_per_second": 16.078,
      "step": 6420
    },
    {
      "epoch": 21.0,
      "grad_norm": 6.716039657592773,
      "learning_rate": 2.6067582030088143e-05,
      "loss": 0.1655,
      "step": 6741
    },
    {
      "epoch": 21.0,
      "eval_accuracy": 0.8609570041608876,
      "eval_f1": 0.856691150022433,
      "eval_loss": 0.6139292120933533,
      "eval_precision": 0.8551833143301755,
      "eval_recall": 0.8609570041608876,
      "eval_runtime": 22.975,
      "eval_samples_per_second": 125.528,
      "eval_steps_per_second": 15.713,
      "step": 6741
    },
    {
      "epoch": 22.0,
      "grad_norm": 108.25263977050781,
      "learning_rate": 2.0464044677195966e-05,
      "loss": 0.1246,
      "step": 7062
    },
    {
      "epoch": 22.0,
      "eval_accuracy": 0.8675450762829403,
      "eval_f1": 0.8676542597333672,
      "eval_loss": 0.6129117012023926,
      "eval_precision": 0.8680620732919173,
      "eval_recall": 0.8675450762829403,
      "eval_runtime": 22.6536,
      "eval_samples_per_second": 127.309,
      "eval_steps_per_second": 15.936,
      "step": 7062
    },
    {
      "epoch": 23.0,
      "grad_norm": 2.2161059379577637,
      "learning_rate": 1.538090858763548e-05,
      "loss": 0.1394,
      "step": 7383
    },
    {
      "epoch": 23.0,
      "eval_accuracy": 0.8713592233009708,
      "eval_f1": 0.867730828379127,
      "eval_loss": 0.6522772908210754,
      "eval_precision": 0.8674975891372388,
      "eval_recall": 0.8713592233009708,
      "eval_runtime": 22.7054,
      "eval_samples_per_second": 127.018,
      "eval_steps_per_second": 15.899,
      "step": 7383
    },
    {
      "epoch": 23.0,
      "step": 7383,
      "total_flos": 9.140124804986585e+18,
      "train_loss": 0.6221655089083716,
      "train_runtime": 2787.435,
      "train_samples_per_second": 183.968,
      "train_steps_per_second": 11.516
    }
  ],
  "logging_steps": 500,
  "max_steps": 32100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 500,
  "total_flos": 9.140124804986585e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}