{
  "best_metric": 0.49532002210617065,
  "best_model_checkpoint": "./vit-dropout-0.4/checkpoint-2889",
  "epoch": 19.0,
  "eval_steps": 500,
  "global_step": 6099,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 20.27271842956543,
      "learning_rate": 9.008498583569406e-05,
      "loss": 1.5638,
      "step": 321
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.625866851595007,
      "eval_f1": 0.6634599560471133,
      "eval_loss": 0.917059063911438,
      "eval_precision": 0.7843432532062684,
      "eval_recall": 0.625866851595007,
      "eval_runtime": 22.9894,
      "eval_samples_per_second": 125.449,
      "eval_steps_per_second": 15.703,
      "step": 321
    },
    {
      "epoch": 2.0,
      "grad_norm": 2.8246476650238037,
      "learning_rate": 9.967994652315143e-05,
      "loss": 1.1092,
      "step": 642
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.7007628294036061,
      "eval_f1": 0.7192900679194619,
      "eval_loss": 0.7738657593727112,
      "eval_precision": 0.7903482253631083,
      "eval_recall": 0.7007628294036061,
      "eval_runtime": 22.8225,
      "eval_samples_per_second": 126.367,
      "eval_steps_per_second": 15.818,
      "step": 642
    },
    {
      "epoch": 3.0,
      "grad_norm": 4.247750282287598,
      "learning_rate": 9.856371224544644e-05,
      "loss": 0.9892,
      "step": 963
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.7780859916782247,
      "eval_f1": 0.778423467149636,
      "eval_loss": 0.6145934462547302,
      "eval_precision": 0.790423999944827,
      "eval_recall": 0.7780859916782247,
      "eval_runtime": 22.6731,
      "eval_samples_per_second": 127.199,
      "eval_steps_per_second": 15.922,
      "step": 963
    },
    {
      "epoch": 4.0,
      "grad_norm": 13.25033950805664,
      "learning_rate": 9.666449571439611e-05,
      "loss": 0.8735,
      "step": 1284
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7454923717059639,
      "eval_f1": 0.7615557397273891,
      "eval_loss": 0.6262115836143494,
      "eval_precision": 0.807493921529863,
      "eval_recall": 0.7454923717059639,
      "eval_runtime": 22.9609,
      "eval_samples_per_second": 125.605,
      "eval_steps_per_second": 15.722,
      "step": 1284
    },
    {
      "epoch": 5.0,
      "grad_norm": 7.960892677307129,
      "learning_rate": 9.401291758831328e-05,
      "loss": 0.8118,
      "step": 1605
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.7163661581137309,
      "eval_f1": 0.7311152376922296,
      "eval_loss": 0.7255703806877136,
      "eval_precision": 0.8185184282178283,
      "eval_recall": 0.7163661581137309,
      "eval_runtime": 22.9078,
      "eval_samples_per_second": 125.896,
      "eval_steps_per_second": 15.759,
      "step": 1605
    },
    {
      "epoch": 6.0,
      "grad_norm": 4.861917972564697,
      "learning_rate": 9.065172868916128e-05,
      "loss": 0.7794,
      "step": 1926
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.7819001386962552,
      "eval_f1": 0.7924960521456311,
      "eval_loss": 0.608811616897583,
      "eval_precision": 0.8200552259690934,
      "eval_recall": 0.7819001386962552,
      "eval_runtime": 22.5121,
      "eval_samples_per_second": 128.109,
      "eval_steps_per_second": 16.036,
      "step": 1926
    },
    {
      "epoch": 7.0,
      "grad_norm": 8.067416191101074,
      "learning_rate": 8.663512074027613e-05,
      "loss": 0.6835,
      "step": 2247
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.7624826629680999,
      "eval_f1": 0.778275921221607,
      "eval_loss": 0.5834646821022034,
      "eval_precision": 0.8169986016936643,
      "eval_recall": 0.7624826629680999,
      "eval_runtime": 23.3953,
      "eval_samples_per_second": 123.273,
      "eval_steps_per_second": 15.43,
      "step": 2247
    },
    {
      "epoch": 8.0,
      "grad_norm": 3.7579257488250732,
      "learning_rate": 8.202785264490498e-05,
      "loss": 0.5997,
      "step": 2568
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.765256588072122,
      "eval_f1": 0.7821382649754356,
      "eval_loss": 0.6475569605827332,
      "eval_precision": 0.826387274966978,
      "eval_recall": 0.765256588072122,
      "eval_runtime": 22.6325,
      "eval_samples_per_second": 127.427,
      "eval_steps_per_second": 15.95,
      "step": 2568
    },
    {
      "epoch": 9.0,
      "grad_norm": 10.09793758392334,
      "learning_rate": 7.692088630529957e-05,
      "loss": 0.5814,
      "step": 2889
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.8401525658807212,
      "eval_f1": 0.8403607055183363,
      "eval_loss": 0.49532002210617065,
      "eval_precision": 0.8423956140728975,
      "eval_recall": 0.8401525658807212,
      "eval_runtime": 25.1073,
      "eval_samples_per_second": 114.867,
      "eval_steps_per_second": 14.378,
      "step": 2889
    },
    {
      "epoch": 10.0,
      "grad_norm": 7.585777282714844,
      "learning_rate": 7.136468478571602e-05,
      "loss": 0.5134,
      "step": 3210
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.8103328710124826,
      "eval_f1": 0.8220041299869107,
      "eval_loss": 0.5335156321525574,
      "eval_precision": 0.8534486868065884,
      "eval_recall": 0.8103328710124826,
      "eval_runtime": 24.7217,
      "eval_samples_per_second": 116.659,
      "eval_steps_per_second": 14.603,
      "step": 3210
    },
    {
      "epoch": 11.0,
      "grad_norm": 20.268178939819336,
      "learning_rate": 6.546402506345253e-05,
      "loss": 0.5109,
      "step": 3531
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.8124133148404993,
      "eval_f1": 0.8191514417244506,
      "eval_loss": 0.5497095584869385,
      "eval_precision": 0.8414741317511313,
      "eval_recall": 0.8124133148404993,
      "eval_runtime": 23.8572,
      "eval_samples_per_second": 120.886,
      "eval_steps_per_second": 15.132,
      "step": 3531
    },
    {
      "epoch": 12.0,
      "grad_norm": 3.8051164150238037,
      "learning_rate": 5.931404220195339e-05,
      "loss": 0.4073,
      "step": 3852
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.8311373092926491,
      "eval_f1": 0.831620079776665,
      "eval_loss": 0.5754452347755432,
      "eval_precision": 0.8347554735526493,
      "eval_recall": 0.8311373092926491,
      "eval_runtime": 24.6815,
      "eval_samples_per_second": 116.849,
      "eval_steps_per_second": 14.626,
      "step": 3852
    },
    {
      "epoch": 13.0,
      "grad_norm": 1.562414526939392,
      "learning_rate": 5.30138910478373e-05,
      "loss": 0.3255,
      "step": 4173
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.8574895977808599,
      "eval_f1": 0.8525988995704912,
      "eval_loss": 0.5593818426132202,
      "eval_precision": 0.8620968298732496,
      "eval_recall": 0.8574895977808599,
      "eval_runtime": 24.0309,
      "eval_samples_per_second": 120.012,
      "eval_steps_per_second": 15.022,
      "step": 4173
    },
    {
      "epoch": 14.0,
      "grad_norm": 0.7332964539527893,
      "learning_rate": 4.666514757870119e-05,
      "loss": 0.3288,
      "step": 4494
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.8332177531206657,
      "eval_f1": 0.8401776032043191,
      "eval_loss": 0.6329751014709473,
      "eval_precision": 0.8607345037298127,
      "eval_recall": 0.8332177531206657,
      "eval_runtime": 23.7481,
      "eval_samples_per_second": 121.441,
      "eval_steps_per_second": 15.201,
      "step": 4494
    },
    {
      "epoch": 15.0,
      "grad_norm": 5.967740535736084,
      "learning_rate": 4.038959286483306e-05,
      "loss": 0.2434,
      "step": 4815
    },
    {
      "epoch": 15.0,
      "eval_accuracy": 0.8606102635228848,
      "eval_f1": 0.861866915129384,
      "eval_loss": 0.5198509693145752,
      "eval_precision": 0.8645868176470374,
      "eval_recall": 0.8606102635228848,
      "eval_runtime": 23.048,
      "eval_samples_per_second": 125.13,
      "eval_steps_per_second": 15.663,
      "step": 4815
    },
    {
      "epoch": 16.0,
      "grad_norm": 19.630001068115234,
      "learning_rate": 3.424923705075116e-05,
      "loss": 0.2185,
      "step": 5136
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.858876560332871,
      "eval_f1": 0.860451647543394,
      "eval_loss": 0.5324673652648926,
      "eval_precision": 0.8647326759799664,
      "eval_recall": 0.858876560332871,
      "eval_runtime": 22.8629,
      "eval_samples_per_second": 126.143,
      "eval_steps_per_second": 15.79,
      "step": 5136
    },
    {
      "epoch": 17.0,
      "grad_norm": 0.011605961248278618,
      "learning_rate": 2.8362827388964596e-05,
      "loss": 0.1707,
      "step": 5457
    },
    {
      "epoch": 17.0,
      "eval_accuracy": 0.8640776699029126,
      "eval_f1": 0.8597682191227608,
      "eval_loss": 0.5524240136146545,
      "eval_precision": 0.8638803900922614,
      "eval_recall": 0.8640776699029126,
      "eval_runtime": 22.516,
      "eval_samples_per_second": 128.087,
      "eval_steps_per_second": 16.033,
      "step": 5457
    },
    {
      "epoch": 18.0,
      "grad_norm": 25.897785186767578,
      "learning_rate": 2.2825269192267617e-05,
      "loss": 0.1702,
      "step": 5778
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.8522884882108183,
      "eval_f1": 0.8551640876798166,
      "eval_loss": 0.5471655130386353,
      "eval_precision": 0.8612118088331184,
      "eval_recall": 0.8522884882108183,
      "eval_runtime": 22.9729,
      "eval_samples_per_second": 125.539,
      "eval_steps_per_second": 15.714,
      "step": 5778
    },
    {
      "epoch": 19.0,
      "grad_norm": 0.07399608939886093,
      "learning_rate": 1.7725843316558628e-05,
      "loss": 0.128,
      "step": 6099
    },
    {
      "epoch": 19.0,
      "eval_accuracy": 0.8751733703190014,
      "eval_f1": 0.8745515169095409,
      "eval_loss": 0.559937596321106,
      "eval_precision": 0.8758481332673859,
      "eval_recall": 0.8751733703190014,
      "eval_runtime": 22.9935,
      "eval_samples_per_second": 125.427,
      "eval_steps_per_second": 15.7,
      "step": 6099
    },
    {
      "epoch": 19.0,
      "step": 6099,
      "total_flos": 7.550537882380222e+18,
      "train_loss": 0.579371564758471,
      "train_runtime": 2226.9019,
      "train_samples_per_second": 230.275,
      "train_steps_per_second": 14.415
    }
  ],
  "logging_steps": 500,
  "max_steps": 32100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 500,
  "total_flos": 7.550537882380222e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}