{
  "best_metric": 0.9814814814814815,
  "best_model_checkpoint": "vit-base-patch16-224-Trial007-YEL_STEM2/checkpoint-20",
  "epoch": 44.44444444444444,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.44,
      "learning_rate": 5e-06,
      "loss": 0.6465,
      "step": 1
    },
    {
      "epoch": 0.89,
      "learning_rate": 1e-05,
      "loss": 0.6676,
      "step": 2
    },
    {
      "epoch": 0.89,
      "eval_accuracy": 0.7222222222222222,
      "eval_loss": 0.6180152297019958,
      "eval_runtime": 0.2619,
      "eval_samples_per_second": 206.218,
      "eval_steps_per_second": 3.819,
      "step": 2
    },
    {
      "epoch": 1.33,
      "learning_rate": 1.5e-05,
      "loss": 0.5752,
      "step": 3
    },
    {
      "epoch": 1.78,
      "learning_rate": 2e-05,
      "loss": 0.5805,
      "step": 4
    },
    {
      "epoch": 1.78,
      "eval_accuracy": 0.7592592592592593,
      "eval_loss": 0.5003750920295715,
      "eval_runtime": 0.2607,
      "eval_samples_per_second": 207.107,
      "eval_steps_per_second": 3.835,
      "step": 4
    },
    {
      "epoch": 2.22,
      "learning_rate": 2.5e-05,
      "loss": 0.4833,
      "step": 5
    },
    {
      "epoch": 2.67,
      "learning_rate": 3e-05,
      "loss": 0.5012,
      "step": 6
    },
    {
      "epoch": 2.67,
      "eval_accuracy": 0.9629629629629629,
      "eval_loss": 0.37833932042121887,
      "eval_runtime": 0.2715,
      "eval_samples_per_second": 198.886,
      "eval_steps_per_second": 3.683,
      "step": 6
    },
    {
      "epoch": 3.11,
      "learning_rate": 3.5e-05,
      "loss": 0.3602,
      "step": 7
    },
    {
      "epoch": 3.56,
      "learning_rate": 4e-05,
      "loss": 0.396,
      "step": 8
    },
    {
      "epoch": 4.0,
      "learning_rate": 4.5e-05,
      "loss": 0.2794,
      "step": 9
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9629629629629629,
      "eval_loss": 0.22847579419612885,
      "eval_runtime": 0.269,
      "eval_samples_per_second": 200.773,
      "eval_steps_per_second": 3.718,
      "step": 9
    },
    {
      "epoch": 4.44,
      "learning_rate": 5e-05,
      "loss": 0.3094,
      "step": 10
    },
    {
      "epoch": 4.89,
      "learning_rate": 4.9444444444444446e-05,
      "loss": 0.2695,
      "step": 11
    },
    {
      "epoch": 4.89,
      "eval_accuracy": 0.8888888888888888,
      "eval_loss": 0.25508877635002136,
      "eval_runtime": 0.2701,
      "eval_samples_per_second": 199.957,
      "eval_steps_per_second": 3.703,
      "step": 11
    },
    {
      "epoch": 5.33,
      "learning_rate": 4.888888888888889e-05,
      "loss": 0.2595,
      "step": 12
    },
    {
      "epoch": 5.78,
      "learning_rate": 4.8333333333333334e-05,
      "loss": 0.2782,
      "step": 13
    },
    {
      "epoch": 5.78,
      "eval_accuracy": 0.9629629629629629,
      "eval_loss": 0.10787578672170639,
      "eval_runtime": 0.2599,
      "eval_samples_per_second": 207.776,
      "eval_steps_per_second": 3.848,
      "step": 13
    },
    {
      "epoch": 6.22,
      "learning_rate": 4.7777777777777784e-05,
      "loss": 0.1556,
      "step": 14
    },
    {
      "epoch": 6.67,
      "learning_rate": 4.722222222222222e-05,
      "loss": 0.2131,
      "step": 15
    },
    {
      "epoch": 6.67,
      "eval_accuracy": 0.9629629629629629,
      "eval_loss": 0.12049588561058044,
      "eval_runtime": 0.2817,
      "eval_samples_per_second": 191.698,
      "eval_steps_per_second": 3.55,
      "step": 15
    },
    {
      "epoch": 7.11,
      "learning_rate": 4.666666666666667e-05,
      "loss": 0.1482,
      "step": 16
    },
    {
      "epoch": 7.56,
      "learning_rate": 4.6111111111111115e-05,
      "loss": 0.1872,
      "step": 17
    },
    {
      "epoch": 8.0,
      "learning_rate": 4.555555555555556e-05,
      "loss": 0.1537,
      "step": 18
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9629629629629629,
      "eval_loss": 0.18612359464168549,
      "eval_runtime": 0.2613,
      "eval_samples_per_second": 206.693,
      "eval_steps_per_second": 3.828,
      "step": 18
    },
    {
      "epoch": 8.44,
      "learning_rate": 4.5e-05,
      "loss": 0.1043,
      "step": 19
    },
    {
      "epoch": 8.89,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.1739,
      "step": 20
    },
    {
      "epoch": 8.89,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.11719384789466858,
      "eval_runtime": 0.2622,
      "eval_samples_per_second": 205.974,
      "eval_steps_per_second": 3.814,
      "step": 20
    },
    {
      "epoch": 9.33,
      "learning_rate": 4.388888888888889e-05,
      "loss": 0.3945,
      "step": 21
    },
    {
      "epoch": 9.78,
      "learning_rate": 4.3333333333333334e-05,
      "loss": 0.1059,
      "step": 22
    },
    {
      "epoch": 9.78,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.10919703543186188,
      "eval_runtime": 0.2607,
      "eval_samples_per_second": 207.114,
      "eval_steps_per_second": 3.835,
      "step": 22
    },
    {
      "epoch": 10.22,
      "learning_rate": 4.277777777777778e-05,
      "loss": 0.1378,
      "step": 23
    },
    {
      "epoch": 10.67,
      "learning_rate": 4.222222222222222e-05,
      "loss": 0.146,
      "step": 24
    },
    {
      "epoch": 10.67,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.10719860345125198,
      "eval_runtime": 0.2613,
      "eval_samples_per_second": 206.624,
      "eval_steps_per_second": 3.826,
      "step": 24
    },
    {
      "epoch": 11.11,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.1155,
      "step": 25
    },
    {
      "epoch": 11.56,
      "learning_rate": 4.111111111111111e-05,
      "loss": 0.0854,
      "step": 26
    },
    {
      "epoch": 12.0,
      "learning_rate": 4.055555555555556e-05,
      "loss": 0.088,
      "step": 27
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.10145124793052673,
      "eval_runtime": 0.2622,
      "eval_samples_per_second": 205.964,
      "eval_steps_per_second": 3.814,
      "step": 27
    },
    {
      "epoch": 12.44,
      "learning_rate": 4e-05,
      "loss": 0.09,
      "step": 28
    },
    {
      "epoch": 12.89,
      "learning_rate": 3.944444444444445e-05,
      "loss": 0.1304,
      "step": 29
    },
    {
      "epoch": 12.89,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.11511386185884476,
      "eval_runtime": 0.2626,
      "eval_samples_per_second": 205.674,
      "eval_steps_per_second": 3.809,
      "step": 29
    },
    {
      "epoch": 13.33,
      "learning_rate": 3.888888888888889e-05,
      "loss": 0.0742,
      "step": 30
    },
    {
      "epoch": 13.78,
      "learning_rate": 3.8333333333333334e-05,
      "loss": 0.0924,
      "step": 31
    },
    {
      "epoch": 13.78,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.131294846534729,
      "eval_runtime": 0.2663,
      "eval_samples_per_second": 202.811,
      "eval_steps_per_second": 3.756,
      "step": 31
    },
    {
      "epoch": 14.22,
      "learning_rate": 3.777777777777778e-05,
      "loss": 0.0868,
      "step": 32
    },
    {
      "epoch": 14.67,
      "learning_rate": 3.722222222222222e-05,
      "loss": 0.091,
      "step": 33
    },
    {
      "epoch": 14.67,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.11780886352062225,
      "eval_runtime": 0.2589,
      "eval_samples_per_second": 208.602,
      "eval_steps_per_second": 3.863,
      "step": 33
    },
    {
      "epoch": 15.11,
      "learning_rate": 3.6666666666666666e-05,
      "loss": 0.1066,
      "step": 34
    },
    {
      "epoch": 15.56,
      "learning_rate": 3.611111111111111e-05,
      "loss": 0.1028,
      "step": 35
    },
    {
      "epoch": 16.0,
      "learning_rate": 3.555555555555556e-05,
      "loss": 0.0508,
      "step": 36
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.09711939841508865,
      "eval_runtime": 0.2672,
      "eval_samples_per_second": 202.064,
      "eval_steps_per_second": 3.742,
      "step": 36
    },
    {
      "epoch": 16.44,
      "learning_rate": 3.5e-05,
      "loss": 0.1105,
      "step": 37
    },
    {
      "epoch": 16.89,
      "learning_rate": 3.444444444444445e-05,
      "loss": 0.1004,
      "step": 38
    },
    {
      "epoch": 16.89,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.11753766983747482,
      "eval_runtime": 0.2602,
      "eval_samples_per_second": 207.572,
      "eval_steps_per_second": 3.844,
      "step": 38
    },
    {
      "epoch": 17.33,
      "learning_rate": 3.388888888888889e-05,
      "loss": 0.0688,
      "step": 39
    },
    {
      "epoch": 17.78,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.1097,
      "step": 40
    },
    {
      "epoch": 17.78,
      "eval_accuracy": 0.9629629629629629,
      "eval_loss": 0.14230388402938843,
      "eval_runtime": 0.2664,
      "eval_samples_per_second": 202.739,
      "eval_steps_per_second": 3.754,
      "step": 40
    },
    {
      "epoch": 18.22,
      "learning_rate": 3.277777777777778e-05,
      "loss": 0.1049,
      "step": 41
    },
    {
      "epoch": 18.67,
      "learning_rate": 3.222222222222223e-05,
      "loss": 0.0758,
      "step": 42
    },
    {
      "epoch": 18.67,
      "eval_accuracy": 0.9629629629629629,
      "eval_loss": 0.15965422987937927,
      "eval_runtime": 0.2786,
      "eval_samples_per_second": 193.807,
      "eval_steps_per_second": 3.589,
      "step": 42
    },
    {
      "epoch": 19.11,
      "learning_rate": 3.1666666666666666e-05,
      "loss": 0.0947,
      "step": 43
    },
    {
      "epoch": 19.56,
      "learning_rate": 3.111111111111111e-05,
      "loss": 0.0425,
      "step": 44
    },
    {
      "epoch": 20.0,
      "learning_rate": 3.055555555555556e-05,
      "loss": 0.0687,
      "step": 45
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.12045230716466904,
      "eval_runtime": 0.2678,
      "eval_samples_per_second": 201.68,
      "eval_steps_per_second": 3.735,
      "step": 45
    },
    {
      "epoch": 20.44,
      "learning_rate": 3e-05,
      "loss": 0.0722,
      "step": 46
    },
    {
      "epoch": 20.89,
      "learning_rate": 2.9444444444444448e-05,
      "loss": 0.0513,
      "step": 47
    },
    {
      "epoch": 20.89,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.1106700748205185,
      "eval_runtime": 0.2701,
      "eval_samples_per_second": 199.943,
      "eval_steps_per_second": 3.703,
      "step": 47
    },
    {
      "epoch": 21.33,
      "learning_rate": 2.8888888888888888e-05,
      "loss": 0.1179,
      "step": 48
    },
    {
      "epoch": 21.78,
      "learning_rate": 2.8333333333333335e-05,
      "loss": 0.0755,
      "step": 49
    },
    {
      "epoch": 21.78,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.11499401181936264,
      "eval_runtime": 0.2594,
      "eval_samples_per_second": 208.189,
      "eval_steps_per_second": 3.855,
      "step": 49
    },
    {
      "epoch": 22.22,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.0559,
      "step": 50
    },
    {
      "epoch": 22.67,
      "learning_rate": 2.7222222222222223e-05,
      "loss": 0.0897,
      "step": 51
    },
    {
      "epoch": 22.67,
      "eval_accuracy": 0.9629629629629629,
      "eval_loss": 0.13321787118911743,
      "eval_runtime": 0.2629,
      "eval_samples_per_second": 205.413,
      "eval_steps_per_second": 3.804,
      "step": 51
    },
    {
      "epoch": 23.11,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 0.0669,
      "step": 52
    },
    {
      "epoch": 23.56,
      "learning_rate": 2.6111111111111114e-05,
      "loss": 0.0994,
      "step": 53
    },
    {
      "epoch": 24.0,
      "learning_rate": 2.5555555555555554e-05,
      "loss": 0.0439,
      "step": 54
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.12632034718990326,
      "eval_runtime": 0.263,
      "eval_samples_per_second": 205.294,
      "eval_steps_per_second": 3.802,
      "step": 54
    },
    {
      "epoch": 24.44,
      "learning_rate": 2.5e-05,
      "loss": 0.0317,
      "step": 55
    },
    {
      "epoch": 24.89,
      "learning_rate": 2.4444444444444445e-05,
      "loss": 0.0607,
      "step": 56
    },
    {
      "epoch": 24.89,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.11113300919532776,
      "eval_runtime": 0.2772,
      "eval_samples_per_second": 194.823,
      "eval_steps_per_second": 3.608,
      "step": 56
    },
    {
      "epoch": 25.33,
      "learning_rate": 2.3888888888888892e-05,
      "loss": 0.0379,
      "step": 57
    },
    {
      "epoch": 25.78,
      "learning_rate": 2.3333333333333336e-05,
      "loss": 0.0719,
      "step": 58
    },
    {
      "epoch": 25.78,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.10038212686777115,
      "eval_runtime": 0.2604,
      "eval_samples_per_second": 207.335,
      "eval_steps_per_second": 3.84,
      "step": 58
    },
    {
      "epoch": 26.22,
      "learning_rate": 2.277777777777778e-05,
      "loss": 0.069,
      "step": 59
    },
    {
      "epoch": 26.67,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.0599,
      "step": 60
    },
    {
      "epoch": 26.67,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.10639392584562302,
      "eval_runtime": 0.2743,
      "eval_samples_per_second": 196.877,
      "eval_steps_per_second": 3.646,
      "step": 60
    },
    {
      "epoch": 27.11,
      "learning_rate": 2.1666666666666667e-05,
      "loss": 0.0882,
      "step": 61
    },
    {
      "epoch": 27.56,
      "learning_rate": 2.111111111111111e-05,
      "loss": 0.0658,
      "step": 62
    },
    {
      "epoch": 28.0,
      "learning_rate": 2.0555555555555555e-05,
      "loss": 0.0613,
      "step": 63
    },
    {
      "epoch": 28.0,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.1355225145816803,
      "eval_runtime": 0.2608,
      "eval_samples_per_second": 207.094,
      "eval_steps_per_second": 3.835,
      "step": 63
    },
    {
      "epoch": 28.44,
      "learning_rate": 2e-05,
      "loss": 0.0841,
      "step": 64
    },
    {
      "epoch": 28.89,
      "learning_rate": 1.9444444444444445e-05,
      "loss": 0.0689,
      "step": 65
    },
    {
      "epoch": 28.89,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.14443787932395935,
      "eval_runtime": 0.2612,
      "eval_samples_per_second": 206.722,
      "eval_steps_per_second": 3.828,
      "step": 65
    },
    {
      "epoch": 29.33,
      "learning_rate": 1.888888888888889e-05,
      "loss": 0.0677,
      "step": 66
    },
    {
      "epoch": 29.78,
      "learning_rate": 1.8333333333333333e-05,
      "loss": 0.0754,
      "step": 67
    },
    {
      "epoch": 29.78,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.13980631530284882,
      "eval_runtime": 0.2608,
      "eval_samples_per_second": 207.038,
      "eval_steps_per_second": 3.834,
      "step": 67
    },
    {
      "epoch": 30.22,
      "learning_rate": 1.777777777777778e-05,
      "loss": 0.0538,
      "step": 68
    },
    {
      "epoch": 30.67,
      "learning_rate": 1.7222222222222224e-05,
      "loss": 0.0835,
      "step": 69
    },
    {
      "epoch": 30.67,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.13446056842803955,
      "eval_runtime": 0.2647,
      "eval_samples_per_second": 204.022,
      "eval_steps_per_second": 3.778,
      "step": 69
    },
    {
      "epoch": 31.11,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.0378,
      "step": 70
    },
    {
      "epoch": 31.56,
      "learning_rate": 1.6111111111111115e-05,
      "loss": 0.1164,
      "step": 71
    },
    {
      "epoch": 32.0,
      "learning_rate": 1.5555555555555555e-05,
      "loss": 0.0801,
      "step": 72
    },
    {
      "epoch": 32.0,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.13478641211986542,
      "eval_runtime": 0.2726,
      "eval_samples_per_second": 198.093,
      "eval_steps_per_second": 3.668,
      "step": 72
    },
    {
      "epoch": 32.44,
      "learning_rate": 1.5e-05,
      "loss": 0.0586,
      "step": 73
    },
    {
      "epoch": 32.89,
      "learning_rate": 1.4444444444444444e-05,
      "loss": 0.0701,
      "step": 74
    },
    {
      "epoch": 32.89,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.13650549948215485,
      "eval_runtime": 0.2625,
      "eval_samples_per_second": 205.719,
      "eval_steps_per_second": 3.81,
      "step": 74
    },
    {
      "epoch": 33.33,
      "learning_rate": 1.388888888888889e-05,
      "loss": 0.0728,
      "step": 75
    },
    {
      "epoch": 33.78,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 0.0647,
      "step": 76
    },
    {
      "epoch": 33.78,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.13482581079006195,
      "eval_runtime": 0.2622,
      "eval_samples_per_second": 205.981,
      "eval_steps_per_second": 3.814,
      "step": 76
    },
    {
      "epoch": 34.22,
      "learning_rate": 1.2777777777777777e-05,
      "loss": 0.0499,
      "step": 77
    },
    {
      "epoch": 34.67,
      "learning_rate": 1.2222222222222222e-05,
      "loss": 0.0982,
      "step": 78
    },
    {
      "epoch": 34.67,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.1346072554588318,
      "eval_runtime": 0.264,
      "eval_samples_per_second": 204.546,
      "eval_steps_per_second": 3.788,
      "step": 78
    },
    {
      "epoch": 35.11,
      "learning_rate": 1.1666666666666668e-05,
      "loss": 0.0999,
      "step": 79
    },
    {
      "epoch": 35.56,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 0.0657,
      "step": 80
    },
    {
      "epoch": 36.0,
      "learning_rate": 1.0555555555555555e-05,
      "loss": 0.0671,
      "step": 81
    },
    {
      "epoch": 36.0,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.1377686858177185,
      "eval_runtime": 0.2607,
      "eval_samples_per_second": 207.162,
      "eval_steps_per_second": 3.836,
      "step": 81
    },
    {
      "epoch": 36.44,
      "learning_rate": 1e-05,
      "loss": 0.0674,
      "step": 82
    },
    {
      "epoch": 36.89,
      "learning_rate": 9.444444444444445e-06,
      "loss": 0.054,
      "step": 83
    },
    {
      "epoch": 36.89,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.1371222585439682,
      "eval_runtime": 0.2634,
      "eval_samples_per_second": 205.012,
      "eval_steps_per_second": 3.797,
      "step": 83
    },
    {
      "epoch": 37.33,
      "learning_rate": 8.88888888888889e-06,
      "loss": 0.0314,
      "step": 84
    },
    {
      "epoch": 37.78,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.0735,
      "step": 85
    },
    {
      "epoch": 37.78,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.13551822304725647,
      "eval_runtime": 0.2625,
      "eval_samples_per_second": 205.737,
      "eval_steps_per_second": 3.81,
      "step": 85
    },
    {
      "epoch": 38.22,
      "learning_rate": 7.777777777777777e-06,
      "loss": 0.0881,
      "step": 86
    },
    {
      "epoch": 38.67,
      "learning_rate": 7.222222222222222e-06,
      "loss": 0.0736,
      "step": 87
    },
    {
      "epoch": 38.67,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.13492508232593536,
      "eval_runtime": 0.2758,
      "eval_samples_per_second": 195.796,
      "eval_steps_per_second": 3.626,
      "step": 87
    },
    {
      "epoch": 39.11,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.0648,
      "step": 88
    },
    {
      "epoch": 39.56,
      "learning_rate": 6.111111111111111e-06,
      "loss": 0.0543,
      "step": 89
    },
    {
      "epoch": 40.0,
      "learning_rate": 5.555555555555556e-06,
      "loss": 0.0287,
      "step": 90
    },
    {
      "epoch": 40.0,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.13293945789337158,
      "eval_runtime": 0.2609,
      "eval_samples_per_second": 207.01,
      "eval_steps_per_second": 3.834,
      "step": 90
    },
    {
      "epoch": 40.44,
      "learning_rate": 5e-06,
      "loss": 0.0417,
      "step": 91
    },
    {
      "epoch": 40.89,
      "learning_rate": 4.444444444444445e-06,
      "loss": 0.0539,
      "step": 92
    },
    {
      "epoch": 40.89,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.1322045475244522,
      "eval_runtime": 0.2653,
      "eval_samples_per_second": 203.574,
      "eval_steps_per_second": 3.77,
      "step": 92
    },
    {
      "epoch": 41.33,
      "learning_rate": 3.888888888888889e-06,
      "loss": 0.0602,
      "step": 93
    },
    {
      "epoch": 41.78,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.0483,
      "step": 94
    },
    {
      "epoch": 41.78,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.13241925835609436,
      "eval_runtime": 0.2756,
      "eval_samples_per_second": 195.953,
      "eval_steps_per_second": 3.629,
      "step": 94
    },
    {
      "epoch": 42.22,
      "learning_rate": 2.777777777777778e-06,
      "loss": 0.0855,
      "step": 95
    },
    {
      "epoch": 42.67,
      "learning_rate": 2.2222222222222225e-06,
      "loss": 0.083,
      "step": 96
    },
    {
      "epoch": 42.67,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.13193319737911224,
      "eval_runtime": 0.2638,
      "eval_samples_per_second": 204.69,
      "eval_steps_per_second": 3.791,
      "step": 96
    },
    {
      "epoch": 43.11,
      "learning_rate": 1.6666666666666667e-06,
      "loss": 0.0501,
      "step": 97
    },
    {
      "epoch": 43.56,
      "learning_rate": 1.1111111111111112e-06,
      "loss": 0.0751,
      "step": 98
    },
    {
      "epoch": 44.0,
      "learning_rate": 5.555555555555556e-07,
      "loss": 0.0558,
      "step": 99
    },
    {
      "epoch": 44.0,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.1318960040807724,
      "eval_runtime": 0.2634,
      "eval_samples_per_second": 204.99,
      "eval_steps_per_second": 3.796,
      "step": 99
    },
    {
      "epoch": 44.44,
      "learning_rate": 0.0,
      "loss": 0.0752,
      "step": 100
    },
    {
      "epoch": 44.44,
      "eval_accuracy": 0.9814814814814815,
      "eval_loss": 0.13192817568778992,
      "eval_runtime": 0.2735,
      "eval_samples_per_second": 197.452,
      "eval_steps_per_second": 3.657,
      "step": 100
    },
    {
      "epoch": 44.44,
      "step": 100,
      "total_flos": 1.6586385457107272e+18,
      "train_loss": 0.13114935230463742,
      "train_runtime": 687.5457,
      "train_samples_per_second": 34.979,
      "train_steps_per_second": 0.145
    }
  ],
  "max_steps": 100,
  "num_train_epochs": 50,
  "total_flos": 1.6586385457107272e+18,
  "trial_name": null,
  "trial_params": null
}