{
  "best_metric": 0.16443897783756256,
  "best_model_checkpoint": "./vit-base-beans/checkpoint-640",
  "epoch": 20.0,
  "global_step": 640,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.31,
      "learning_rate": 9.84375e-05,
      "loss": 3.3252,
      "step": 10
    },
    {
      "epoch": 0.62,
      "learning_rate": 9.687500000000001e-05,
      "loss": 3.217,
      "step": 20
    },
    {
      "epoch": 0.94,
      "learning_rate": 9.53125e-05,
      "loss": 3.0799,
      "step": 30
    },
    {
      "epoch": 1.25,
      "learning_rate": 9.375e-05,
      "loss": 2.8394,
      "step": 40
    },
    {
      "epoch": 1.25,
      "eval_accuracy": 0.4815668202764977,
      "eval_loss": 2.7347912788391113,
      "eval_runtime": 5.6305,
      "eval_samples_per_second": 77.081,
      "eval_steps_per_second": 9.768,
      "step": 40
    },
    {
      "epoch": 1.56,
      "learning_rate": 9.21875e-05,
      "loss": 2.598,
      "step": 50
    },
    {
      "epoch": 1.88,
      "learning_rate": 9.062500000000001e-05,
      "loss": 2.417,
      "step": 60
    },
    {
      "epoch": 2.19,
      "learning_rate": 8.90625e-05,
      "loss": 2.111,
      "step": 70
    },
    {
      "epoch": 2.5,
      "learning_rate": 8.75e-05,
      "loss": 1.9174,
      "step": 80
    },
    {
      "epoch": 2.5,
      "eval_accuracy": 0.8963133640552995,
      "eval_loss": 1.8127654790878296,
      "eval_runtime": 5.3595,
      "eval_samples_per_second": 80.978,
      "eval_steps_per_second": 10.262,
      "step": 80
    },
    {
      "epoch": 2.81,
      "learning_rate": 8.593750000000001e-05,
      "loss": 1.666,
      "step": 90
    },
    {
      "epoch": 3.12,
      "learning_rate": 8.4375e-05,
      "loss": 1.5159,
      "step": 100
    },
    {
      "epoch": 3.44,
      "learning_rate": 8.28125e-05,
      "loss": 1.3105,
      "step": 110
    },
    {
      "epoch": 3.75,
      "learning_rate": 8.125000000000001e-05,
      "loss": 1.1859,
      "step": 120
    },
    {
      "epoch": 3.75,
      "eval_accuracy": 0.9470046082949308,
      "eval_loss": 1.1414676904678345,
      "eval_runtime": 5.3582,
      "eval_samples_per_second": 80.997,
      "eval_steps_per_second": 10.265,
      "step": 120
    },
    {
      "epoch": 4.06,
      "learning_rate": 7.96875e-05,
      "loss": 1.0525,
      "step": 130
    },
    {
      "epoch": 4.38,
      "learning_rate": 7.8125e-05,
      "loss": 0.9085,
      "step": 140
    },
    {
      "epoch": 4.69,
      "learning_rate": 7.65625e-05,
      "loss": 0.8207,
      "step": 150
    },
    {
      "epoch": 5.0,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.7413,
      "step": 160
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9654377880184332,
      "eval_loss": 0.7720305919647217,
      "eval_runtime": 5.6625,
      "eval_samples_per_second": 76.645,
      "eval_steps_per_second": 9.713,
      "step": 160
    },
    {
      "epoch": 5.31,
      "learning_rate": 7.34375e-05,
      "loss": 0.6334,
      "step": 170
    },
    {
      "epoch": 5.62,
      "learning_rate": 7.1875e-05,
      "loss": 0.6058,
      "step": 180
    },
    {
      "epoch": 5.94,
      "learning_rate": 7.031250000000001e-05,
      "loss": 0.5319,
      "step": 190
    },
    {
      "epoch": 6.25,
      "learning_rate": 6.875e-05,
      "loss": 0.4761,
      "step": 200
    },
    {
      "epoch": 6.25,
      "eval_accuracy": 0.9838709677419355,
      "eval_loss": 0.5084273815155029,
      "eval_runtime": 5.4846,
      "eval_samples_per_second": 79.131,
      "eval_steps_per_second": 10.028,
      "step": 200
    },
    {
      "epoch": 6.56,
      "learning_rate": 6.71875e-05,
      "loss": 0.4109,
      "step": 210
    },
    {
      "epoch": 6.88,
      "learning_rate": 6.562500000000001e-05,
      "loss": 0.3707,
      "step": 220
    },
    {
      "epoch": 7.19,
      "learning_rate": 6.40625e-05,
      "loss": 0.3536,
      "step": 230
    },
    {
      "epoch": 7.5,
      "learning_rate": 6.25e-05,
      "loss": 0.3108,
      "step": 240
    },
    {
      "epoch": 7.5,
      "eval_accuracy": 0.9746543778801844,
      "eval_loss": 0.36055463552474976,
      "eval_runtime": 5.6696,
      "eval_samples_per_second": 76.549,
      "eval_steps_per_second": 9.701,
      "step": 240
    },
    {
      "epoch": 7.81,
      "learning_rate": 6.0937500000000004e-05,
      "loss": 0.2789,
      "step": 250
    },
    {
      "epoch": 8.12,
      "learning_rate": 5.9375e-05,
      "loss": 0.2905,
      "step": 260
    },
    {
      "epoch": 8.44,
      "learning_rate": 5.78125e-05,
      "loss": 0.2462,
      "step": 270
    },
    {
      "epoch": 8.75,
      "learning_rate": 5.6250000000000005e-05,
      "loss": 0.251,
      "step": 280
    },
    {
      "epoch": 8.75,
      "eval_accuracy": 0.9769585253456221,
      "eval_loss": 0.2958492040634155,
      "eval_runtime": 7.6402,
      "eval_samples_per_second": 56.805,
      "eval_steps_per_second": 7.199,
      "step": 280
    },
    {
      "epoch": 9.06,
      "learning_rate": 5.46875e-05,
      "loss": 0.2171,
      "step": 290
    },
    {
      "epoch": 9.38,
      "learning_rate": 5.3125000000000004e-05,
      "loss": 0.2064,
      "step": 300
    },
    {
      "epoch": 9.69,
      "learning_rate": 5.15625e-05,
      "loss": 0.2116,
      "step": 310
    },
    {
      "epoch": 10.0,
      "learning_rate": 5e-05,
      "loss": 0.1896,
      "step": 320
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.9769585253456221,
      "eval_loss": 0.24788345396518707,
      "eval_runtime": 5.2479,
      "eval_samples_per_second": 82.699,
      "eval_steps_per_second": 10.48,
      "step": 320
    },
    {
      "epoch": 10.31,
      "learning_rate": 4.8437500000000005e-05,
      "loss": 0.1783,
      "step": 330
    },
    {
      "epoch": 10.62,
      "learning_rate": 4.6875e-05,
      "loss": 0.1859,
      "step": 340
    },
    {
      "epoch": 10.94,
      "learning_rate": 4.5312500000000004e-05,
      "loss": 0.1705,
      "step": 350
    },
    {
      "epoch": 11.25,
      "learning_rate": 4.375e-05,
      "loss": 0.1659,
      "step": 360
    },
    {
      "epoch": 11.25,
      "eval_accuracy": 0.9838709677419355,
      "eval_loss": 0.23752211034297943,
      "eval_runtime": 5.7417,
      "eval_samples_per_second": 75.588,
      "eval_steps_per_second": 9.579,
      "step": 360
    },
    {
      "epoch": 11.56,
      "learning_rate": 4.21875e-05,
      "loss": 0.1753,
      "step": 370
    },
    {
      "epoch": 11.88,
      "learning_rate": 4.0625000000000005e-05,
      "loss": 0.1509,
      "step": 380
    },
    {
      "epoch": 12.19,
      "learning_rate": 3.90625e-05,
      "loss": 0.1447,
      "step": 390
    },
    {
      "epoch": 12.5,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.1401,
      "step": 400
    },
    {
      "epoch": 12.5,
      "eval_accuracy": 0.9792626728110599,
      "eval_loss": 0.20333679020404816,
      "eval_runtime": 5.2814,
      "eval_samples_per_second": 82.175,
      "eval_steps_per_second": 10.414,
      "step": 400
    },
    {
      "epoch": 12.81,
      "learning_rate": 3.59375e-05,
      "loss": 0.1508,
      "step": 410
    },
    {
      "epoch": 13.12,
      "learning_rate": 3.4375e-05,
      "loss": 0.1364,
      "step": 420
    },
    {
      "epoch": 13.44,
      "learning_rate": 3.2812500000000005e-05,
      "loss": 0.1415,
      "step": 430
    },
    {
      "epoch": 13.75,
      "learning_rate": 3.125e-05,
      "loss": 0.131,
      "step": 440
    },
    {
      "epoch": 13.75,
      "eval_accuracy": 0.9792626728110599,
      "eval_loss": 0.19693893194198608,
      "eval_runtime": 5.0279,
      "eval_samples_per_second": 86.318,
      "eval_steps_per_second": 10.939,
      "step": 440
    },
    {
      "epoch": 14.06,
      "learning_rate": 2.96875e-05,
      "loss": 0.1264,
      "step": 450
    },
    {
      "epoch": 14.38,
      "learning_rate": 2.8125000000000003e-05,
      "loss": 0.1376,
      "step": 460
    },
    {
      "epoch": 14.69,
      "learning_rate": 2.6562500000000002e-05,
      "loss": 0.1211,
      "step": 470
    },
    {
      "epoch": 15.0,
      "learning_rate": 2.5e-05,
      "loss": 0.1162,
      "step": 480
    },
    {
      "epoch": 15.0,
      "eval_accuracy": 0.9792626728110599,
      "eval_loss": 0.1791529506444931,
      "eval_runtime": 4.2132,
      "eval_samples_per_second": 103.009,
      "eval_steps_per_second": 13.054,
      "step": 480
    },
    {
      "epoch": 15.31,
      "learning_rate": 2.34375e-05,
      "loss": 0.1285,
      "step": 490
    },
    {
      "epoch": 15.62,
      "learning_rate": 2.1875e-05,
      "loss": 0.1136,
      "step": 500
    },
    {
      "epoch": 15.94,
      "learning_rate": 2.0312500000000002e-05,
      "loss": 0.1117,
      "step": 510
    },
    {
      "epoch": 16.25,
      "learning_rate": 1.8750000000000002e-05,
      "loss": 0.11,
      "step": 520
    },
    {
      "epoch": 16.25,
      "eval_accuracy": 0.9792626728110599,
      "eval_loss": 0.17193575203418732,
      "eval_runtime": 4.2988,
      "eval_samples_per_second": 100.957,
      "eval_steps_per_second": 12.794,
      "step": 520
    },
    {
      "epoch": 16.56,
      "learning_rate": 1.71875e-05,
      "loss": 0.1087,
      "step": 530
    },
    {
      "epoch": 16.88,
      "learning_rate": 1.5625e-05,
      "loss": 0.1065,
      "step": 540
    },
    {
      "epoch": 17.19,
      "learning_rate": 1.4062500000000001e-05,
      "loss": 0.1207,
      "step": 550
    },
    {
      "epoch": 17.5,
      "learning_rate": 1.25e-05,
      "loss": 0.1056,
      "step": 560
    },
    {
      "epoch": 17.5,
      "eval_accuracy": 0.9792626728110599,
      "eval_loss": 0.16766877472400665,
      "eval_runtime": 5.7239,
      "eval_samples_per_second": 75.823,
      "eval_steps_per_second": 9.609,
      "step": 560
    },
    {
      "epoch": 17.81,
      "learning_rate": 1.09375e-05,
      "loss": 0.1051,
      "step": 570
    },
    {
      "epoch": 18.12,
      "learning_rate": 9.375000000000001e-06,
      "loss": 0.1148,
      "step": 580
    },
    {
      "epoch": 18.44,
      "learning_rate": 7.8125e-06,
      "loss": 0.1043,
      "step": 590
    },
    {
      "epoch": 18.75,
      "learning_rate": 6.25e-06,
      "loss": 0.1128,
      "step": 600
    },
    {
      "epoch": 18.75,
      "eval_accuracy": 0.9723502304147466,
      "eval_loss": 0.17274537682533264,
      "eval_runtime": 5.5073,
      "eval_samples_per_second": 78.805,
      "eval_steps_per_second": 9.987,
      "step": 600
    },
    {
      "epoch": 19.06,
      "learning_rate": 4.6875000000000004e-06,
      "loss": 0.102,
      "step": 610
    },
    {
      "epoch": 19.38,
      "learning_rate": 3.125e-06,
      "loss": 0.1028,
      "step": 620
    },
    {
      "epoch": 19.69,
      "learning_rate": 1.5625e-06,
      "loss": 0.1096,
      "step": 630
    },
    {
      "epoch": 20.0,
      "learning_rate": 0.0,
      "loss": 0.1018,
      "step": 640
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.9792626728110599,
      "eval_loss": 0.16443897783756256,
      "eval_runtime": 4.9159,
      "eval_samples_per_second": 88.285,
      "eval_steps_per_second": 11.188,
      "step": 640
    },
    {
      "epoch": 20.0,
      "step": 640,
      "total_flos": 1.1843641391623373e+18,
      "train_loss": 0.6315841894596815,
      "train_runtime": 473.5366,
      "train_samples_per_second": 32.268,
      "train_steps_per_second": 1.352
    }
  ],
  "max_steps": 640,
  "num_train_epochs": 20,
  "total_flos": 1.1843641391623373e+18,
  "trial_name": null,
  "trial_params": null
}