{
  "best_metric": 0.34081346423562414,
  "best_model_checkpoint": "videomae-base-finetuned-elder/checkpoint-145",
  "epoch": 3.2447916666666665,
  "eval_steps": 500,
  "global_step": 576,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "grad_norm": 5.98492431640625,
      "learning_rate": 8.620689655172414e-06,
      "loss": 1.7608,
      "step": 10
    },
    {
      "epoch": 0.03,
      "grad_norm": 5.645175457000732,
      "learning_rate": 1.7241379310344828e-05,
      "loss": 1.7351,
      "step": 20
    },
    {
      "epoch": 0.05,
      "grad_norm": 5.5949578285217285,
      "learning_rate": 2.5862068965517244e-05,
      "loss": 1.6988,
      "step": 30
    },
    {
      "epoch": 0.07,
      "grad_norm": 7.89724063873291,
      "learning_rate": 3.4482758620689657e-05,
      "loss": 1.6708,
      "step": 40
    },
    {
      "epoch": 0.09,
      "grad_norm": 6.795271396636963,
      "learning_rate": 4.3103448275862066e-05,
      "loss": 1.7643,
      "step": 50
    },
    {
      "epoch": 0.1,
      "grad_norm": 5.639424800872803,
      "learning_rate": 4.980694980694981e-05,
      "loss": 1.709,
      "step": 60
    },
    {
      "epoch": 0.12,
      "grad_norm": 6.942084312438965,
      "learning_rate": 4.884169884169885e-05,
      "loss": 1.7576,
      "step": 70
    },
    {
      "epoch": 0.14,
      "grad_norm": 6.221732139587402,
      "learning_rate": 4.787644787644788e-05,
      "loss": 1.728,
      "step": 80
    },
    {
      "epoch": 0.16,
      "grad_norm": 5.884160041809082,
      "learning_rate": 4.6911196911196914e-05,
      "loss": 1.6656,
      "step": 90
    },
    {
      "epoch": 0.17,
      "grad_norm": 5.812763690948486,
      "learning_rate": 4.594594594594595e-05,
      "loss": 1.7212,
      "step": 100
    },
    {
      "epoch": 0.19,
      "grad_norm": 5.196298599243164,
      "learning_rate": 4.498069498069498e-05,
      "loss": 1.7729,
      "step": 110
    },
    {
      "epoch": 0.21,
      "grad_norm": 6.1844162940979,
      "learning_rate": 4.401544401544402e-05,
      "loss": 1.7089,
      "step": 120
    },
    {
      "epoch": 0.23,
      "grad_norm": 4.422025203704834,
      "learning_rate": 4.305019305019305e-05,
      "loss": 1.7591,
      "step": 130
    },
    {
      "epoch": 0.24,
      "grad_norm": 4.016156196594238,
      "learning_rate": 4.2084942084942086e-05,
      "loss": 1.6792,
      "step": 140
    },
    {
      "epoch": 0.25,
      "eval_accuracy": 0.34081346423562414,
      "eval_loss": 1.639670729637146,
      "eval_runtime": 914.1791,
      "eval_samples_per_second": 1.56,
      "eval_steps_per_second": 0.196,
      "step": 145
    },
    {
      "epoch": 1.01,
      "grad_norm": 6.139647960662842,
      "learning_rate": 4.111969111969112e-05,
      "loss": 1.653,
      "step": 150
    },
    {
      "epoch": 1.03,
      "grad_norm": 5.428959846496582,
      "learning_rate": 4.015444015444015e-05,
      "loss": 1.7568,
      "step": 160
    },
    {
      "epoch": 1.04,
      "grad_norm": 4.396470069885254,
      "learning_rate": 3.918918918918919e-05,
      "loss": 1.7393,
      "step": 170
    },
    {
      "epoch": 1.06,
      "grad_norm": 4.388951301574707,
      "learning_rate": 3.822393822393823e-05,
      "loss": 1.7956,
      "step": 180
    },
    {
      "epoch": 1.08,
      "grad_norm": 9.291723251342773,
      "learning_rate": 3.725868725868726e-05,
      "loss": 1.7192,
      "step": 190
    },
    {
      "epoch": 1.1,
      "grad_norm": 5.253334045410156,
      "learning_rate": 3.6293436293436295e-05,
      "loss": 1.6888,
      "step": 200
    },
    {
      "epoch": 1.11,
      "grad_norm": 5.230763912200928,
      "learning_rate": 3.532818532818533e-05,
      "loss": 1.6972,
      "step": 210
    },
    {
      "epoch": 1.13,
      "grad_norm": 3.834545612335205,
      "learning_rate": 3.436293436293436e-05,
      "loss": 1.7124,
      "step": 220
    },
    {
      "epoch": 1.15,
      "grad_norm": 4.553481101989746,
      "learning_rate": 3.33976833976834e-05,
      "loss": 1.7011,
      "step": 230
    },
    {
      "epoch": 1.16,
      "grad_norm": 4.672926425933838,
      "learning_rate": 3.2432432432432436e-05,
      "loss": 1.701,
      "step": 240
    },
    {
      "epoch": 1.18,
      "grad_norm": 5.78378963470459,
      "learning_rate": 3.1467181467181466e-05,
      "loss": 1.597,
      "step": 250
    },
    {
      "epoch": 1.2,
      "grad_norm": 4.538748741149902,
      "learning_rate": 3.0501930501930504e-05,
      "loss": 1.643,
      "step": 260
    },
    {
      "epoch": 1.22,
      "grad_norm": 3.175865411758423,
      "learning_rate": 2.953667953667954e-05,
      "loss": 1.6941,
      "step": 270
    },
    {
      "epoch": 1.23,
      "grad_norm": 5.939924240112305,
      "learning_rate": 2.857142857142857e-05,
      "loss": 1.5795,
      "step": 280
    },
    {
      "epoch": 1.25,
      "grad_norm": 5.927539348602295,
      "learning_rate": 2.7606177606177608e-05,
      "loss": 1.6326,
      "step": 290
    },
    {
      "epoch": 1.25,
      "eval_accuracy": 0.3232819074333801,
      "eval_loss": 1.6259315013885498,
      "eval_runtime": 1008.6021,
      "eval_samples_per_second": 1.414,
      "eval_steps_per_second": 0.177,
      "step": 290
    },
    {
      "epoch": 2.02,
      "grad_norm": 5.333898067474365,
      "learning_rate": 2.6640926640926645e-05,
      "loss": 1.5935,
      "step": 300
    },
    {
      "epoch": 2.03,
      "grad_norm": 3.408902406692505,
      "learning_rate": 2.5675675675675675e-05,
      "loss": 1.8192,
      "step": 310
    },
    {
      "epoch": 2.05,
      "grad_norm": 4.679303169250488,
      "learning_rate": 2.4710424710424712e-05,
      "loss": 1.6555,
      "step": 320
    },
    {
      "epoch": 2.07,
      "grad_norm": 6.238588333129883,
      "learning_rate": 2.3745173745173746e-05,
      "loss": 1.6335,
      "step": 330
    },
    {
      "epoch": 2.09,
      "grad_norm": 7.497094631195068,
      "learning_rate": 2.277992277992278e-05,
      "loss": 1.6843,
      "step": 340
    },
    {
      "epoch": 2.1,
      "grad_norm": 7.145444393157959,
      "learning_rate": 2.1814671814671817e-05,
      "loss": 1.6107,
      "step": 350
    },
    {
      "epoch": 2.12,
      "grad_norm": 2.706979513168335,
      "learning_rate": 2.084942084942085e-05,
      "loss": 1.7367,
      "step": 360
    },
    {
      "epoch": 2.14,
      "grad_norm": 5.786703109741211,
      "learning_rate": 1.9884169884169884e-05,
      "loss": 1.6931,
      "step": 370
    },
    {
      "epoch": 2.16,
      "grad_norm": 6.3389177322387695,
      "learning_rate": 1.891891891891892e-05,
      "loss": 1.5662,
      "step": 380
    },
    {
      "epoch": 2.17,
      "grad_norm": 3.2628817558288574,
      "learning_rate": 1.7953667953667955e-05,
      "loss": 1.6322,
      "step": 390
    },
    {
      "epoch": 2.19,
      "grad_norm": 4.778809547424316,
      "learning_rate": 1.698841698841699e-05,
      "loss": 1.675,
      "step": 400
    },
    {
      "epoch": 2.21,
      "grad_norm": 3.829134941101074,
      "learning_rate": 1.6023166023166026e-05,
      "loss": 1.66,
      "step": 410
    },
    {
      "epoch": 2.23,
      "grad_norm": 7.900679588317871,
      "learning_rate": 1.505791505791506e-05,
      "loss": 1.7465,
      "step": 420
    },
    {
      "epoch": 2.24,
      "grad_norm": 4.327514171600342,
      "learning_rate": 1.4092664092664093e-05,
      "loss": 1.5989,
      "step": 430
    },
    {
      "epoch": 2.25,
      "eval_accuracy": 0.34011220196353437,
      "eval_loss": 1.6231606006622314,
      "eval_runtime": 965.0554,
      "eval_samples_per_second": 1.478,
      "eval_steps_per_second": 0.185,
      "step": 435
    },
    {
      "epoch": 3.01,
      "grad_norm": 9.593939781188965,
      "learning_rate": 1.3127413127413127e-05,
      "loss": 1.688,
      "step": 440
    },
    {
      "epoch": 3.03,
      "grad_norm": 7.807051181793213,
      "learning_rate": 1.2162162162162164e-05,
      "loss": 1.6609,
      "step": 450
    },
    {
      "epoch": 3.04,
      "grad_norm": 12.353256225585938,
      "learning_rate": 1.1196911196911197e-05,
      "loss": 1.6091,
      "step": 460
    },
    {
      "epoch": 3.06,
      "grad_norm": 5.419424057006836,
      "learning_rate": 1.0231660231660233e-05,
      "loss": 1.6206,
      "step": 470
    },
    {
      "epoch": 3.08,
      "grad_norm": 4.162823677062988,
      "learning_rate": 9.266409266409266e-06,
      "loss": 1.64,
      "step": 480
    },
    {
      "epoch": 3.1,
      "grad_norm": 2.6399056911468506,
      "learning_rate": 8.301158301158302e-06,
      "loss": 1.4998,
      "step": 490
    },
    {
      "epoch": 3.11,
      "grad_norm": 7.833770751953125,
      "learning_rate": 7.335907335907337e-06,
      "loss": 1.5723,
      "step": 500
    },
    {
      "epoch": 3.13,
      "grad_norm": 9.628315925598145,
      "learning_rate": 6.370656370656371e-06,
      "loss": 1.6482,
      "step": 510
    },
    {
      "epoch": 3.15,
      "grad_norm": 5.782583236694336,
      "learning_rate": 5.405405405405406e-06,
      "loss": 1.5077,
      "step": 520
    },
    {
      "epoch": 3.16,
      "grad_norm": 5.132481575012207,
      "learning_rate": 4.4401544401544405e-06,
      "loss": 1.7713,
      "step": 530
    },
    {
      "epoch": 3.18,
      "grad_norm": 4.762849807739258,
      "learning_rate": 3.474903474903475e-06,
      "loss": 1.5803,
      "step": 540
    },
    {
      "epoch": 3.2,
      "grad_norm": 4.8944315910339355,
      "learning_rate": 2.5096525096525096e-06,
      "loss": 1.7035,
      "step": 550
    },
    {
      "epoch": 3.22,
      "grad_norm": 4.528627872467041,
      "learning_rate": 1.5444015444015445e-06,
      "loss": 1.5533,
      "step": 560
    },
    {
      "epoch": 3.23,
      "grad_norm": 9.058985710144043,
      "learning_rate": 5.791505791505791e-07,
      "loss": 1.7029,
      "step": 570
    },
    {
      "epoch": 3.24,
      "eval_accuracy": 0.3288920056100982,
      "eval_loss": 1.6284162998199463,
      "eval_runtime": 971.7776,
      "eval_samples_per_second": 1.467,
      "eval_steps_per_second": 0.184,
      "step": 576
    },
    {
      "epoch": 3.24,
      "step": 576,
      "total_flos": 5.730855473985159e+18,
      "train_loss": 1.6746184544430838,
      "train_runtime": 9779.0761,
      "train_samples_per_second": 0.471,
      "train_steps_per_second": 0.059
    },
    {
      "epoch": 3.24,
      "eval_accuracy": 0.34811715481171546,
      "eval_loss": 1.6624772548675537,
      "eval_runtime": 751.8376,
      "eval_samples_per_second": 1.589,
      "eval_steps_per_second": 0.2,
      "step": 576
    },
    {
      "epoch": 3.24,
      "eval_accuracy": 0.34811715481171546,
      "eval_loss": 1.6624772548675537,
      "eval_runtime": 754.0286,
      "eval_samples_per_second": 1.585,
      "eval_steps_per_second": 0.199,
      "step": 576
    }
  ],
  "logging_steps": 10,
  "max_steps": 576,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 500,
  "total_flos": 5.730855473985159e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}