{
"best_metric": 0.8561643835616438,
"best_model_checkpoint": "videomae-base-finetuned-kinetics-finetuned-dcsass-shoplifting-subset/checkpoint-780",
"epoch": 4.194871794871795,
"eval_steps": 500,
"global_step": 780,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01282051282051282,
"grad_norm": 6.756944179534912,
"learning_rate": 6.41025641025641e-06,
"loss": 0.7103,
"step": 10
},
{
"epoch": 0.02564102564102564,
"grad_norm": 7.49935245513916,
"learning_rate": 1.282051282051282e-05,
"loss": 0.5933,
"step": 20
},
{
"epoch": 0.038461538461538464,
"grad_norm": 8.1102294921875,
"learning_rate": 1.923076923076923e-05,
"loss": 0.391,
"step": 30
},
{
"epoch": 0.05128205128205128,
"grad_norm": 14.223252296447754,
"learning_rate": 2.564102564102564e-05,
"loss": 0.5162,
"step": 40
},
{
"epoch": 0.0641025641025641,
"grad_norm": 10.161632537841797,
"learning_rate": 3.205128205128206e-05,
"loss": 0.5433,
"step": 50
},
{
"epoch": 0.07692307692307693,
"grad_norm": 7.198577404022217,
"learning_rate": 3.846153846153846e-05,
"loss": 0.7253,
"step": 60
},
{
"epoch": 0.08974358974358974,
"grad_norm": 15.455668449401855,
"learning_rate": 4.4871794871794874e-05,
"loss": 0.6042,
"step": 70
},
{
"epoch": 0.10256410256410256,
"grad_norm": 5.9158935546875,
"learning_rate": 4.985754985754986e-05,
"loss": 0.4834,
"step": 80
},
{
"epoch": 0.11538461538461539,
"grad_norm": 13.751437187194824,
"learning_rate": 4.9145299145299147e-05,
"loss": 0.6672,
"step": 90
},
{
"epoch": 0.1282051282051282,
"grad_norm": 6.230504989624023,
"learning_rate": 4.8433048433048433e-05,
"loss": 0.561,
"step": 100
},
{
"epoch": 0.14102564102564102,
"grad_norm": 0.061720337718725204,
"learning_rate": 4.772079772079772e-05,
"loss": 0.4631,
"step": 110
},
{
"epoch": 0.15384615384615385,
"grad_norm": 14.445475578308105,
"learning_rate": 4.700854700854701e-05,
"loss": 0.5993,
"step": 120
},
{
"epoch": 0.16666666666666666,
"grad_norm": 5.024906158447266,
"learning_rate": 4.62962962962963e-05,
"loss": 0.6052,
"step": 130
},
{
"epoch": 0.1794871794871795,
"grad_norm": 5.089183807373047,
"learning_rate": 4.558404558404559e-05,
"loss": 0.5345,
"step": 140
},
{
"epoch": 0.19230769230769232,
"grad_norm": 0.20361727476119995,
"learning_rate": 4.4871794871794874e-05,
"loss": 0.3314,
"step": 150
},
{
"epoch": 0.2012820512820513,
"eval_accuracy": 0.821917808219178,
"eval_loss": 0.6146280169487,
"eval_runtime": 86.3385,
"eval_samples_per_second": 1.691,
"eval_steps_per_second": 0.429,
"step": 157
},
{
"epoch": 1.0038461538461538,
"grad_norm": 0.9299275875091553,
"learning_rate": 4.415954415954416e-05,
"loss": 0.5923,
"step": 160
},
{
"epoch": 1.0166666666666666,
"grad_norm": 1.9701108932495117,
"learning_rate": 4.344729344729345e-05,
"loss": 0.6272,
"step": 170
},
{
"epoch": 1.0294871794871794,
"grad_norm": 7.168660640716553,
"learning_rate": 4.2735042735042735e-05,
"loss": 0.3787,
"step": 180
},
{
"epoch": 1.0423076923076924,
"grad_norm": 17.313304901123047,
"learning_rate": 4.202279202279202e-05,
"loss": 0.4095,
"step": 190
},
{
"epoch": 1.0551282051282052,
"grad_norm": 3.225602865219116,
"learning_rate": 4.131054131054131e-05,
"loss": 0.4833,
"step": 200
},
{
"epoch": 1.067948717948718,
"grad_norm": 11.712457656860352,
"learning_rate": 4.05982905982906e-05,
"loss": 0.6347,
"step": 210
},
{
"epoch": 1.0807692307692307,
"grad_norm": 15.161521911621094,
"learning_rate": 3.988603988603989e-05,
"loss": 0.4419,
"step": 220
},
{
"epoch": 1.0935897435897437,
"grad_norm": 11.030887603759766,
"learning_rate": 3.9173789173789176e-05,
"loss": 0.5758,
"step": 230
},
{
"epoch": 1.1064102564102565,
"grad_norm": 5.54761266708374,
"learning_rate": 3.846153846153846e-05,
"loss": 0.6464,
"step": 240
},
{
"epoch": 1.1192307692307693,
"grad_norm": 8.304089546203613,
"learning_rate": 3.774928774928775e-05,
"loss": 0.3947,
"step": 250
},
{
"epoch": 1.132051282051282,
"grad_norm": 0.35803475975990295,
"learning_rate": 3.7037037037037037e-05,
"loss": 0.4621,
"step": 260
},
{
"epoch": 1.1448717948717948,
"grad_norm": 19.831405639648438,
"learning_rate": 3.6324786324786323e-05,
"loss": 0.8061,
"step": 270
},
{
"epoch": 1.1576923076923076,
"grad_norm": 0.5348687767982483,
"learning_rate": 3.561253561253561e-05,
"loss": 0.4076,
"step": 280
},
{
"epoch": 1.1705128205128206,
"grad_norm": 6.216036796569824,
"learning_rate": 3.4900284900284904e-05,
"loss": 0.403,
"step": 290
},
{
"epoch": 1.1833333333333333,
"grad_norm": 1.6393040418624878,
"learning_rate": 3.418803418803419e-05,
"loss": 0.4308,
"step": 300
},
{
"epoch": 1.1961538461538461,
"grad_norm": 5.89309549331665,
"learning_rate": 3.347578347578348e-05,
"loss": 0.3263,
"step": 310
},
{
"epoch": 1.2012820512820512,
"eval_accuracy": 0.8287671232876712,
"eval_loss": 0.6184389591217041,
"eval_runtime": 44.0336,
"eval_samples_per_second": 3.316,
"eval_steps_per_second": 0.84,
"step": 314
},
{
"epoch": 2.0076923076923077,
"grad_norm": 8.897279739379883,
"learning_rate": 3.2763532763532764e-05,
"loss": 0.736,
"step": 320
},
{
"epoch": 2.0205128205128204,
"grad_norm": 11.958976745605469,
"learning_rate": 3.205128205128206e-05,
"loss": 0.0545,
"step": 330
},
{
"epoch": 2.033333333333333,
"grad_norm": 0.10042566061019897,
"learning_rate": 3.133903133903134e-05,
"loss": 0.3544,
"step": 340
},
{
"epoch": 2.046153846153846,
"grad_norm": 3.468107223510742,
"learning_rate": 3.0626780626780625e-05,
"loss": 0.4753,
"step": 350
},
{
"epoch": 2.0589743589743588,
"grad_norm": 0.6979026794433594,
"learning_rate": 2.9914529914529915e-05,
"loss": 0.1313,
"step": 360
},
{
"epoch": 2.071794871794872,
"grad_norm": 3.0566658973693848,
"learning_rate": 2.9202279202279202e-05,
"loss": 0.5758,
"step": 370
},
{
"epoch": 2.0846153846153848,
"grad_norm": 0.26063987612724304,
"learning_rate": 2.8490028490028492e-05,
"loss": 0.2311,
"step": 380
},
{
"epoch": 2.0974358974358975,
"grad_norm": 3.1704676151275635,
"learning_rate": 2.777777777777778e-05,
"loss": 0.3121,
"step": 390
},
{
"epoch": 2.1102564102564103,
"grad_norm": 0.06660736352205276,
"learning_rate": 2.706552706552707e-05,
"loss": 0.2649,
"step": 400
},
{
"epoch": 2.123076923076923,
"grad_norm": 0.8849107027053833,
"learning_rate": 2.6353276353276356e-05,
"loss": 0.4632,
"step": 410
},
{
"epoch": 2.135897435897436,
"grad_norm": 16.48140525817871,
"learning_rate": 2.564102564102564e-05,
"loss": 0.5191,
"step": 420
},
{
"epoch": 2.1487179487179486,
"grad_norm": 2.628391742706299,
"learning_rate": 2.492877492877493e-05,
"loss": 0.405,
"step": 430
},
{
"epoch": 2.1615384615384614,
"grad_norm": 1.1029433012008667,
"learning_rate": 2.4216524216524217e-05,
"loss": 0.3149,
"step": 440
},
{
"epoch": 2.174358974358974,
"grad_norm": 8.070186614990234,
"learning_rate": 2.3504273504273504e-05,
"loss": 0.5851,
"step": 450
},
{
"epoch": 2.1871794871794874,
"grad_norm": 12.600333213806152,
"learning_rate": 2.2792022792022794e-05,
"loss": 0.3032,
"step": 460
},
{
"epoch": 2.2,
"grad_norm": 1.988337755203247,
"learning_rate": 2.207977207977208e-05,
"loss": 0.6332,
"step": 470
},
{
"epoch": 2.2012820512820515,
"eval_accuracy": 0.8424657534246576,
"eval_loss": 0.4275156855583191,
"eval_runtime": 44.8316,
"eval_samples_per_second": 3.257,
"eval_steps_per_second": 0.825,
"step": 471
},
{
"epoch": 3.0115384615384615,
"grad_norm": 0.7842563986778259,
"learning_rate": 2.1367521367521368e-05,
"loss": 0.2056,
"step": 480
},
{
"epoch": 3.0243589743589743,
"grad_norm": 9.073537826538086,
"learning_rate": 2.0655270655270654e-05,
"loss": 0.2923,
"step": 490
},
{
"epoch": 3.037179487179487,
"grad_norm": 9.187246322631836,
"learning_rate": 1.9943019943019945e-05,
"loss": 0.2992,
"step": 500
},
{
"epoch": 3.05,
"grad_norm": 20.043611526489258,
"learning_rate": 1.923076923076923e-05,
"loss": 0.5296,
"step": 510
},
{
"epoch": 3.0628205128205126,
"grad_norm": 28.867929458618164,
"learning_rate": 1.8518518518518518e-05,
"loss": 0.1522,
"step": 520
},
{
"epoch": 3.075641025641026,
"grad_norm": 13.749585151672363,
"learning_rate": 1.7806267806267805e-05,
"loss": 0.5946,
"step": 530
},
{
"epoch": 3.0884615384615386,
"grad_norm": 3.9784131050109863,
"learning_rate": 1.7094017094017095e-05,
"loss": 0.7594,
"step": 540
},
{
"epoch": 3.1012820512820514,
"grad_norm": 0.4377283453941345,
"learning_rate": 1.6381766381766382e-05,
"loss": 0.0527,
"step": 550
},
{
"epoch": 3.114102564102564,
"grad_norm": 0.44730284810066223,
"learning_rate": 1.566951566951567e-05,
"loss": 0.3361,
"step": 560
},
{
"epoch": 3.126923076923077,
"grad_norm": 2.1688125133514404,
"learning_rate": 1.4957264957264958e-05,
"loss": 0.4073,
"step": 570
},
{
"epoch": 3.1397435897435897,
"grad_norm": 6.609060287475586,
"learning_rate": 1.4245014245014246e-05,
"loss": 0.5241,
"step": 580
},
{
"epoch": 3.1525641025641025,
"grad_norm": 18.088504791259766,
"learning_rate": 1.3532763532763535e-05,
"loss": 0.1817,
"step": 590
},
{
"epoch": 3.1653846153846152,
"grad_norm": 3.1018402576446533,
"learning_rate": 1.282051282051282e-05,
"loss": 0.1476,
"step": 600
},
{
"epoch": 3.178205128205128,
"grad_norm": 0.2694937586784363,
"learning_rate": 1.2108262108262108e-05,
"loss": 0.1166,
"step": 610
},
{
"epoch": 3.1910256410256412,
"grad_norm": 0.2130882292985916,
"learning_rate": 1.1396011396011397e-05,
"loss": 0.0405,
"step": 620
},
{
"epoch": 3.2012820512820515,
"eval_accuracy": 0.8424657534246576,
"eval_loss": 0.76870197057724,
"eval_runtime": 44.2253,
"eval_samples_per_second": 3.301,
"eval_steps_per_second": 0.837,
"step": 628
},
{
"epoch": 4.0025641025641026,
"grad_norm": 37.76756286621094,
"learning_rate": 1.0683760683760684e-05,
"loss": 0.4644,
"step": 630
},
{
"epoch": 4.015384615384615,
"grad_norm": 0.223415806889534,
"learning_rate": 9.971509971509972e-06,
"loss": 0.5616,
"step": 640
},
{
"epoch": 4.028205128205128,
"grad_norm": 0.11729208379983902,
"learning_rate": 9.259259259259259e-06,
"loss": 0.0667,
"step": 650
},
{
"epoch": 4.041025641025641,
"grad_norm": 3.586672782897949,
"learning_rate": 8.547008547008548e-06,
"loss": 0.1252,
"step": 660
},
{
"epoch": 4.053846153846154,
"grad_norm": 20.754961013793945,
"learning_rate": 7.834757834757835e-06,
"loss": 0.2335,
"step": 670
},
{
"epoch": 4.066666666666666,
"grad_norm": 1.776369571685791,
"learning_rate": 7.122507122507123e-06,
"loss": 0.1772,
"step": 680
},
{
"epoch": 4.079487179487179,
"grad_norm": 0.9859512448310852,
"learning_rate": 6.41025641025641e-06,
"loss": 0.1654,
"step": 690
},
{
"epoch": 4.092307692307692,
"grad_norm": 9.265326499938965,
"learning_rate": 5.6980056980056985e-06,
"loss": 0.1738,
"step": 700
},
{
"epoch": 4.105128205128205,
"grad_norm": 17.748327255249023,
"learning_rate": 4.985754985754986e-06,
"loss": 0.3303,
"step": 710
},
{
"epoch": 4.1179487179487175,
"grad_norm": 0.5790041089057922,
"learning_rate": 4.273504273504274e-06,
"loss": 0.2181,
"step": 720
},
{
"epoch": 4.130769230769231,
"grad_norm": 0.2577972710132599,
"learning_rate": 3.5612535612535615e-06,
"loss": 0.0101,
"step": 730
},
{
"epoch": 4.143589743589744,
"grad_norm": 27.089906692504883,
"learning_rate": 2.8490028490028492e-06,
"loss": 0.1569,
"step": 740
},
{
"epoch": 4.156410256410257,
"grad_norm": 0.044243063777685165,
"learning_rate": 2.136752136752137e-06,
"loss": 0.1688,
"step": 750
},
{
"epoch": 4.1692307692307695,
"grad_norm": 13.373856544494629,
"learning_rate": 1.4245014245014246e-06,
"loss": 0.0592,
"step": 760
},
{
"epoch": 4.182051282051282,
"grad_norm": 3.7931740283966064,
"learning_rate": 7.122507122507123e-07,
"loss": 0.2266,
"step": 770
},
{
"epoch": 4.194871794871795,
"grad_norm": 6.501523971557617,
"learning_rate": 0.0,
"loss": 0.1579,
"step": 780
},
{
"epoch": 4.194871794871795,
"eval_accuracy": 0.8561643835616438,
"eval_loss": 0.5852294564247131,
"eval_runtime": 44.3219,
"eval_samples_per_second": 3.294,
"eval_steps_per_second": 0.835,
"step": 780
},
{
"epoch": 4.194871794871795,
"step": 780,
"total_flos": 3.8777530024360673e+18,
"train_loss": 0.3928673116060404,
"train_runtime": 2196.6027,
"train_samples_per_second": 1.42,
"train_steps_per_second": 0.355
},
{
"epoch": 4.194871794871795,
"eval_accuracy": 0.8874172185430463,
"eval_loss": 0.3803229033946991,
"eval_runtime": 88.7221,
"eval_samples_per_second": 1.702,
"eval_steps_per_second": 0.428,
"step": 780
},
{
"epoch": 4.194871794871795,
"eval_accuracy": 0.8874172185430463,
"eval_loss": 0.38032299280166626,
"eval_runtime": 51.2971,
"eval_samples_per_second": 2.944,
"eval_steps_per_second": 0.741,
"step": 780
}
],
"logging_steps": 10,
"max_steps": 780,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"total_flos": 3.8777530024360673e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}