{
"best_metric": 0.2377403974533081,
"best_model_checkpoint": "./convnext-base-3e-5-weight-decay-2e-8/checkpoint-4396",
"epoch": 10.0,
"eval_steps": 500,
"global_step": 10990,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09,
"grad_norm": 17.935943603515625,
"learning_rate": 2.999387175598269e-05,
"loss": 2.4621,
"step": 100
},
{
"epoch": 0.18,
"grad_norm": 31.654447555541992,
"learning_rate": 2.9975492031314045e-05,
"loss": 1.2945,
"step": 200
},
{
"epoch": 0.27,
"grad_norm": 18.324377059936523,
"learning_rate": 2.994487584405244e-05,
"loss": 0.908,
"step": 300
},
{
"epoch": 0.36,
"grad_norm": 25.15399742126465,
"learning_rate": 2.990204821066006e-05,
"loss": 0.6822,
"step": 400
},
{
"epoch": 0.45,
"grad_norm": 33.575416564941406,
"learning_rate": 2.984704412556199e-05,
"loss": 0.6458,
"step": 500
},
{
"epoch": 0.55,
"grad_norm": 18.706640243530273,
"learning_rate": 2.977990853255228e-05,
"loss": 0.5474,
"step": 600
},
{
"epoch": 0.64,
"grad_norm": 23.474363327026367,
"learning_rate": 2.970069628807043e-05,
"loss": 0.5737,
"step": 700
},
{
"epoch": 0.73,
"grad_norm": 35.13369369506836,
"learning_rate": 2.9609472116378222e-05,
"loss": 0.5333,
"step": 800
},
{
"epoch": 0.82,
"grad_norm": 11.39519214630127,
"learning_rate": 2.9506310556673573e-05,
"loss": 0.4716,
"step": 900
},
{
"epoch": 0.91,
"grad_norm": 10.556333541870117,
"learning_rate": 2.9391295902184625e-05,
"loss": 0.4785,
"step": 1000
},
{
"epoch": 1.0,
"eval_accuracy": 0.8870775347912525,
"eval_loss": 0.36941519379615784,
"eval_runtime": 103.8148,
"eval_samples_per_second": 24.226,
"eval_steps_per_second": 1.522,
"step": 1099
},
{
"epoch": 1.0,
"grad_norm": 21.9882869720459,
"learning_rate": 2.9264522131293818e-05,
"loss": 0.5245,
"step": 1100
},
{
"epoch": 1.09,
"grad_norm": 9.284809112548828,
"learning_rate": 2.9126092830748217e-05,
"loss": 0.389,
"step": 1200
},
{
"epoch": 1.18,
"grad_norm": 13.87375545501709,
"learning_rate": 2.897612111101888e-05,
"loss": 0.3786,
"step": 1300
},
{
"epoch": 1.27,
"grad_norm": 13.774240493774414,
"learning_rate": 2.8814729513878365e-05,
"loss": 0.3904,
"step": 1400
},
{
"epoch": 1.36,
"grad_norm": 21.452919006347656,
"learning_rate": 2.864204991227195e-05,
"loss": 0.3436,
"step": 1500
},
{
"epoch": 1.46,
"grad_norm": 17.817153930664062,
"learning_rate": 2.8458223402564366e-05,
"loss": 0.3232,
"step": 1600
},
{
"epoch": 1.55,
"grad_norm": 14.883545875549316,
"learning_rate": 2.826340018925006e-05,
"loss": 0.3993,
"step": 1700
},
{
"epoch": 1.64,
"grad_norm": 18.39521026611328,
"learning_rate": 2.8057739462221215e-05,
"loss": 0.3769,
"step": 1800
},
{
"epoch": 1.73,
"grad_norm": 13.766119003295898,
"learning_rate": 2.7841409266693838e-05,
"loss": 0.3511,
"step": 1900
},
{
"epoch": 1.82,
"grad_norm": 16.02475929260254,
"learning_rate": 2.761458636589813e-05,
"loss": 0.3387,
"step": 2000
},
{
"epoch": 1.91,
"grad_norm": 13.445148468017578,
"learning_rate": 2.7377456096645395e-05,
"loss": 0.3622,
"step": 2100
},
{
"epoch": 2.0,
"eval_accuracy": 0.9184890656063618,
"eval_loss": 0.30202704668045044,
"eval_runtime": 104.2599,
"eval_samples_per_second": 24.122,
"eval_steps_per_second": 1.515,
"step": 2198
},
{
"epoch": 2.0,
"grad_norm": 31.912105560302734,
"learning_rate": 2.7130212217889484e-05,
"loss": 0.3488,
"step": 2200
},
{
"epoch": 2.09,
"grad_norm": 18.74437141418457,
"learning_rate": 2.6873056752406504e-05,
"loss": 0.3051,
"step": 2300
},
{
"epoch": 2.18,
"grad_norm": 21.47634506225586,
"learning_rate": 2.6606199821722166e-05,
"loss": 0.2934,
"step": 2400
},
{
"epoch": 2.27,
"grad_norm": 7.397056579589844,
"learning_rate": 2.632985947442167e-05,
"loss": 0.2585,
"step": 2500
},
{
"epoch": 2.37,
"grad_norm": 8.826918601989746,
"learning_rate": 2.6044261507982356e-05,
"loss": 0.3157,
"step": 2600
},
{
"epoch": 2.46,
"grad_norm": 17.997589111328125,
"learning_rate": 2.5749639284274782e-05,
"loss": 0.2847,
"step": 2700
},
{
"epoch": 2.55,
"grad_norm": 2.118089437484741,
"learning_rate": 2.5446233538882924e-05,
"loss": 0.3001,
"step": 2800
},
{
"epoch": 2.64,
"grad_norm": 18.02204704284668,
"learning_rate": 2.513429218439932e-05,
"loss": 0.3153,
"step": 2900
},
{
"epoch": 2.73,
"grad_norm": 14.180878639221191,
"learning_rate": 2.4814070107855878e-05,
"loss": 0.2657,
"step": 3000
},
{
"epoch": 2.82,
"grad_norm": 16.476423263549805,
"learning_rate": 2.448582896245591e-05,
"loss": 0.2698,
"step": 3100
},
{
"epoch": 2.91,
"grad_norm": 11.466832160949707,
"learning_rate": 2.4149836953777488e-05,
"loss": 0.2714,
"step": 3200
},
{
"epoch": 3.0,
"eval_accuracy": 0.9292246520874752,
"eval_loss": 0.26775774359703064,
"eval_runtime": 104.9396,
"eval_samples_per_second": 23.966,
"eval_steps_per_second": 1.506,
"step": 3297
},
{
"epoch": 3.0,
"grad_norm": 18.808837890625,
"learning_rate": 2.3806368620622876e-05,
"loss": 0.2994,
"step": 3300
},
{
"epoch": 3.09,
"grad_norm": 2.240368127822876,
"learning_rate": 2.345570461069312e-05,
"loss": 0.2174,
"step": 3400
},
{
"epoch": 3.18,
"grad_norm": 2.480295181274414,
"learning_rate": 2.3098131451271016e-05,
"loss": 0.2375,
"step": 3500
},
{
"epoch": 3.28,
"grad_norm": 12.673290252685547,
"learning_rate": 2.2733941315099883e-05,
"loss": 0.2446,
"step": 3600
},
{
"epoch": 3.37,
"grad_norm": 9.802734375,
"learning_rate": 2.2363431781649483e-05,
"loss": 0.2237,
"step": 3700
},
{
"epoch": 3.46,
"grad_norm": 21.503725051879883,
"learning_rate": 2.1986905593964048e-05,
"loss": 0.2049,
"step": 3800
},
{
"epoch": 3.55,
"grad_norm": 9.363218307495117,
"learning_rate": 2.1604670411291174e-05,
"loss": 0.2587,
"step": 3900
},
{
"epoch": 3.64,
"grad_norm": 5.982770919799805,
"learning_rate": 2.121703855769373e-05,
"loss": 0.2448,
"step": 4000
},
{
"epoch": 3.73,
"grad_norm": 10.533409118652344,
"learning_rate": 2.0824326766850072e-05,
"loss": 0.2426,
"step": 4100
},
{
"epoch": 3.82,
"grad_norm": 0.05731818825006485,
"learning_rate": 2.042685592325123e-05,
"loss": 0.2105,
"step": 4200
},
{
"epoch": 3.91,
"grad_norm": 11.535008430480957,
"learning_rate": 2.0024950800006463e-05,
"loss": 0.2277,
"step": 4300
},
{
"epoch": 4.0,
"eval_accuracy": 0.9391650099403579,
"eval_loss": 0.2377403974533081,
"eval_runtime": 103.8401,
"eval_samples_per_second": 24.22,
"eval_steps_per_second": 1.522,
"step": 4396
},
{
"epoch": 4.0,
"grad_norm": 10.935998916625977,
"learning_rate": 1.961893979347137e-05,
"loss": 0.2175,
"step": 4400
},
{
"epoch": 4.09,
"grad_norm": 10.798638343811035,
"learning_rate": 1.9209154654915524e-05,
"loss": 0.1995,
"step": 4500
},
{
"epoch": 4.19,
"grad_norm": 13.328634262084961,
"learning_rate": 1.879593021944875e-05,
"loss": 0.1908,
"step": 4600
},
{
"epoch": 4.28,
"grad_norm": 29.5804386138916,
"learning_rate": 1.837960413242765e-05,
"loss": 0.1951,
"step": 4700
},
{
"epoch": 4.37,
"grad_norm": 0.5516547560691833,
"learning_rate": 1.796051657356582e-05,
"loss": 0.1875,
"step": 4800
},
{
"epoch": 4.46,
"grad_norm": 6.66653299331665,
"learning_rate": 1.7539009978973312e-05,
"loss": 0.1875,
"step": 4900
},
{
"epoch": 4.55,
"grad_norm": 22.318578720092773,
"learning_rate": 1.711542876135233e-05,
"loss": 0.1669,
"step": 5000
},
{
"epoch": 4.64,
"grad_norm": 3.0123414993286133,
"learning_rate": 1.669011902857791e-05,
"loss": 0.1814,
"step": 5100
},
{
"epoch": 4.73,
"grad_norm": 8.864391326904297,
"learning_rate": 1.6263428300893422e-05,
"loss": 0.1966,
"step": 5200
},
{
"epoch": 4.82,
"grad_norm": 27.810791015625,
"learning_rate": 1.5835705226952112e-05,
"loss": 0.1844,
"step": 5300
},
{
"epoch": 4.91,
"grad_norm": 30.675161361694336,
"learning_rate": 1.540729929893649e-05,
"loss": 0.2028,
"step": 5400
},
{
"epoch": 5.0,
"eval_accuracy": 0.9391650099403579,
"eval_loss": 0.25951632857322693,
"eval_runtime": 104.2587,
"eval_samples_per_second": 24.123,
"eval_steps_per_second": 1.515,
"step": 5495
},
{
"epoch": 5.0,
"grad_norm": 0.7745651602745056,
"learning_rate": 1.4978560566988603e-05,
"loss": 0.1933,
"step": 5500
},
{
"epoch": 5.1,
"grad_norm": 9.74758529663086,
"learning_rate": 1.454983935318433e-05,
"loss": 0.1531,
"step": 5600
},
{
"epoch": 5.19,
"grad_norm": 7.249210834503174,
"learning_rate": 1.4121485965285485e-05,
"loss": 0.1546,
"step": 5700
},
{
"epoch": 5.28,
"grad_norm": 14.33100414276123,
"learning_rate": 1.3693850410503614e-05,
"loss": 0.1491,
"step": 5800
},
{
"epoch": 5.37,
"grad_norm": 0.042251575738191605,
"learning_rate": 1.326728210950942e-05,
"loss": 0.1544,
"step": 5900
},
{
"epoch": 5.46,
"grad_norm": 0.08964452892541885,
"learning_rate": 1.2842129610921378e-05,
"loss": 0.1453,
"step": 6000
},
{
"epoch": 5.55,
"grad_norm": 8.64229679107666,
"learning_rate": 1.2418740306506923e-05,
"loss": 0.1901,
"step": 6100
},
{
"epoch": 5.64,
"grad_norm": 6.262851238250732,
"learning_rate": 1.1997460147328984e-05,
"loss": 0.1454,
"step": 6200
},
{
"epoch": 5.73,
"grad_norm": 11.136585235595703,
"learning_rate": 1.1578633361069559e-05,
"loss": 0.1942,
"step": 6300
},
{
"epoch": 5.82,
"grad_norm": 2.0731401443481445,
"learning_rate": 1.1162602170761611e-05,
"loss": 0.1559,
"step": 6400
},
{
"epoch": 5.91,
"grad_norm": 26.187849044799805,
"learning_rate": 1.0749706515158863e-05,
"loss": 0.1738,
"step": 6500
},
{
"epoch": 6.0,
"eval_accuracy": 0.9411530815109344,
"eval_loss": 0.24843762814998627,
"eval_runtime": 103.7765,
"eval_samples_per_second": 24.235,
"eval_steps_per_second": 1.523,
"step": 6594
},
{
"epoch": 6.01,
"grad_norm": 16.86968994140625,
"learning_rate": 1.0340283770972167e-05,
"loss": 0.1741,
"step": 6600
},
{
"epoch": 6.1,
"grad_norm": 29.712677001953125,
"learning_rate": 9.93466847719919e-06,
"loss": 0.1372,
"step": 6700
},
{
"epoch": 6.19,
"grad_norm": 19.351139068603516,
"learning_rate": 9.533192061772919e-06,
"loss": 0.1317,
"step": 6800
},
{
"epoch": 6.28,
"grad_norm": 13.787877082824707,
"learning_rate": 9.136182570752153e-06,
"loss": 0.1291,
"step": 6900
},
{
"epoch": 6.37,
"grad_norm": 0.04230346158146858,
"learning_rate": 8.743964400275304e-06,
"loss": 0.1213,
"step": 7000
},
{
"epoch": 6.46,
"grad_norm": 0.025999082252383232,
"learning_rate": 8.356858031496596e-06,
"loss": 0.1556,
"step": 7100
},
{
"epoch": 6.55,
"grad_norm": 1.7087092399597168,
"learning_rate": 7.975179768721187e-06,
"loss": 0.1291,
"step": 7200
},
{
"epoch": 6.64,
"grad_norm": 0.30428436398506165,
"learning_rate": 7.599241480953112e-06,
"loss": 0.1388,
"step": 7300
},
{
"epoch": 6.73,
"grad_norm": 29.38679313659668,
"learning_rate": 7.229350347067426e-06,
"loss": 0.1346,
"step": 7400
},
{
"epoch": 6.82,
"grad_norm": 21.710355758666992,
"learning_rate": 6.865808604814564e-06,
"loss": 0.1163,
"step": 7500
},
{
"epoch": 6.92,
"grad_norm": 9.085442543029785,
"learning_rate": 6.508913303862144e-06,
"loss": 0.1651,
"step": 7600
},
{
"epoch": 7.0,
"eval_accuracy": 0.9399602385685885,
"eval_loss": 0.26307573914527893,
"eval_runtime": 107.2506,
"eval_samples_per_second": 23.45,
"eval_steps_per_second": 1.473,
"step": 7693
},
{
"epoch": 7.01,
"grad_norm": 29.07054328918457,
"learning_rate": 6.1589560630758656e-06,
"loss": 0.1045,
"step": 7700
},
{
"epoch": 7.1,
"grad_norm": 11.164506912231445,
"learning_rate": 5.8162228322380155e-06,
"loss": 0.1078,
"step": 7800
},
{
"epoch": 7.19,
"grad_norm": 0.5061851739883423,
"learning_rate": 5.480993658398129e-06,
"loss": 0.1381,
"step": 7900
},
{
"epoch": 7.28,
"grad_norm": 0.5022339820861816,
"learning_rate": 5.153542457046737e-06,
"loss": 0.1003,
"step": 8000
},
{
"epoch": 7.37,
"grad_norm": 5.520193099975586,
"learning_rate": 4.834136788299248e-06,
"loss": 0.1275,
"step": 8100
},
{
"epoch": 7.46,
"grad_norm": 1.0128473043441772,
"learning_rate": 4.523037638272822e-06,
"loss": 0.0964,
"step": 8200
},
{
"epoch": 7.55,
"grad_norm": 26.587905883789062,
"learning_rate": 4.220499205834783e-06,
"loss": 0.107,
"step": 8300
},
{
"epoch": 7.64,
"grad_norm": 3.74889874458313,
"learning_rate": 3.926768694896931e-06,
"loss": 0.1205,
"step": 8400
},
{
"epoch": 7.73,
"grad_norm": 0.6887036561965942,
"learning_rate": 3.6420861124254607e-06,
"loss": 0.1566,
"step": 8500
},
{
"epoch": 7.83,
"grad_norm": 0.371654748916626,
"learning_rate": 3.3666840723314145e-06,
"loss": 0.1218,
"step": 8600
},
{
"epoch": 7.92,
"grad_norm": 19.992464065551758,
"learning_rate": 3.1007876054020724e-06,
"loss": 0.1155,
"step": 8700
},
{
"epoch": 8.0,
"eval_accuracy": 0.9487077534791253,
"eval_loss": 0.255238801240921,
"eval_runtime": 104.337,
"eval_samples_per_second": 24.105,
"eval_steps_per_second": 1.514,
"step": 8792
},
{
"epoch": 8.01,
"grad_norm": 2.467439651489258,
"learning_rate": 2.8446139754284486e-06,
"loss": 0.1024,
"step": 8800
},
{
"epoch": 8.1,
"grad_norm": 12.735504150390625,
"learning_rate": 2.5983725016792574e-06,
"loss": 0.1194,
"step": 8900
},
{
"epoch": 8.19,
"grad_norm": 0.6167101263999939,
"learning_rate": 2.36226438786627e-06,
"loss": 0.1038,
"step": 9000
},
{
"epoch": 8.28,
"grad_norm": 31.719877243041992,
"learning_rate": 2.1364825577409424e-06,
"loss": 0.0891,
"step": 9100
},
{
"epoch": 8.37,
"grad_norm": 15.32922649383545,
"learning_rate": 1.9212114974565664e-06,
"loss": 0.1254,
"step": 9200
},
{
"epoch": 8.46,
"grad_norm": 0.023393109440803528,
"learning_rate": 1.7166271048247796e-06,
"loss": 0.0849,
"step": 9300
},
{
"epoch": 8.55,
"grad_norm": 14.578068733215332,
"learning_rate": 1.5228965455896054e-06,
"loss": 0.0979,
"step": 9400
},
{
"epoch": 8.64,
"grad_norm": 21.40913963317871,
"learning_rate": 1.3401781168364591e-06,
"loss": 0.1038,
"step": 9500
},
{
"epoch": 8.74,
"grad_norm": 0.3591146469116211,
"learning_rate": 1.1686211176477208e-06,
"loss": 0.1096,
"step": 9600
},
{
"epoch": 8.83,
"grad_norm": 18.169504165649414,
"learning_rate": 1.00836572711058e-06,
"loss": 0.1267,
"step": 9700
},
{
"epoch": 8.92,
"grad_norm": 4.115021228790283,
"learning_rate": 8.595428897768071e-07,
"loss": 0.1141,
"step": 9800
},
{
"epoch": 9.0,
"eval_accuracy": 0.9487077534791253,
"eval_loss": 0.2546464800834656,
"eval_runtime": 107.6888,
"eval_samples_per_second": 23.354,
"eval_steps_per_second": 1.467,
"step": 9891
},
{
"epoch": 9.01,
"grad_norm": 12.443672180175781,
"learning_rate": 7.222742086680756e-07,
"loss": 0.1126,
"step": 9900
},
{
"epoch": 9.1,
"grad_norm": 0.10338298976421356,
"learning_rate": 5.966718459142196e-07,
"loss": 0.0849,
"step": 10000
},
{
"epoch": 9.19,
"grad_norm": 19.319406509399414,
"learning_rate": 4.82838431105655e-07,
"loss": 0.1008,
"step": 10100
},
{
"epoch": 9.28,
"grad_norm": 0.38914769887924194,
"learning_rate": 3.808669774348167e-07,
"loss": 0.1073,
"step": 10200
},
{
"epoch": 9.37,
"grad_norm": 0.38819536566734314,
"learning_rate": 2.908408056951578e-07,
"loss": 0.0861,
"step": 10300
},
{
"epoch": 9.46,
"grad_norm": 0.26031410694122314,
"learning_rate": 2.1283347619979243e-07,
"loss": 0.0736,
"step": 10400
},
{
"epoch": 9.55,
"grad_norm": 0.12077673524618149,
"learning_rate": 1.4690872867542892e-07,
"loss": 0.1175,
"step": 10500
},
{
"epoch": 9.65,
"grad_norm": 14.284637451171875,
"learning_rate": 9.312043018067762e-08,
"loss": 0.1106,
"step": 10600
},
{
"epoch": 9.74,
"grad_norm": 9.144432067871094,
"learning_rate": 5.1512531091333914e-08,
"loss": 0.131,
"step": 10700
},
{
"epoch": 9.83,
"grad_norm": 6.6877899169921875,
"learning_rate": 2.211902918855313e-08,
"loss": 0.117,
"step": 10800
},
{
"epoch": 9.92,
"grad_norm": 4.6897478103637695,
"learning_rate": 4.963941879295164e-09,
"loss": 0.0903,
"step": 10900
},
{
"epoch": 10.0,
"eval_accuracy": 0.9479125248508946,
"eval_loss": 0.2555418610572815,
"eval_runtime": 104.5334,
"eval_samples_per_second": 24.059,
"eval_steps_per_second": 1.511,
"step": 10990
},
{
"epoch": 10.0,
"step": 10990,
"total_flos": 4.09349935387607e+19,
"train_loss": 0.2527255959896959,
"train_runtime": 16756.8112,
"train_samples_per_second": 10.492,
"train_steps_per_second": 0.656
}
],
"logging_steps": 100,
"max_steps": 10990,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 4.09349935387607e+19,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}