{
"best_metric": 0.8839858413628905,
"best_model_checkpoint": "videomae-surf-analytics-sans-wandb\\checkpoint-1850",
"epoch": 4.197837837837838,
"eval_steps": 500,
"global_step": 1850,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005405405405405406,
"grad_norm": 11.531332969665527,
"learning_rate": 2.702702702702703e-06,
"loss": 1.4317,
"step": 10
},
{
"epoch": 0.010810810810810811,
"grad_norm": 11.917078971862793,
"learning_rate": 5.405405405405406e-06,
"loss": 1.3912,
"step": 20
},
{
"epoch": 0.016216216216216217,
"grad_norm": 10.456705093383789,
"learning_rate": 8.108108108108109e-06,
"loss": 1.3411,
"step": 30
},
{
"epoch": 0.021621621621621623,
"grad_norm": 10.632660865783691,
"learning_rate": 1.0810810810810812e-05,
"loss": 1.2104,
"step": 40
},
{
"epoch": 0.02702702702702703,
"grad_norm": 10.985200881958008,
"learning_rate": 1.3513513513513515e-05,
"loss": 1.4311,
"step": 50
},
{
"epoch": 0.032432432432432434,
"grad_norm": 12.063570976257324,
"learning_rate": 1.6216216216216218e-05,
"loss": 0.9259,
"step": 60
},
{
"epoch": 0.03783783783783784,
"grad_norm": 8.3969144821167,
"learning_rate": 1.891891891891892e-05,
"loss": 1.2204,
"step": 70
},
{
"epoch": 0.043243243243243246,
"grad_norm": 13.087193489074707,
"learning_rate": 2.1621621621621624e-05,
"loss": 1.215,
"step": 80
},
{
"epoch": 0.04864864864864865,
"grad_norm": 12.476021766662598,
"learning_rate": 2.4324324324324327e-05,
"loss": 1.4883,
"step": 90
},
{
"epoch": 0.05405405405405406,
"grad_norm": 10.030627250671387,
"learning_rate": 2.702702702702703e-05,
"loss": 1.2363,
"step": 100
},
{
"epoch": 0.05945945945945946,
"grad_norm": 15.547861099243164,
"learning_rate": 2.9729729729729733e-05,
"loss": 1.4148,
"step": 110
},
{
"epoch": 0.06486486486486487,
"grad_norm": 11.590038299560547,
"learning_rate": 3.2432432432432436e-05,
"loss": 1.3528,
"step": 120
},
{
"epoch": 0.07027027027027027,
"grad_norm": 12.326528549194336,
"learning_rate": 3.513513513513514e-05,
"loss": 1.2941,
"step": 130
},
{
"epoch": 0.07567567567567568,
"grad_norm": 8.991989135742188,
"learning_rate": 3.783783783783784e-05,
"loss": 1.1322,
"step": 140
},
{
"epoch": 0.08108108108108109,
"grad_norm": 17.619140625,
"learning_rate": 4.0540540540540545e-05,
"loss": 1.2129,
"step": 150
},
{
"epoch": 0.08648648648648649,
"grad_norm": 11.91755485534668,
"learning_rate": 4.324324324324325e-05,
"loss": 1.2466,
"step": 160
},
{
"epoch": 0.0918918918918919,
"grad_norm": 9.150936126708984,
"learning_rate": 4.594594594594595e-05,
"loss": 1.2624,
"step": 170
},
{
"epoch": 0.0972972972972973,
"grad_norm": 12.12965202331543,
"learning_rate": 4.8648648648648654e-05,
"loss": 1.2423,
"step": 180
},
{
"epoch": 0.10270270270270271,
"grad_norm": 11.131547927856445,
"learning_rate": 4.984984984984985e-05,
"loss": 1.0887,
"step": 190
},
{
"epoch": 0.10810810810810811,
"grad_norm": 16.159334182739258,
"learning_rate": 4.954954954954955e-05,
"loss": 1.2204,
"step": 200
},
{
"epoch": 0.11351351351351352,
"grad_norm": 14.13427734375,
"learning_rate": 4.9249249249249253e-05,
"loss": 1.2002,
"step": 210
},
{
"epoch": 0.11891891891891893,
"grad_norm": 6.860474586486816,
"learning_rate": 4.8948948948948954e-05,
"loss": 1.6223,
"step": 220
},
{
"epoch": 0.12432432432432433,
"grad_norm": 17.881906509399414,
"learning_rate": 4.8648648648648654e-05,
"loss": 1.3443,
"step": 230
},
{
"epoch": 0.12972972972972974,
"grad_norm": 9.10927963256836,
"learning_rate": 4.834834834834835e-05,
"loss": 1.4485,
"step": 240
},
{
"epoch": 0.13513513513513514,
"grad_norm": 6.366971015930176,
"learning_rate": 4.804804804804805e-05,
"loss": 0.9077,
"step": 250
},
{
"epoch": 0.14054054054054055,
"grad_norm": 22.29749870300293,
"learning_rate": 4.774774774774775e-05,
"loss": 1.473,
"step": 260
},
{
"epoch": 0.14594594594594595,
"grad_norm": 19.388036727905273,
"learning_rate": 4.744744744744745e-05,
"loss": 1.1213,
"step": 270
},
{
"epoch": 0.15135135135135136,
"grad_norm": 6.246539115905762,
"learning_rate": 4.714714714714715e-05,
"loss": 0.7922,
"step": 280
},
{
"epoch": 0.15675675675675677,
"grad_norm": 8.21817684173584,
"learning_rate": 4.684684684684685e-05,
"loss": 1.2757,
"step": 290
},
{
"epoch": 0.16216216216216217,
"grad_norm": 18.414051055908203,
"learning_rate": 4.654654654654655e-05,
"loss": 1.3382,
"step": 300
},
{
"epoch": 0.16756756756756758,
"grad_norm": 11.922811508178711,
"learning_rate": 4.624624624624625e-05,
"loss": 0.9546,
"step": 310
},
{
"epoch": 0.17297297297297298,
"grad_norm": 15.078409194946289,
"learning_rate": 4.594594594594595e-05,
"loss": 1.7355,
"step": 320
},
{
"epoch": 0.1783783783783784,
"grad_norm": 10.318249702453613,
"learning_rate": 4.5645645645645645e-05,
"loss": 1.2742,
"step": 330
},
{
"epoch": 0.1837837837837838,
"grad_norm": 12.838833808898926,
"learning_rate": 4.5345345345345345e-05,
"loss": 1.5675,
"step": 340
},
{
"epoch": 0.1891891891891892,
"grad_norm": 12.560096740722656,
"learning_rate": 4.5045045045045046e-05,
"loss": 1.3238,
"step": 350
},
{
"epoch": 0.1945945945945946,
"grad_norm": 3.9458367824554443,
"learning_rate": 4.4744744744744746e-05,
"loss": 1.4337,
"step": 360
},
{
"epoch": 0.2,
"grad_norm": 14.40661334991455,
"learning_rate": 4.4444444444444447e-05,
"loss": 1.3096,
"step": 370
},
{
"epoch": 0.20054054054054055,
"eval_accuracy": 0.45901639344262296,
"eval_f1": 0.28881930373917847,
"eval_loss": 1.3266738653182983,
"eval_runtime": 589.7403,
"eval_samples_per_second": 0.207,
"eval_steps_per_second": 0.103,
"step": 371
},
{
"epoch": 1.0048648648648648,
"grad_norm": 11.33437728881836,
"learning_rate": 4.414414414414415e-05,
"loss": 1.1584,
"step": 380
},
{
"epoch": 1.0102702702702702,
"grad_norm": 12.032054901123047,
"learning_rate": 4.384384384384385e-05,
"loss": 1.3792,
"step": 390
},
{
"epoch": 1.0156756756756757,
"grad_norm": 10.13355541229248,
"learning_rate": 4.354354354354355e-05,
"loss": 1.3946,
"step": 400
},
{
"epoch": 1.021081081081081,
"grad_norm": 8.573601722717285,
"learning_rate": 4.324324324324325e-05,
"loss": 1.4136,
"step": 410
},
{
"epoch": 1.0264864864864864,
"grad_norm": 6.312205791473389,
"learning_rate": 4.294294294294294e-05,
"loss": 1.3542,
"step": 420
},
{
"epoch": 1.0318918918918918,
"grad_norm": 9.014967918395996,
"learning_rate": 4.264264264264264e-05,
"loss": 1.3646,
"step": 430
},
{
"epoch": 1.0372972972972974,
"grad_norm": 8.805109977722168,
"learning_rate": 4.234234234234234e-05,
"loss": 1.2612,
"step": 440
},
{
"epoch": 1.0427027027027027,
"grad_norm": 5.080739974975586,
"learning_rate": 4.204204204204204e-05,
"loss": 1.1235,
"step": 450
},
{
"epoch": 1.048108108108108,
"grad_norm": 9.063738822937012,
"learning_rate": 4.1741741741741744e-05,
"loss": 1.1287,
"step": 460
},
{
"epoch": 1.0535135135135134,
"grad_norm": 10.316679954528809,
"learning_rate": 4.1441441441441444e-05,
"loss": 1.043,
"step": 470
},
{
"epoch": 1.058918918918919,
"grad_norm": 7.451129913330078,
"learning_rate": 4.1141141141141144e-05,
"loss": 1.1908,
"step": 480
},
{
"epoch": 1.0643243243243243,
"grad_norm": 4.216222286224365,
"learning_rate": 4.0840840840840845e-05,
"loss": 1.132,
"step": 490
},
{
"epoch": 1.0697297297297297,
"grad_norm": 9.235713005065918,
"learning_rate": 4.0540540540540545e-05,
"loss": 1.7454,
"step": 500
},
{
"epoch": 1.075135135135135,
"grad_norm": 12.876686096191406,
"learning_rate": 4.024024024024024e-05,
"loss": 1.0389,
"step": 510
},
{
"epoch": 1.0805405405405406,
"grad_norm": 9.994712829589844,
"learning_rate": 3.993993993993994e-05,
"loss": 1.0837,
"step": 520
},
{
"epoch": 1.085945945945946,
"grad_norm": 15.395247459411621,
"learning_rate": 3.963963963963964e-05,
"loss": 1.4464,
"step": 530
},
{
"epoch": 1.0913513513513513,
"grad_norm": 8.740015029907227,
"learning_rate": 3.933933933933934e-05,
"loss": 1.0569,
"step": 540
},
{
"epoch": 1.0967567567567567,
"grad_norm": 6.516251087188721,
"learning_rate": 3.903903903903904e-05,
"loss": 1.2919,
"step": 550
},
{
"epoch": 1.1021621621621622,
"grad_norm": 9.043025970458984,
"learning_rate": 3.873873873873874e-05,
"loss": 1.2064,
"step": 560
},
{
"epoch": 1.1075675675675676,
"grad_norm": 7.937928199768066,
"learning_rate": 3.843843843843844e-05,
"loss": 1.1334,
"step": 570
},
{
"epoch": 1.112972972972973,
"grad_norm": 7.148234844207764,
"learning_rate": 3.813813813813814e-05,
"loss": 1.221,
"step": 580
},
{
"epoch": 1.1183783783783783,
"grad_norm": 18.380756378173828,
"learning_rate": 3.783783783783784e-05,
"loss": 1.0975,
"step": 590
},
{
"epoch": 1.1237837837837839,
"grad_norm": 14.88819408416748,
"learning_rate": 3.7537537537537536e-05,
"loss": 0.7921,
"step": 600
},
{
"epoch": 1.1291891891891892,
"grad_norm": 29.024171829223633,
"learning_rate": 3.7237237237237236e-05,
"loss": 1.1379,
"step": 610
},
{
"epoch": 1.1345945945945946,
"grad_norm": 17.080385208129883,
"learning_rate": 3.693693693693694e-05,
"loss": 0.7285,
"step": 620
},
{
"epoch": 1.1400000000000001,
"grad_norm": 5.5788187980651855,
"learning_rate": 3.663663663663664e-05,
"loss": 1.2987,
"step": 630
},
{
"epoch": 1.1454054054054055,
"grad_norm": 8.62403392791748,
"learning_rate": 3.633633633633634e-05,
"loss": 1.162,
"step": 640
},
{
"epoch": 1.1508108108108108,
"grad_norm": 13.667816162109375,
"learning_rate": 3.603603603603604e-05,
"loss": 1.1039,
"step": 650
},
{
"epoch": 1.1562162162162162,
"grad_norm": 15.54757022857666,
"learning_rate": 3.573573573573574e-05,
"loss": 1.1578,
"step": 660
},
{
"epoch": 1.1616216216216215,
"grad_norm": 21.381877899169922,
"learning_rate": 3.543543543543544e-05,
"loss": 1.1655,
"step": 670
},
{
"epoch": 1.167027027027027,
"grad_norm": 16.7823429107666,
"learning_rate": 3.513513513513514e-05,
"loss": 1.2388,
"step": 680
},
{
"epoch": 1.1724324324324324,
"grad_norm": 0.6257322430610657,
"learning_rate": 3.483483483483483e-05,
"loss": 0.4094,
"step": 690
},
{
"epoch": 1.1778378378378378,
"grad_norm": 6.534117221832275,
"learning_rate": 3.453453453453453e-05,
"loss": 1.1549,
"step": 700
},
{
"epoch": 1.1832432432432431,
"grad_norm": 13.602584838867188,
"learning_rate": 3.4234234234234234e-05,
"loss": 1.077,
"step": 710
},
{
"epoch": 1.1886486486486487,
"grad_norm": 18.255807876586914,
"learning_rate": 3.3933933933933934e-05,
"loss": 1.1547,
"step": 720
},
{
"epoch": 1.194054054054054,
"grad_norm": 0.5059702396392822,
"learning_rate": 3.3633633633633635e-05,
"loss": 1.1022,
"step": 730
},
{
"epoch": 1.1994594594594594,
"grad_norm": 0.5154173970222473,
"learning_rate": 3.3333333333333335e-05,
"loss": 1.0586,
"step": 740
},
{
"epoch": 1.2005405405405405,
"eval_accuracy": 0.5819672131147541,
"eval_f1": 0.5034862905182809,
"eval_loss": 1.2865582704544067,
"eval_runtime": 398.9746,
"eval_samples_per_second": 0.306,
"eval_steps_per_second": 0.153,
"step": 742
},
{
"epoch": 2.0043243243243243,
"grad_norm": 1.7890368700027466,
"learning_rate": 3.3033033033033035e-05,
"loss": 0.5098,
"step": 750
},
{
"epoch": 2.0097297297297296,
"grad_norm": 0.3416980504989624,
"learning_rate": 3.2732732732732736e-05,
"loss": 0.4915,
"step": 760
},
{
"epoch": 2.015135135135135,
"grad_norm": 38.26604461669922,
"learning_rate": 3.2432432432432436e-05,
"loss": 1.6502,
"step": 770
},
{
"epoch": 2.0205405405405403,
"grad_norm": 0.615081787109375,
"learning_rate": 3.213213213213213e-05,
"loss": 0.5299,
"step": 780
},
{
"epoch": 2.025945945945946,
"grad_norm": 0.5157684683799744,
"learning_rate": 3.183183183183183e-05,
"loss": 0.9929,
"step": 790
},
{
"epoch": 2.0313513513513515,
"grad_norm": 7.0944318771362305,
"learning_rate": 3.153153153153153e-05,
"loss": 0.836,
"step": 800
},
{
"epoch": 2.036756756756757,
"grad_norm": 35.1708984375,
"learning_rate": 3.123123123123123e-05,
"loss": 0.6723,
"step": 810
},
{
"epoch": 2.042162162162162,
"grad_norm": 21.07259750366211,
"learning_rate": 3.093093093093093e-05,
"loss": 1.307,
"step": 820
},
{
"epoch": 2.0475675675675675,
"grad_norm": 8.620600700378418,
"learning_rate": 3.063063063063063e-05,
"loss": 0.7787,
"step": 830
},
{
"epoch": 2.052972972972973,
"grad_norm": 5.618999004364014,
"learning_rate": 3.0330330330330332e-05,
"loss": 0.8469,
"step": 840
},
{
"epoch": 2.0583783783783782,
"grad_norm": 23.006450653076172,
"learning_rate": 3.0030030030030033e-05,
"loss": 1.4332,
"step": 850
},
{
"epoch": 2.0637837837837836,
"grad_norm": 28.410768508911133,
"learning_rate": 2.9729729729729733e-05,
"loss": 1.1213,
"step": 860
},
{
"epoch": 2.0691891891891894,
"grad_norm": 35.26121139526367,
"learning_rate": 2.9429429429429427e-05,
"loss": 1.1048,
"step": 870
},
{
"epoch": 2.0745945945945947,
"grad_norm": 10.972575187683105,
"learning_rate": 2.912912912912913e-05,
"loss": 0.7834,
"step": 880
},
{
"epoch": 2.08,
"grad_norm": 4.512319087982178,
"learning_rate": 2.882882882882883e-05,
"loss": 0.3903,
"step": 890
},
{
"epoch": 2.0854054054054054,
"grad_norm": 14.238494873046875,
"learning_rate": 2.852852852852853e-05,
"loss": 1.626,
"step": 900
},
{
"epoch": 2.0908108108108108,
"grad_norm": 15.402420997619629,
"learning_rate": 2.8228228228228232e-05,
"loss": 1.1367,
"step": 910
},
{
"epoch": 2.096216216216216,
"grad_norm": 1.233629822731018,
"learning_rate": 2.7927927927927926e-05,
"loss": 0.844,
"step": 920
},
{
"epoch": 2.1016216216216215,
"grad_norm": 25.64818572998047,
"learning_rate": 2.762762762762763e-05,
"loss": 0.8664,
"step": 930
},
{
"epoch": 2.107027027027027,
"grad_norm": 0.6849324703216553,
"learning_rate": 2.732732732732733e-05,
"loss": 0.5829,
"step": 940
},
{
"epoch": 2.1124324324324326,
"grad_norm": 2.735356092453003,
"learning_rate": 2.702702702702703e-05,
"loss": 0.4426,
"step": 950
},
{
"epoch": 2.117837837837838,
"grad_norm": 40.63231658935547,
"learning_rate": 2.672672672672673e-05,
"loss": 0.4518,
"step": 960
},
{
"epoch": 2.1232432432432433,
"grad_norm": 5.400685787200928,
"learning_rate": 2.6426426426426428e-05,
"loss": 0.4123,
"step": 970
},
{
"epoch": 2.1286486486486487,
"grad_norm": 39.79267883300781,
"learning_rate": 2.6126126126126128e-05,
"loss": 0.8665,
"step": 980
},
{
"epoch": 2.134054054054054,
"grad_norm": 26.877548217773438,
"learning_rate": 2.582582582582583e-05,
"loss": 0.9006,
"step": 990
},
{
"epoch": 2.1394594594594594,
"grad_norm": 17.957853317260742,
"learning_rate": 2.552552552552553e-05,
"loss": 0.1425,
"step": 1000
},
{
"epoch": 2.1448648648648647,
"grad_norm": 1.0850050449371338,
"learning_rate": 2.5225225225225222e-05,
"loss": 0.6361,
"step": 1010
},
{
"epoch": 2.15027027027027,
"grad_norm": 65.59326171875,
"learning_rate": 2.4924924924924926e-05,
"loss": 0.5456,
"step": 1020
},
{
"epoch": 2.155675675675676,
"grad_norm": 75.51756286621094,
"learning_rate": 2.4624624624624627e-05,
"loss": 0.5168,
"step": 1030
},
{
"epoch": 2.161081081081081,
"grad_norm": 0.284772127866745,
"learning_rate": 2.4324324324324327e-05,
"loss": 0.5227,
"step": 1040
},
{
"epoch": 2.1664864864864866,
"grad_norm": 57.656734466552734,
"learning_rate": 2.4024024024024024e-05,
"loss": 0.978,
"step": 1050
},
{
"epoch": 2.171891891891892,
"grad_norm": 5.7262468338012695,
"learning_rate": 2.3723723723723725e-05,
"loss": 0.4327,
"step": 1060
},
{
"epoch": 2.1772972972972973,
"grad_norm": 0.4865647256374359,
"learning_rate": 2.3423423423423425e-05,
"loss": 0.5062,
"step": 1070
},
{
"epoch": 2.1827027027027026,
"grad_norm": 30.51907730102539,
"learning_rate": 2.3123123123123125e-05,
"loss": 0.3505,
"step": 1080
},
{
"epoch": 2.188108108108108,
"grad_norm": 0.11615642160177231,
"learning_rate": 2.2822822822822822e-05,
"loss": 0.7081,
"step": 1090
},
{
"epoch": 2.1935135135135133,
"grad_norm": 30.363637924194336,
"learning_rate": 2.2522522522522523e-05,
"loss": 0.0567,
"step": 1100
},
{
"epoch": 2.198918918918919,
"grad_norm": 0.29021716117858887,
"learning_rate": 2.2222222222222223e-05,
"loss": 0.9781,
"step": 1110
},
{
"epoch": 2.2005405405405405,
"eval_accuracy": 0.7459016393442623,
"eval_f1": 0.7466341940069591,
"eval_loss": 0.7952272891998291,
"eval_runtime": 615.8439,
"eval_samples_per_second": 0.198,
"eval_steps_per_second": 0.099,
"step": 1113
},
{
"epoch": 3.003783783783784,
"grad_norm": 0.5570007562637329,
"learning_rate": 2.1921921921921924e-05,
"loss": 0.8402,
"step": 1120
},
{
"epoch": 3.0091891891891893,
"grad_norm": 0.30576571822166443,
"learning_rate": 2.1621621621621624e-05,
"loss": 0.5837,
"step": 1130
},
{
"epoch": 3.0145945945945947,
"grad_norm": 0.4280945956707001,
"learning_rate": 2.132132132132132e-05,
"loss": 0.609,
"step": 1140
},
{
"epoch": 3.02,
"grad_norm": 0.037259869277477264,
"learning_rate": 2.102102102102102e-05,
"loss": 0.1856,
"step": 1150
},
{
"epoch": 3.0254054054054054,
"grad_norm": 0.031064892187714577,
"learning_rate": 2.0720720720720722e-05,
"loss": 0.1205,
"step": 1160
},
{
"epoch": 3.0308108108108107,
"grad_norm": 0.029491625726222992,
"learning_rate": 2.0420420420420422e-05,
"loss": 0.4757,
"step": 1170
},
{
"epoch": 3.036216216216216,
"grad_norm": 0.06313157826662064,
"learning_rate": 2.012012012012012e-05,
"loss": 0.54,
"step": 1180
},
{
"epoch": 3.0416216216216214,
"grad_norm": 44.53404235839844,
"learning_rate": 1.981981981981982e-05,
"loss": 0.7178,
"step": 1190
},
{
"epoch": 3.047027027027027,
"grad_norm": 28.65042495727539,
"learning_rate": 1.951951951951952e-05,
"loss": 0.2756,
"step": 1200
},
{
"epoch": 3.0524324324324326,
"grad_norm": 3.5685808658599854,
"learning_rate": 1.921921921921922e-05,
"loss": 0.2041,
"step": 1210
},
{
"epoch": 3.057837837837838,
"grad_norm": 100.94510650634766,
"learning_rate": 1.891891891891892e-05,
"loss": 0.1725,
"step": 1220
},
{
"epoch": 3.0632432432432433,
"grad_norm": 69.8983383178711,
"learning_rate": 1.8618618618618618e-05,
"loss": 0.8736,
"step": 1230
},
{
"epoch": 3.0686486486486486,
"grad_norm": 15.938907623291016,
"learning_rate": 1.831831831831832e-05,
"loss": 0.3815,
"step": 1240
},
{
"epoch": 3.074054054054054,
"grad_norm": 14.810787200927734,
"learning_rate": 1.801801801801802e-05,
"loss": 0.3635,
"step": 1250
},
{
"epoch": 3.0794594594594593,
"grad_norm": 18.970157623291016,
"learning_rate": 1.771771771771772e-05,
"loss": 0.3695,
"step": 1260
},
{
"epoch": 3.0848648648648647,
"grad_norm": 0.7603211998939514,
"learning_rate": 1.7417417417417416e-05,
"loss": 0.4226,
"step": 1270
},
{
"epoch": 3.0902702702702705,
"grad_norm": 0.04897380620241165,
"learning_rate": 1.7117117117117117e-05,
"loss": 0.0055,
"step": 1280
},
{
"epoch": 3.095675675675676,
"grad_norm": 0.04457269236445427,
"learning_rate": 1.6816816816816817e-05,
"loss": 0.2734,
"step": 1290
},
{
"epoch": 3.101081081081081,
"grad_norm": 11.417162895202637,
"learning_rate": 1.6516516516516518e-05,
"loss": 0.0319,
"step": 1300
},
{
"epoch": 3.1064864864864865,
"grad_norm": 73.96791076660156,
"learning_rate": 1.6216216216216218e-05,
"loss": 0.3966,
"step": 1310
},
{
"epoch": 3.111891891891892,
"grad_norm": 0.051069825887680054,
"learning_rate": 1.5915915915915915e-05,
"loss": 0.5875,
"step": 1320
},
{
"epoch": 3.117297297297297,
"grad_norm": 0.6849485635757446,
"learning_rate": 1.5615615615615616e-05,
"loss": 0.233,
"step": 1330
},
{
"epoch": 3.1227027027027026,
"grad_norm": 0.09105366468429565,
"learning_rate": 1.5315315315315316e-05,
"loss": 0.4759,
"step": 1340
},
{
"epoch": 3.128108108108108,
"grad_norm": 0.02636696584522724,
"learning_rate": 1.5015015015015016e-05,
"loss": 0.1464,
"step": 1350
},
{
"epoch": 3.1335135135135137,
"grad_norm": 9.379318237304688,
"learning_rate": 1.4714714714714713e-05,
"loss": 0.2798,
"step": 1360
},
{
"epoch": 3.138918918918919,
"grad_norm": 0.11463417857885361,
"learning_rate": 1.4414414414414416e-05,
"loss": 0.3684,
"step": 1370
},
{
"epoch": 3.1443243243243244,
"grad_norm": 28.1702823638916,
"learning_rate": 1.4114114114114116e-05,
"loss": 0.3324,
"step": 1380
},
{
"epoch": 3.1497297297297298,
"grad_norm": 0.024587994441390038,
"learning_rate": 1.3813813813813815e-05,
"loss": 0.3369,
"step": 1390
},
{
"epoch": 3.155135135135135,
"grad_norm": 0.03437427058815956,
"learning_rate": 1.3513513513513515e-05,
"loss": 0.2755,
"step": 1400
},
{
"epoch": 3.1605405405405405,
"grad_norm": 0.034997034817934036,
"learning_rate": 1.3213213213213214e-05,
"loss": 0.0174,
"step": 1410
},
{
"epoch": 3.165945945945946,
"grad_norm": 50.559940338134766,
"learning_rate": 1.2912912912912914e-05,
"loss": 0.3477,
"step": 1420
},
{
"epoch": 3.171351351351351,
"grad_norm": 1.0858763456344604,
"learning_rate": 1.2612612612612611e-05,
"loss": 0.1806,
"step": 1430
},
{
"epoch": 3.176756756756757,
"grad_norm": 95.9212417602539,
"learning_rate": 1.2312312312312313e-05,
"loss": 0.3487,
"step": 1440
},
{
"epoch": 3.1821621621621623,
"grad_norm": 35.4121208190918,
"learning_rate": 1.2012012012012012e-05,
"loss": 0.3323,
"step": 1450
},
{
"epoch": 3.1875675675675677,
"grad_norm": 0.050625383853912354,
"learning_rate": 1.1711711711711713e-05,
"loss": 0.227,
"step": 1460
},
{
"epoch": 3.192972972972973,
"grad_norm": 1.5548391342163086,
"learning_rate": 1.1411411411411411e-05,
"loss": 0.797,
"step": 1470
},
{
"epoch": 3.1983783783783784,
"grad_norm": 0.6227735877037048,
"learning_rate": 1.1111111111111112e-05,
"loss": 0.0034,
"step": 1480
},
{
"epoch": 3.2005405405405405,
"eval_accuracy": 0.8360655737704918,
"eval_f1": 0.8343035855751879,
"eval_loss": 0.7218076586723328,
"eval_runtime": 507.9346,
"eval_samples_per_second": 0.24,
"eval_steps_per_second": 0.12,
"step": 1484
},
{
"epoch": 4.003243243243244,
"grad_norm": 0.12078996747732162,
"learning_rate": 1.0810810810810812e-05,
"loss": 0.8434,
"step": 1490
},
{
"epoch": 4.008648648648649,
"grad_norm": 0.06610610336065292,
"learning_rate": 1.051051051051051e-05,
"loss": 0.4016,
"step": 1500
},
{
"epoch": 4.014054054054054,
"grad_norm": 0.03932103142142296,
"learning_rate": 1.0210210210210211e-05,
"loss": 0.1955,
"step": 1510
},
{
"epoch": 4.019459459459459,
"grad_norm": 0.05058516561985016,
"learning_rate": 9.90990990990991e-06,
"loss": 0.063,
"step": 1520
},
{
"epoch": 4.024864864864865,
"grad_norm": 15.977706909179688,
"learning_rate": 9.60960960960961e-06,
"loss": 0.1466,
"step": 1530
},
{
"epoch": 4.03027027027027,
"grad_norm": 12.75985050201416,
"learning_rate": 9.309309309309309e-06,
"loss": 0.0269,
"step": 1540
},
{
"epoch": 4.035675675675676,
"grad_norm": 65.94677734375,
"learning_rate": 9.00900900900901e-06,
"loss": 0.3077,
"step": 1550
},
{
"epoch": 4.041081081081081,
"grad_norm": 0.05467180907726288,
"learning_rate": 8.708708708708708e-06,
"loss": 0.4931,
"step": 1560
},
{
"epoch": 4.0464864864864865,
"grad_norm": 0.03017774224281311,
"learning_rate": 8.408408408408409e-06,
"loss": 0.0026,
"step": 1570
},
{
"epoch": 4.051891891891892,
"grad_norm": 0.03868546336889267,
"learning_rate": 8.108108108108109e-06,
"loss": 0.6954,
"step": 1580
},
{
"epoch": 4.057297297297297,
"grad_norm": 0.025062745437026024,
"learning_rate": 7.807807807807808e-06,
"loss": 0.2377,
"step": 1590
},
{
"epoch": 4.062702702702703,
"grad_norm": 6.950279712677002,
"learning_rate": 7.507507507507508e-06,
"loss": 0.006,
"step": 1600
},
{
"epoch": 4.068108108108108,
"grad_norm": 0.03524166718125343,
"learning_rate": 7.207207207207208e-06,
"loss": 0.0245,
"step": 1610
},
{
"epoch": 4.073513513513514,
"grad_norm": 0.02506636269390583,
"learning_rate": 6.906906906906907e-06,
"loss": 0.003,
"step": 1620
},
{
"epoch": 4.078918918918919,
"grad_norm": 0.0216878280043602,
"learning_rate": 6.606606606606607e-06,
"loss": 0.0095,
"step": 1630
},
{
"epoch": 4.084324324324324,
"grad_norm": 0.01947568915784359,
"learning_rate": 6.306306306306306e-06,
"loss": 0.0029,
"step": 1640
},
{
"epoch": 4.08972972972973,
"grad_norm": 0.17050786316394806,
"learning_rate": 6.006006006006006e-06,
"loss": 0.32,
"step": 1650
},
{
"epoch": 4.095135135135135,
"grad_norm": 0.025170955806970596,
"learning_rate": 5.705705705705706e-06,
"loss": 0.1245,
"step": 1660
},
{
"epoch": 4.100540540540541,
"grad_norm": 0.024301931262016296,
"learning_rate": 5.405405405405406e-06,
"loss": 0.1942,
"step": 1670
},
{
"epoch": 4.105945945945946,
"grad_norm": 0.03195532411336899,
"learning_rate": 5.105105105105106e-06,
"loss": 0.0019,
"step": 1680
},
{
"epoch": 4.1113513513513515,
"grad_norm": 0.09079870581626892,
"learning_rate": 4.804804804804805e-06,
"loss": 0.0139,
"step": 1690
},
{
"epoch": 4.1167567567567565,
"grad_norm": 0.02011438086628914,
"learning_rate": 4.504504504504505e-06,
"loss": 0.0021,
"step": 1700
},
{
"epoch": 4.122162162162162,
"grad_norm": 0.016972195357084274,
"learning_rate": 4.204204204204204e-06,
"loss": 0.0021,
"step": 1710
},
{
"epoch": 4.127567567567567,
"grad_norm": 17.876731872558594,
"learning_rate": 3.903903903903904e-06,
"loss": 0.4547,
"step": 1720
},
{
"epoch": 4.132972972972973,
"grad_norm": 0.1327604353427887,
"learning_rate": 3.603603603603604e-06,
"loss": 0.0154,
"step": 1730
},
{
"epoch": 4.138378378378379,
"grad_norm": 0.0263642817735672,
"learning_rate": 3.3033033033033035e-06,
"loss": 0.3324,
"step": 1740
},
{
"epoch": 4.143783783783784,
"grad_norm": 56.30789566040039,
"learning_rate": 3.003003003003003e-06,
"loss": 0.3968,
"step": 1750
},
{
"epoch": 4.149189189189189,
"grad_norm": 0.2022549957036972,
"learning_rate": 2.702702702702703e-06,
"loss": 0.3955,
"step": 1760
},
{
"epoch": 4.154594594594594,
"grad_norm": 0.02462898939847946,
"learning_rate": 2.4024024024024026e-06,
"loss": 0.0026,
"step": 1770
},
{
"epoch": 4.16,
"grad_norm": 0.030352883040905,
"learning_rate": 2.102102102102102e-06,
"loss": 0.0999,
"step": 1780
},
{
"epoch": 4.165405405405405,
"grad_norm": 85.04549407958984,
"learning_rate": 1.801801801801802e-06,
"loss": 0.1927,
"step": 1790
},
{
"epoch": 4.170810810810811,
"grad_norm": 1.2970046997070312,
"learning_rate": 1.5015015015015015e-06,
"loss": 0.0034,
"step": 1800
},
{
"epoch": 4.176216216216217,
"grad_norm": 0.02083350159227848,
"learning_rate": 1.2012012012012013e-06,
"loss": 0.014,
"step": 1810
},
{
"epoch": 4.1816216216216215,
"grad_norm": 0.4275698959827423,
"learning_rate": 9.00900900900901e-07,
"loss": 0.2991,
"step": 1820
},
{
"epoch": 4.187027027027027,
"grad_norm": 0.022126413881778717,
"learning_rate": 6.006006006006006e-07,
"loss": 0.2785,
"step": 1830
},
{
"epoch": 4.192432432432432,
"grad_norm": 0.020599039271473885,
"learning_rate": 3.003003003003003e-07,
"loss": 0.0024,
"step": 1840
},
{
"epoch": 4.197837837837838,
"grad_norm": 0.04799671843647957,
"learning_rate": 0.0,
"loss": 0.1895,
"step": 1850
},
{
"epoch": 4.197837837837838,
"eval_accuracy": 0.8852459016393442,
"eval_f1": 0.8839858413628905,
"eval_loss": 0.5544242262840271,
"eval_runtime": 710.05,
"eval_samples_per_second": 0.172,
"eval_steps_per_second": 0.086,
"step": 1850
},
{
"epoch": 4.197837837837838,
"step": 1850,
"total_flos": 4.605536866872066e+18,
"train_loss": 0.7479323436269486,
"train_runtime": 45711.7743,
"train_samples_per_second": 0.081,
"train_steps_per_second": 0.04
},
{
"epoch": 4.197837837837838,
"eval_accuracy": 0.9784075573549258,
"eval_f1": 0.9783336380382718,
"eval_loss": 0.09701067209243774,
"eval_runtime": 4652.924,
"eval_samples_per_second": 0.159,
"eval_steps_per_second": 0.08,
"step": 1850
},
{
"epoch": 4.197837837837838,
"eval_accuracy": 0.851528384279476,
"eval_f1": 0.8496851913824908,
"eval_loss": 0.7380829453468323,
"eval_runtime": 1382.3283,
"eval_samples_per_second": 0.166,
"eval_steps_per_second": 0.083,
"step": 1850
},
{
"epoch": 4.197837837837838,
"eval_accuracy": 0.8852459016393442,
"eval_f1": 0.8839858413628905,
"eval_loss": 0.5544241070747375,
"eval_runtime": 784.0148,
"eval_samples_per_second": 0.156,
"eval_steps_per_second": 0.078,
"step": 1850
}
],
"logging_steps": 10,
"max_steps": 1850,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.605536866872066e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}