{
  "best_metric": 0.9810600876808167,
  "best_model_checkpoint": "../../experiments_checkpoints/MAdAiLab/distilbert/distilbert_base_uncased_patent/checkpoint-1100",
  "epoch": 2.813299232736573,
  "eval_steps": 50,
  "global_step": 1100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.03, "grad_norm": 1.5254361629486084, "learning_rate": 1.9829497016197786e-05, "loss": 2.1132, "step": 10 },
    { "epoch": 0.05, "grad_norm": 2.268423318862915, "learning_rate": 1.9658994032395567e-05, "loss": 1.9257, "step": 20 },
    { "epoch": 0.08, "grad_norm": 1.968531608581543, "learning_rate": 1.9488491048593352e-05, "loss": 1.7735, "step": 30 },
    { "epoch": 0.1, "grad_norm": 2.6922695636749268, "learning_rate": 1.9317988064791137e-05, "loss": 1.6531, "step": 40 },
    { "epoch": 0.13, "grad_norm": 2.287513256072998, "learning_rate": 1.9147485080988918e-05, "loss": 1.5572, "step": 50 },
    { "epoch": 0.13, "eval_accuracy": 0.504, "eval_f1_macro": 0.31714078576589094, "eval_f1_micro": 0.504, "eval_loss": 1.488370656967163, "eval_runtime": 2.5317, "eval_samples_per_second": 1974.973, "eval_steps_per_second": 31.205, "step": 50 },
    { "epoch": 0.15, "grad_norm": 2.2814247608184814, "learning_rate": 1.8976982097186702e-05, "loss": 1.4715, "step": 60 },
    { "epoch": 0.18, "grad_norm": 2.2706797122955322, "learning_rate": 1.8806479113384487e-05, "loss": 1.4136, "step": 70 },
    { "epoch": 0.2, "grad_norm": 2.97063946723938, "learning_rate": 1.863597612958227e-05, "loss": 1.4057, "step": 80 },
    { "epoch": 0.23, "grad_norm": 3.6843924522399902, "learning_rate": 1.8465473145780053e-05, "loss": 1.3567, "step": 90 },
    { "epoch": 0.26, "grad_norm": 2.7738821506500244, "learning_rate": 1.8294970161977838e-05, "loss": 1.2925, "step": 100 },
    { "epoch": 0.26, "eval_accuracy": 0.5634, "eval_f1_macro": 0.38027100016924736, "eval_f1_micro": 0.5634, "eval_loss": 1.2877275943756104, "eval_runtime": 2.485, "eval_samples_per_second": 2012.068, "eval_steps_per_second": 31.791, "step": 100 },
    { "epoch": 0.28, "grad_norm": 3.8574423789978027, "learning_rate": 1.812446717817562e-05, "loss": 1.3244, "step": 110 },
    { "epoch": 0.31, "grad_norm": 3.5320255756378174, "learning_rate": 1.7953964194373403e-05, "loss": 1.3081, "step": 120 },
    { "epoch": 0.33, "grad_norm": 4.041069030761719, "learning_rate": 1.7783461210571188e-05, "loss": 1.2123, "step": 130 },
    { "epoch": 0.36, "grad_norm": 3.909712076187134, "learning_rate": 1.761295822676897e-05, "loss": 1.213, "step": 140 },
    { "epoch": 0.38, "grad_norm": 3.420614004135132, "learning_rate": 1.7442455242966754e-05, "loss": 1.253, "step": 150 },
    { "epoch": 0.38, "eval_accuracy": 0.5974, "eval_f1_macro": 0.4161954643564434, "eval_f1_micro": 0.5974, "eval_loss": 1.2013535499572754, "eval_runtime": 2.5407, "eval_samples_per_second": 1967.977, "eval_steps_per_second": 31.094, "step": 150 },
    { "epoch": 0.41, "grad_norm": 3.2803194522857666, "learning_rate": 1.727195225916454e-05, "loss": 1.1558, "step": 160 },
    { "epoch": 0.43, "grad_norm": 4.284536838531494, "learning_rate": 1.710144927536232e-05, "loss": 1.264, "step": 170 },
    { "epoch": 0.46, "grad_norm": 3.806030511856079, "learning_rate": 1.6930946291560104e-05, "loss": 1.2071, "step": 180 },
    { "epoch": 0.49, "grad_norm": 3.609832763671875, "learning_rate": 1.676044330775789e-05, "loss": 1.2812, "step": 190 },
    { "epoch": 0.51, "grad_norm": 3.784940481185913, "learning_rate": 1.658994032395567e-05, "loss": 1.1591, "step": 200 },
    { "epoch": 0.51, "eval_accuracy": 0.6102, "eval_f1_macro": 0.4468130555229773, "eval_f1_micro": 0.6102, "eval_loss": 1.155762791633606, "eval_runtime": 2.5475, "eval_samples_per_second": 1962.683, "eval_steps_per_second": 31.01, "step": 200 },
    { "epoch": 0.54, "grad_norm": 3.536844253540039, "learning_rate": 1.6419437340153455e-05, "loss": 1.2104, "step": 210 },
    { "epoch": 0.56, "grad_norm": 3.73691987991333, "learning_rate": 1.624893435635124e-05, "loss": 1.1493, "step": 220 },
    { "epoch": 0.59, "grad_norm": 3.1416776180267334, "learning_rate": 1.607843137254902e-05, "loss": 1.1358, "step": 230 },
    { "epoch": 0.61, "grad_norm": 5.260790824890137, "learning_rate": 1.5907928388746805e-05, "loss": 1.157, "step": 240 },
    { "epoch": 0.64, "grad_norm": 3.9229576587677, "learning_rate": 1.573742540494459e-05, "loss": 1.1756, "step": 250 },
    { "epoch": 0.64, "eval_accuracy": 0.6244, "eval_f1_macro": 0.47252612841211433, "eval_f1_micro": 0.6244, "eval_loss": 1.1150593757629395, "eval_runtime": 2.5487, "eval_samples_per_second": 1961.759, "eval_steps_per_second": 30.996, "step": 250 },
    { "epoch": 0.66, "grad_norm": 4.1067376136779785, "learning_rate": 1.556692242114237e-05, "loss": 1.1872, "step": 260 },
    { "epoch": 0.69, "grad_norm": 2.924532651901245, "learning_rate": 1.5396419437340155e-05, "loss": 1.1213, "step": 270 },
    { "epoch": 0.72, "grad_norm": 3.9958877563476562, "learning_rate": 1.5225916453537938e-05, "loss": 1.0359, "step": 280 },
    { "epoch": 0.74, "grad_norm": 4.634373664855957, "learning_rate": 1.5055413469735723e-05, "loss": 1.1428, "step": 290 },
    { "epoch": 0.77, "grad_norm": 3.5679373741149902, "learning_rate": 1.4884910485933506e-05, "loss": 1.1078, "step": 300 },
    { "epoch": 0.77, "eval_accuracy": 0.6268, "eval_f1_macro": 0.4911663115074776, "eval_f1_micro": 0.6268, "eval_loss": 1.1123437881469727, "eval_runtime": 2.5586, "eval_samples_per_second": 1954.199, "eval_steps_per_second": 30.876, "step": 300 },
    { "epoch": 0.79, "grad_norm": 4.359470367431641, "learning_rate": 1.4714407502131289e-05, "loss": 1.1273, "step": 310 },
    { "epoch": 0.82, "grad_norm": 3.406463384628296, "learning_rate": 1.4543904518329073e-05, "loss": 1.125, "step": 320 },
    { "epoch": 0.84, "grad_norm": 3.5030438899993896, "learning_rate": 1.4373401534526856e-05, "loss": 1.0957, "step": 330 },
    { "epoch": 0.87, "grad_norm": 3.138608694076538, "learning_rate": 1.420289855072464e-05, "loss": 1.1255, "step": 340 },
    { "epoch": 0.9, "grad_norm": 4.761528491973877, "learning_rate": 1.4032395566922424e-05, "loss": 1.1463, "step": 350 },
    { "epoch": 0.9, "eval_accuracy": 0.627, "eval_f1_macro": 0.5030300609672864, "eval_f1_micro": 0.627, "eval_loss": 1.083245873451233, "eval_runtime": 2.5556, "eval_samples_per_second": 1956.491, "eval_steps_per_second": 30.913, "step": 350 },
    { "epoch": 0.92, "grad_norm": 3.7402660846710205, "learning_rate": 1.3861892583120207e-05, "loss": 1.1053, "step": 360 },
    { "epoch": 0.95, "grad_norm": 4.072522163391113, "learning_rate": 1.369138959931799e-05, "loss": 1.1052, "step": 370 },
    { "epoch": 0.97, "grad_norm": 4.845565319061279, "learning_rate": 1.3520886615515774e-05, "loss": 1.0088, "step": 380 },
    { "epoch": 1.0, "grad_norm": 3.917280912399292, "learning_rate": 1.3350383631713557e-05, "loss": 1.1336, "step": 390 },
    { "epoch": 1.02, "grad_norm": 4.51039981842041, "learning_rate": 1.317988064791134e-05, "loss": 1.0328, "step": 400 },
    { "epoch": 1.02, "eval_accuracy": 0.6432, "eval_f1_macro": 0.5067962117978813, "eval_f1_micro": 0.6432, "eval_loss": 1.061031699180603, "eval_runtime": 2.5598, "eval_samples_per_second": 1953.242, "eval_steps_per_second": 30.861, "step": 400 },
    { "epoch": 1.05, "grad_norm": 3.588453531265259, "learning_rate": 1.3009377664109125e-05, "loss": 1.0195, "step": 410 },
    { "epoch": 1.07, "grad_norm": 2.9895548820495605, "learning_rate": 1.2838874680306906e-05, "loss": 0.9881, "step": 420 },
    { "epoch": 1.1, "grad_norm": 4.128742694854736, "learning_rate": 1.2668371696504689e-05, "loss": 0.9993, "step": 430 },
    { "epoch": 1.13, "grad_norm": 4.684874534606934, "learning_rate": 1.2497868712702472e-05, "loss": 1.0498, "step": 440 },
    { "epoch": 1.15, "grad_norm": 3.209024667739868, "learning_rate": 1.2327365728900256e-05, "loss": 0.9224, "step": 450 },
    { "epoch": 1.15, "eval_accuracy": 0.6476, "eval_f1_macro": 0.5153111994840125, "eval_f1_micro": 0.6476, "eval_loss": 1.0462298393249512, "eval_runtime": 2.5617, "eval_samples_per_second": 1951.835, "eval_steps_per_second": 30.839, "step": 450 },
    { "epoch": 1.18, "grad_norm": 3.0769877433776855, "learning_rate": 1.215686274509804e-05, "loss": 1.007, "step": 460 },
    { "epoch": 1.2, "grad_norm": 4.804540634155273, "learning_rate": 1.1986359761295822e-05, "loss": 1.0038, "step": 470 },
    { "epoch": 1.23, "grad_norm": 3.50314998626709, "learning_rate": 1.1815856777493607e-05, "loss": 0.8935, "step": 480 },
    { "epoch": 1.25, "grad_norm": 4.707066059112549, "learning_rate": 1.164535379369139e-05, "loss": 0.9407, "step": 490 },
    { "epoch": 1.28, "grad_norm": 4.0851616859436035, "learning_rate": 1.1474850809889173e-05, "loss": 0.9902, "step": 500 },
    { "epoch": 1.28, "eval_accuracy": 0.6448, "eval_f1_macro": 0.5167948177458488, "eval_f1_micro": 0.6448, "eval_loss": 1.0401391983032227, "eval_runtime": 2.5108, "eval_samples_per_second": 1991.368, "eval_steps_per_second": 31.464, "step": 500 },
    { "epoch": 1.3, "grad_norm": 4.020449161529541, "learning_rate": 1.1304347826086957e-05, "loss": 1.0025, "step": 510 },
    { "epoch": 1.33, "grad_norm": 5.4637956619262695, "learning_rate": 1.113384484228474e-05, "loss": 1.0383, "step": 520 },
    { "epoch": 1.36, "grad_norm": 5.55945348739624, "learning_rate": 1.0963341858482523e-05, "loss": 0.9689, "step": 530 },
    { "epoch": 1.38, "grad_norm": 4.01898193359375, "learning_rate": 1.0792838874680308e-05, "loss": 0.9961, "step": 540 },
    { "epoch": 1.41, "grad_norm": 3.1464662551879883, "learning_rate": 1.062233589087809e-05, "loss": 0.9681, "step": 550 },
    { "epoch": 1.41, "eval_accuracy": 0.6546, "eval_f1_macro": 0.5216104456730791, "eval_f1_micro": 0.6546, "eval_loss": 1.025282859802246, "eval_runtime": 2.5614, "eval_samples_per_second": 1952.089, "eval_steps_per_second": 30.843, "step": 550 },
    { "epoch": 1.43, "grad_norm": 4.61997127532959, "learning_rate": 1.0451832907075873e-05, "loss": 0.9857, "step": 560 },
    { "epoch": 1.46, "grad_norm": 5.920619964599609, "learning_rate": 1.0281329923273658e-05, "loss": 0.9589, "step": 570 },
    { "epoch": 1.48, "grad_norm": 4.216548919677734, "learning_rate": 1.0110826939471441e-05, "loss": 0.9166, "step": 580 },
    { "epoch": 1.51, "grad_norm": 4.430570125579834, "learning_rate": 9.940323955669226e-06, "loss": 0.8543, "step": 590 },
    { "epoch": 1.53, "grad_norm": 4.501844882965088, "learning_rate": 9.769820971867009e-06, "loss": 0.9657, "step": 600 },
    { "epoch": 1.53, "eval_accuracy": 0.6564, "eval_f1_macro": 0.524810059031177, "eval_f1_micro": 0.6564, "eval_loss": 1.0122859477996826, "eval_runtime": 2.5621, "eval_samples_per_second": 1951.533, "eval_steps_per_second": 30.834, "step": 600 },
    { "epoch": 1.56, "grad_norm": 4.6970343589782715, "learning_rate": 9.599317988064793e-06, "loss": 0.9968, "step": 610 },
    { "epoch": 1.59, "grad_norm": 4.157351493835449, "learning_rate": 9.428815004262576e-06, "loss": 0.9846, "step": 620 },
    { "epoch": 1.61, "grad_norm": 5.208338737487793, "learning_rate": 9.258312020460359e-06, "loss": 1.0631, "step": 630 },
    { "epoch": 1.64, "grad_norm": 4.8189568519592285, "learning_rate": 9.087809036658142e-06, "loss": 0.8888, "step": 640 },
    { "epoch": 1.66, "grad_norm": 3.9659152030944824, "learning_rate": 8.917306052855925e-06, "loss": 0.9742, "step": 650 },
    { "epoch": 1.66, "eval_accuracy": 0.656, "eval_f1_macro": 0.526278827235069, "eval_f1_micro": 0.656, "eval_loss": 1.0185636281967163, "eval_runtime": 2.563, "eval_samples_per_second": 1950.805, "eval_steps_per_second": 30.823, "step": 650 },
    { "epoch": 1.69, "grad_norm": 4.112762928009033, "learning_rate": 8.74680306905371e-06, "loss": 0.9615, "step": 660 },
    { "epoch": 1.71, "grad_norm": 3.6878299713134766, "learning_rate": 8.576300085251492e-06, "loss": 1.0321, "step": 670 },
    { "epoch": 1.74, "grad_norm": 3.897886276245117, "learning_rate": 8.405797101449275e-06, "loss": 1.0277, "step": 680 },
    { "epoch": 1.76, "grad_norm": 3.835379123687744, "learning_rate": 8.23529411764706e-06, "loss": 0.8851, "step": 690 },
    { "epoch": 1.79, "grad_norm": 6.2837910652160645, "learning_rate": 8.064791133844843e-06, "loss": 0.9443, "step": 700 },
    { "epoch": 1.79, "eval_accuracy": 0.66, "eval_f1_macro": 0.5279438958302367, "eval_f1_micro": 0.66, "eval_loss": 1.0028008222579956, "eval_runtime": 2.5633, "eval_samples_per_second": 1950.59, "eval_steps_per_second": 30.819, "step": 700 },
    { "epoch": 1.82, "grad_norm": 3.7148256301879883, "learning_rate": 7.894288150042626e-06, "loss": 0.9798, "step": 710 },
    { "epoch": 1.84, "grad_norm": 5.453797340393066, "learning_rate": 7.72378516624041e-06, "loss": 1.0153, "step": 720 },
    { "epoch": 1.87, "grad_norm": 4.477136611938477, "learning_rate": 7.553282182438193e-06, "loss": 0.9463, "step": 730 },
    { "epoch": 1.89, "grad_norm": 3.9449195861816406, "learning_rate": 7.382779198635977e-06, "loss": 1.014, "step": 740 },
    { "epoch": 1.92, "grad_norm": 4.440316200256348, "learning_rate": 7.21227621483376e-06, "loss": 0.9944, "step": 750 },
    { "epoch": 1.92, "eval_accuracy": 0.6544, "eval_f1_macro": 0.5324258425472495, "eval_f1_micro": 0.6544, "eval_loss": 0.9999937415122986, "eval_runtime": 2.5656, "eval_samples_per_second": 1948.859, "eval_steps_per_second": 30.792, "step": 750 },
    { "epoch": 1.94, "grad_norm": 4.8219404220581055, "learning_rate": 7.0417732310315436e-06, "loss": 0.9727, "step": 760 },
    { "epoch": 1.97, "grad_norm": 4.6290059089660645, "learning_rate": 6.8712702472293265e-06, "loss": 0.9227, "step": 770 },
    { "epoch": 1.99, "grad_norm": 4.72649621963501, "learning_rate": 6.70076726342711e-06, "loss": 0.9897, "step": 780 },
    { "epoch": 2.02, "grad_norm": 4.765009880065918, "learning_rate": 6.530264279624894e-06, "loss": 0.8902, "step": 790 },
    { "epoch": 2.05, "grad_norm": 4.143588066101074, "learning_rate": 6.359761295822677e-06, "loss": 0.849, "step": 800 },
    { "epoch": 2.05, "eval_accuracy": 0.6588, "eval_f1_macro": 0.5571101896839533, "eval_f1_micro": 0.6588, "eval_loss": 0.9938995838165283, "eval_runtime": 2.5615, "eval_samples_per_second": 1951.974, "eval_steps_per_second": 30.841, "step": 800 },
    { "epoch": 2.07, "grad_norm": 6.541919708251953, "learning_rate": 6.189258312020461e-06, "loss": 0.8816, "step": 810 },
    { "epoch": 2.1, "grad_norm": 3.248116970062256, "learning_rate": 6.018755328218244e-06, "loss": 0.8212, "step": 820 },
    { "epoch": 2.12, "grad_norm": 5.380030155181885, "learning_rate": 5.848252344416027e-06, "loss": 0.9467, "step": 830 },
    { "epoch": 2.15, "grad_norm": 4.269763469696045, "learning_rate": 5.677749360613811e-06, "loss": 0.9103, "step": 840 },
    { "epoch": 2.17, "grad_norm": 3.740556001663208, "learning_rate": 5.507246376811595e-06, "loss": 0.8801, "step": 850 },
    { "epoch": 2.17, "eval_accuracy": 0.6608, "eval_f1_macro": 0.5618086532363267, "eval_f1_micro": 0.6608, "eval_loss": 0.9915580153465271, "eval_runtime": 2.5597, "eval_samples_per_second": 1953.365, "eval_steps_per_second": 30.863, "step": 850 },
    { "epoch": 2.2, "grad_norm": 4.54482364654541, "learning_rate": 5.336743393009378e-06, "loss": 0.9491, "step": 860 },
    { "epoch": 2.23, "grad_norm": 5.141452789306641, "learning_rate": 5.1662404092071615e-06, "loss": 0.8731, "step": 870 },
    { "epoch": 2.25, "grad_norm": 4.374069690704346, "learning_rate": 4.995737425404945e-06, "loss": 0.9414, "step": 880 },
    { "epoch": 2.28, "grad_norm": 4.185967922210693, "learning_rate": 4.825234441602728e-06, "loss": 0.8969, "step": 890 },
    { "epoch": 2.3, "grad_norm": 4.706201076507568, "learning_rate": 4.654731457800512e-06, "loss": 0.9913, "step": 900 },
    { "epoch": 2.3, "eval_accuracy": 0.6634, "eval_f1_macro": 0.5686296090138803, "eval_f1_micro": 0.6634, "eval_loss": 0.9912111163139343, "eval_runtime": 2.5621, "eval_samples_per_second": 1951.503, "eval_steps_per_second": 30.834, "step": 900 },
    { "epoch": 2.33, "grad_norm": 3.95426869392395, "learning_rate": 4.484228473998296e-06, "loss": 0.9097, "step": 910 },
    { "epoch": 2.35, "grad_norm": 4.399575233459473, "learning_rate": 4.313725490196079e-06, "loss": 0.8436, "step": 920 },
    { "epoch": 2.38, "grad_norm": 4.841634750366211, "learning_rate": 4.143222506393862e-06, "loss": 0.8442, "step": 930 },
    { "epoch": 2.4, "grad_norm": 5.710248947143555, "learning_rate": 3.972719522591646e-06, "loss": 0.8611, "step": 940 },
    { "epoch": 2.43, "grad_norm": 5.657437801361084, "learning_rate": 3.802216538789429e-06, "loss": 0.923, "step": 950 },
    { "epoch": 2.43, "eval_accuracy": 0.666, "eval_f1_macro": 0.5738930450140409, "eval_f1_micro": 0.666, "eval_loss": 0.9878548979759216, "eval_runtime": 2.561, "eval_samples_per_second": 1952.369, "eval_steps_per_second": 30.847, "step": 950 },
    { "epoch": 2.46, "grad_norm": 5.360414505004883, "learning_rate": 3.6317135549872124e-06, "loss": 0.9018, "step": 960 },
    { "epoch": 2.48, "grad_norm": 4.188915252685547, "learning_rate": 3.4612105711849957e-06, "loss": 0.9214, "step": 970 },
    { "epoch": 2.51, "grad_norm": 5.122612953186035, "learning_rate": 3.2907075873827795e-06, "loss": 0.7912, "step": 980 },
    { "epoch": 2.53, "grad_norm": 4.7370381355285645, "learning_rate": 3.120204603580563e-06, "loss": 0.9217, "step": 990 },
    { "epoch": 2.56, "grad_norm": 4.897758483886719, "learning_rate": 2.949701619778346e-06, "loss": 0.8935, "step": 1000 },
    { "epoch": 2.56, "eval_accuracy": 0.6642, "eval_f1_macro": 0.5695049533976465, "eval_f1_micro": 0.6642, "eval_loss": 0.9828301072120667, "eval_runtime": 2.5608, "eval_samples_per_second": 1952.481, "eval_steps_per_second": 30.849, "step": 1000 },
    { "epoch": 2.58, "grad_norm": 3.5184578895568848, "learning_rate": 2.77919863597613e-06, "loss": 0.8838, "step": 1010 },
    { "epoch": 2.61, "grad_norm": 3.8302197456359863, "learning_rate": 2.6086956521739132e-06, "loss": 0.836, "step": 1020 },
    { "epoch": 2.63, "grad_norm": 2.884185314178467, "learning_rate": 2.4381926683716966e-06, "loss": 0.8178, "step": 1030 },
    { "epoch": 2.66, "grad_norm": 4.5637102127075195, "learning_rate": 2.2676896845694803e-06, "loss": 0.8695, "step": 1040 },
    { "epoch": 2.69, "grad_norm": 5.02547550201416, "learning_rate": 2.0971867007672637e-06, "loss": 0.8062, "step": 1050 },
    { "epoch": 2.69, "eval_accuracy": 0.6598, "eval_f1_macro": 0.5690831153854151, "eval_f1_micro": 0.6598, "eval_loss": 0.987727165222168, "eval_runtime": 2.5591, "eval_samples_per_second": 1953.824, "eval_steps_per_second": 30.87, "step": 1050 },
    { "epoch": 2.71, "grad_norm": 3.8958821296691895, "learning_rate": 1.926683716965047e-06, "loss": 0.8607, "step": 1060 },
    { "epoch": 2.74, "grad_norm": 4.481761455535889, "learning_rate": 1.7561807331628305e-06, "loss": 0.8746, "step": 1070 },
    { "epoch": 2.76, "grad_norm": 4.964443206787109, "learning_rate": 1.585677749360614e-06, "loss": 0.8464, "step": 1080 },
    { "epoch": 2.79, "grad_norm": 4.224003791809082, "learning_rate": 1.4151747655583974e-06, "loss": 0.8263, "step": 1090 },
    { "epoch": 2.81, "grad_norm": 4.156431198120117, "learning_rate": 1.2446717817561808e-06, "loss": 0.853, "step": 1100 },
    { "epoch": 2.81, "eval_accuracy": 0.6632, "eval_f1_macro": 0.5700577621793445, "eval_f1_micro": 0.6632, "eval_loss": 0.9810600876808167, "eval_runtime": 2.5585, "eval_samples_per_second": 1954.257, "eval_steps_per_second": 30.877, "step": 1100 }
  ],
  "logging_steps": 10,
  "max_steps": 1173,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "total_flos": 2331717234851840.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}