SLM_vs_LLM_experiments/max_seq_length_128_experiments/google_bert/bert_base_uncased_patent/trainer_state.json
{
  "best_metric": 0.9466402530670166,
  "best_model_checkpoint": "../../experiments_checkpoints/MAdAiLab/google_bert/bert_base_uncased_patent/checkpoint-1150",
  "epoch": 3.0,
  "eval_steps": 50,
  "global_step": 1173,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.03, "grad_norm": 6.562531471252441, "learning_rate": 1.9829497016197786e-05, "loss": 2.092, "step": 10 },
    { "epoch": 0.05, "grad_norm": 5.556091785430908, "learning_rate": 1.9658994032395567e-05, "loss": 1.9217, "step": 20 },
    { "epoch": 0.08, "grad_norm": 3.4937126636505127, "learning_rate": 1.9488491048593352e-05, "loss": 1.7892, "step": 30 },
    { "epoch": 0.1, "grad_norm": 7.072673797607422, "learning_rate": 1.9317988064791137e-05, "loss": 1.6988, "step": 40 },
    { "epoch": 0.13, "grad_norm": 3.746295690536499, "learning_rate": 1.9147485080988918e-05, "loss": 1.6046, "step": 50 },
    { "epoch": 0.13, "eval_accuracy": 0.5066, "eval_f1_macro": 0.3622414072151496, "eval_f1_micro": 0.5066, "eval_loss": 1.5103343725204468, "eval_runtime": 4.5743, "eval_samples_per_second": 1093.065, "eval_steps_per_second": 17.27, "step": 50 },
    { "epoch": 0.15, "grad_norm": 3.581632375717163, "learning_rate": 1.8976982097186702e-05, "loss": 1.4775, "step": 60 },
    { "epoch": 0.18, "grad_norm": 4.017510890960693, "learning_rate": 1.8806479113384487e-05, "loss": 1.4175, "step": 70 },
    { "epoch": 0.2, "grad_norm": 5.958718299865723, "learning_rate": 1.863597612958227e-05, "loss": 1.3982, "step": 80 },
    { "epoch": 0.23, "grad_norm": 5.500441551208496, "learning_rate": 1.8465473145780053e-05, "loss": 1.3637, "step": 90 },
    { "epoch": 0.26, "grad_norm": 4.697450160980225, "learning_rate": 1.8294970161977838e-05, "loss": 1.3113, "step": 100 },
    { "epoch": 0.26, "eval_accuracy": 0.5716, "eval_f1_macro": 0.39780101459747974, "eval_f1_micro": 0.5716, "eval_loss": 1.2755392789840698, "eval_runtime": 4.626, "eval_samples_per_second": 1080.836, "eval_steps_per_second": 17.077, "step": 100 },
    { "epoch": 0.28, "grad_norm": 6.337512969970703, "learning_rate": 1.812446717817562e-05, "loss": 1.3074, "step": 110 },
    { "epoch": 0.31, "grad_norm": 5.360457897186279, "learning_rate": 1.7953964194373403e-05, "loss": 1.2943, "step": 120 },
    { "epoch": 0.33, "grad_norm": 4.803097248077393, "learning_rate": 1.7783461210571188e-05, "loss": 1.1829, "step": 130 },
    { "epoch": 0.36, "grad_norm": 7.463130950927734, "learning_rate": 1.761295822676897e-05, "loss": 1.2193, "step": 140 },
    { "epoch": 0.38, "grad_norm": 4.341095447540283, "learning_rate": 1.7442455242966754e-05, "loss": 1.2453, "step": 150 },
    { "epoch": 0.38, "eval_accuracy": 0.6158, "eval_f1_macro": 0.4305629680810001, "eval_f1_micro": 0.6158, "eval_loss": 1.1762741804122925, "eval_runtime": 4.6685, "eval_samples_per_second": 1071.014, "eval_steps_per_second": 16.922, "step": 150 },
    { "epoch": 0.41, "grad_norm": 4.234489440917969, "learning_rate": 1.727195225916454e-05, "loss": 1.1171, "step": 160 },
    { "epoch": 0.43, "grad_norm": 5.114737033843994, "learning_rate": 1.710144927536232e-05, "loss": 1.2366, "step": 170 },
    { "epoch": 0.46, "grad_norm": 4.452943801879883, "learning_rate": 1.6930946291560104e-05, "loss": 1.1706, "step": 180 },
    { "epoch": 0.49, "grad_norm": 4.509500980377197, "learning_rate": 1.676044330775789e-05, "loss": 1.2637, "step": 190 },
    { "epoch": 0.51, "grad_norm": 3.85895037651062, "learning_rate": 1.658994032395567e-05, "loss": 1.1264, "step": 200 },
    { "epoch": 0.51, "eval_accuracy": 0.622, "eval_f1_macro": 0.4367754039643923, "eval_f1_micro": 0.622, "eval_loss": 1.1234737634658813, "eval_runtime": 4.7175, "eval_samples_per_second": 1059.889, "eval_steps_per_second": 16.746, "step": 200 },
    { "epoch": 0.54, "grad_norm": 5.441328048706055, "learning_rate": 1.6419437340153455e-05, "loss": 1.168, "step": 210 },
    { "epoch": 0.56, "grad_norm": 4.400040149688721, "learning_rate": 1.624893435635124e-05, "loss": 1.0968, "step": 220 },
    { "epoch": 0.59, "grad_norm": 4.579102039337158, "learning_rate": 1.607843137254902e-05, "loss": 1.0845, "step": 230 },
    { "epoch": 0.61, "grad_norm": 6.319281101226807, "learning_rate": 1.5907928388746805e-05, "loss": 1.1401, "step": 240 },
    { "epoch": 0.64, "grad_norm": 4.476491928100586, "learning_rate": 1.573742540494459e-05, "loss": 1.1753, "step": 250 },
    { "epoch": 0.64, "eval_accuracy": 0.6336, "eval_f1_macro": 0.4820295228988981, "eval_f1_micro": 0.6336, "eval_loss": 1.0747464895248413, "eval_runtime": 4.7251, "eval_samples_per_second": 1058.177, "eval_steps_per_second": 16.719, "step": 250 },
    { "epoch": 0.66, "grad_norm": 5.473916053771973, "learning_rate": 1.556692242114237e-05, "loss": 1.1651, "step": 260 },
    { "epoch": 0.69, "grad_norm": 4.395834445953369, "learning_rate": 1.5396419437340155e-05, "loss": 1.0921, "step": 270 },
    { "epoch": 0.72, "grad_norm": 5.582020282745361, "learning_rate": 1.5225916453537938e-05, "loss": 1.0134, "step": 280 },
    { "epoch": 0.74, "grad_norm": 5.103556156158447, "learning_rate": 1.5055413469735723e-05, "loss": 1.1172, "step": 290 },
    { "epoch": 0.77, "grad_norm": 4.819868087768555, "learning_rate": 1.4884910485933506e-05, "loss": 1.0741, "step": 300 },
    { "epoch": 0.77, "eval_accuracy": 0.6326, "eval_f1_macro": 0.47950324399687894, "eval_f1_micro": 0.6326, "eval_loss": 1.078079104423523, "eval_runtime": 4.7399, "eval_samples_per_second": 1054.865, "eval_steps_per_second": 16.667, "step": 300 },
    { "epoch": 0.79, "grad_norm": 4.620989799499512, "learning_rate": 1.4714407502131289e-05, "loss": 1.0766, "step": 310 },
    { "epoch": 0.82, "grad_norm": 5.047465801239014, "learning_rate": 1.4543904518329073e-05, "loss": 1.0606, "step": 320 },
    { "epoch": 0.84, "grad_norm": 5.696692943572998, "learning_rate": 1.4373401534526856e-05, "loss": 1.0356, "step": 330 },
    { "epoch": 0.87, "grad_norm": 4.8626813888549805, "learning_rate": 1.420289855072464e-05, "loss": 1.0859, "step": 340 },
    { "epoch": 0.9, "grad_norm": 5.432548522949219, "learning_rate": 1.4032395566922424e-05, "loss": 1.0853, "step": 350 },
    { "epoch": 0.9, "eval_accuracy": 0.6348, "eval_f1_macro": 0.5281318273875469, "eval_f1_micro": 0.6348, "eval_loss": 1.0517743825912476, "eval_runtime": 4.7472, "eval_samples_per_second": 1053.257, "eval_steps_per_second": 16.641, "step": 350 },
    { "epoch": 0.92, "grad_norm": 4.216122150421143, "learning_rate": 1.3861892583120207e-05, "loss": 1.0463, "step": 360 },
    { "epoch": 0.95, "grad_norm": 5.599092960357666, "learning_rate": 1.369138959931799e-05, "loss": 1.0684, "step": 370 },
    { "epoch": 0.97, "grad_norm": 5.5346784591674805, "learning_rate": 1.3520886615515774e-05, "loss": 0.9804, "step": 380 },
    { "epoch": 1.0, "grad_norm": 5.399155616760254, "learning_rate": 1.3350383631713557e-05, "loss": 1.0891, "step": 390 },
    { "epoch": 1.02, "grad_norm": 5.1199164390563965, "learning_rate": 1.317988064791134e-05, "loss": 0.9843, "step": 400 },
    { "epoch": 1.02, "eval_accuracy": 0.6624, "eval_f1_macro": 0.5756355976369484, "eval_f1_micro": 0.6624, "eval_loss": 1.0082650184631348, "eval_runtime": 4.7556, "eval_samples_per_second": 1051.383, "eval_steps_per_second": 16.612, "step": 400 },
    { "epoch": 1.05, "grad_norm": 3.941312074661255, "learning_rate": 1.3009377664109125e-05, "loss": 0.964, "step": 410 },
    { "epoch": 1.07, "grad_norm": 3.8986706733703613, "learning_rate": 1.2838874680306906e-05, "loss": 0.9462, "step": 420 },
    { "epoch": 1.1, "grad_norm": 4.786031723022461, "learning_rate": 1.2668371696504689e-05, "loss": 0.9585, "step": 430 },
    { "epoch": 1.13, "grad_norm": 5.739943027496338, "learning_rate": 1.2497868712702472e-05, "loss": 1.0004, "step": 440 },
    { "epoch": 1.15, "grad_norm": 4.860347270965576, "learning_rate": 1.2327365728900256e-05, "loss": 0.8793, "step": 450 },
    { "epoch": 1.15, "eval_accuracy": 0.6602, "eval_f1_macro": 0.5816153728708701, "eval_f1_micro": 0.6602, "eval_loss": 1.0092687606811523, "eval_runtime": 4.7646, "eval_samples_per_second": 1049.405, "eval_steps_per_second": 16.581, "step": 450 },
    { "epoch": 1.18, "grad_norm": 3.9863805770874023, "learning_rate": 1.215686274509804e-05, "loss": 0.959, "step": 460 },
    { "epoch": 1.2, "grad_norm": 5.580150604248047, "learning_rate": 1.1986359761295822e-05, "loss": 0.9657, "step": 470 },
    { "epoch": 1.23, "grad_norm": 4.814313888549805, "learning_rate": 1.1815856777493607e-05, "loss": 0.8619, "step": 480 },
    { "epoch": 1.25, "grad_norm": 5.757174491882324, "learning_rate": 1.164535379369139e-05, "loss": 0.8855, "step": 490 },
    { "epoch": 1.28, "grad_norm": 4.681449890136719, "learning_rate": 1.1474850809889173e-05, "loss": 0.9351, "step": 500 },
    { "epoch": 1.28, "eval_accuracy": 0.6636, "eval_f1_macro": 0.5725178176073371, "eval_f1_micro": 0.6636, "eval_loss": 0.9900178909301758, "eval_runtime": 4.7707, "eval_samples_per_second": 1048.054, "eval_steps_per_second": 16.559, "step": 500 },
    { "epoch": 1.3, "grad_norm": 4.482601165771484, "learning_rate": 1.1304347826086957e-05, "loss": 0.9631, "step": 510 },
    { "epoch": 1.33, "grad_norm": 7.99098014831543, "learning_rate": 1.113384484228474e-05, "loss": 0.9788, "step": 520 },
    { "epoch": 1.36, "grad_norm": 7.5582733154296875, "learning_rate": 1.0963341858482523e-05, "loss": 0.9438, "step": 530 },
    { "epoch": 1.38, "grad_norm": 5.325821876525879, "learning_rate": 1.0792838874680308e-05, "loss": 0.9424, "step": 540 },
    { "epoch": 1.41, "grad_norm": 3.7921509742736816, "learning_rate": 1.062233589087809e-05, "loss": 0.9035, "step": 550 },
    { "epoch": 1.41, "eval_accuracy": 0.6724, "eval_f1_macro": 0.5823299205106177, "eval_f1_micro": 0.6724, "eval_loss": 0.9779278039932251, "eval_runtime": 4.7757, "eval_samples_per_second": 1046.977, "eval_steps_per_second": 16.542, "step": 550 },
    { "epoch": 1.43, "grad_norm": 4.93736457824707, "learning_rate": 1.0451832907075873e-05, "loss": 0.9182, "step": 560 },
    { "epoch": 1.46, "grad_norm": 6.765481472015381, "learning_rate": 1.0281329923273658e-05, "loss": 0.8979, "step": 570 },
    { "epoch": 1.48, "grad_norm": 5.014901638031006, "learning_rate": 1.0110826939471441e-05, "loss": 0.8682, "step": 580 },
    { "epoch": 1.51, "grad_norm": 4.950315475463867, "learning_rate": 9.940323955669226e-06, "loss": 0.8111, "step": 590 },
    { "epoch": 1.53, "grad_norm": 5.855735778808594, "learning_rate": 9.769820971867009e-06, "loss": 0.9223, "step": 600 },
    { "epoch": 1.53, "eval_accuracy": 0.6742, "eval_f1_macro": 0.5968647036754907, "eval_f1_micro": 0.6742, "eval_loss": 0.9722387790679932, "eval_runtime": 4.7776, "eval_samples_per_second": 1046.554, "eval_steps_per_second": 16.536, "step": 600 },
    { "epoch": 1.56, "grad_norm": 6.120760440826416, "learning_rate": 9.599317988064793e-06, "loss": 0.9423, "step": 610 },
    { "epoch": 1.59, "grad_norm": 4.718777656555176, "learning_rate": 9.428815004262576e-06, "loss": 0.9346, "step": 620 },
    { "epoch": 1.61, "grad_norm": 6.342313289642334, "learning_rate": 9.258312020460359e-06, "loss": 1.0276, "step": 630 },
    { "epoch": 1.64, "grad_norm": 5.830219745635986, "learning_rate": 9.087809036658142e-06, "loss": 0.8352, "step": 640 },
    { "epoch": 1.66, "grad_norm": 5.947691917419434, "learning_rate": 8.917306052855925e-06, "loss": 0.9342, "step": 650 },
    { "epoch": 1.66, "eval_accuracy": 0.6674, "eval_f1_macro": 0.593125938557234, "eval_f1_micro": 0.6674, "eval_loss": 0.9835071563720703, "eval_runtime": 4.7779, "eval_samples_per_second": 1046.489, "eval_steps_per_second": 16.535, "step": 650 },
    { "epoch": 1.69, "grad_norm": 4.4810004234313965, "learning_rate": 8.74680306905371e-06, "loss": 0.8965, "step": 660 },
    { "epoch": 1.71, "grad_norm": 5.318411827087402, "learning_rate": 8.576300085251492e-06, "loss": 1.007, "step": 670 },
    { "epoch": 1.74, "grad_norm": 5.331789493560791, "learning_rate": 8.405797101449275e-06, "loss": 0.9758, "step": 680 },
    { "epoch": 1.76, "grad_norm": 4.849794387817383, "learning_rate": 8.23529411764706e-06, "loss": 0.8396, "step": 690 },
    { "epoch": 1.79, "grad_norm": 7.840462684631348, "learning_rate": 8.064791133844843e-06, "loss": 0.8847, "step": 700 },
    { "epoch": 1.79, "eval_accuracy": 0.6758, "eval_f1_macro": 0.6022459051876843, "eval_f1_micro": 0.6758, "eval_loss": 0.958946704864502, "eval_runtime": 4.7777, "eval_samples_per_second": 1046.528, "eval_steps_per_second": 16.535, "step": 700 },
    { "epoch": 1.82, "grad_norm": 6.0425848960876465, "learning_rate": 7.894288150042626e-06, "loss": 0.9204, "step": 710 },
    { "epoch": 1.84, "grad_norm": 6.606093406677246, "learning_rate": 7.72378516624041e-06, "loss": 0.9759, "step": 720 },
    { "epoch": 1.87, "grad_norm": 5.485508441925049, "learning_rate": 7.553282182438193e-06, "loss": 0.888, "step": 730 },
    { "epoch": 1.89, "grad_norm": 4.693540573120117, "learning_rate": 7.382779198635977e-06, "loss": 0.9657, "step": 740 },
    { "epoch": 1.92, "grad_norm": 5.051286220550537, "learning_rate": 7.21227621483376e-06, "loss": 0.9263, "step": 750 },
    { "epoch": 1.92, "eval_accuracy": 0.6736, "eval_f1_macro": 0.6034216869450798, "eval_f1_micro": 0.6736, "eval_loss": 0.9557965993881226, "eval_runtime": 4.7748, "eval_samples_per_second": 1047.155, "eval_steps_per_second": 16.545, "step": 750 },
    { "epoch": 1.94, "grad_norm": 5.837856769561768, "learning_rate": 7.0417732310315436e-06, "loss": 0.9376, "step": 760 },
    { "epoch": 1.97, "grad_norm": 5.719339847564697, "learning_rate": 6.8712702472293265e-06, "loss": 0.873, "step": 770 },
    { "epoch": 1.99, "grad_norm": 7.034103870391846, "learning_rate": 6.70076726342711e-06, "loss": 0.9496, "step": 780 },
    { "epoch": 2.02, "grad_norm": 5.690976619720459, "learning_rate": 6.530264279624894e-06, "loss": 0.8258, "step": 790 },
    { "epoch": 2.05, "grad_norm": 5.264106273651123, "learning_rate": 6.359761295822677e-06, "loss": 0.7809, "step": 800 },
    { "epoch": 2.05, "eval_accuracy": 0.6768, "eval_f1_macro": 0.6070709070044171, "eval_f1_micro": 0.6768, "eval_loss": 0.9508742094039917, "eval_runtime": 4.7817, "eval_samples_per_second": 1045.655, "eval_steps_per_second": 16.521, "step": 800 },
    { "epoch": 2.07, "grad_norm": 7.548644065856934, "learning_rate": 6.189258312020461e-06, "loss": 0.8295, "step": 810 },
    { "epoch": 2.1, "grad_norm": 4.085268497467041, "learning_rate": 6.018755328218244e-06, "loss": 0.7423, "step": 820 },
    { "epoch": 2.12, "grad_norm": 5.644011497497559, "learning_rate": 5.848252344416027e-06, "loss": 0.8411, "step": 830 },
    { "epoch": 2.15, "grad_norm": 4.83861780166626, "learning_rate": 5.677749360613811e-06, "loss": 0.8216, "step": 840 },
    { "epoch": 2.17, "grad_norm": 4.846010684967041, "learning_rate": 5.507246376811595e-06, "loss": 0.8141, "step": 850 },
    { "epoch": 2.17, "eval_accuracy": 0.6794, "eval_f1_macro": 0.6063240439290968, "eval_f1_micro": 0.6794, "eval_loss": 0.9481638669967651, "eval_runtime": 4.7753, "eval_samples_per_second": 1047.051, "eval_steps_per_second": 16.543, "step": 850 },
    { "epoch": 2.2, "grad_norm": 5.157903671264648, "learning_rate": 5.336743393009378e-06, "loss": 0.8838, "step": 860 },
    { "epoch": 2.23, "grad_norm": 6.258475303649902, "learning_rate": 5.1662404092071615e-06, "loss": 0.7946, "step": 870 },
    { "epoch": 2.25, "grad_norm": 4.669546127319336, "learning_rate": 4.995737425404945e-06, "loss": 0.8488, "step": 880 },
    { "epoch": 2.28, "grad_norm": 4.360635757446289, "learning_rate": 4.825234441602728e-06, "loss": 0.8302, "step": 890 },
    { "epoch": 2.3, "grad_norm": 5.383681297302246, "learning_rate": 4.654731457800512e-06, "loss": 0.8932, "step": 900 },
    { "epoch": 2.3, "eval_accuracy": 0.6764, "eval_f1_macro": 0.6094785902217723, "eval_f1_micro": 0.6764, "eval_loss": 0.9553532004356384, "eval_runtime": 4.7788, "eval_samples_per_second": 1046.298, "eval_steps_per_second": 16.532, "step": 900 },
    { "epoch": 2.33, "grad_norm": 5.206852436065674, "learning_rate": 4.484228473998296e-06, "loss": 0.8359, "step": 910 },
    { "epoch": 2.35, "grad_norm": 6.243586540222168, "learning_rate": 4.313725490196079e-06, "loss": 0.8078, "step": 920 },
    { "epoch": 2.38, "grad_norm": 5.501513481140137, "learning_rate": 4.143222506393862e-06, "loss": 0.7629, "step": 930 },
    { "epoch": 2.4, "grad_norm": 5.847798824310303, "learning_rate": 3.972719522591646e-06, "loss": 0.7841, "step": 940 },
    { "epoch": 2.43, "grad_norm": 5.492492198944092, "learning_rate": 3.802216538789429e-06, "loss": 0.827, "step": 950 },
    { "epoch": 2.43, "eval_accuracy": 0.6784, "eval_f1_macro": 0.6098401822805521, "eval_f1_micro": 0.6784, "eval_loss": 0.9509662389755249, "eval_runtime": 4.7764, "eval_samples_per_second": 1046.821, "eval_steps_per_second": 16.54, "step": 950 },
    { "epoch": 2.46, "grad_norm": 7.3501152992248535, "learning_rate": 3.6317135549872124e-06, "loss": 0.8394, "step": 960 },
    { "epoch": 2.48, "grad_norm": 5.739101886749268, "learning_rate": 3.4612105711849957e-06, "loss": 0.8458, "step": 970 },
    { "epoch": 2.51, "grad_norm": 6.37858772277832, "learning_rate": 3.2907075873827795e-06, "loss": 0.7629, "step": 980 },
    { "epoch": 2.53, "grad_norm": 5.225438594818115, "learning_rate": 3.120204603580563e-06, "loss": 0.866, "step": 990 },
    { "epoch": 2.56, "grad_norm": 5.552731513977051, "learning_rate": 2.949701619778346e-06, "loss": 0.8278, "step": 1000 },
    { "epoch": 2.56, "eval_accuracy": 0.6772, "eval_f1_macro": 0.6055603073179142, "eval_f1_micro": 0.6772, "eval_loss": 0.9564550518989563, "eval_runtime": 4.7776, "eval_samples_per_second": 1046.543, "eval_steps_per_second": 16.535, "step": 1000 },
    { "epoch": 2.58, "grad_norm": 4.111003398895264, "learning_rate": 2.77919863597613e-06, "loss": 0.7933, "step": 1010 },
    { "epoch": 2.61, "grad_norm": 3.8121485710144043, "learning_rate": 2.6086956521739132e-06, "loss": 0.7555, "step": 1020 },
    { "epoch": 2.63, "grad_norm": 4.48773193359375, "learning_rate": 2.4381926683716966e-06, "loss": 0.7593, "step": 1030 },
    { "epoch": 2.66, "grad_norm": 6.070838928222656, "learning_rate": 2.2676896845694803e-06, "loss": 0.7822, "step": 1040 },
    { "epoch": 2.69, "grad_norm": 6.024387836456299, "learning_rate": 2.0971867007672637e-06, "loss": 0.7278, "step": 1050 },
    { "epoch": 2.69, "eval_accuracy": 0.6776, "eval_f1_macro": 0.607961437676987, "eval_f1_micro": 0.6776, "eval_loss": 0.9521195292472839, "eval_runtime": 4.7736, "eval_samples_per_second": 1047.424, "eval_steps_per_second": 16.549, "step": 1050 },
    { "epoch": 2.71, "grad_norm": 5.181501865386963, "learning_rate": 1.926683716965047e-06, "loss": 0.7487, "step": 1060 },
    { "epoch": 2.74, "grad_norm": 5.721589088439941, "learning_rate": 1.7561807331628305e-06, "loss": 0.8056, "step": 1070 },
    { "epoch": 2.76, "grad_norm": 5.678706645965576, "learning_rate": 1.585677749360614e-06, "loss": 0.7794, "step": 1080 },
    { "epoch": 2.79, "grad_norm": 5.376491069793701, "learning_rate": 1.4151747655583974e-06, "loss": 0.7767, "step": 1090 },
    { "epoch": 2.81, "grad_norm": 5.12365198135376, "learning_rate": 1.2446717817561808e-06, "loss": 0.7698, "step": 1100 },
    { "epoch": 2.81, "eval_accuracy": 0.6802, "eval_f1_macro": 0.6099237566638949, "eval_f1_micro": 0.6802, "eval_loss": 0.9474265575408936, "eval_runtime": 4.7752, "eval_samples_per_second": 1047.072, "eval_steps_per_second": 16.544, "step": 1100 },
    { "epoch": 2.84, "grad_norm": 4.814740180969238, "learning_rate": 1.0741687979539643e-06, "loss": 0.8203, "step": 1110 },
    { "epoch": 2.86, "grad_norm": 5.175180912017822, "learning_rate": 9.036658141517478e-07, "loss": 0.7913, "step": 1120 },
    { "epoch": 2.89, "grad_norm": 5.307072639465332, "learning_rate": 7.331628303495311e-07, "loss": 0.7933, "step": 1130 },
    { "epoch": 2.92, "grad_norm": 4.609551429748535, "learning_rate": 5.626598465473146e-07, "loss": 0.8357, "step": 1140 },
    { "epoch": 2.94, "grad_norm": 5.993362903594971, "learning_rate": 3.921568627450981e-07, "loss": 0.8179, "step": 1150 },
    { "epoch": 2.94, "eval_accuracy": 0.6778, "eval_f1_macro": 0.6087469050367866, "eval_f1_micro": 0.6778, "eval_loss": 0.9466402530670166, "eval_runtime": 4.7731, "eval_samples_per_second": 1047.545, "eval_steps_per_second": 16.551, "step": 1150 },
    { "epoch": 2.97, "grad_norm": 4.948158264160156, "learning_rate": 2.2165387894288152e-07, "loss": 0.7716, "step": 1160 },
    { "epoch": 2.99, "grad_norm": 6.069735050201416, "learning_rate": 5.115089514066497e-08, "loss": 0.805, "step": 1170 },
    { "epoch": 3.0, "step": 1173, "total_flos": 4938378618339328.0, "train_loss": 0.9952963392447938, "train_runtime": 362.5909, "train_samples_per_second": 206.845, "train_steps_per_second": 3.235 }
  ],
  "logging_steps": 10,
  "max_steps": 1173,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "total_flos": 4938378618339328.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}