{
"best_metric": 0.3763463497161865,
"best_model_checkpoint": "PhoBert-SingleLabel-KMeans/checkpoint-2862",
"epoch": 3.0,
"eval_steps": 500,
"global_step": 2862,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02620545073375262,
"grad_norm": 3.5584876537323,
"learning_rate": 4.355400696864112e-06,
"loss": 1.7744,
"step": 25
},
{
"epoch": 0.05241090146750524,
"grad_norm": 3.515974283218384,
"learning_rate": 8.710801393728225e-06,
"loss": 1.6878,
"step": 50
},
{
"epoch": 0.07861635220125786,
"grad_norm": 3.664238691329956,
"learning_rate": 1.3066202090592336e-05,
"loss": 1.5996,
"step": 75
},
{
"epoch": 0.10482180293501048,
"grad_norm": 4.6186723709106445,
"learning_rate": 1.742160278745645e-05,
"loss": 1.4775,
"step": 100
},
{
"epoch": 0.1310272536687631,
"grad_norm": 5.333361625671387,
"learning_rate": 2.1777003484320557e-05,
"loss": 1.3713,
"step": 125
},
{
"epoch": 0.15723270440251572,
"grad_norm": 10.412248611450195,
"learning_rate": 2.6132404181184672e-05,
"loss": 1.0538,
"step": 150
},
{
"epoch": 0.18343815513626835,
"grad_norm": 3.342909812927246,
"learning_rate": 3.048780487804878e-05,
"loss": 0.9225,
"step": 175
},
{
"epoch": 0.20964360587002095,
"grad_norm": 5.6379547119140625,
"learning_rate": 3.48432055749129e-05,
"loss": 0.8722,
"step": 200
},
{
"epoch": 0.2358490566037736,
"grad_norm": 14.341384887695312,
"learning_rate": 3.9198606271777003e-05,
"loss": 0.8813,
"step": 225
},
{
"epoch": 0.2620545073375262,
"grad_norm": 8.06952953338623,
"learning_rate": 4.3554006968641115e-05,
"loss": 0.8815,
"step": 250
},
{
"epoch": 0.2882599580712788,
"grad_norm": 16.64231300354004,
"learning_rate": 4.7909407665505226e-05,
"loss": 0.6877,
"step": 275
},
{
"epoch": 0.31446540880503143,
"grad_norm": 10.036324501037598,
"learning_rate": 4.974757281553398e-05,
"loss": 0.612,
"step": 300
},
{
"epoch": 0.34067085953878407,
"grad_norm": 28.696367263793945,
"learning_rate": 4.9262135922330097e-05,
"loss": 0.5523,
"step": 325
},
{
"epoch": 0.3668763102725367,
"grad_norm": 2.96999192237854,
"learning_rate": 4.877669902912622e-05,
"loss": 0.5393,
"step": 350
},
{
"epoch": 0.39308176100628933,
"grad_norm": 0.7660526037216187,
"learning_rate": 4.829126213592233e-05,
"loss": 0.6644,
"step": 375
},
{
"epoch": 0.4192872117400419,
"grad_norm": 23.782712936401367,
"learning_rate": 4.780582524271845e-05,
"loss": 0.5915,
"step": 400
},
{
"epoch": 0.44549266247379454,
"grad_norm": 14.538418769836426,
"learning_rate": 4.732038834951457e-05,
"loss": 0.4766,
"step": 425
},
{
"epoch": 0.4716981132075472,
"grad_norm": 1.3607491254806519,
"learning_rate": 4.683495145631068e-05,
"loss": 0.592,
"step": 450
},
{
"epoch": 0.4979035639412998,
"grad_norm": 2.8048417568206787,
"learning_rate": 4.63495145631068e-05,
"loss": 0.6274,
"step": 475
},
{
"epoch": 0.5241090146750524,
"grad_norm": 294.476806640625,
"learning_rate": 4.586407766990292e-05,
"loss": 0.4636,
"step": 500
},
{
"epoch": 0.550314465408805,
"grad_norm": 3.9831271171569824,
"learning_rate": 4.5378640776699033e-05,
"loss": 0.6935,
"step": 525
},
{
"epoch": 0.5765199161425576,
"grad_norm": 8.449265480041504,
"learning_rate": 4.489320388349515e-05,
"loss": 0.5617,
"step": 550
},
{
"epoch": 0.6027253668763103,
"grad_norm": 0.5198196172714233,
"learning_rate": 4.440776699029127e-05,
"loss": 0.6695,
"step": 575
},
{
"epoch": 0.6289308176100629,
"grad_norm": 0.511917769908905,
"learning_rate": 4.3922330097087384e-05,
"loss": 0.6177,
"step": 600
},
{
"epoch": 0.6551362683438156,
"grad_norm": 0.4070751368999481,
"learning_rate": 4.34368932038835e-05,
"loss": 0.4971,
"step": 625
},
{
"epoch": 0.6813417190775681,
"grad_norm": 24.54846954345703,
"learning_rate": 4.295145631067961e-05,
"loss": 0.3295,
"step": 650
},
{
"epoch": 0.7075471698113207,
"grad_norm": 26.0633487701416,
"learning_rate": 4.246601941747573e-05,
"loss": 0.4744,
"step": 675
},
{
"epoch": 0.7337526205450734,
"grad_norm": 4.230802059173584,
"learning_rate": 4.198058252427184e-05,
"loss": 0.4744,
"step": 700
},
{
"epoch": 0.759958071278826,
"grad_norm": 3.7451541423797607,
"learning_rate": 4.1495145631067964e-05,
"loss": 0.5867,
"step": 725
},
{
"epoch": 0.7861635220125787,
"grad_norm": 8.754020690917969,
"learning_rate": 4.100970873786408e-05,
"loss": 0.4409,
"step": 750
},
{
"epoch": 0.8123689727463312,
"grad_norm": 0.14476385712623596,
"learning_rate": 4.052427184466019e-05,
"loss": 0.5824,
"step": 775
},
{
"epoch": 0.8385744234800838,
"grad_norm": 0.38359129428863525,
"learning_rate": 4.0038834951456314e-05,
"loss": 0.4913,
"step": 800
},
{
"epoch": 0.8647798742138365,
"grad_norm": 34.098060607910156,
"learning_rate": 3.955339805825243e-05,
"loss": 0.3129,
"step": 825
},
{
"epoch": 0.8909853249475891,
"grad_norm": 3.0660719871520996,
"learning_rate": 3.906796116504854e-05,
"loss": 0.6103,
"step": 850
},
{
"epoch": 0.9171907756813418,
"grad_norm": 7.5438385009765625,
"learning_rate": 3.858252427184466e-05,
"loss": 0.495,
"step": 875
},
{
"epoch": 0.9433962264150944,
"grad_norm": 67.45906829833984,
"learning_rate": 3.809708737864078e-05,
"loss": 0.5724,
"step": 900
},
{
"epoch": 0.9696016771488469,
"grad_norm": 3.6292409896850586,
"learning_rate": 3.7611650485436894e-05,
"loss": 0.3536,
"step": 925
},
{
"epoch": 0.9958071278825996,
"grad_norm": 29.442237854003906,
"learning_rate": 3.712621359223301e-05,
"loss": 0.4138,
"step": 950
},
{
"epoch": 1.0,
"eval_accuracy": 0.9000717188620607,
"eval_f1_macro": 0.8889047304060399,
"eval_f1_micro": 0.9000717188620607,
"eval_f1_weighted": 0.9004594891408181,
"eval_loss": 0.446470707654953,
"eval_precision_macro": 0.8923438114196293,
"eval_precision_micro": 0.9000717188620607,
"eval_precision_weighted": 0.9041587013504236,
"eval_recall_macro": 0.8879970575079902,
"eval_recall_micro": 0.9000717188620607,
"eval_recall_weighted": 0.9000717188620607,
"eval_runtime": 755.376,
"eval_samples_per_second": 5.538,
"eval_steps_per_second": 0.347,
"step": 954
},
{
"epoch": 1.0220125786163523,
"grad_norm": 15.653645515441895,
"learning_rate": 3.664077669902913e-05,
"loss": 0.3952,
"step": 975
},
{
"epoch": 1.0482180293501049,
"grad_norm": 0.11768877506256104,
"learning_rate": 3.6155339805825244e-05,
"loss": 0.3761,
"step": 1000
},
{
"epoch": 1.0744234800838575,
"grad_norm": 2.6586456298828125,
"learning_rate": 3.566990291262136e-05,
"loss": 0.3241,
"step": 1025
},
{
"epoch": 1.10062893081761,
"grad_norm": 4.2325119972229,
"learning_rate": 3.518446601941748e-05,
"loss": 0.5266,
"step": 1050
},
{
"epoch": 1.1268343815513626,
"grad_norm": 0.2591557502746582,
"learning_rate": 3.4699029126213595e-05,
"loss": 0.3688,
"step": 1075
},
{
"epoch": 1.1530398322851152,
"grad_norm": 7.8002777099609375,
"learning_rate": 3.421359223300971e-05,
"loss": 0.3996,
"step": 1100
},
{
"epoch": 1.179245283018868,
"grad_norm": 7.753486633300781,
"learning_rate": 3.372815533980583e-05,
"loss": 0.5435,
"step": 1125
},
{
"epoch": 1.2054507337526206,
"grad_norm": 4.600539684295654,
"learning_rate": 3.3242718446601945e-05,
"loss": 0.4125,
"step": 1150
},
{
"epoch": 1.2316561844863732,
"grad_norm": 0.507254421710968,
"learning_rate": 3.275728155339806e-05,
"loss": 0.3485,
"step": 1175
},
{
"epoch": 1.2578616352201257,
"grad_norm": 2.2480947971343994,
"learning_rate": 3.227184466019418e-05,
"loss": 0.3006,
"step": 1200
},
{
"epoch": 1.2840670859538785,
"grad_norm": 0.17361511290073395,
"learning_rate": 3.1786407766990296e-05,
"loss": 0.372,
"step": 1225
},
{
"epoch": 1.310272536687631,
"grad_norm": 2.2198197841644287,
"learning_rate": 3.130097087378641e-05,
"loss": 0.4539,
"step": 1250
},
{
"epoch": 1.3364779874213837,
"grad_norm": 2.0315468311309814,
"learning_rate": 3.0815533980582525e-05,
"loss": 0.482,
"step": 1275
},
{
"epoch": 1.3626834381551363,
"grad_norm": 8.348484992980957,
"learning_rate": 3.0330097087378646e-05,
"loss": 0.3263,
"step": 1300
},
{
"epoch": 1.3888888888888888,
"grad_norm": 7.209967136383057,
"learning_rate": 2.9844660194174757e-05,
"loss": 0.332,
"step": 1325
},
{
"epoch": 1.4150943396226414,
"grad_norm": 56.43852615356445,
"learning_rate": 2.9359223300970872e-05,
"loss": 0.5554,
"step": 1350
},
{
"epoch": 1.441299790356394,
"grad_norm": 1.4395267963409424,
"learning_rate": 2.887378640776699e-05,
"loss": 0.4749,
"step": 1375
},
{
"epoch": 1.4675052410901468,
"grad_norm": 70.63890838623047,
"learning_rate": 2.8388349514563105e-05,
"loss": 0.4356,
"step": 1400
},
{
"epoch": 1.4937106918238994,
"grad_norm": 52.49846649169922,
"learning_rate": 2.7902912621359222e-05,
"loss": 0.3524,
"step": 1425
},
{
"epoch": 1.519916142557652,
"grad_norm": 39.48180389404297,
"learning_rate": 2.741747572815534e-05,
"loss": 0.4155,
"step": 1450
},
{
"epoch": 1.5461215932914047,
"grad_norm": 4.6654510498046875,
"learning_rate": 2.6932038834951455e-05,
"loss": 0.3802,
"step": 1475
},
{
"epoch": 1.5723270440251573,
"grad_norm": 2.445298433303833,
"learning_rate": 2.6446601941747573e-05,
"loss": 0.2013,
"step": 1500
},
{
"epoch": 1.59853249475891,
"grad_norm": 0.5410274267196655,
"learning_rate": 2.596116504854369e-05,
"loss": 0.3016,
"step": 1525
},
{
"epoch": 1.6247379454926625,
"grad_norm": 6.175892353057861,
"learning_rate": 2.5475728155339806e-05,
"loss": 0.4059,
"step": 1550
},
{
"epoch": 1.650943396226415,
"grad_norm": 19.350805282592773,
"learning_rate": 2.4990291262135923e-05,
"loss": 0.2649,
"step": 1575
},
{
"epoch": 1.6771488469601676,
"grad_norm": 0.5931205749511719,
"learning_rate": 2.450485436893204e-05,
"loss": 0.2931,
"step": 1600
},
{
"epoch": 1.7033542976939202,
"grad_norm": 20.36564826965332,
"learning_rate": 2.4019417475728156e-05,
"loss": 0.2659,
"step": 1625
},
{
"epoch": 1.7295597484276728,
"grad_norm": 0.1273786723613739,
"learning_rate": 2.3533980582524274e-05,
"loss": 0.2192,
"step": 1650
},
{
"epoch": 1.7557651991614256,
"grad_norm": 39.38661193847656,
"learning_rate": 2.304854368932039e-05,
"loss": 0.4232,
"step": 1675
},
{
"epoch": 1.7819706498951782,
"grad_norm": 35.051902770996094,
"learning_rate": 2.2563106796116507e-05,
"loss": 0.4047,
"step": 1700
},
{
"epoch": 1.808176100628931,
"grad_norm": 10.8402738571167,
"learning_rate": 2.2077669902912624e-05,
"loss": 0.257,
"step": 1725
},
{
"epoch": 1.8343815513626835,
"grad_norm": 18.704975128173828,
"learning_rate": 2.159223300970874e-05,
"loss": 0.3526,
"step": 1750
},
{
"epoch": 1.8605870020964361,
"grad_norm": 0.09916484355926514,
"learning_rate": 2.1106796116504857e-05,
"loss": 0.3903,
"step": 1775
},
{
"epoch": 1.8867924528301887,
"grad_norm": 4.514852046966553,
"learning_rate": 2.062135922330097e-05,
"loss": 0.3345,
"step": 1800
},
{
"epoch": 1.9129979035639413,
"grad_norm": 31.562023162841797,
"learning_rate": 2.0135922330097086e-05,
"loss": 0.5511,
"step": 1825
},
{
"epoch": 1.9392033542976939,
"grad_norm": 13.065159797668457,
"learning_rate": 1.9650485436893204e-05,
"loss": 0.2387,
"step": 1850
},
{
"epoch": 1.9654088050314464,
"grad_norm": 41.689964294433594,
"learning_rate": 1.9165048543689322e-05,
"loss": 0.3024,
"step": 1875
},
{
"epoch": 1.991614255765199,
"grad_norm": 10.776809692382812,
"learning_rate": 1.8679611650485437e-05,
"loss": 0.3133,
"step": 1900
},
{
"epoch": 2.0,
"eval_accuracy": 0.9022232847238824,
"eval_f1_macro": 0.8791017264419937,
"eval_f1_micro": 0.9022232847238824,
"eval_f1_weighted": 0.9030036030460146,
"eval_loss": 0.4108516573905945,
"eval_precision_macro": 0.8552051719676482,
"eval_precision_micro": 0.9022232847238824,
"eval_precision_weighted": 0.906337005319111,
"eval_recall_macro": 0.9078028911862802,
"eval_recall_micro": 0.9022232847238824,
"eval_recall_weighted": 0.9022232847238824,
"eval_runtime": 882.9584,
"eval_samples_per_second": 4.737,
"eval_steps_per_second": 0.297,
"step": 1908
},
{
"epoch": 2.0178197064989516,
"grad_norm": 7.140912055969238,
"learning_rate": 1.8194174757281555e-05,
"loss": 0.2435,
"step": 1925
},
{
"epoch": 2.0440251572327046,
"grad_norm": 13.220586776733398,
"learning_rate": 1.770873786407767e-05,
"loss": 0.2558,
"step": 1950
},
{
"epoch": 2.070230607966457,
"grad_norm": 4.269901275634766,
"learning_rate": 1.7223300970873787e-05,
"loss": 0.2658,
"step": 1975
},
{
"epoch": 2.0964360587002098,
"grad_norm": 0.11073017865419388,
"learning_rate": 1.6737864077669905e-05,
"loss": 0.1196,
"step": 2000
},
{
"epoch": 2.1226415094339623,
"grad_norm": 0.16363948583602905,
"learning_rate": 1.625242718446602e-05,
"loss": 0.3133,
"step": 2025
},
{
"epoch": 2.148846960167715,
"grad_norm": 0.40826770663261414,
"learning_rate": 1.5766990291262138e-05,
"loss": 0.2075,
"step": 2050
},
{
"epoch": 2.1750524109014675,
"grad_norm": 0.336016982793808,
"learning_rate": 1.5281553398058256e-05,
"loss": 0.2866,
"step": 2075
},
{
"epoch": 2.20125786163522,
"grad_norm": 0.0927320122718811,
"learning_rate": 1.479611650485437e-05,
"loss": 0.2213,
"step": 2100
},
{
"epoch": 2.2274633123689727,
"grad_norm": 0.058554600924253464,
"learning_rate": 1.4310679611650485e-05,
"loss": 0.1864,
"step": 2125
},
{
"epoch": 2.2536687631027252,
"grad_norm": 0.06633560359477997,
"learning_rate": 1.3825242718446601e-05,
"loss": 0.2221,
"step": 2150
},
{
"epoch": 2.279874213836478,
"grad_norm": 24.724422454833984,
"learning_rate": 1.3339805825242719e-05,
"loss": 0.2335,
"step": 2175
},
{
"epoch": 2.3060796645702304,
"grad_norm": 0.0917593464255333,
"learning_rate": 1.2854368932038835e-05,
"loss": 0.2385,
"step": 2200
},
{
"epoch": 2.3322851153039834,
"grad_norm": 0.06267809122800827,
"learning_rate": 1.2368932038834952e-05,
"loss": 0.2185,
"step": 2225
},
{
"epoch": 2.358490566037736,
"grad_norm": 0.05579007416963577,
"learning_rate": 1.1883495145631068e-05,
"loss": 0.2324,
"step": 2250
},
{
"epoch": 2.3846960167714886,
"grad_norm": 3.492902994155884,
"learning_rate": 1.1398058252427186e-05,
"loss": 0.4154,
"step": 2275
},
{
"epoch": 2.410901467505241,
"grad_norm": 0.16507238149642944,
"learning_rate": 1.0912621359223302e-05,
"loss": 0.2711,
"step": 2300
},
{
"epoch": 2.4371069182389937,
"grad_norm": 2.3962414264678955,
"learning_rate": 1.0427184466019418e-05,
"loss": 0.2866,
"step": 2325
},
{
"epoch": 2.4633123689727463,
"grad_norm": 2.307986259460449,
"learning_rate": 9.941747572815535e-06,
"loss": 0.4232,
"step": 2350
},
{
"epoch": 2.489517819706499,
"grad_norm": 8.218997955322266,
"learning_rate": 9.45631067961165e-06,
"loss": 0.2528,
"step": 2375
},
{
"epoch": 2.5157232704402515,
"grad_norm": 0.10614609718322754,
"learning_rate": 8.970873786407767e-06,
"loss": 0.0532,
"step": 2400
},
{
"epoch": 2.541928721174004,
"grad_norm": 6.59151554107666,
"learning_rate": 8.485436893203883e-06,
"loss": 0.2535,
"step": 2425
},
{
"epoch": 2.568134171907757,
"grad_norm": 0.46849825978279114,
"learning_rate": 8.000000000000001e-06,
"loss": 0.2503,
"step": 2450
},
{
"epoch": 2.5943396226415096,
"grad_norm": 2.4090096950531006,
"learning_rate": 7.514563106796118e-06,
"loss": 0.345,
"step": 2475
},
{
"epoch": 2.620545073375262,
"grad_norm": 19.20216941833496,
"learning_rate": 7.029126213592234e-06,
"loss": 0.1694,
"step": 2500
},
{
"epoch": 2.646750524109015,
"grad_norm": 10.444535255432129,
"learning_rate": 6.543689320388349e-06,
"loss": 0.2558,
"step": 2525
},
{
"epoch": 2.6729559748427674,
"grad_norm": 0.15079370141029358,
"learning_rate": 6.058252427184466e-06,
"loss": 0.2905,
"step": 2550
},
{
"epoch": 2.69916142557652,
"grad_norm": 0.0647541806101799,
"learning_rate": 5.572815533980583e-06,
"loss": 0.1197,
"step": 2575
},
{
"epoch": 2.7253668763102725,
"grad_norm": 50.22942352294922,
"learning_rate": 5.087378640776699e-06,
"loss": 0.159,
"step": 2600
},
{
"epoch": 2.751572327044025,
"grad_norm": 0.06316754221916199,
"learning_rate": 4.601941747572816e-06,
"loss": 0.2032,
"step": 2625
},
{
"epoch": 2.7777777777777777,
"grad_norm": 0.06050650402903557,
"learning_rate": 4.116504854368932e-06,
"loss": 0.2446,
"step": 2650
},
{
"epoch": 2.8039832285115303,
"grad_norm": 7.044517993927002,
"learning_rate": 3.6310679611650486e-06,
"loss": 0.2692,
"step": 2675
},
{
"epoch": 2.830188679245283,
"grad_norm": 0.36540356278419495,
"learning_rate": 3.1456310679611653e-06,
"loss": 0.3174,
"step": 2700
},
{
"epoch": 2.8563941299790354,
"grad_norm": 1.1024131774902344,
"learning_rate": 2.660194174757282e-06,
"loss": 0.2608,
"step": 2725
},
{
"epoch": 2.882599580712788,
"grad_norm": 4.021923065185547,
"learning_rate": 2.1747572815533982e-06,
"loss": 0.0811,
"step": 2750
},
{
"epoch": 2.908805031446541,
"grad_norm": 0.10723225027322769,
"learning_rate": 1.6893203883495145e-06,
"loss": 0.097,
"step": 2775
},
{
"epoch": 2.9350104821802936,
"grad_norm": 0.07229924201965332,
"learning_rate": 1.2038834951456312e-06,
"loss": 0.3037,
"step": 2800
},
{
"epoch": 2.961215932914046,
"grad_norm": 38.69488525390625,
"learning_rate": 7.184466019417476e-07,
"loss": 0.2884,
"step": 2825
},
{
"epoch": 2.9874213836477987,
"grad_norm": 1.4321260452270508,
"learning_rate": 2.330097087378641e-07,
"loss": 0.3024,
"step": 2850
},
{
"epoch": 3.0,
"eval_accuracy": 0.9194358116184557,
"eval_f1_macro": 0.8947470035372601,
"eval_f1_micro": 0.9194358116184557,
"eval_f1_weighted": 0.9196235681987903,
"eval_loss": 0.3763463497161865,
"eval_precision_macro": 0.8811990938584341,
"eval_precision_micro": 0.9194358116184557,
"eval_precision_weighted": 0.9204284600348198,
"eval_recall_macro": 0.9094848524838759,
"eval_recall_micro": 0.9194358116184557,
"eval_recall_weighted": 0.9194358116184557,
"eval_runtime": 833.3591,
"eval_samples_per_second": 5.019,
"eval_steps_per_second": 0.314,
"step": 2862
}
],
"logging_steps": 25,
"max_steps": 2862,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.01
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1505115067765248.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}