{
"best_metric": 1.0657285451889038,
"best_model_checkpoint": "actsa-distilbert/checkpoint-3246",
"epoch": 3.0,
"eval_steps": 500,
"global_step": 3246,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02310536044362292,
"grad_norm": 2.318068265914917,
"learning_rate": 3.846153846153846e-05,
"loss": 1.0841,
"step": 25
},
{
"epoch": 0.04621072088724584,
"grad_norm": 3.620454788208008,
"learning_rate": 7.692307692307693e-05,
"loss": 1.0888,
"step": 50
},
{
"epoch": 0.06931608133086876,
"grad_norm": 2.4006574153900146,
"learning_rate": 0.0001153846153846154,
"loss": 1.0742,
"step": 75
},
{
"epoch": 0.09242144177449169,
"grad_norm": 4.676277160644531,
"learning_rate": 0.00015384615384615385,
"loss": 1.0871,
"step": 100
},
{
"epoch": 0.11552680221811461,
"grad_norm": 6.01164436340332,
"learning_rate": 0.00019230769230769233,
"loss": 1.1146,
"step": 125
},
{
"epoch": 0.13863216266173753,
"grad_norm": 2.2937514781951904,
"learning_rate": 0.0002307692307692308,
"loss": 1.0578,
"step": 150
},
{
"epoch": 0.16173752310536044,
"grad_norm": 2.6013343334198,
"learning_rate": 0.0002692307692307692,
"loss": 1.0571,
"step": 175
},
{
"epoch": 0.18484288354898337,
"grad_norm": 3.571342945098877,
"learning_rate": 0.0003076923076923077,
"loss": 1.145,
"step": 200
},
{
"epoch": 0.20794824399260628,
"grad_norm": 2.3125054836273193,
"learning_rate": 0.00034615384615384613,
"loss": 1.0901,
"step": 225
},
{
"epoch": 0.23105360443622922,
"grad_norm": 5.343853950500488,
"learning_rate": 0.00038461538461538467,
"loss": 1.0198,
"step": 250
},
{
"epoch": 0.2541589648798521,
"grad_norm": 4.148077487945557,
"learning_rate": 0.0004230769230769231,
"loss": 1.0066,
"step": 275
},
{
"epoch": 0.27726432532347506,
"grad_norm": 3.7557380199432373,
"learning_rate": 0.0004615384615384616,
"loss": 1.0734,
"step": 300
},
{
"epoch": 0.300369685767098,
"grad_norm": 3.485323190689087,
"learning_rate": 0.0005,
"loss": 1.0569,
"step": 325
},
{
"epoch": 0.3234750462107209,
"grad_norm": 2.63746976852417,
"learning_rate": 0.0004957206436152003,
"loss": 1.1081,
"step": 350
},
{
"epoch": 0.3465804066543438,
"grad_norm": 5.485255718231201,
"learning_rate": 0.0004914412872304005,
"loss": 1.1346,
"step": 375
},
{
"epoch": 0.36968576709796674,
"grad_norm": 2.920285940170288,
"learning_rate": 0.00048716193084560086,
"loss": 1.0998,
"step": 400
},
{
"epoch": 0.3927911275415896,
"grad_norm": 2.696606397628784,
"learning_rate": 0.0004828825744608011,
"loss": 1.1216,
"step": 425
},
{
"epoch": 0.41589648798521256,
"grad_norm": 2.0741255283355713,
"learning_rate": 0.0004786032180760014,
"loss": 1.1092,
"step": 450
},
{
"epoch": 0.4390018484288355,
"grad_norm": 2.102992534637451,
"learning_rate": 0.0004743238616912017,
"loss": 1.1172,
"step": 475
},
{
"epoch": 0.46210720887245843,
"grad_norm": 1.4443374872207642,
"learning_rate": 0.0004700445053064019,
"loss": 1.0929,
"step": 500
},
{
"epoch": 0.4852125693160813,
"grad_norm": 2.7878777980804443,
"learning_rate": 0.0004657651489216022,
"loss": 1.125,
"step": 525
},
{
"epoch": 0.5083179297597042,
"grad_norm": 2.387270212173462,
"learning_rate": 0.00046148579253680244,
"loss": 1.1069,
"step": 550
},
{
"epoch": 0.5314232902033271,
"grad_norm": 2.1369564533233643,
"learning_rate": 0.00045720643615200274,
"loss": 1.085,
"step": 575
},
{
"epoch": 0.5545286506469501,
"grad_norm": 2.407505989074707,
"learning_rate": 0.00045292707976720304,
"loss": 1.095,
"step": 600
},
{
"epoch": 0.577634011090573,
"grad_norm": 1.3333139419555664,
"learning_rate": 0.0004486477233824033,
"loss": 1.1035,
"step": 625
},
{
"epoch": 0.600739371534196,
"grad_norm": 1.891635537147522,
"learning_rate": 0.0004443683669976036,
"loss": 0.9419,
"step": 650
},
{
"epoch": 0.6238447319778189,
"grad_norm": 2.536003589630127,
"learning_rate": 0.00044008901061280383,
"loss": 1.1685,
"step": 675
},
{
"epoch": 0.6469500924214417,
"grad_norm": 2.3016841411590576,
"learning_rate": 0.00043580965422800413,
"loss": 1.0619,
"step": 700
},
{
"epoch": 0.6700554528650647,
"grad_norm": 1.4672470092773438,
"learning_rate": 0.00043153029784320443,
"loss": 1.1276,
"step": 725
},
{
"epoch": 0.6931608133086876,
"grad_norm": 2.2047739028930664,
"learning_rate": 0.0004272509414584047,
"loss": 1.0428,
"step": 750
},
{
"epoch": 0.7162661737523105,
"grad_norm": 4.394761562347412,
"learning_rate": 0.000422971585073605,
"loss": 1.082,
"step": 775
},
{
"epoch": 0.7393715341959335,
"grad_norm": 1.6812162399291992,
"learning_rate": 0.00041869222868880517,
"loss": 1.1325,
"step": 800
},
{
"epoch": 0.7624768946395564,
"grad_norm": 2.4136714935302734,
"learning_rate": 0.00041441287230400547,
"loss": 1.0486,
"step": 825
},
{
"epoch": 0.7855822550831792,
"grad_norm": 1.2103418111801147,
"learning_rate": 0.00041013351591920577,
"loss": 1.093,
"step": 850
},
{
"epoch": 0.8086876155268022,
"grad_norm": 2.494950771331787,
"learning_rate": 0.000405854159534406,
"loss": 1.0638,
"step": 875
},
{
"epoch": 0.8317929759704251,
"grad_norm": 1.8322207927703857,
"learning_rate": 0.0004015748031496063,
"loss": 0.9711,
"step": 900
},
{
"epoch": 0.8548983364140481,
"grad_norm": 2.9408106803894043,
"learning_rate": 0.00039729544676480656,
"loss": 1.1413,
"step": 925
},
{
"epoch": 0.878003696857671,
"grad_norm": 1.0680229663848877,
"learning_rate": 0.00039301609038000686,
"loss": 1.0609,
"step": 950
},
{
"epoch": 0.9011090573012939,
"grad_norm": 1.952222466468811,
"learning_rate": 0.00038873673399520716,
"loss": 1.0954,
"step": 975
},
{
"epoch": 0.9242144177449169,
"grad_norm": 3.1969923973083496,
"learning_rate": 0.0003844573776104074,
"loss": 1.0895,
"step": 1000
},
{
"epoch": 0.9473197781885397,
"grad_norm": 2.0018506050109863,
"learning_rate": 0.0003801780212256077,
"loss": 1.0697,
"step": 1025
},
{
"epoch": 0.9704251386321626,
"grad_norm": 2.0683329105377197,
"learning_rate": 0.00037589866484080795,
"loss": 1.0999,
"step": 1050
},
{
"epoch": 0.9935304990757856,
"grad_norm": 2.1113333702087402,
"learning_rate": 0.00037161930845600825,
"loss": 1.1089,
"step": 1075
},
{
"epoch": 1.0,
"eval_accuracy": 0.4584103512014787,
"eval_f1_macro": 0.2095479509928179,
"eval_f1_micro": 0.4584103512014787,
"eval_f1_weighted": 0.2881768494245037,
"eval_loss": 1.0685529708862305,
"eval_precision_macro": 0.1528034504004929,
"eval_precision_micro": 0.4584103512014787,
"eval_precision_weighted": 0.21014005008866307,
"eval_recall_macro": 0.3333333333333333,
"eval_recall_micro": 0.4584103512014787,
"eval_recall_weighted": 0.4584103512014787,
"eval_runtime": 103.9446,
"eval_samples_per_second": 10.409,
"eval_steps_per_second": 1.308,
"step": 1082
},
{
"epoch": 1.0166358595194085,
"grad_norm": 2.078071117401123,
"learning_rate": 0.0003673399520712085,
"loss": 1.0538,
"step": 1100
},
{
"epoch": 1.0397412199630314,
"grad_norm": 1.2932239770889282,
"learning_rate": 0.00036306059568640874,
"loss": 1.1108,
"step": 1125
},
{
"epoch": 1.0628465804066543,
"grad_norm": 1.8026275634765625,
"learning_rate": 0.00035878123930160904,
"loss": 1.0782,
"step": 1150
},
{
"epoch": 1.0859519408502774,
"grad_norm": 2.0047168731689453,
"learning_rate": 0.0003545018829168093,
"loss": 1.0754,
"step": 1175
},
{
"epoch": 1.1090573012939002,
"grad_norm": 2.5215866565704346,
"learning_rate": 0.0003502225265320096,
"loss": 1.0822,
"step": 1200
},
{
"epoch": 1.1321626617375231,
"grad_norm": 1.7528326511383057,
"learning_rate": 0.0003459431701472099,
"loss": 1.0956,
"step": 1225
},
{
"epoch": 1.155268022181146,
"grad_norm": 2.345102548599243,
"learning_rate": 0.00034166381376241013,
"loss": 1.0284,
"step": 1250
},
{
"epoch": 1.1783733826247689,
"grad_norm": 2.051119327545166,
"learning_rate": 0.00033738445737761043,
"loss": 1.0811,
"step": 1275
},
{
"epoch": 1.201478743068392,
"grad_norm": 2.405728340148926,
"learning_rate": 0.0003331051009928107,
"loss": 1.0661,
"step": 1300
},
{
"epoch": 1.2245841035120149,
"grad_norm": 2.118389368057251,
"learning_rate": 0.000328825744608011,
"loss": 1.0821,
"step": 1325
},
{
"epoch": 1.2476894639556377,
"grad_norm": 1.383575677871704,
"learning_rate": 0.0003245463882232113,
"loss": 1.0988,
"step": 1350
},
{
"epoch": 1.2707948243992606,
"grad_norm": 0.9262095093727112,
"learning_rate": 0.00032026703183841147,
"loss": 1.0855,
"step": 1375
},
{
"epoch": 1.2939001848428835,
"grad_norm": 0.8497403860092163,
"learning_rate": 0.00031598767545361177,
"loss": 1.0734,
"step": 1400
},
{
"epoch": 1.3170055452865066,
"grad_norm": 2.554072856903076,
"learning_rate": 0.000311708319068812,
"loss": 1.0495,
"step": 1425
},
{
"epoch": 1.3401109057301293,
"grad_norm": 2.196169853210449,
"learning_rate": 0.0003074289626840123,
"loss": 1.0611,
"step": 1450
},
{
"epoch": 1.3632162661737524,
"grad_norm": 1.9735369682312012,
"learning_rate": 0.0003031496062992126,
"loss": 1.0689,
"step": 1475
},
{
"epoch": 1.3863216266173752,
"grad_norm": 1.28324556350708,
"learning_rate": 0.00029887024991441286,
"loss": 1.1195,
"step": 1500
},
{
"epoch": 1.4094269870609981,
"grad_norm": 2.870333671569824,
"learning_rate": 0.00029459089352961316,
"loss": 1.0909,
"step": 1525
},
{
"epoch": 1.432532347504621,
"grad_norm": 2.790924549102783,
"learning_rate": 0.00029031153714481346,
"loss": 1.1001,
"step": 1550
},
{
"epoch": 1.4556377079482439,
"grad_norm": 1.2432293891906738,
"learning_rate": 0.0002860321807600137,
"loss": 1.124,
"step": 1575
},
{
"epoch": 1.478743068391867,
"grad_norm": 0.8692905306816101,
"learning_rate": 0.000281752824375214,
"loss": 1.0717,
"step": 1600
},
{
"epoch": 1.5018484288354899,
"grad_norm": 1.2673799991607666,
"learning_rate": 0.00027747346799041425,
"loss": 1.086,
"step": 1625
},
{
"epoch": 1.5249537892791127,
"grad_norm": 1.9591180086135864,
"learning_rate": 0.00027319411160561455,
"loss": 1.0408,
"step": 1650
},
{
"epoch": 1.5480591497227358,
"grad_norm": 1.8689022064208984,
"learning_rate": 0.00026891475522081485,
"loss": 1.0499,
"step": 1675
},
{
"epoch": 1.5711645101663585,
"grad_norm": 2.0180625915527344,
"learning_rate": 0.00026463539883601504,
"loss": 1.0936,
"step": 1700
},
{
"epoch": 1.5942698706099816,
"grad_norm": 0.9715362191200256,
"learning_rate": 0.00026035604245121534,
"loss": 1.0886,
"step": 1725
},
{
"epoch": 1.6173752310536045,
"grad_norm": 1.5542079210281372,
"learning_rate": 0.0002560766860664156,
"loss": 1.0825,
"step": 1750
},
{
"epoch": 1.6404805914972274,
"grad_norm": 2.8334264755249023,
"learning_rate": 0.0002517973296816159,
"loss": 1.0932,
"step": 1775
},
{
"epoch": 1.6635859519408502,
"grad_norm": 0.5704463720321655,
"learning_rate": 0.0002475179732968162,
"loss": 1.1057,
"step": 1800
},
{
"epoch": 1.6866913123844731,
"grad_norm": 2.452209949493408,
"learning_rate": 0.00024323861691201643,
"loss": 1.0569,
"step": 1825
},
{
"epoch": 1.7097966728280962,
"grad_norm": 1.304701805114746,
"learning_rate": 0.0002389592605272167,
"loss": 1.0859,
"step": 1850
},
{
"epoch": 1.7329020332717189,
"grad_norm": 1.5926436185836792,
"learning_rate": 0.000234679904142417,
"loss": 1.0832,
"step": 1875
},
{
"epoch": 1.756007393715342,
"grad_norm": 2.683790445327759,
"learning_rate": 0.00023040054775761728,
"loss": 1.0539,
"step": 1900
},
{
"epoch": 1.7791127541589649,
"grad_norm": 1.4463739395141602,
"learning_rate": 0.00022612119137281752,
"loss": 1.0358,
"step": 1925
},
{
"epoch": 1.8022181146025877,
"grad_norm": 3.22567081451416,
"learning_rate": 0.0002218418349880178,
"loss": 0.9887,
"step": 1950
},
{
"epoch": 1.8253234750462108,
"grad_norm": 0.9853553771972656,
"learning_rate": 0.00021756247860321807,
"loss": 1.0566,
"step": 1975
},
{
"epoch": 1.8484288354898335,
"grad_norm": 1.0822279453277588,
"learning_rate": 0.00021328312221841837,
"loss": 1.0437,
"step": 2000
},
{
"epoch": 1.8715341959334566,
"grad_norm": 1.9909262657165527,
"learning_rate": 0.00020900376583361864,
"loss": 1.0059,
"step": 2025
},
{
"epoch": 1.8946395563770795,
"grad_norm": 1.8531982898712158,
"learning_rate": 0.0002047244094488189,
"loss": 1.0671,
"step": 2050
},
{
"epoch": 1.9177449168207024,
"grad_norm": 1.0466595888137817,
"learning_rate": 0.00020044505306401916,
"loss": 1.0766,
"step": 2075
},
{
"epoch": 1.9408502772643255,
"grad_norm": 0.8994802236557007,
"learning_rate": 0.00019616569667921946,
"loss": 0.9991,
"step": 2100
},
{
"epoch": 1.9639556377079481,
"grad_norm": 1.929488182067871,
"learning_rate": 0.00019188634029441973,
"loss": 1.0905,
"step": 2125
},
{
"epoch": 1.9870609981515712,
"grad_norm": 1.9165269136428833,
"learning_rate": 0.00018760698390962,
"loss": 1.039,
"step": 2150
},
{
"epoch": 2.0,
"eval_accuracy": 0.4584103512014787,
"eval_f1_macro": 0.2095479509928179,
"eval_f1_micro": 0.4584103512014787,
"eval_f1_weighted": 0.2881768494245037,
"eval_loss": 1.0747383832931519,
"eval_precision_macro": 0.1528034504004929,
"eval_precision_micro": 0.4584103512014787,
"eval_precision_weighted": 0.21014005008866307,
"eval_recall_macro": 0.3333333333333333,
"eval_recall_micro": 0.4584103512014787,
"eval_recall_weighted": 0.4584103512014787,
"eval_runtime": 105.3751,
"eval_samples_per_second": 10.268,
"eval_steps_per_second": 1.291,
"step": 2164
},
{
"epoch": 2.010166358595194,
"grad_norm": 4.0738606452941895,
"learning_rate": 0.00018332762752482028,
"loss": 1.0268,
"step": 2175
},
{
"epoch": 2.033271719038817,
"grad_norm": 2.0423989295959473,
"learning_rate": 0.00017904827114002055,
"loss": 1.0547,
"step": 2200
},
{
"epoch": 2.05637707948244,
"grad_norm": 2.3596909046173096,
"learning_rate": 0.00017476891475522082,
"loss": 1.0647,
"step": 2225
},
{
"epoch": 2.0794824399260627,
"grad_norm": 1.9593594074249268,
"learning_rate": 0.0001704895583704211,
"loss": 1.1023,
"step": 2250
},
{
"epoch": 2.102587800369686,
"grad_norm": 1.8953109979629517,
"learning_rate": 0.00016621020198562137,
"loss": 1.093,
"step": 2275
},
{
"epoch": 2.1256931608133085,
"grad_norm": 1.6998157501220703,
"learning_rate": 0.00016193084560082164,
"loss": 1.0777,
"step": 2300
},
{
"epoch": 2.1487985212569316,
"grad_norm": 1.0082741975784302,
"learning_rate": 0.0001576514892160219,
"loss": 1.0999,
"step": 2325
},
{
"epoch": 2.1719038817005547,
"grad_norm": 1.887078046798706,
"learning_rate": 0.0001533721328312222,
"loss": 1.1067,
"step": 2350
},
{
"epoch": 2.1950092421441774,
"grad_norm": 1.8659969568252563,
"learning_rate": 0.00014909277644642246,
"loss": 1.0769,
"step": 2375
},
{
"epoch": 2.2181146025878005,
"grad_norm": 0.916480302810669,
"learning_rate": 0.00014481342006162273,
"loss": 1.048,
"step": 2400
},
{
"epoch": 2.241219963031423,
"grad_norm": 1.9037351608276367,
"learning_rate": 0.000140534063676823,
"loss": 1.0509,
"step": 2425
},
{
"epoch": 2.2643253234750462,
"grad_norm": 1.9867937564849854,
"learning_rate": 0.00013625470729202328,
"loss": 1.0609,
"step": 2450
},
{
"epoch": 2.287430683918669,
"grad_norm": 2.044760227203369,
"learning_rate": 0.00013197535090722358,
"loss": 1.0351,
"step": 2475
},
{
"epoch": 2.310536044362292,
"grad_norm": 2.0097403526306152,
"learning_rate": 0.00012769599452242385,
"loss": 1.0433,
"step": 2500
},
{
"epoch": 2.333641404805915,
"grad_norm": 1.9191139936447144,
"learning_rate": 0.0001234166381376241,
"loss": 1.0219,
"step": 2525
},
{
"epoch": 2.3567467652495377,
"grad_norm": 1.9857523441314697,
"learning_rate": 0.00011913728175282437,
"loss": 1.0892,
"step": 2550
},
{
"epoch": 2.379852125693161,
"grad_norm": 2.7910494804382324,
"learning_rate": 0.00011485792536802465,
"loss": 1.1069,
"step": 2575
},
{
"epoch": 2.402957486136784,
"grad_norm": 1.5788651704788208,
"learning_rate": 0.00011057856898322493,
"loss": 1.0685,
"step": 2600
},
{
"epoch": 2.4260628465804066,
"grad_norm": 2.1729698181152344,
"learning_rate": 0.0001062992125984252,
"loss": 1.0369,
"step": 2625
},
{
"epoch": 2.4491682070240297,
"grad_norm": 1.842307686805725,
"learning_rate": 0.00010201985621362547,
"loss": 1.0777,
"step": 2650
},
{
"epoch": 2.4722735674676524,
"grad_norm": 1.7603636980056763,
"learning_rate": 9.774049982882574e-05,
"loss": 1.0776,
"step": 2675
},
{
"epoch": 2.4953789279112755,
"grad_norm": 3.0491414070129395,
"learning_rate": 9.346114344402602e-05,
"loss": 1.04,
"step": 2700
},
{
"epoch": 2.518484288354898,
"grad_norm": 1.8501389026641846,
"learning_rate": 8.918178705922629e-05,
"loss": 1.0217,
"step": 2725
},
{
"epoch": 2.5415896487985212,
"grad_norm": 1.788746953010559,
"learning_rate": 8.490243067442658e-05,
"loss": 1.086,
"step": 2750
},
{
"epoch": 2.5646950092421443,
"grad_norm": 1.6955204010009766,
"learning_rate": 8.062307428962684e-05,
"loss": 1.0784,
"step": 2775
},
{
"epoch": 2.587800369685767,
"grad_norm": 1.7728503942489624,
"learning_rate": 7.634371790482711e-05,
"loss": 1.0826,
"step": 2800
},
{
"epoch": 2.61090573012939,
"grad_norm": 1.9343187808990479,
"learning_rate": 7.20643615200274e-05,
"loss": 1.0258,
"step": 2825
},
{
"epoch": 2.634011090573013,
"grad_norm": 1.7846380472183228,
"learning_rate": 6.778500513522765e-05,
"loss": 1.1017,
"step": 2850
},
{
"epoch": 2.657116451016636,
"grad_norm": 0.9427468180656433,
"learning_rate": 6.350564875042794e-05,
"loss": 1.0762,
"step": 2875
},
{
"epoch": 2.6802218114602585,
"grad_norm": 2.756263017654419,
"learning_rate": 5.9226292365628206e-05,
"loss": 1.0642,
"step": 2900
},
{
"epoch": 2.7033271719038816,
"grad_norm": 1.782007098197937,
"learning_rate": 5.4946935980828486e-05,
"loss": 1.0402,
"step": 2925
},
{
"epoch": 2.7264325323475047,
"grad_norm": 2.0735743045806885,
"learning_rate": 5.066757959602876e-05,
"loss": 1.0347,
"step": 2950
},
{
"epoch": 2.7495378927911274,
"grad_norm": 1.935968279838562,
"learning_rate": 4.638822321122903e-05,
"loss": 1.0703,
"step": 2975
},
{
"epoch": 2.7726432532347505,
"grad_norm": 1.8476896286010742,
"learning_rate": 4.2108866826429304e-05,
"loss": 1.0754,
"step": 3000
},
{
"epoch": 2.7957486136783736,
"grad_norm": 1.8447425365447998,
"learning_rate": 3.782951044162958e-05,
"loss": 1.0004,
"step": 3025
},
{
"epoch": 2.8188539741219962,
"grad_norm": 1.811383605003357,
"learning_rate": 3.355015405682985e-05,
"loss": 1.108,
"step": 3050
},
{
"epoch": 2.8419593345656193,
"grad_norm": 1.8410310745239258,
"learning_rate": 2.927079767203013e-05,
"loss": 1.0884,
"step": 3075
},
{
"epoch": 2.865064695009242,
"grad_norm": 1.8326698541641235,
"learning_rate": 2.49914412872304e-05,
"loss": 1.0986,
"step": 3100
},
{
"epoch": 2.888170055452865,
"grad_norm": 1.9954646825790405,
"learning_rate": 2.0712084902430675e-05,
"loss": 1.0926,
"step": 3125
},
{
"epoch": 2.9112754158964878,
"grad_norm": 0.8552793264389038,
"learning_rate": 1.643272851763095e-05,
"loss": 1.1081,
"step": 3150
},
{
"epoch": 2.934380776340111,
"grad_norm": 1.6706448793411255,
"learning_rate": 1.2153372132831224e-05,
"loss": 1.1125,
"step": 3175
},
{
"epoch": 2.957486136783734,
"grad_norm": 1.8879237174987793,
"learning_rate": 7.874015748031496e-06,
"loss": 1.0289,
"step": 3200
},
{
"epoch": 2.9805914972273566,
"grad_norm": 1.7822940349578857,
"learning_rate": 3.59465936323177e-06,
"loss": 1.0718,
"step": 3225
},
{
"epoch": 3.0,
"eval_accuracy": 0.4584103512014787,
"eval_f1_macro": 0.2095479509928179,
"eval_f1_micro": 0.4584103512014787,
"eval_f1_weighted": 0.2881768494245037,
"eval_loss": 1.0657285451889038,
"eval_precision_macro": 0.1528034504004929,
"eval_precision_micro": 0.4584103512014787,
"eval_precision_weighted": 0.21014005008866307,
"eval_recall_macro": 0.3333333333333333,
"eval_recall_micro": 0.4584103512014787,
"eval_recall_weighted": 0.4584103512014787,
"eval_runtime": 112.831,
"eval_samples_per_second": 9.59,
"eval_steps_per_second": 1.205,
"step": 3246
}
],
"logging_steps": 25,
"max_steps": 3246,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 429996844283904.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}