{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 9.999915498738517,
"global_step": 51770,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 5.791505791505791e-07,
"loss": 2.2583,
"step": 1
},
{
"epoch": 0.05,
"learning_rate": 0.00014478764478764478,
"loss": 1.4754,
"step": 250
},
{
"epoch": 0.1,
"learning_rate": 0.00028957528957528956,
"loss": 1.2393,
"step": 500
},
{
"epoch": 0.14,
"learning_rate": 0.0002999848327088156,
"loss": 1.2015,
"step": 750
},
{
"epoch": 0.19,
"learning_rate": 0.0002999345361024936,
"loss": 1.1787,
"step": 1000
},
{
"epoch": 0.24,
"learning_rate": 0.0002998490306892274,
"loss": 1.1622,
"step": 1250
},
{
"epoch": 0.29,
"learning_rate": 0.0002997283365480707,
"loss": 1.1493,
"step": 1500
},
{
"epoch": 0.34,
"learning_rate": 0.0002995724820213708,
"loss": 1.1407,
"step": 1750
},
{
"epoch": 0.39,
"learning_rate": 0.00029938150370811345,
"loss": 1.1306,
"step": 2000
},
{
"epoch": 0.43,
"learning_rate": 0.0002991554464553282,
"loss": 1.1228,
"step": 2250
},
{
"epoch": 0.48,
"learning_rate": 0.0002988943633475569,
"loss": 1.1142,
"step": 2500
},
{
"epoch": 0.53,
"learning_rate": 0.0002985983156943883,
"loss": 1.1082,
"step": 2750
},
{
"epoch": 0.58,
"learning_rate": 0.0002982673730160606,
"loss": 1.1035,
"step": 3000
},
{
"epoch": 0.63,
"learning_rate": 0.0002979016130271363,
"loss": 1.0949,
"step": 3250
},
{
"epoch": 0.68,
"learning_rate": 0.00029750112161825245,
"loss": 1.0909,
"step": 3500
},
{
"epoch": 0.72,
"learning_rate": 0.00029706599283595153,
"loss": 1.0862,
"step": 3750
},
{
"epoch": 0.77,
"learning_rate": 0.0002965963288605962,
"loss": 1.0799,
"step": 4000
},
{
"epoch": 0.82,
"learning_rate": 0.0002960922399823749,
"loss": 1.0752,
"step": 4250
},
{
"epoch": 0.87,
"learning_rate": 0.0002955538445754026,
"loss": 1.0717,
"step": 4500
},
{
"epoch": 0.92,
"learning_rate": 0.00029498126906992285,
"loss": 1.0675,
"step": 4750
},
{
"epoch": 0.97,
"learning_rate": 0.00029437464792261906,
"loss": 1.0629,
"step": 5000
},
{
"epoch": 1.0,
"eval_alliteration_score": 0.39176506206478956,
"eval_harmonic_meter_score": 0.10270844382058368,
"eval_harmonic_rhyme_score": 0.13433666525497734,
"eval_meter_score": 0.28353772535274874,
"eval_rhyme_score": 0.6209502922727462,
"eval_runtime": 2477.4307,
"eval_samples_per_second": 1.09,
"eval_steps_per_second": 0.136,
"step": 5177
},
{
"epoch": 1.01,
"learning_rate": 0.0002937341235850398,
"loss": 1.0558,
"step": 5250
},
{
"epoch": 1.06,
"learning_rate": 0.0002930598464701476,
"loss": 1.0356,
"step": 5500
},
{
"epoch": 1.11,
"learning_rate": 0.00029235197491699733,
"loss": 1.0344,
"step": 5750
},
{
"epoch": 1.16,
"learning_rate": 0.0002916106751535544,
"loss": 1.0315,
"step": 6000
},
{
"epoch": 1.21,
"learning_rate": 0.0002908361212576593,
"loss": 1.0306,
"step": 6250
},
{
"epoch": 1.26,
"learning_rate": 0.00029002849511614926,
"loss": 1.027,
"step": 6500
},
{
"epoch": 1.3,
"learning_rate": 0.0002891879863821466,
"loss": 1.0232,
"step": 6750
},
{
"epoch": 1.35,
"learning_rate": 0.00028831479243052253,
"loss": 1.021,
"step": 7000
},
{
"epoch": 1.4,
"learning_rate": 0.0002874091183115481,
"loss": 1.0189,
"step": 7250
},
{
"epoch": 1.45,
"learning_rate": 0.00028647117670274266,
"loss": 1.0161,
"step": 7500
},
{
"epoch": 1.5,
"learning_rate": 0.00028550118785893134,
"loss": 1.0132,
"step": 7750
},
{
"epoch": 1.55,
"learning_rate": 0.000284499379560523,
"loss": 1.0102,
"step": 8000
},
{
"epoch": 1.59,
"learning_rate": 0.0002834659870600212,
"loss": 1.0088,
"step": 8250
},
{
"epoch": 1.64,
"learning_rate": 0.0002824012530267801,
"loss": 1.0055,
"step": 8500
},
{
"epoch": 1.69,
"learning_rate": 0.00028130542749001906,
"loss": 1.0033,
"step": 8750
},
{
"epoch": 1.74,
"learning_rate": 0.0002801787677801088,
"loss": 1.001,
"step": 9000
},
{
"epoch": 1.79,
"learning_rate": 0.00027902153846814335,
"loss": 0.9979,
"step": 9250
},
{
"epoch": 1.83,
"learning_rate": 0.0002778340113038107,
"loss": 0.994,
"step": 9500
},
{
"epoch": 1.88,
"learning_rate": 0.00027661646515157913,
"loss": 0.9919,
"step": 9750
},
{
"epoch": 1.93,
"learning_rate": 0.0002753691859252116,
"loss": 0.9895,
"step": 10000
},
{
"epoch": 1.98,
"learning_rate": 0.0002740924665206254,
"loss": 0.9881,
"step": 10250
},
{
"epoch": 2.0,
"eval_alliteration_score": 0.4102959309494451,
"eval_harmonic_meter_score": 0.11239968726166898,
"eval_harmonic_rhyme_score": 0.2758249186922835,
"eval_meter_score": 0.30497402024728615,
"eval_rhyme_score": 0.7041029209046453,
"eval_runtime": 2601.7347,
"eval_samples_per_second": 1.038,
"eval_steps_per_second": 0.13,
"step": 10354
},
{
"epoch": 2.03,
"learning_rate": 0.000272786606747112,
"loss": 0.9675,
"step": 10500
},
{
"epoch": 2.08,
"learning_rate": 0.00027145191325693354,
"loss": 0.9528,
"step": 10750
},
{
"epoch": 2.12,
"learning_rate": 0.0002700886994733122,
"loss": 0.9521,
"step": 11000
},
{
"epoch": 2.17,
"learning_rate": 0.0002686972855168298,
"loss": 0.952,
"step": 11250
},
{
"epoch": 2.22,
"learning_rate": 0.00026727799813025446,
"loss": 0.9508,
"step": 11500
},
{
"epoch": 2.27,
"learning_rate": 0.0002658311706018126,
"loss": 0.9499,
"step": 11750
},
{
"epoch": 2.32,
"learning_rate": 0.000264357142686923,
"loss": 0.947,
"step": 12000
},
{
"epoch": 2.37,
"learning_rate": 0.0002628562605284134,
"loss": 0.9458,
"step": 12250
},
{
"epoch": 2.41,
"learning_rate": 0.0002613288765752361,
"loss": 0.9463,
"step": 12500
},
{
"epoch": 2.46,
"learning_rate": 0.0002597753494997032,
"loss": 0.9415,
"step": 12750
},
{
"epoch": 2.51,
"learning_rate": 0.00025819604411326055,
"loss": 0.9412,
"step": 13000
},
{
"epoch": 2.56,
"learning_rate": 0.0002565913312808196,
"loss": 0.9381,
"step": 13250
},
{
"epoch": 2.61,
"learning_rate": 0.00025496158783366803,
"loss": 0.9364,
"step": 13500
},
{
"epoch": 2.66,
"learning_rate": 0.00025330719648097947,
"loss": 0.9346,
"step": 13750
},
{
"epoch": 2.7,
"learning_rate": 0.0002516285457199425,
"loss": 0.9339,
"step": 14000
},
{
"epoch": 2.75,
"learning_rate": 0.00024992602974453066,
"loss": 0.9323,
"step": 14250
},
{
"epoch": 2.8,
"learning_rate": 0.000248200048352935,
"loss": 0.9284,
"step": 14500
},
{
"epoch": 2.85,
"learning_rate": 0.00024645100685368013,
"loss": 0.9269,
"step": 14750
},
{
"epoch": 2.9,
"learning_rate": 0.0002446793159704463,
"loss": 0.9252,
"step": 15000
},
{
"epoch": 2.95,
"learning_rate": 0.00024288539174562046,
"loss": 0.9243,
"step": 15250
},
{
"epoch": 2.99,
"learning_rate": 0.00024106965544259755,
"loss": 0.9211,
"step": 15500
},
{
"epoch": 3.0,
"eval_alliteration_score": 0.4366551398931154,
"eval_harmonic_meter_score": 0.11132001638651683,
"eval_harmonic_rhyme_score": 0.3526620304363614,
"eval_meter_score": 0.3150168760675491,
"eval_rhyme_score": 0.7517596634968854,
"eval_runtime": 2670.3356,
"eval_samples_per_second": 1.011,
"eval_steps_per_second": 0.127,
"step": 15531
},
{
"epoch": 3.04,
"learning_rate": 0.00023923253344685635,
"loss": 0.8862,
"step": 15750
},
{
"epoch": 3.09,
"learning_rate": 0.0002373744571658321,
"loss": 0.8819,
"step": 16000
},
{
"epoch": 3.14,
"learning_rate": 0.00023549586292761015,
"loss": 0.8832,
"step": 16250
},
{
"epoch": 3.19,
"learning_rate": 0.00023359719187846398,
"loss": 0.88,
"step": 16500
},
{
"epoch": 3.24,
"learning_rate": 0.00023167888987926164,
"loss": 0.881,
"step": 16750
},
{
"epoch": 3.28,
"learning_rate": 0.00022974140740076524,
"loss": 0.8792,
"step": 17000
},
{
"epoch": 3.33,
"learning_rate": 0.00022778519941784797,
"loss": 0.8803,
"step": 17250
},
{
"epoch": 3.38,
"learning_rate": 0.00022581072530265315,
"loss": 0.8774,
"step": 17500
},
{
"epoch": 3.43,
"learning_rate": 0.000223818448716721,
"loss": 0.8759,
"step": 17750
},
{
"epoch": 3.48,
"learning_rate": 0.000221808837502108,
"loss": 0.8737,
"step": 18000
},
{
"epoch": 3.53,
"learning_rate": 0.00021978236357152417,
"loss": 0.8717,
"step": 18250
},
{
"epoch": 3.57,
"learning_rate": 0.00021773950279751545,
"loss": 0.871,
"step": 18500
},
{
"epoch": 3.62,
"learning_rate": 0.0002156807349007151,
"loss": 0.8702,
"step": 18750
},
{
"epoch": 3.67,
"learning_rate": 0.00021360654333719197,
"loss": 0.8682,
"step": 19000
},
{
"epoch": 3.72,
"learning_rate": 0.00021151741518492183,
"loss": 0.8659,
"step": 19250
},
{
"epoch": 3.77,
"learning_rate": 0.00020941384102940748,
"loss": 0.8658,
"step": 19500
},
{
"epoch": 3.81,
"learning_rate": 0.00020729631484847592,
"loss": 0.8625,
"step": 19750
},
{
"epoch": 3.86,
"learning_rate": 0.00020516533389627854,
"loss": 0.8607,
"step": 20000
},
{
"epoch": 3.91,
"learning_rate": 0.00020302139858652179,
"loss": 0.8604,
"step": 20250
},
{
"epoch": 3.96,
"learning_rate": 0.00020086501237495636,
"loss": 0.8578,
"step": 20500
},
{
"epoch": 4.0,
"eval_alliteration_score": 0.4113367687177933,
"eval_harmonic_meter_score": 0.11086907691273673,
"eval_harmonic_rhyme_score": 0.4120432314176195,
"eval_meter_score": 0.30988666333117193,
"eval_rhyme_score": 0.7734423562173335,
"eval_runtime": 2384.5447,
"eval_samples_per_second": 1.132,
"eval_steps_per_second": 0.142,
"step": 20708
},
{
"epoch": 4.01,
"learning_rate": 0.00019869668164115161,
"loss": 0.8501,
"step": 20750
},
{
"epoch": 4.06,
"learning_rate": 0.00019651691556958343,
"loss": 0.8109,
"step": 21000
},
{
"epoch": 4.1,
"learning_rate": 0.00019432622603006387,
"loss": 0.8126,
"step": 21250
},
{
"epoch": 4.15,
"learning_rate": 0.00019212512745753942,
"loss": 0.8134,
"step": 21500
},
{
"epoch": 4.2,
"learning_rate": 0.00018991413673128776,
"loss": 0.8149,
"step": 21750
},
{
"epoch": 4.25,
"learning_rate": 0.0001876937730535397,
"loss": 0.8136,
"step": 22000
},
{
"epoch": 4.3,
"learning_rate": 0.00018546455782755646,
"loss": 0.8118,
"step": 22250
},
{
"epoch": 4.35,
"learning_rate": 0.0001832270145351892,
"loss": 0.811,
"step": 22500
},
{
"epoch": 4.39,
"learning_rate": 0.00018098166861395132,
"loss": 0.8101,
"step": 22750
},
{
"epoch": 4.44,
"learning_rate": 0.00017872904733363072,
"loss": 0.8102,
"step": 23000
},
{
"epoch": 4.49,
"learning_rate": 0.00017646967967247255,
"loss": 0.8072,
"step": 23250
},
{
"epoch": 4.54,
"learning_rate": 0.00017420409619295997,
"loss": 0.8071,
"step": 23500
},
{
"epoch": 4.59,
"learning_rate": 0.00017193282891722365,
"loss": 0.8049,
"step": 23750
},
{
"epoch": 4.64,
"learning_rate": 0.00016965641120210775,
"loss": 0.8035,
"step": 24000
},
{
"epoch": 4.68,
"learning_rate": 0.00016737537761392346,
"loss": 0.8033,
"step": 24250
},
{
"epoch": 4.73,
"learning_rate": 0.00016509026380291723,
"loss": 0.7998,
"step": 24500
},
{
"epoch": 4.78,
"learning_rate": 0.00016280160637748584,
"loss": 0.8009,
"step": 24750
},
{
"epoch": 4.83,
"learning_rate": 0.0001605099427781652,
"loss": 0.7986,
"step": 25000
},
{
"epoch": 4.88,
"learning_rate": 0.00015821581115142477,
"loss": 0.7954,
"step": 25250
},
{
"epoch": 4.93,
"learning_rate": 0.00015591975022329567,
"loss": 0.7945,
"step": 25500
},
{
"epoch": 4.97,
"learning_rate": 0.0001536222991728626,
"loss": 0.7938,
"step": 25750
},
{
"epoch": 5.0,
"eval_alliteration_score": 0.41413515189088657,
"eval_harmonic_meter_score": 0.1026455246284712,
"eval_harmonic_rhyme_score": 0.34333090390331705,
"eval_meter_score": 0.2882500313162144,
"eval_rhyme_score": 0.6791661142839286,
"eval_runtime": 2270.9559,
"eval_samples_per_second": 1.189,
"eval_steps_per_second": 0.149,
"step": 25885
},
{
"epoch": 5.02,
"learning_rate": 0.0001513239975056498,
"loss": 0.772,
"step": 26000
},
{
"epoch": 5.07,
"learning_rate": 0.00014902538492693,
"loss": 0.747,
"step": 26250
},
{
"epoch": 5.12,
"learning_rate": 0.00014672700121498661,
"loss": 0.7489,
"step": 26500
},
{
"epoch": 5.17,
"learning_rate": 0.00014442938609435875,
"loss": 0.748,
"step": 26750
},
{
"epoch": 5.22,
"learning_rate": 0.00014213307910909895,
"loss": 0.7485,
"step": 27000
},
{
"epoch": 5.26,
"learning_rate": 0.000139838619496073,
"loss": 0.7493,
"step": 27250
},
{
"epoch": 5.31,
"learning_rate": 0.00013754654605833231,
"loss": 0.7471,
"step": 27500
},
{
"epoch": 5.36,
"learning_rate": 0.0001352573970385877,
"loss": 0.7466,
"step": 27750
},
{
"epoch": 5.41,
"learning_rate": 0.00013297170999281534,
"loss": 0.7462,
"step": 28000
},
{
"epoch": 5.46,
"learning_rate": 0.0001306900216640233,
"loss": 0.7451,
"step": 28250
},
{
"epoch": 5.51,
"learning_rate": 0.0001284128678562096,
"loss": 0.7454,
"step": 28500
},
{
"epoch": 5.55,
"learning_rate": 0.0001261407833085402,
"loss": 0.7424,
"step": 28750
},
{
"epoch": 5.6,
"learning_rate": 0.00012387430156977778,
"loss": 0.7414,
"step": 29000
},
{
"epoch": 5.65,
"learning_rate": 0.00012161395487298911,
"loss": 0.7405,
"step": 29250
},
{
"epoch": 5.7,
"learning_rate": 0.00011936027401056238,
"loss": 0.7379,
"step": 29500
},
{
"epoch": 5.75,
"learning_rate": 0.00011711378820956193,
"loss": 0.738,
"step": 29750
},
{
"epoch": 5.79,
"learning_rate": 0.00011487502500745135,
"loss": 0.7363,
"step": 30000
},
{
"epoch": 5.84,
"learning_rate": 0.00011264451012821272,
"loss": 0.7347,
"step": 30250
},
{
"epoch": 5.89,
"learning_rate": 0.00011042276735889216,
"loss": 0.7321,
"step": 30500
},
{
"epoch": 5.94,
"learning_rate": 0.00010821031842659992,
"loss": 0.7332,
"step": 30750
},
{
"epoch": 5.99,
"learning_rate": 0.00010600768287599417,
"loss": 0.7313,
"step": 31000
},
{
"epoch": 6.0,
"eval_alliteration_score": 0.431023911710607,
"eval_harmonic_meter_score": 0.10282598031904473,
"eval_harmonic_rhyme_score": 0.43195083122804273,
"eval_meter_score": 0.3169839487583337,
"eval_rhyme_score": 0.7959113397342684,
"eval_runtime": 2406.9132,
"eval_samples_per_second": 1.122,
"eval_steps_per_second": 0.14,
"step": 31062
},
{
"epoch": 6.04,
"learning_rate": 0.00010381537794727796,
"loss": 0.6992,
"step": 31250
},
{
"epoch": 6.08,
"learning_rate": 0.00010163391845473613,
"loss": 0.6893,
"step": 31500
},
{
"epoch": 6.13,
"learning_rate": 9.946381666584314e-05,
"loss": 0.6898,
"step": 31750
},
{
"epoch": 6.18,
"learning_rate": 9.7305582180968e-05,
"loss": 0.69,
"step": 32000
},
{
"epoch": 6.23,
"learning_rate": 9.51597218137062e-05,
"loss": 0.6907,
"step": 32250
},
{
"epoch": 6.28,
"learning_rate": 9.302673947186545e-05,
"loss": 0.6903,
"step": 32500
},
{
"epoch": 6.33,
"learning_rate": 9.090713603913437e-05,
"loss": 0.69,
"step": 32750
},
{
"epoch": 6.37,
"learning_rate": 8.88014092574609e-05,
"loss": 0.688,
"step": 33000
},
{
"epoch": 6.42,
"learning_rate": 8.671005361016862e-05,
"loss": 0.687,
"step": 33250
},
{
"epoch": 6.47,
"learning_rate": 8.463356020583833e-05,
"loss": 0.6859,
"step": 33500
},
{
"epoch": 6.52,
"learning_rate": 8.257241666298198e-05,
"loss": 0.6874,
"step": 33750
},
{
"epoch": 6.57,
"learning_rate": 8.05271069955365e-05,
"loss": 0.6849,
"step": 34000
},
{
"epoch": 6.62,
"learning_rate": 7.849811149920353e-05,
"loss": 0.6831,
"step": 34250
},
{
"epoch": 6.66,
"learning_rate": 7.648590663866274e-05,
"loss": 0.6843,
"step": 34500
},
{
"epoch": 6.71,
"learning_rate": 7.449096493568466e-05,
"loss": 0.6832,
"step": 34750
},
{
"epoch": 6.76,
"learning_rate": 7.25137548581695e-05,
"loss": 0.6816,
"step": 35000
},
{
"epoch": 6.81,
"learning_rate": 7.055474071013765e-05,
"loss": 0.6814,
"step": 35250
},
{
"epoch": 6.86,
"learning_rate": 6.861438252269817e-05,
"loss": 0.6788,
"step": 35500
},
{
"epoch": 6.91,
"learning_rate": 6.669313594602076e-05,
"loss": 0.681,
"step": 35750
},
{
"epoch": 6.95,
"learning_rate": 6.479145214233655e-05,
"loss": 0.6787,
"step": 36000
},
{
"epoch": 7.0,
"eval_alliteration_score": 0.419920073778051,
"eval_harmonic_meter_score": 0.10124233945058883,
"eval_harmonic_rhyme_score": 0.4258480329916534,
"eval_meter_score": 0.3190283478146982,
"eval_rhyme_score": 0.7980873008918292,
"eval_runtime": 2453.1033,
"eval_samples_per_second": 1.101,
"eval_steps_per_second": 0.138,
"step": 36239
},
{
"epoch": 7.0,
"learning_rate": 6.290977767999224e-05,
"loss": 0.676,
"step": 36250
},
{
"epoch": 7.05,
"learning_rate": 6.10485544285837e-05,
"loss": 0.6422,
"step": 36500
},
{
"epoch": 7.1,
"learning_rate": 5.920821945519259e-05,
"loss": 0.6424,
"step": 36750
},
{
"epoch": 7.15,
"learning_rate": 5.7389204921750964e-05,
"loss": 0.6446,
"step": 37000
},
{
"epoch": 7.2,
"learning_rate": 5.5591937983557386e-05,
"loss": 0.6426,
"step": 37250
},
{
"epoch": 7.24,
"learning_rate": 5.381684068896904e-05,
"loss": 0.6429,
"step": 37500
},
{
"epoch": 7.29,
"learning_rate": 5.2064329880293066e-05,
"loss": 0.6415,
"step": 37750
},
{
"epoch": 7.34,
"learning_rate": 5.0334817095900465e-05,
"loss": 0.6416,
"step": 38000
},
{
"epoch": 7.39,
"learning_rate": 4.862870847358517e-05,
"loss": 0.6414,
"step": 38250
},
{
"epoch": 7.44,
"learning_rate": 4.694640465519168e-05,
"loss": 0.6424,
"step": 38500
},
{
"epoch": 7.48,
"learning_rate": 4.528830069253341e-05,
"loss": 0.6407,
"step": 38750
},
{
"epoch": 7.53,
"learning_rate": 4.365478595462316e-05,
"loss": 0.6414,
"step": 39000
},
{
"epoch": 7.58,
"learning_rate": 4.2046244036238635e-05,
"loss": 0.6394,
"step": 39250
},
{
"epoch": 7.63,
"learning_rate": 4.0463052667843626e-05,
"loss": 0.6377,
"step": 39500
},
{
"epoch": 7.68,
"learning_rate": 3.8905583626886546e-05,
"loss": 0.6384,
"step": 39750
},
{
"epoch": 7.73,
"learning_rate": 3.7374202650496385e-05,
"loss": 0.6387,
"step": 40000
},
{
"epoch": 7.77,
"learning_rate": 3.586926934959779e-05,
"loss": 0.6388,
"step": 40250
},
{
"epoch": 7.82,
"learning_rate": 3.439113712446411e-05,
"loss": 0.6369,
"step": 40500
},
{
"epoch": 7.87,
"learning_rate": 3.294015308172949e-05,
"loss": 0.637,
"step": 40750
},
{
"epoch": 7.92,
"learning_rate": 3.151665795287821e-05,
"loss": 0.6353,
"step": 41000
},
{
"epoch": 7.97,
"learning_rate": 3.0120986014231658e-05,
"loss": 0.636,
"step": 41250
},
{
"epoch": 8.0,
"eval_alliteration_score": 0.44240757714634893,
"eval_harmonic_meter_score": 0.11214598745179055,
"eval_harmonic_rhyme_score": 0.4118503741406305,
"eval_meter_score": 0.3255113242746067,
"eval_rhyme_score": 0.7902240726132346,
"eval_runtime": 2501.3058,
"eval_samples_per_second": 1.079,
"eval_steps_per_second": 0.135,
"step": 41416
},
{
"epoch": 8.02,
"learning_rate": 2.8753465008450878e-05,
"loss": 0.613,
"step": 41500
},
{
"epoch": 8.06,
"learning_rate": 2.7414416067573497e-05,
"loss": 0.6104,
"step": 41750
},
{
"epoch": 8.11,
"learning_rate": 2.6104153637602887e-05,
"loss": 0.6121,
"step": 42000
},
{
"epoch": 8.16,
"learning_rate": 2.4822985404667573e-05,
"loss": 0.6116,
"step": 42250
},
{
"epoch": 8.21,
"learning_rate": 2.3571212222767847e-05,
"loss": 0.6112,
"step": 42500
},
{
"epoch": 8.26,
"learning_rate": 2.2349128043127078e-05,
"loss": 0.61,
"step": 42750
},
{
"epoch": 8.31,
"learning_rate": 2.115701984516349e-05,
"loss": 0.611,
"step": 43000
},
{
"epoch": 8.35,
"learning_rate": 1.999516756909961e-05,
"loss": 0.6095,
"step": 43250
},
{
"epoch": 8.4,
"learning_rate": 1.886384405022437e-05,
"loss": 0.6114,
"step": 43500
},
{
"epoch": 8.45,
"learning_rate": 1.7763314954823966e-05,
"loss": 0.6113,
"step": 43750
},
{
"epoch": 8.5,
"learning_rate": 1.669383871779581e-05,
"loss": 0.6121,
"step": 44000
},
{
"epoch": 8.55,
"learning_rate": 1.5655666481960795e-05,
"loss": 0.6104,
"step": 44250
},
{
"epoch": 8.6,
"learning_rate": 1.4649042039088254e-05,
"loss": 0.6114,
"step": 44500
},
{
"epoch": 8.64,
"learning_rate": 1.3674201772646525e-05,
"loss": 0.6089,
"step": 44750
},
{
"epoch": 8.69,
"learning_rate": 1.2731374602293753e-05,
"loss": 0.6079,
"step": 45000
},
{
"epoch": 8.74,
"learning_rate": 1.1820781930120982e-05,
"loss": 0.6103,
"step": 45250
},
{
"epoch": 8.79,
"learning_rate": 1.0942637588661024e-05,
"loss": 0.6087,
"step": 45500
},
{
"epoch": 8.84,
"learning_rate": 1.0097147790674365e-05,
"loss": 0.6091,
"step": 45750
},
{
"epoch": 8.89,
"learning_rate": 9.28451108072481e-06,
"loss": 0.6088,
"step": 46000
},
{
"epoch": 8.93,
"learning_rate": 8.504918288555651e-06,
"loss": 0.6089,
"step": 46250
},
{
"epoch": 8.98,
"learning_rate": 7.758552484277575e-06,
"loss": 0.6089,
"step": 46500
},
{
"epoch": 9.0,
"eval_alliteration_score": 0.4335748792270531,
"eval_harmonic_meter_score": 0.1091721650313209,
"eval_harmonic_rhyme_score": 0.4075133294099665,
"eval_meter_score": 0.3241651625589595,
"eval_rhyme_score": 0.7938227877904658,
"eval_runtime": 2386.2391,
"eval_samples_per_second": 1.131,
"eval_steps_per_second": 0.142,
"step": 46593
},
{
"epoch": 9.03,
"learning_rate": 7.045588935378604e-06,
"loss": 0.6036,
"step": 46750
},
{
"epoch": 9.08,
"learning_rate": 6.366195065566454e-06,
"loss": 0.5958,
"step": 47000
},
{
"epoch": 9.13,
"learning_rate": 5.7205304154527955e-06,
"loss": 0.5959,
"step": 47250
},
{
"epoch": 9.18,
"learning_rate": 5.108746605088709e-06,
"loss": 0.5972,
"step": 47500
},
{
"epoch": 9.22,
"learning_rate": 4.530987298359967e-06,
"loss": 0.5972,
"step": 47750
},
{
"epoch": 9.27,
"learning_rate": 3.987388169250816e-06,
"loss": 0.599,
"step": 48000
},
{
"epoch": 9.32,
"learning_rate": 3.4780768699839313e-06,
"loss": 0.5964,
"step": 48250
},
{
"epoch": 9.37,
"learning_rate": 3.0031730010441957e-06,
"loss": 0.5978,
"step": 48500
},
{
"epoch": 9.42,
"learning_rate": 2.562788083092998e-06,
"loss": 0.5971,
"step": 48750
},
{
"epoch": 9.46,
"learning_rate": 2.1570255307802486e-06,
"loss": 0.5957,
"step": 49000
},
{
"epoch": 9.51,
"learning_rate": 1.785980628459638e-06,
"loss": 0.5962,
"step": 49250
},
{
"epoch": 9.56,
"learning_rate": 1.4497405078132517e-06,
"loss": 0.5976,
"step": 49500
},
{
"epoch": 9.61,
"learning_rate": 1.1483841273906391e-06,
"loss": 0.5977,
"step": 49750
},
{
"epoch": 9.66,
"learning_rate": 8.819822540670762e-07,
"loss": 0.596,
"step": 50000
},
{
"epoch": 9.71,
"learning_rate": 6.505974464255936e-07,
"loss": 0.5967,
"step": 50250
},
{
"epoch": 9.75,
"learning_rate": 4.542840400665104e-07,
"loss": 0.5965,
"step": 50500
},
{
"epoch": 9.8,
"learning_rate": 2.9308813484780755e-07,
"loss": 0.5977,
"step": 50750
},
{
"epoch": 9.85,
"learning_rate": 1.670475840597041e-07,
"loss": 0.5965,
"step": 51000
},
{
"epoch": 9.9,
"learning_rate": 7.619198553558414e-08,
"loss": 0.5948,
"step": 51250
},
{
"epoch": 9.95,
"learning_rate": 2.0542674701656824e-08,
"loss": 0.598,
"step": 51500
},
{
"epoch": 10.0,
"learning_rate": 1.1271956673053472e-10,
"loss": 0.5966,
"step": 51750
},
{
"epoch": 10.0,
"eval_alliteration_score": 0.43619909502262444,
"eval_harmonic_meter_score": 0.10679777933598275,
"eval_harmonic_rhyme_score": 0.4260903757100796,
"eval_meter_score": 0.31873257964794116,
"eval_rhyme_score": 0.8030446845582035,
"eval_runtime": 2387.7503,
"eval_samples_per_second": 1.131,
"eval_steps_per_second": 0.142,
"step": 51770
},
{
"epoch": 10.0,
"step": 51770,
"total_flos": 2.202815059032277e+18,
"train_loss": 0.12071779469207651,
"train_runtime": 12056.8595,
"train_samples_per_second": 549.652,
"train_steps_per_second": 4.294
}
],
"max_steps": 51770,
"num_train_epochs": 10,
"total_flos": 2.202815059032277e+18,
"trial_name": null,
"trial_params": null
}