ArabartModel / checkpoint-50745 / trainer_state.json
Training in progress, epoch 15, checkpoint
{
"best_metric": 29.8805,
"best_model_checkpoint": "/content/drive/MyDrive/ArabartModel/checkpoint-50745",
"epoch": 15.0,
"eval_steps": 500,
"global_step": 50745,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.32722513089005234,
"grad_norm": 1.563672423362732,
"learning_rate": 4.6727748691099475e-05,
"loss": 0.0195,
"step": 500
},
{
"epoch": 0.6544502617801047,
"grad_norm": 1.7397737503051758,
"learning_rate": 4.3455497382198955e-05,
"loss": 0.0244,
"step": 1000
},
{
"epoch": 0.981675392670157,
"grad_norm": 3.006633758544922,
"learning_rate": 4.018324607329843e-05,
"loss": 0.0293,
"step": 1500
},
{
"epoch": 1.0,
"eval_loss": 5.626201629638672,
"eval_rouge1": 25.2705,
"eval_rouge2": 9.9865,
"eval_rougeL": 21.8678,
"eval_runtime": 372.5145,
"eval_samples_per_second": 5.892,
"eval_steps_per_second": 0.591,
"step": 1528
},
{
"epoch": 1.3089005235602094,
"grad_norm": 3.4732136726379395,
"learning_rate": 4.3455497382198955e-05,
"loss": 0.0456,
"step": 2000
},
{
"epoch": 1.6361256544502618,
"grad_norm": 2.2575156688690186,
"learning_rate": 4.181937172774869e-05,
"loss": 0.0619,
"step": 2500
},
{
"epoch": 1.9633507853403143,
"grad_norm": 6.030032157897949,
"learning_rate": 4.018324607329843e-05,
"loss": 0.0993,
"step": 3000
},
{
"epoch": 2.0,
"eval_loss": 5.542174339294434,
"eval_rouge1": 25.0744,
"eval_rouge2": 9.8534,
"eval_rougeL": 21.6992,
"eval_runtime": 411.9676,
"eval_samples_per_second": 5.328,
"eval_steps_per_second": 0.534,
"step": 3056
},
{
"epoch": 2.2905759162303667,
"grad_norm": 6.401626110076904,
"learning_rate": 3.8547120418848174e-05,
"loss": 0.21,
"step": 3500
},
{
"epoch": 2.6178010471204187,
"grad_norm": 5.806931495666504,
"learning_rate": 3.691099476439791e-05,
"loss": 0.3595,
"step": 4000
},
{
"epoch": 2.945026178010471,
"grad_norm": 5.840355396270752,
"learning_rate": 3.5274869109947647e-05,
"loss": 0.6436,
"step": 4500
},
{
"epoch": 3.0,
"eval_loss": 3.927168369293213,
"eval_rouge1": 25.5914,
"eval_rouge2": 10.2904,
"eval_rougeL": 22.3089,
"eval_runtime": 412.3529,
"eval_samples_per_second": 5.323,
"eval_steps_per_second": 0.534,
"step": 4584
},
{
"epoch": 3.2722513089005236,
"grad_norm": 5.320337772369385,
"learning_rate": 3.3638743455497386e-05,
"loss": 0.7617,
"step": 5000
},
{
"epoch": 3.599476439790576,
"grad_norm": 5.708410739898682,
"learning_rate": 3.2002617801047126e-05,
"loss": 0.9186,
"step": 5500
},
{
"epoch": 3.9267015706806285,
"grad_norm": 6.043923377990723,
"learning_rate": 3.036649214659686e-05,
"loss": 1.112,
"step": 6000
},
{
"epoch": 4.0,
"eval_loss": 3.2077620029449463,
"eval_rouge1": 26.5258,
"eval_rouge2": 11.116,
"eval_rougeL": 23.3381,
"eval_runtime": 397.8337,
"eval_samples_per_second": 5.517,
"eval_steps_per_second": 0.553,
"step": 6112
},
{
"epoch": 4.2539267015706805,
"grad_norm": 5.179211139678955,
"learning_rate": 2.87303664921466e-05,
"loss": 1.0997,
"step": 6500
},
{
"epoch": 4.581151832460733,
"grad_norm": 4.997713565826416,
"learning_rate": 2.709424083769634e-05,
"loss": 1.0736,
"step": 7000
},
{
"epoch": 4.908376963350785,
"grad_norm": 5.441833972930908,
"learning_rate": 2.545811518324607e-05,
"loss": 1.0866,
"step": 7500
},
{
"epoch": 5.0,
"eval_loss": 3.3156800270080566,
"eval_rouge1": 26.4275,
"eval_rouge2": 10.8484,
"eval_rougeL": 23.0503,
"eval_runtime": 402.2407,
"eval_samples_per_second": 5.457,
"eval_steps_per_second": 0.547,
"step": 7640
},
{
"epoch": 5.2356020942408374,
"grad_norm": 4.417330741882324,
"learning_rate": 2.382198952879581e-05,
"loss": 0.9343,
"step": 8000
},
{
"epoch": 5.56282722513089,
"grad_norm": 5.149570465087891,
"learning_rate": 2.218586387434555e-05,
"loss": 0.9473,
"step": 8500
},
{
"epoch": 5.890052356020942,
"grad_norm": 5.305272102355957,
"learning_rate": 2.054973821989529e-05,
"loss": 0.9589,
"step": 9000
},
{
"epoch": 6.0,
"eval_loss": 3.433619737625122,
"eval_rouge1": 26.3479,
"eval_rouge2": 10.8544,
"eval_rougeL": 23.0206,
"eval_runtime": 385.2891,
"eval_samples_per_second": 5.697,
"eval_steps_per_second": 0.571,
"step": 9168
},
{
"epoch": 6.217277486910994,
"grad_norm": 5.96689510345459,
"learning_rate": 1.8913612565445027e-05,
"loss": 0.8437,
"step": 9500
},
{
"epoch": 6.544502617801047,
"grad_norm": 5.179261207580566,
"learning_rate": 1.7277486910994763e-05,
"loss": 0.8582,
"step": 10000
},
{
"epoch": 6.871727748691099,
"grad_norm": 4.92213773727417,
"learning_rate": 1.5641361256544503e-05,
"loss": 0.8669,
"step": 10500
},
{
"epoch": 7.0,
"eval_loss": 3.551990032196045,
"eval_rouge1": 26.0878,
"eval_rouge2": 10.6444,
"eval_rougeL": 22.7671,
"eval_runtime": 413.1448,
"eval_samples_per_second": 5.313,
"eval_steps_per_second": 0.533,
"step": 10696
},
{
"epoch": 7.198952879581152,
"grad_norm": 4.538914203643799,
"learning_rate": 1.4005235602094241e-05,
"loss": 0.7803,
"step": 11000
},
{
"epoch": 7.526178010471204,
"grad_norm": 5.420250415802002,
"learning_rate": 1.236910994764398e-05,
"loss": 0.7777,
"step": 11500
},
{
"epoch": 7.853403141361256,
"grad_norm": 4.503458023071289,
"learning_rate": 1.0732984293193717e-05,
"loss": 0.7894,
"step": 12000
},
{
"epoch": 8.0,
"eval_loss": 3.652216672897339,
"eval_rouge1": 26.1239,
"eval_rouge2": 10.689,
"eval_rougeL": 22.8303,
"eval_runtime": 389.5703,
"eval_samples_per_second": 5.634,
"eval_steps_per_second": 0.565,
"step": 12224
},
{
"epoch": 8.180628272251308,
"grad_norm": 4.9211812019348145,
"learning_rate": 9.096858638743457e-06,
"loss": 0.7505,
"step": 12500
},
{
"epoch": 8.507853403141361,
"grad_norm": 4.948258876800537,
"learning_rate": 7.4607329842931935e-06,
"loss": 0.731,
"step": 13000
},
{
"epoch": 8.835078534031414,
"grad_norm": 4.717357158660889,
"learning_rate": 5.824607329842932e-06,
"loss": 0.7331,
"step": 13500
},
{
"epoch": 9.0,
"eval_loss": 3.7317800521850586,
"eval_rouge1": 26.2374,
"eval_rouge2": 10.7166,
"eval_rougeL": 22.9298,
"eval_runtime": 385.8895,
"eval_samples_per_second": 5.688,
"eval_steps_per_second": 0.57,
"step": 13752
},
{
"epoch": 9.162303664921467,
"grad_norm": 4.790928363800049,
"learning_rate": 4.18848167539267e-06,
"loss": 0.7187,
"step": 14000
},
{
"epoch": 9.489528795811518,
"grad_norm": 4.424731254577637,
"learning_rate": 2.5523560209424085e-06,
"loss": 0.6988,
"step": 14500
},
{
"epoch": 9.81675392670157,
"grad_norm": 4.868335723876953,
"learning_rate": 9.162303664921465e-07,
"loss": 0.7027,
"step": 15000
},
{
"epoch": 10.0,
"eval_loss": 3.772279739379883,
"eval_rouge1": 26.1242,
"eval_rouge2": 10.7689,
"eval_rougeL": 22.8692,
"eval_runtime": 387.9211,
"eval_samples_per_second": 5.658,
"eval_steps_per_second": 0.567,
"step": 15280
},
{
"epoch": 4.141063318193962,
"grad_norm": 4.127313613891602,
"learning_rate": 3.619645560602013e-05,
"loss": 2.3856,
"step": 15500
},
{
"epoch": 4.2746460058776385,
"grad_norm": 3.7080917358398438,
"learning_rate": 3.575117998040787e-05,
"loss": 2.2343,
"step": 16000
},
{
"epoch": 4.408228693561314,
"grad_norm": 3.771442413330078,
"learning_rate": 3.530590435479562e-05,
"loss": 2.2498,
"step": 16500
},
{
"epoch": 4.54181138124499,
"grad_norm": 3.3883867263793945,
"learning_rate": 3.486062872918336e-05,
"loss": 2.2734,
"step": 17000
},
{
"epoch": 4.675394068928667,
"grad_norm": 3.82869029045105,
"learning_rate": 3.441535310357111e-05,
"loss": 2.2957,
"step": 17500
},
{
"epoch": 4.808976756612343,
"grad_norm": 3.480701208114624,
"learning_rate": 3.397007747795886e-05,
"loss": 2.255,
"step": 18000
},
{
"epoch": 4.94255944429602,
"grad_norm": 3.433276891708374,
"learning_rate": 3.3524801852346606e-05,
"loss": 2.2527,
"step": 18500
},
{
"epoch": 5.0,
"eval_loss": 2.624432325363159,
"eval_rouge1": 27.5622,
"eval_rouge2": 11.8188,
"eval_rougeL": 24.2339,
"eval_runtime": 862.1361,
"eval_samples_per_second": 5.439,
"eval_steps_per_second": 0.544,
"step": 18715
},
{
"epoch": 5.0761421319796955,
"grad_norm": 3.8290796279907227,
"learning_rate": 3.307952622673435e-05,
"loss": 2.1614,
"step": 19000
},
{
"epoch": 5.209724819663371,
"grad_norm": 4.148732662200928,
"learning_rate": 3.2634250601122095e-05,
"loss": 2.028,
"step": 19500
},
{
"epoch": 5.343307507347048,
"grad_norm": 3.9491941928863525,
"learning_rate": 3.218897497550984e-05,
"loss": 2.0413,
"step": 20000
},
{
"epoch": 5.476890195030724,
"grad_norm": 4.000278472900391,
"learning_rate": 3.174369934989759e-05,
"loss": 2.1058,
"step": 20500
},
{
"epoch": 5.6104728827144,
"grad_norm": 4.109215259552002,
"learning_rate": 3.129842372428534e-05,
"loss": 2.0563,
"step": 21000
},
{
"epoch": 5.744055570398077,
"grad_norm": 3.3794937133789062,
"learning_rate": 3.085314809867308e-05,
"loss": 2.0737,
"step": 21500
},
{
"epoch": 5.8776382580817526,
"grad_norm": 3.326864719390869,
"learning_rate": 3.0407872473060827e-05,
"loss": 2.085,
"step": 22000
},
{
"epoch": 6.0,
"eval_loss": 2.6439783573150635,
"eval_rouge1": 27.1127,
"eval_rouge2": 11.6364,
"eval_rougeL": 23.8039,
"eval_runtime": 879.7933,
"eval_samples_per_second": 5.33,
"eval_steps_per_second": 0.533,
"step": 22458
},
{
"epoch": 6.011220945765428,
"grad_norm": 3.5264782905578613,
"learning_rate": 2.9962596847448572e-05,
"loss": 1.8559,
"step": 22500
},
{
"epoch": 6.144803633449105,
"grad_norm": 3.7577388286590576,
"learning_rate": 2.951732122183632e-05,
"loss": 1.8982,
"step": 23000
},
{
"epoch": 6.278386321132781,
"grad_norm": 3.2084107398986816,
"learning_rate": 2.907204559622406e-05,
"loss": 1.8801,
"step": 23500
},
{
"epoch": 6.411969008816458,
"grad_norm": 4.275125503540039,
"learning_rate": 2.8626769970611812e-05,
"loss": 1.9144,
"step": 24000
},
{
"epoch": 6.545551696500134,
"grad_norm": 3.5503015518188477,
"learning_rate": 2.8181494344999553e-05,
"loss": 1.9229,
"step": 24500
},
{
"epoch": 6.67913438418381,
"grad_norm": 3.489950656890869,
"learning_rate": 2.77362187193873e-05,
"loss": 1.9033,
"step": 25000
},
{
"epoch": 6.812717071867486,
"grad_norm": 3.5493781566619873,
"learning_rate": 2.7290943093775046e-05,
"loss": 1.9058,
"step": 25500
},
{
"epoch": 6.946299759551162,
"grad_norm": 3.603605031967163,
"learning_rate": 2.6845667468162793e-05,
"loss": 1.8774,
"step": 26000
},
{
"epoch": 7.0,
"eval_loss": 2.670841693878174,
"eval_rouge1": 27.7286,
"eval_rouge2": 11.9572,
"eval_rougeL": 24.3316,
"eval_runtime": 903.5292,
"eval_samples_per_second": 5.19,
"eval_steps_per_second": 0.519,
"step": 26201
},
{
"epoch": 7.079882447234838,
"grad_norm": 3.9646942615509033,
"learning_rate": 2.640039184255054e-05,
"loss": 1.8442,
"step": 26500
},
{
"epoch": 7.213465134918515,
"grad_norm": 4.019280433654785,
"learning_rate": 2.5955116216938286e-05,
"loss": 1.729,
"step": 27000
},
{
"epoch": 7.347047822602191,
"grad_norm": 3.9131412506103516,
"learning_rate": 2.5509840591326034e-05,
"loss": 1.7656,
"step": 27500
},
{
"epoch": 7.480630510285867,
"grad_norm": 3.336970806121826,
"learning_rate": 2.5064564965713778e-05,
"loss": 1.7733,
"step": 28000
},
{
"epoch": 7.614213197969543,
"grad_norm": 3.7517452239990234,
"learning_rate": 2.4619289340101523e-05,
"loss": 1.7842,
"step": 28500
},
{
"epoch": 7.747795885653219,
"grad_norm": 3.8601882457733154,
"learning_rate": 2.4174013714489267e-05,
"loss": 1.7876,
"step": 29000
},
{
"epoch": 7.881378573336896,
"grad_norm": 3.928703784942627,
"learning_rate": 2.372873808887702e-05,
"loss": 1.7826,
"step": 29500
},
{
"epoch": 8.0,
"eval_loss": 2.7165374755859375,
"eval_rouge1": 27.5923,
"eval_rouge2": 11.9,
"eval_rougeL": 24.2586,
"eval_runtime": 898.7001,
"eval_samples_per_second": 5.218,
"eval_steps_per_second": 0.522,
"step": 29944
},
{
"epoch": 9.151921903599757,
"grad_norm": 4.411174774169922,
"learning_rate": 1.949359365466748e-05,
"loss": 1.3756,
"step": 30000
},
{
"epoch": 9.304453935326418,
"grad_norm": 4.3166985511779785,
"learning_rate": 1.8985153548911936e-05,
"loss": 1.4059,
"step": 30500
},
{
"epoch": 9.45698596705308,
"grad_norm": 3.6181671619415283,
"learning_rate": 1.8476713443156397e-05,
"loss": 1.4086,
"step": 31000
},
{
"epoch": 9.609517998779744,
"grad_norm": 4.077247142791748,
"learning_rate": 1.7968273337400855e-05,
"loss": 1.417,
"step": 31500
},
{
"epoch": 9.762050030506407,
"grad_norm": 3.4715030193328857,
"learning_rate": 1.7459833231645312e-05,
"loss": 1.4021,
"step": 32000
},
{
"epoch": 9.91458206223307,
"grad_norm": 3.7810068130493164,
"learning_rate": 1.6951393125889773e-05,
"loss": 1.4047,
"step": 32500
},
{
"epoch": 10.0,
"eval_loss": 2.808703899383545,
"eval_rouge1": 27.8772,
"eval_rouge2": 12.0303,
"eval_rougeL": 24.5034,
"eval_runtime": 743.946,
"eval_samples_per_second": 5.877,
"eval_steps_per_second": 0.589,
"step": 32780
},
{
"epoch": 10.06711409395973,
"grad_norm": 3.981015682220459,
"learning_rate": 1.644295302013423e-05,
"loss": 1.3691,
"step": 33000
},
{
"epoch": 10.219646125686394,
"grad_norm": 3.829094648361206,
"learning_rate": 1.5934512914378688e-05,
"loss": 1.3323,
"step": 33500
},
{
"epoch": 10.372178157413057,
"grad_norm": 3.636704206466675,
"learning_rate": 1.5426072808623145e-05,
"loss": 1.3337,
"step": 34000
},
{
"epoch": 10.52471018913972,
"grad_norm": 4.167761325836182,
"learning_rate": 1.4917632702867604e-05,
"loss": 1.3512,
"step": 34500
},
{
"epoch": 10.677242220866383,
"grad_norm": 3.829911470413208,
"learning_rate": 1.4409192597112062e-05,
"loss": 1.3453,
"step": 35000
},
{
"epoch": 10.829774252593044,
"grad_norm": 4.00424861907959,
"learning_rate": 1.390075249135652e-05,
"loss": 1.3308,
"step": 35500
},
{
"epoch": 10.982306284319707,
"grad_norm": 4.3669657707214355,
"learning_rate": 1.3392312385600977e-05,
"loss": 1.3781,
"step": 36000
},
{
"epoch": 11.0,
"eval_loss": 2.769470453262329,
"eval_rouge1": 28.0814,
"eval_rouge2": 12.3353,
"eval_rougeL": 24.7091,
"eval_runtime": 745.2741,
"eval_samples_per_second": 5.866,
"eval_steps_per_second": 0.588,
"step": 36058
},
{
"epoch": 11.13483831604637,
"grad_norm": 3.7836453914642334,
"learning_rate": 1.2883872279845436e-05,
"loss": 1.275,
"step": 36500
},
{
"epoch": 11.287370347773033,
"grad_norm": 4.0213751792907715,
"learning_rate": 1.2375432174089893e-05,
"loss": 1.2572,
"step": 37000
},
{
"epoch": 11.439902379499696,
"grad_norm": 3.626115083694458,
"learning_rate": 1.186699206833435e-05,
"loss": 1.2687,
"step": 37500
},
{
"epoch": 11.592434411226357,
"grad_norm": 4.4103007316589355,
"learning_rate": 1.1358551962578808e-05,
"loss": 1.2807,
"step": 38000
},
{
"epoch": 11.74496644295302,
"grad_norm": 3.681063413619995,
"learning_rate": 1.0850111856823267e-05,
"loss": 1.2986,
"step": 38500
},
{
"epoch": 11.897498474679683,
"grad_norm": 4.0368170738220215,
"learning_rate": 1.0341671751067725e-05,
"loss": 1.2784,
"step": 39000
},
{
"epoch": 12.0,
"eval_loss": 2.83615779876709,
"eval_rouge1": 28.2406,
"eval_rouge2": 12.197,
"eval_rougeL": 24.7367,
"eval_runtime": 733.2323,
"eval_samples_per_second": 5.963,
"eval_steps_per_second": 0.597,
"step": 39336
},
{
"epoch": 12.050030506406346,
"grad_norm": 4.6332173347473145,
"learning_rate": 9.833231645312182e-06,
"loss": 1.2071,
"step": 39500
},
{
"epoch": 12.202562538133009,
"grad_norm": 3.7481937408447266,
"learning_rate": 9.324791539556641e-06,
"loss": 1.2085,
"step": 40000
},
{
"epoch": 12.35509456985967,
"grad_norm": 4.701822757720947,
"learning_rate": 8.816351433801099e-06,
"loss": 1.2225,
"step": 40500
},
{
"epoch": 12.507626601586333,
"grad_norm": 4.165287494659424,
"learning_rate": 8.307911328045556e-06,
"loss": 1.2187,
"step": 41000
},
{
"epoch": 12.660158633312996,
"grad_norm": 4.279840469360352,
"learning_rate": 7.799471222290014e-06,
"loss": 1.2157,
"step": 41500
},
{
"epoch": 12.812690665039659,
"grad_norm": 4.762269973754883,
"learning_rate": 7.291031116534472e-06,
"loss": 1.2277,
"step": 42000
},
{
"epoch": 12.965222696766322,
"grad_norm": 4.380380153656006,
"learning_rate": 6.782591010778931e-06,
"loss": 1.2371,
"step": 42500
},
{
"epoch": 13.0,
"eval_loss": 2.921924114227295,
"eval_rouge1": 27.8556,
"eval_rouge2": 12.033,
"eval_rougeL": 24.4386,
"eval_runtime": 791.9797,
"eval_samples_per_second": 5.52,
"eval_steps_per_second": 0.553,
"step": 42614
},
{
"epoch": 12.710611882944132,
"grad_norm": 4.347652435302734,
"learning_rate": 7.631293723519558e-06,
"loss": 1.6632,
"step": 43000
},
{
"epoch": 12.858409695536507,
"grad_norm": 4.261178493499756,
"learning_rate": 7.138634348211647e-06,
"loss": 1.627,
"step": 43500
},
{
"epoch": 13.0,
"eval_loss": 3.073051929473877,
"eval_rouge1": 29.5215,
"eval_rouge2": 13.1839,
"eval_rougeL": 25.617,
"eval_runtime": 744.9289,
"eval_samples_per_second": 6.049,
"eval_steps_per_second": 0.605,
"step": 43979
},
{
"epoch": 13.00620750812888,
"grad_norm": 5.098820686340332,
"learning_rate": 6.645974972903735e-06,
"loss": 1.5807,
"step": 44000
},
{
"epoch": 13.154005320721254,
"grad_norm": 3.9083046913146973,
"learning_rate": 6.153315597595822e-06,
"loss": 1.562,
"step": 44500
},
{
"epoch": 13.301803133313626,
"grad_norm": 4.274121284484863,
"learning_rate": 5.66065622228791e-06,
"loss": 1.5721,
"step": 45000
},
{
"epoch": 13.449600945906,
"grad_norm": 4.222695827484131,
"learning_rate": 5.167996846979999e-06,
"loss": 1.5415,
"step": 45500
},
{
"epoch": 13.597398758498374,
"grad_norm": 4.467597484588623,
"learning_rate": 4.675337471672086e-06,
"loss": 1.5718,
"step": 46000
},
{
"epoch": 13.745196571090748,
"grad_norm": 4.109827995300293,
"learning_rate": 4.182678096364174e-06,
"loss": 1.5285,
"step": 46500
},
{
"epoch": 13.89299438368312,
"grad_norm": 4.74594783782959,
"learning_rate": 3.690018721056262e-06,
"loss": 1.5215,
"step": 47000
},
{
"epoch": 14.0,
"eval_loss": 3.0527069568634033,
"eval_rouge1": 29.7103,
"eval_rouge2": 13.2821,
"eval_rougeL": 25.7681,
"eval_runtime": 741.3618,
"eval_samples_per_second": 6.078,
"eval_steps_per_second": 0.608,
"step": 47362
},
{
"epoch": 14.040792196275495,
"grad_norm": 4.313952922821045,
"learning_rate": 1.4898019509311262e-05,
"loss": 1.4804,
"step": 47500
},
{
"epoch": 14.18859000886787,
"grad_norm": 4.262555122375488,
"learning_rate": 1.4528524977830329e-05,
"loss": 1.534,
"step": 48000
},
{
"epoch": 14.336387821460242,
"grad_norm": 4.611397743225098,
"learning_rate": 1.4159030446349395e-05,
"loss": 1.5168,
"step": 48500
},
{
"epoch": 14.484185634052617,
"grad_norm": 4.340530872344971,
"learning_rate": 1.378953591486846e-05,
"loss": 1.5254,
"step": 49000
},
{
"epoch": 14.63198344664499,
"grad_norm": 4.0280375480651855,
"learning_rate": 1.3420041383387528e-05,
"loss": 1.5317,
"step": 49500
},
{
"epoch": 14.779781259237364,
"grad_norm": 4.776717662811279,
"learning_rate": 1.3050546851906593e-05,
"loss": 1.5312,
"step": 50000
},
{
"epoch": 14.927579071829737,
"grad_norm": 4.440686225891113,
"learning_rate": 1.2681052320425657e-05,
"loss": 1.5348,
"step": 50500
},
{
"epoch": 15.0,
"eval_loss": 3.036102056503296,
"eval_rouge1": 29.8805,
"eval_rouge2": 13.4576,
"eval_rougeL": 25.9729,
"eval_runtime": 726.153,
"eval_samples_per_second": 6.205,
"eval_steps_per_second": 0.621,
"step": 50745
}
],
"logging_steps": 500,
"max_steps": 67660,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.564073437508772e+17,
"train_batch_size": 10,
"trial_name": null,
"trial_params": null
}
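
For reference, a minimal sketch of how this trainer_state.json could be inspected offline with plain Python. The file path "trainer_state.json" and the choice of printed fields are assumptions for illustration; judging from the values above, "best_metric" (29.8805) matches the final epoch's eval_rouge1, and evaluation entries in "log_history" are distinguished by the presence of an "eval_loss" key.

# Minimal sketch: load the trainer state and summarize its evaluation history.
# Assumes the file is saved locally as "trainer_state.json" (hypothetical path).
import json

with open("trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)

print("best metric:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

# Per-epoch evaluation entries carry "eval_loss"; intermediate training-log
# entries carry "loss" instead, so filter on the key that is present.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f'epoch {entry["epoch"]:>5.1f}  step {entry["step"]:>6}  '
              f'eval_loss {entry["eval_loss"]:.3f}  '
              f'rouge1 {entry["eval_rouge1"]:.2f}  '
              f'rougeL {entry["eval_rougeL"]:.2f}')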