ArabartModel / checkpoint-36058 / trainer_state.json
Training in progress, epoch 11, checkpoint (commit 986cf92)
{
"best_metric": 28.0814,
"best_model_checkpoint": "/content/drive/MyDrive/ArabartModel/checkpoint-36058",
"epoch": 11.0,
"eval_steps": 500,
"global_step": 36058,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.32722513089005234,
"grad_norm": 1.563672423362732,
"learning_rate": 4.6727748691099475e-05,
"loss": 0.0195,
"step": 500
},
{
"epoch": 0.6544502617801047,
"grad_norm": 1.7397737503051758,
"learning_rate": 4.3455497382198955e-05,
"loss": 0.0244,
"step": 1000
},
{
"epoch": 0.981675392670157,
"grad_norm": 3.006633758544922,
"learning_rate": 4.018324607329843e-05,
"loss": 0.0293,
"step": 1500
},
{
"epoch": 1.0,
"eval_loss": 5.626201629638672,
"eval_rouge1": 25.2705,
"eval_rouge2": 9.9865,
"eval_rougeL": 21.8678,
"eval_runtime": 372.5145,
"eval_samples_per_second": 5.892,
"eval_steps_per_second": 0.591,
"step": 1528
},
{
"epoch": 1.3089005235602094,
"grad_norm": 3.4732136726379395,
"learning_rate": 4.3455497382198955e-05,
"loss": 0.0456,
"step": 2000
},
{
"epoch": 1.6361256544502618,
"grad_norm": 2.2575156688690186,
"learning_rate": 4.181937172774869e-05,
"loss": 0.0619,
"step": 2500
},
{
"epoch": 1.9633507853403143,
"grad_norm": 6.030032157897949,
"learning_rate": 4.018324607329843e-05,
"loss": 0.0993,
"step": 3000
},
{
"epoch": 2.0,
"eval_loss": 5.542174339294434,
"eval_rouge1": 25.0744,
"eval_rouge2": 9.8534,
"eval_rougeL": 21.6992,
"eval_runtime": 411.9676,
"eval_samples_per_second": 5.328,
"eval_steps_per_second": 0.534,
"step": 3056
},
{
"epoch": 2.2905759162303667,
"grad_norm": 6.401626110076904,
"learning_rate": 3.8547120418848174e-05,
"loss": 0.21,
"step": 3500
},
{
"epoch": 2.6178010471204187,
"grad_norm": 5.806931495666504,
"learning_rate": 3.691099476439791e-05,
"loss": 0.3595,
"step": 4000
},
{
"epoch": 2.945026178010471,
"grad_norm": 5.840355396270752,
"learning_rate": 3.5274869109947647e-05,
"loss": 0.6436,
"step": 4500
},
{
"epoch": 3.0,
"eval_loss": 3.927168369293213,
"eval_rouge1": 25.5914,
"eval_rouge2": 10.2904,
"eval_rougeL": 22.3089,
"eval_runtime": 412.3529,
"eval_samples_per_second": 5.323,
"eval_steps_per_second": 0.534,
"step": 4584
},
{
"epoch": 3.2722513089005236,
"grad_norm": 5.320337772369385,
"learning_rate": 3.3638743455497386e-05,
"loss": 0.7617,
"step": 5000
},
{
"epoch": 3.599476439790576,
"grad_norm": 5.708410739898682,
"learning_rate": 3.2002617801047126e-05,
"loss": 0.9186,
"step": 5500
},
{
"epoch": 3.9267015706806285,
"grad_norm": 6.043923377990723,
"learning_rate": 3.036649214659686e-05,
"loss": 1.112,
"step": 6000
},
{
"epoch": 4.0,
"eval_loss": 3.2077620029449463,
"eval_rouge1": 26.5258,
"eval_rouge2": 11.116,
"eval_rougeL": 23.3381,
"eval_runtime": 397.8337,
"eval_samples_per_second": 5.517,
"eval_steps_per_second": 0.553,
"step": 6112
},
{
"epoch": 4.2539267015706805,
"grad_norm": 5.179211139678955,
"learning_rate": 2.87303664921466e-05,
"loss": 1.0997,
"step": 6500
},
{
"epoch": 4.581151832460733,
"grad_norm": 4.997713565826416,
"learning_rate": 2.709424083769634e-05,
"loss": 1.0736,
"step": 7000
},
{
"epoch": 4.908376963350785,
"grad_norm": 5.441833972930908,
"learning_rate": 2.545811518324607e-05,
"loss": 1.0866,
"step": 7500
},
{
"epoch": 5.0,
"eval_loss": 3.3156800270080566,
"eval_rouge1": 26.4275,
"eval_rouge2": 10.8484,
"eval_rougeL": 23.0503,
"eval_runtime": 402.2407,
"eval_samples_per_second": 5.457,
"eval_steps_per_second": 0.547,
"step": 7640
},
{
"epoch": 5.2356020942408374,
"grad_norm": 4.417330741882324,
"learning_rate": 2.382198952879581e-05,
"loss": 0.9343,
"step": 8000
},
{
"epoch": 5.56282722513089,
"grad_norm": 5.149570465087891,
"learning_rate": 2.218586387434555e-05,
"loss": 0.9473,
"step": 8500
},
{
"epoch": 5.890052356020942,
"grad_norm": 5.305272102355957,
"learning_rate": 2.054973821989529e-05,
"loss": 0.9589,
"step": 9000
},
{
"epoch": 6.0,
"eval_loss": 3.433619737625122,
"eval_rouge1": 26.3479,
"eval_rouge2": 10.8544,
"eval_rougeL": 23.0206,
"eval_runtime": 385.2891,
"eval_samples_per_second": 5.697,
"eval_steps_per_second": 0.571,
"step": 9168
},
{
"epoch": 6.217277486910994,
"grad_norm": 5.96689510345459,
"learning_rate": 1.8913612565445027e-05,
"loss": 0.8437,
"step": 9500
},
{
"epoch": 6.544502617801047,
"grad_norm": 5.179261207580566,
"learning_rate": 1.7277486910994763e-05,
"loss": 0.8582,
"step": 10000
},
{
"epoch": 6.871727748691099,
"grad_norm": 4.92213773727417,
"learning_rate": 1.5641361256544503e-05,
"loss": 0.8669,
"step": 10500
},
{
"epoch": 7.0,
"eval_loss": 3.551990032196045,
"eval_rouge1": 26.0878,
"eval_rouge2": 10.6444,
"eval_rougeL": 22.7671,
"eval_runtime": 413.1448,
"eval_samples_per_second": 5.313,
"eval_steps_per_second": 0.533,
"step": 10696
},
{
"epoch": 7.198952879581152,
"grad_norm": 4.538914203643799,
"learning_rate": 1.4005235602094241e-05,
"loss": 0.7803,
"step": 11000
},
{
"epoch": 7.526178010471204,
"grad_norm": 5.420250415802002,
"learning_rate": 1.236910994764398e-05,
"loss": 0.7777,
"step": 11500
},
{
"epoch": 7.853403141361256,
"grad_norm": 4.503458023071289,
"learning_rate": 1.0732984293193717e-05,
"loss": 0.7894,
"step": 12000
},
{
"epoch": 8.0,
"eval_loss": 3.652216672897339,
"eval_rouge1": 26.1239,
"eval_rouge2": 10.689,
"eval_rougeL": 22.8303,
"eval_runtime": 389.5703,
"eval_samples_per_second": 5.634,
"eval_steps_per_second": 0.565,
"step": 12224
},
{
"epoch": 8.180628272251308,
"grad_norm": 4.9211812019348145,
"learning_rate": 9.096858638743457e-06,
"loss": 0.7505,
"step": 12500
},
{
"epoch": 8.507853403141361,
"grad_norm": 4.948258876800537,
"learning_rate": 7.4607329842931935e-06,
"loss": 0.731,
"step": 13000
},
{
"epoch": 8.835078534031414,
"grad_norm": 4.717357158660889,
"learning_rate": 5.824607329842932e-06,
"loss": 0.7331,
"step": 13500
},
{
"epoch": 9.0,
"eval_loss": 3.7317800521850586,
"eval_rouge1": 26.2374,
"eval_rouge2": 10.7166,
"eval_rougeL": 22.9298,
"eval_runtime": 385.8895,
"eval_samples_per_second": 5.688,
"eval_steps_per_second": 0.57,
"step": 13752
},
{
"epoch": 9.162303664921467,
"grad_norm": 4.790928363800049,
"learning_rate": 4.18848167539267e-06,
"loss": 0.7187,
"step": 14000
},
{
"epoch": 9.489528795811518,
"grad_norm": 4.424731254577637,
"learning_rate": 2.5523560209424085e-06,
"loss": 0.6988,
"step": 14500
},
{
"epoch": 9.81675392670157,
"grad_norm": 4.868335723876953,
"learning_rate": 9.162303664921465e-07,
"loss": 0.7027,
"step": 15000
},
{
"epoch": 10.0,
"eval_loss": 3.772279739379883,
"eval_rouge1": 26.1242,
"eval_rouge2": 10.7689,
"eval_rougeL": 22.8692,
"eval_runtime": 387.9211,
"eval_samples_per_second": 5.658,
"eval_steps_per_second": 0.567,
"step": 15280
},
{
"epoch": 4.141063318193962,
"grad_norm": 4.127313613891602,
"learning_rate": 3.619645560602013e-05,
"loss": 2.3856,
"step": 15500
},
{
"epoch": 4.2746460058776385,
"grad_norm": 3.7080917358398438,
"learning_rate": 3.575117998040787e-05,
"loss": 2.2343,
"step": 16000
},
{
"epoch": 4.408228693561314,
"grad_norm": 3.771442413330078,
"learning_rate": 3.530590435479562e-05,
"loss": 2.2498,
"step": 16500
},
{
"epoch": 4.54181138124499,
"grad_norm": 3.3883867263793945,
"learning_rate": 3.486062872918336e-05,
"loss": 2.2734,
"step": 17000
},
{
"epoch": 4.675394068928667,
"grad_norm": 3.82869029045105,
"learning_rate": 3.441535310357111e-05,
"loss": 2.2957,
"step": 17500
},
{
"epoch": 4.808976756612343,
"grad_norm": 3.480701208114624,
"learning_rate": 3.397007747795886e-05,
"loss": 2.255,
"step": 18000
},
{
"epoch": 4.94255944429602,
"grad_norm": 3.433276891708374,
"learning_rate": 3.3524801852346606e-05,
"loss": 2.2527,
"step": 18500
},
{
"epoch": 5.0,
"eval_loss": 2.624432325363159,
"eval_rouge1": 27.5622,
"eval_rouge2": 11.8188,
"eval_rougeL": 24.2339,
"eval_runtime": 862.1361,
"eval_samples_per_second": 5.439,
"eval_steps_per_second": 0.544,
"step": 18715
},
{
"epoch": 5.0761421319796955,
"grad_norm": 3.8290796279907227,
"learning_rate": 3.307952622673435e-05,
"loss": 2.1614,
"step": 19000
},
{
"epoch": 5.209724819663371,
"grad_norm": 4.148732662200928,
"learning_rate": 3.2634250601122095e-05,
"loss": 2.028,
"step": 19500
},
{
"epoch": 5.343307507347048,
"grad_norm": 3.9491941928863525,
"learning_rate": 3.218897497550984e-05,
"loss": 2.0413,
"step": 20000
},
{
"epoch": 5.476890195030724,
"grad_norm": 4.000278472900391,
"learning_rate": 3.174369934989759e-05,
"loss": 2.1058,
"step": 20500
},
{
"epoch": 5.6104728827144,
"grad_norm": 4.109215259552002,
"learning_rate": 3.129842372428534e-05,
"loss": 2.0563,
"step": 21000
},
{
"epoch": 5.744055570398077,
"grad_norm": 3.3794937133789062,
"learning_rate": 3.085314809867308e-05,
"loss": 2.0737,
"step": 21500
},
{
"epoch": 5.8776382580817526,
"grad_norm": 3.326864719390869,
"learning_rate": 3.0407872473060827e-05,
"loss": 2.085,
"step": 22000
},
{
"epoch": 6.0,
"eval_loss": 2.6439783573150635,
"eval_rouge1": 27.1127,
"eval_rouge2": 11.6364,
"eval_rougeL": 23.8039,
"eval_runtime": 879.7933,
"eval_samples_per_second": 5.33,
"eval_steps_per_second": 0.533,
"step": 22458
},
{
"epoch": 6.011220945765428,
"grad_norm": 3.5264782905578613,
"learning_rate": 2.9962596847448572e-05,
"loss": 1.8559,
"step": 22500
},
{
"epoch": 6.144803633449105,
"grad_norm": 3.7577388286590576,
"learning_rate": 2.951732122183632e-05,
"loss": 1.8982,
"step": 23000
},
{
"epoch": 6.278386321132781,
"grad_norm": 3.2084107398986816,
"learning_rate": 2.907204559622406e-05,
"loss": 1.8801,
"step": 23500
},
{
"epoch": 6.411969008816458,
"grad_norm": 4.275125503540039,
"learning_rate": 2.8626769970611812e-05,
"loss": 1.9144,
"step": 24000
},
{
"epoch": 6.545551696500134,
"grad_norm": 3.5503015518188477,
"learning_rate": 2.8181494344999553e-05,
"loss": 1.9229,
"step": 24500
},
{
"epoch": 6.67913438418381,
"grad_norm": 3.489950656890869,
"learning_rate": 2.77362187193873e-05,
"loss": 1.9033,
"step": 25000
},
{
"epoch": 6.812717071867486,
"grad_norm": 3.5493781566619873,
"learning_rate": 2.7290943093775046e-05,
"loss": 1.9058,
"step": 25500
},
{
"epoch": 6.946299759551162,
"grad_norm": 3.603605031967163,
"learning_rate": 2.6845667468162793e-05,
"loss": 1.8774,
"step": 26000
},
{
"epoch": 7.0,
"eval_loss": 2.670841693878174,
"eval_rouge1": 27.7286,
"eval_rouge2": 11.9572,
"eval_rougeL": 24.3316,
"eval_runtime": 903.5292,
"eval_samples_per_second": 5.19,
"eval_steps_per_second": 0.519,
"step": 26201
},
{
"epoch": 7.079882447234838,
"grad_norm": 3.9646942615509033,
"learning_rate": 2.640039184255054e-05,
"loss": 1.8442,
"step": 26500
},
{
"epoch": 7.213465134918515,
"grad_norm": 4.019280433654785,
"learning_rate": 2.5955116216938286e-05,
"loss": 1.729,
"step": 27000
},
{
"epoch": 7.347047822602191,
"grad_norm": 3.9131412506103516,
"learning_rate": 2.5509840591326034e-05,
"loss": 1.7656,
"step": 27500
},
{
"epoch": 7.480630510285867,
"grad_norm": 3.336970806121826,
"learning_rate": 2.5064564965713778e-05,
"loss": 1.7733,
"step": 28000
},
{
"epoch": 7.614213197969543,
"grad_norm": 3.7517452239990234,
"learning_rate": 2.4619289340101523e-05,
"loss": 1.7842,
"step": 28500
},
{
"epoch": 7.747795885653219,
"grad_norm": 3.8601882457733154,
"learning_rate": 2.4174013714489267e-05,
"loss": 1.7876,
"step": 29000
},
{
"epoch": 7.881378573336896,
"grad_norm": 3.928703784942627,
"learning_rate": 2.372873808887702e-05,
"loss": 1.7826,
"step": 29500
},
{
"epoch": 8.0,
"eval_loss": 2.7165374755859375,
"eval_rouge1": 27.5923,
"eval_rouge2": 11.9,
"eval_rougeL": 24.2586,
"eval_runtime": 898.7001,
"eval_samples_per_second": 5.218,
"eval_steps_per_second": 0.522,
"step": 29944
},
{
"epoch": 9.151921903599757,
"grad_norm": 4.411174774169922,
"learning_rate": 1.949359365466748e-05,
"loss": 1.3756,
"step": 30000
},
{
"epoch": 9.304453935326418,
"grad_norm": 4.3166985511779785,
"learning_rate": 1.8985153548911936e-05,
"loss": 1.4059,
"step": 30500
},
{
"epoch": 9.45698596705308,
"grad_norm": 3.6181671619415283,
"learning_rate": 1.8476713443156397e-05,
"loss": 1.4086,
"step": 31000
},
{
"epoch": 9.609517998779744,
"grad_norm": 4.077247142791748,
"learning_rate": 1.7968273337400855e-05,
"loss": 1.417,
"step": 31500
},
{
"epoch": 9.762050030506407,
"grad_norm": 3.4715030193328857,
"learning_rate": 1.7459833231645312e-05,
"loss": 1.4021,
"step": 32000
},
{
"epoch": 9.91458206223307,
"grad_norm": 3.7810068130493164,
"learning_rate": 1.6951393125889773e-05,
"loss": 1.4047,
"step": 32500
},
{
"epoch": 10.0,
"eval_loss": 2.808703899383545,
"eval_rouge1": 27.8772,
"eval_rouge2": 12.0303,
"eval_rougeL": 24.5034,
"eval_runtime": 743.946,
"eval_samples_per_second": 5.877,
"eval_steps_per_second": 0.589,
"step": 32780
},
{
"epoch": 10.06711409395973,
"grad_norm": 3.981015682220459,
"learning_rate": 1.644295302013423e-05,
"loss": 1.3691,
"step": 33000
},
{
"epoch": 10.219646125686394,
"grad_norm": 3.829094648361206,
"learning_rate": 1.5934512914378688e-05,
"loss": 1.3323,
"step": 33500
},
{
"epoch": 10.372178157413057,
"grad_norm": 3.636704206466675,
"learning_rate": 1.5426072808623145e-05,
"loss": 1.3337,
"step": 34000
},
{
"epoch": 10.52471018913972,
"grad_norm": 4.167761325836182,
"learning_rate": 1.4917632702867604e-05,
"loss": 1.3512,
"step": 34500
},
{
"epoch": 10.677242220866383,
"grad_norm": 3.829911470413208,
"learning_rate": 1.4409192597112062e-05,
"loss": 1.3453,
"step": 35000
},
{
"epoch": 10.829774252593044,
"grad_norm": 4.00424861907959,
"learning_rate": 1.390075249135652e-05,
"loss": 1.3308,
"step": 35500
},
{
"epoch": 10.982306284319707,
"grad_norm": 4.3669657707214355,
"learning_rate": 1.3392312385600977e-05,
"loss": 1.3781,
"step": 36000
},
{
"epoch": 11.0,
"eval_loss": 2.769470453262329,
"eval_rouge1": 28.0814,
"eval_rouge2": 12.3353,
"eval_rougeL": 24.7091,
"eval_runtime": 745.2741,
"eval_samples_per_second": 5.866,
"eval_steps_per_second": 0.588,
"step": 36058
}
],
"logging_steps": 500,
"max_steps": 49170,
"num_input_tokens_seen": 0,
"num_train_epochs": 15,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.8722380106542694e+17,
"train_batch_size": 10,
"trial_name": null,
"trial_params": null
}
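
The trainer state above can be inspected offline with nothing but the standard library. The following is a minimal sketch, not part of the checkpoint itself: it assumes the file is available locally at the path recorded in "best_model_checkpoint", and it simply reports the best metric (which here matches the epoch-11 eval_rouge1 value) plus a per-epoch summary of the eval entries in "log_history".

import json

# Assumed local path: the directory recorded in "best_model_checkpoint" above.
STATE_PATH = "/content/drive/MyDrive/ArabartModel/checkpoint-36058/trainer_state.json"

with open(STATE_PATH, encoding="utf-8") as f:
    state = json.load(f)

# Top-level summary fields written by the Hugging Face Trainer.
print("best_metric:", state["best_metric"])            # 28.0814, matches epoch-11 eval_rouge1
print("best checkpoint:", state["best_model_checkpoint"])
print("global_step:", state["global_step"], "of", state["max_steps"])

# Per-epoch evaluation records are the log_history entries that carry eval_* keys.
evals = [e for e in state["log_history"] if "eval_loss" in e]
print(f"{'epoch':>6} {'step':>6} {'eval_loss':>9} {'rouge1':>7} {'rouge2':>7} {'rougeL':>7}")
for e in evals:
    print(f"{e['epoch']:>6.1f} {e['step']:>6d} {e['eval_loss']:>9.3f} "
          f"{e['eval_rouge1']:>7.2f} {e['eval_rouge2']:>7.2f} {e['eval_rougeL']:>7.2f}")

Running this against the JSON above would also make the resume visible: the epoch counter drops back from 10.0 to about 4.14 at step 15500, so the later eval rows come from a continued run with a different schedule.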