GRS-Constrained-Paraphrasing-Bart / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.807554874936192,
"global_step": 5500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"eval_gen_len": 11.1391,
"eval_loss": 1.4340903759002686,
"eval_rouge1": 23.6512,
"eval_rouge2": 14.437,
"eval_rougeL": 21.5776,
"eval_rougeLsum": 21.9318,
"eval_runtime": 142.6631,
"eval_samples_per_second": 35.286,
"eval_steps_per_second": 1.472,
"step": 100
},
{
"epoch": 0.1,
"eval_gen_len": 9.006,
"eval_loss": 1.498289704322815,
"eval_rouge1": 15.3475,
"eval_rouge2": 9.3183,
"eval_rougeL": 14.1923,
"eval_rougeLsum": 14.233,
"eval_runtime": 142.2039,
"eval_samples_per_second": 35.4,
"eval_steps_per_second": 1.477,
"step": 200
},
{
"epoch": 0.15,
"eval_gen_len": 5.8882,
"eval_loss": 1.4406640529632568,
"eval_rouge1": 4.4607,
"eval_rouge2": 2.5402,
"eval_rougeL": 4.0061,
"eval_rougeLsum": 4.0532,
"eval_runtime": 139.4058,
"eval_samples_per_second": 36.11,
"eval_steps_per_second": 1.506,
"step": 300
},
{
"epoch": 0.2,
"eval_gen_len": 8.2084,
"eval_loss": 1.4275349378585815,
"eval_rouge1": 11.9263,
"eval_rouge2": 6.9699,
"eval_rougeL": 11.0646,
"eval_rougeLsum": 11.1028,
"eval_runtime": 141.9092,
"eval_samples_per_second": 35.473,
"eval_steps_per_second": 1.48,
"step": 400
},
{
"epoch": 0.26,
"learning_rate": 1.830866088140208e-05,
"loss": 1.3664,
"step": 500
},
{
"epoch": 0.26,
"eval_gen_len": 8.6985,
"eval_loss": 1.3808070421218872,
"eval_rouge1": 10.8243,
"eval_rouge2": 6.646,
"eval_rougeL": 10.1357,
"eval_rougeLsum": 10.1831,
"eval_runtime": 141.9295,
"eval_samples_per_second": 35.468,
"eval_steps_per_second": 1.48,
"step": 500
},
{
"epoch": 0.27,
"learning_rate": 1.824059894503999e-05,
"loss": 1.2606,
"step": 520
},
{
"epoch": 0.28,
"learning_rate": 1.8172537008677897e-05,
"loss": 1.2438,
"step": 540
},
{
"epoch": 0.29,
"learning_rate": 1.8104475072315808e-05,
"loss": 1.2952,
"step": 560
},
{
"epoch": 0.3,
"learning_rate": 1.803641313595372e-05,
"loss": 1.2639,
"step": 580
},
{
"epoch": 0.31,
"learning_rate": 1.796835119959163e-05,
"loss": 1.2335,
"step": 600
},
{
"epoch": 0.32,
"learning_rate": 1.790028926322954e-05,
"loss": 1.258,
"step": 620
},
{
"epoch": 0.33,
"learning_rate": 1.7832227326867452e-05,
"loss": 1.2501,
"step": 640
},
{
"epoch": 0.34,
"learning_rate": 1.7764165390505363e-05,
"loss": 1.2657,
"step": 660
},
{
"epoch": 0.35,
"learning_rate": 1.769610345414327e-05,
"loss": 1.2659,
"step": 680
},
{
"epoch": 0.36,
"learning_rate": 1.762804151778118e-05,
"loss": 1.2469,
"step": 700
},
{
"epoch": 0.37,
"learning_rate": 1.7559979581419092e-05,
"loss": 1.2932,
"step": 720
},
{
"epoch": 0.38,
"learning_rate": 1.7491917645057003e-05,
"loss": 1.2521,
"step": 740
},
{
"epoch": 0.39,
"learning_rate": 1.7423855708694914e-05,
"loss": 1.2671,
"step": 760
},
{
"epoch": 0.4,
"learning_rate": 1.7355793772332825e-05,
"loss": 1.2515,
"step": 780
},
{
"epoch": 0.41,
"learning_rate": 1.7287731835970736e-05,
"loss": 1.2208,
"step": 800
},
{
"epoch": 0.42,
"learning_rate": 1.7219669899608644e-05,
"loss": 1.2493,
"step": 820
},
{
"epoch": 0.43,
"learning_rate": 1.7151607963246555e-05,
"loss": 1.2419,
"step": 840
},
{
"epoch": 0.44,
"learning_rate": 1.7083546026884466e-05,
"loss": 1.2693,
"step": 860
},
{
"epoch": 0.45,
"learning_rate": 1.7015484090522377e-05,
"loss": 1.2141,
"step": 880
},
{
"epoch": 0.46,
"learning_rate": 1.6947422154160288e-05,
"loss": 1.2465,
"step": 900
},
{
"epoch": 0.47,
"learning_rate": 1.68793602177982e-05,
"loss": 1.2586,
"step": 920
},
{
"epoch": 0.48,
"learning_rate": 1.681129828143611e-05,
"loss": 1.2394,
"step": 940
},
{
"epoch": 0.49,
"learning_rate": 1.6743236345074017e-05,
"loss": 1.2733,
"step": 960
},
{
"epoch": 0.5,
"learning_rate": 1.6675174408711928e-05,
"loss": 1.2734,
"step": 980
},
{
"epoch": 0.51,
"learning_rate": 1.660711247234984e-05,
"loss": 1.2333,
"step": 1000
},
{
"epoch": 0.52,
"learning_rate": 1.653905053598775e-05,
"loss": 1.198,
"step": 1020
},
{
"epoch": 0.53,
"learning_rate": 1.647098859962566e-05,
"loss": 1.2297,
"step": 1040
},
{
"epoch": 0.54,
"learning_rate": 1.6402926663263572e-05,
"loss": 1.2702,
"step": 1060
},
{
"epoch": 0.55,
"learning_rate": 1.6334864726901483e-05,
"loss": 1.2799,
"step": 1080
},
{
"epoch": 0.56,
"learning_rate": 1.6266802790539394e-05,
"loss": 1.2275,
"step": 1100
},
{
"epoch": 0.57,
"learning_rate": 1.61987408541773e-05,
"loss": 1.2433,
"step": 1120
},
{
"epoch": 0.58,
"learning_rate": 1.6130678917815213e-05,
"loss": 1.2376,
"step": 1140
},
{
"epoch": 0.59,
"learning_rate": 1.6062616981453123e-05,
"loss": 1.2032,
"step": 1160
},
{
"epoch": 0.6,
"learning_rate": 1.5994555045091034e-05,
"loss": 1.2623,
"step": 1180
},
{
"epoch": 0.61,
"learning_rate": 1.5926493108728945e-05,
"loss": 1.2367,
"step": 1200
},
{
"epoch": 0.62,
"learning_rate": 1.5858431172366856e-05,
"loss": 1.2015,
"step": 1220
},
{
"epoch": 0.63,
"learning_rate": 1.5790369236004764e-05,
"loss": 1.1896,
"step": 1240
},
{
"epoch": 0.64,
"learning_rate": 1.5722307299642675e-05,
"loss": 1.2868,
"step": 1260
},
{
"epoch": 0.65,
"learning_rate": 1.5654245363280586e-05,
"loss": 1.1849,
"step": 1280
},
{
"epoch": 0.66,
"learning_rate": 1.5586183426918497e-05,
"loss": 1.2463,
"step": 1300
},
{
"epoch": 0.67,
"learning_rate": 1.5518121490556408e-05,
"loss": 1.2507,
"step": 1320
},
{
"epoch": 0.68,
"learning_rate": 1.545005955419432e-05,
"loss": 1.2211,
"step": 1340
},
{
"epoch": 0.69,
"learning_rate": 1.538199761783223e-05,
"loss": 1.2082,
"step": 1360
},
{
"epoch": 0.7,
"learning_rate": 1.531393568147014e-05,
"loss": 1.2116,
"step": 1380
},
{
"epoch": 0.71,
"learning_rate": 1.5245873745108048e-05,
"loss": 1.2223,
"step": 1400
},
{
"epoch": 0.72,
"learning_rate": 1.517781180874596e-05,
"loss": 1.2209,
"step": 1420
},
{
"epoch": 0.74,
"learning_rate": 1.510974987238387e-05,
"loss": 1.2513,
"step": 1440
},
{
"epoch": 0.75,
"learning_rate": 1.5041687936021781e-05,
"loss": 1.1825,
"step": 1460
},
{
"epoch": 0.76,
"learning_rate": 1.4977029096477795e-05,
"loss": 1.2369,
"step": 1480
},
{
"epoch": 0.77,
"learning_rate": 1.4908967160115706e-05,
"loss": 1.2623,
"step": 1500
},
{
"epoch": 0.77,
"eval_gen_len": 10.2668,
"eval_loss": 1.3323159217834473,
"eval_rouge1": 5.5,
"eval_rouge2": 3.2122,
"eval_rougeL": 5.0222,
"eval_rougeLsum": 5.0534,
"eval_runtime": 141.5634,
"eval_samples_per_second": 35.56,
"eval_steps_per_second": 1.483,
"step": 1500
},
{
"epoch": 0.78,
"learning_rate": 1.4840905223753617e-05,
"loss": 1.2388,
"step": 1520
},
{
"epoch": 0.79,
"learning_rate": 1.4772843287391528e-05,
"loss": 1.2583,
"step": 1540
},
{
"epoch": 0.8,
"learning_rate": 1.4704781351029439e-05,
"loss": 1.1844,
"step": 1560
},
{
"epoch": 0.81,
"learning_rate": 1.4636719414667348e-05,
"loss": 1.2002,
"step": 1580
},
{
"epoch": 0.82,
"learning_rate": 1.4568657478305259e-05,
"loss": 1.2349,
"step": 1600
},
{
"epoch": 0.83,
"learning_rate": 1.450059554194317e-05,
"loss": 1.2212,
"step": 1620
},
{
"epoch": 0.84,
"learning_rate": 1.443253360558108e-05,
"loss": 1.2169,
"step": 1640
},
{
"epoch": 0.85,
"learning_rate": 1.4364471669218992e-05,
"loss": 1.177,
"step": 1660
},
{
"epoch": 0.86,
"learning_rate": 1.4296409732856901e-05,
"loss": 1.1969,
"step": 1680
},
{
"epoch": 0.87,
"learning_rate": 1.4228347796494812e-05,
"loss": 1.2036,
"step": 1700
},
{
"epoch": 0.88,
"learning_rate": 1.4160285860132721e-05,
"loss": 1.1921,
"step": 1720
},
{
"epoch": 0.89,
"learning_rate": 1.4092223923770632e-05,
"loss": 1.2075,
"step": 1740
},
{
"epoch": 0.9,
"learning_rate": 1.4024161987408542e-05,
"loss": 1.2248,
"step": 1760
},
{
"epoch": 0.91,
"learning_rate": 1.3956100051046453e-05,
"loss": 1.1921,
"step": 1780
},
{
"epoch": 0.92,
"learning_rate": 1.3888038114684363e-05,
"loss": 1.2332,
"step": 1800
},
{
"epoch": 0.93,
"learning_rate": 1.3819976178322274e-05,
"loss": 1.2762,
"step": 1820
},
{
"epoch": 0.94,
"learning_rate": 1.3751914241960185e-05,
"loss": 1.24,
"step": 1840
},
{
"epoch": 0.95,
"learning_rate": 1.3683852305598095e-05,
"loss": 1.2117,
"step": 1860
},
{
"epoch": 0.96,
"learning_rate": 1.3615790369236006e-05,
"loss": 1.2361,
"step": 1880
},
{
"epoch": 0.97,
"learning_rate": 1.3547728432873917e-05,
"loss": 1.2175,
"step": 1900
},
{
"epoch": 0.98,
"learning_rate": 1.3479666496511828e-05,
"loss": 1.1877,
"step": 1920
},
{
"epoch": 0.99,
"learning_rate": 1.3411604560149739e-05,
"loss": 1.2637,
"step": 1940
},
{
"epoch": 1.0,
"learning_rate": 1.3343542623787648e-05,
"loss": 1.1752,
"step": 1960
},
{
"epoch": 1.01,
"learning_rate": 1.3275480687425559e-05,
"loss": 1.0205,
"step": 1980
},
{
"epoch": 1.02,
"learning_rate": 1.320741875106347e-05,
"loss": 1.0218,
"step": 2000
},
{
"epoch": 1.03,
"learning_rate": 1.3139356814701379e-05,
"loss": 1.0558,
"step": 2020
},
{
"epoch": 1.04,
"learning_rate": 1.3071294878339288e-05,
"loss": 1.0654,
"step": 2040
},
{
"epoch": 1.05,
"learning_rate": 1.30032329419772e-05,
"loss": 1.0294,
"step": 2060
},
{
"epoch": 1.06,
"learning_rate": 1.293517100561511e-05,
"loss": 1.0307,
"step": 2080
},
{
"epoch": 1.07,
"learning_rate": 1.2867109069253021e-05,
"loss": 1.0403,
"step": 2100
},
{
"epoch": 1.08,
"learning_rate": 1.2799047132890932e-05,
"loss": 1.0457,
"step": 2120
},
{
"epoch": 1.09,
"learning_rate": 1.2730985196528841e-05,
"loss": 0.9853,
"step": 2140
},
{
"epoch": 1.1,
"learning_rate": 1.2662923260166752e-05,
"loss": 1.0287,
"step": 2160
},
{
"epoch": 1.11,
"learning_rate": 1.2594861323804663e-05,
"loss": 1.0173,
"step": 2180
},
{
"epoch": 1.12,
"learning_rate": 1.2526799387442574e-05,
"loss": 1.0501,
"step": 2200
},
{
"epoch": 1.13,
"learning_rate": 1.2458737451080485e-05,
"loss": 1.0665,
"step": 2220
},
{
"epoch": 1.14,
"learning_rate": 1.2390675514718396e-05,
"loss": 1.0629,
"step": 2240
},
{
"epoch": 1.15,
"learning_rate": 1.2322613578356306e-05,
"loss": 1.0737,
"step": 2260
},
{
"epoch": 1.16,
"learning_rate": 1.2254551641994217e-05,
"loss": 1.0557,
"step": 2280
},
{
"epoch": 1.17,
"learning_rate": 1.2186489705632126e-05,
"loss": 1.0174,
"step": 2300
},
{
"epoch": 1.18,
"learning_rate": 1.2118427769270035e-05,
"loss": 1.0301,
"step": 2320
},
{
"epoch": 1.19,
"learning_rate": 1.2050365832907946e-05,
"loss": 1.0604,
"step": 2340
},
{
"epoch": 1.2,
"learning_rate": 1.1982303896545857e-05,
"loss": 1.0277,
"step": 2360
},
{
"epoch": 1.21,
"learning_rate": 1.1914241960183768e-05,
"loss": 1.0805,
"step": 2380
},
{
"epoch": 1.23,
"learning_rate": 1.1846180023821679e-05,
"loss": 1.0641,
"step": 2400
},
{
"epoch": 1.24,
"learning_rate": 1.1778118087459588e-05,
"loss": 1.0864,
"step": 2420
},
{
"epoch": 1.25,
"learning_rate": 1.17100561510975e-05,
"loss": 1.0462,
"step": 2440
},
{
"epoch": 1.26,
"learning_rate": 1.164199421473541e-05,
"loss": 1.0208,
"step": 2460
},
{
"epoch": 1.27,
"learning_rate": 1.1573932278373321e-05,
"loss": 1.0617,
"step": 2480
},
{
"epoch": 1.28,
"learning_rate": 1.1505870342011232e-05,
"loss": 1.039,
"step": 2500
},
{
"epoch": 1.29,
"learning_rate": 1.1437808405649143e-05,
"loss": 1.046,
"step": 2520
},
{
"epoch": 1.3,
"learning_rate": 1.1369746469287052e-05,
"loss": 1.0336,
"step": 2540
},
{
"epoch": 1.31,
"learning_rate": 1.1301684532924963e-05,
"loss": 1.0181,
"step": 2560
},
{
"epoch": 1.32,
"learning_rate": 1.1233622596562874e-05,
"loss": 1.0551,
"step": 2580
},
{
"epoch": 1.33,
"learning_rate": 1.1165560660200782e-05,
"loss": 1.0149,
"step": 2600
},
{
"epoch": 1.34,
"learning_rate": 1.1097498723838693e-05,
"loss": 1.0448,
"step": 2620
},
{
"epoch": 1.35,
"learning_rate": 1.1029436787476604e-05,
"loss": 1.0477,
"step": 2640
},
{
"epoch": 1.36,
"learning_rate": 1.0961374851114515e-05,
"loss": 1.0601,
"step": 2660
},
{
"epoch": 1.37,
"learning_rate": 1.0893312914752426e-05,
"loss": 1.0558,
"step": 2680
},
{
"epoch": 1.38,
"learning_rate": 1.0825250978390337e-05,
"loss": 1.0861,
"step": 2700
},
{
"epoch": 1.39,
"learning_rate": 1.0757189042028246e-05,
"loss": 1.0411,
"step": 2720
},
{
"epoch": 1.4,
"learning_rate": 1.0689127105666157e-05,
"loss": 1.0597,
"step": 2740
},
{
"epoch": 1.41,
"learning_rate": 1.0621065169304068e-05,
"loss": 1.0235,
"step": 2760
},
{
"epoch": 1.42,
"learning_rate": 1.0553003232941979e-05,
"loss": 1.063,
"step": 2780
},
{
"epoch": 1.43,
"learning_rate": 1.048494129657989e-05,
"loss": 1.0622,
"step": 2800
},
{
"epoch": 1.44,
"learning_rate": 1.0416879360217799e-05,
"loss": 1.0778,
"step": 2820
},
{
"epoch": 1.45,
"learning_rate": 1.034881742385571e-05,
"loss": 1.0465,
"step": 2840
},
{
"epoch": 1.46,
"learning_rate": 1.0280755487493621e-05,
"loss": 1.0644,
"step": 2860
},
{
"epoch": 1.47,
"learning_rate": 1.021269355113153e-05,
"loss": 1.0282,
"step": 2880
},
{
"epoch": 1.48,
"learning_rate": 1.014463161476944e-05,
"loss": 1.0326,
"step": 2900
},
{
"epoch": 1.49,
"learning_rate": 1.007656967840735e-05,
"loss": 1.0591,
"step": 2920
},
{
"epoch": 1.5,
"learning_rate": 1.0008507742045262e-05,
"loss": 1.0269,
"step": 2940
},
{
"epoch": 1.51,
"learning_rate": 9.940445805683172e-06,
"loss": 1.0326,
"step": 2960
},
{
"epoch": 1.52,
"learning_rate": 9.872383869321083e-06,
"loss": 1.0935,
"step": 2980
},
{
"epoch": 1.53,
"learning_rate": 9.804321932958993e-06,
"loss": 1.0214,
"step": 3000
},
{
"epoch": 1.53,
"eval_gen_len": 12.1136,
"eval_loss": 1.3700170516967773,
"eval_rouge1": 6.4308,
"eval_rouge2": 3.9131,
"eval_rougeL": 6.0216,
"eval_rougeLsum": 6.0417,
"eval_runtime": 143.5844,
"eval_samples_per_second": 35.06,
"eval_steps_per_second": 1.463,
"step": 3000
},
{
"epoch": 1.54,
"learning_rate": 9.736259996596904e-06,
"loss": 1.0338,
"step": 3020
},
{
"epoch": 1.55,
"learning_rate": 9.668198060234815e-06,
"loss": 1.046,
"step": 3040
},
{
"epoch": 1.56,
"learning_rate": 9.600136123872726e-06,
"loss": 1.0208,
"step": 3060
},
{
"epoch": 1.57,
"learning_rate": 9.532074187510637e-06,
"loss": 1.0093,
"step": 3080
},
{
"epoch": 1.58,
"learning_rate": 9.464012251148546e-06,
"loss": 1.0358,
"step": 3100
},
{
"epoch": 1.59,
"learning_rate": 9.395950314786457e-06,
"loss": 1.0709,
"step": 3120
},
{
"epoch": 1.6,
"learning_rate": 9.327888378424366e-06,
"loss": 1.0602,
"step": 3140
},
{
"epoch": 1.61,
"learning_rate": 9.259826442062277e-06,
"loss": 1.0062,
"step": 3160
},
{
"epoch": 1.62,
"learning_rate": 9.191764505700188e-06,
"loss": 1.0823,
"step": 3180
},
{
"epoch": 1.63,
"learning_rate": 9.123702569338099e-06,
"loss": 1.0439,
"step": 3200
},
{
"epoch": 1.64,
"learning_rate": 9.05564063297601e-06,
"loss": 1.0454,
"step": 3220
},
{
"epoch": 1.65,
"learning_rate": 8.98757869661392e-06,
"loss": 1.0806,
"step": 3240
},
{
"epoch": 1.66,
"learning_rate": 8.91951676025183e-06,
"loss": 1.0176,
"step": 3260
},
{
"epoch": 1.67,
"learning_rate": 8.85145482388974e-06,
"loss": 1.0799,
"step": 3280
},
{
"epoch": 1.68,
"learning_rate": 8.78339288752765e-06,
"loss": 1.0421,
"step": 3300
},
{
"epoch": 1.69,
"learning_rate": 8.715330951165561e-06,
"loss": 1.0136,
"step": 3320
},
{
"epoch": 1.7,
"learning_rate": 8.647269014803472e-06,
"loss": 0.9866,
"step": 3340
},
{
"epoch": 1.72,
"learning_rate": 8.579207078441383e-06,
"loss": 1.0355,
"step": 3360
},
{
"epoch": 1.73,
"learning_rate": 8.511145142079293e-06,
"loss": 1.0791,
"step": 3380
},
{
"epoch": 1.74,
"learning_rate": 8.443083205717204e-06,
"loss": 1.0524,
"step": 3400
},
{
"epoch": 1.75,
"learning_rate": 8.375021269355113e-06,
"loss": 1.0669,
"step": 3420
},
{
"epoch": 1.76,
"learning_rate": 8.306959332993024e-06,
"loss": 1.007,
"step": 3440
},
{
"epoch": 1.77,
"learning_rate": 8.238897396630935e-06,
"loss": 1.0121,
"step": 3460
},
{
"epoch": 1.78,
"learning_rate": 8.170835460268846e-06,
"loss": 1.0281,
"step": 3480
},
{
"epoch": 1.79,
"learning_rate": 8.102773523906757e-06,
"loss": 1.062,
"step": 3500
},
{
"epoch": 1.8,
"learning_rate": 8.034711587544666e-06,
"loss": 1.0041,
"step": 3520
},
{
"epoch": 1.81,
"learning_rate": 7.966649651182577e-06,
"loss": 1.043,
"step": 3540
},
{
"epoch": 1.82,
"learning_rate": 7.898587714820486e-06,
"loss": 1.0586,
"step": 3560
},
{
"epoch": 1.83,
"learning_rate": 7.830525778458397e-06,
"loss": 1.0733,
"step": 3580
},
{
"epoch": 1.84,
"learning_rate": 7.762463842096308e-06,
"loss": 1.0441,
"step": 3600
},
{
"epoch": 1.85,
"learning_rate": 7.69440190573422e-06,
"loss": 1.0267,
"step": 3620
},
{
"epoch": 1.86,
"learning_rate": 7.626339969372129e-06,
"loss": 1.0521,
"step": 3640
},
{
"epoch": 1.87,
"learning_rate": 7.55827803301004e-06,
"loss": 1.0168,
"step": 3660
},
{
"epoch": 1.88,
"learning_rate": 7.4902160966479495e-06,
"loss": 1.046,
"step": 3680
},
{
"epoch": 1.89,
"learning_rate": 7.4221541602858605e-06,
"loss": 1.0259,
"step": 3700
},
{
"epoch": 1.9,
"learning_rate": 7.3540922239237715e-06,
"loss": 1.0173,
"step": 3720
},
{
"epoch": 1.91,
"learning_rate": 7.286030287561682e-06,
"loss": 1.0507,
"step": 3740
},
{
"epoch": 1.92,
"learning_rate": 7.2179683511995926e-06,
"loss": 1.0145,
"step": 3760
},
{
"epoch": 1.93,
"learning_rate": 7.149906414837503e-06,
"loss": 1.0193,
"step": 3780
},
{
"epoch": 1.94,
"learning_rate": 7.081844478475414e-06,
"loss": 1.0452,
"step": 3800
},
{
"epoch": 1.95,
"learning_rate": 7.013782542113323e-06,
"loss": 1.0494,
"step": 3820
},
{
"epoch": 1.96,
"learning_rate": 6.945720605751234e-06,
"loss": 1.0334,
"step": 3840
},
{
"epoch": 1.97,
"learning_rate": 6.877658669389145e-06,
"loss": 0.9954,
"step": 3860
},
{
"epoch": 1.98,
"learning_rate": 6.809596733027055e-06,
"loss": 1.0286,
"step": 3880
},
{
"epoch": 1.99,
"learning_rate": 6.741534796664966e-06,
"loss": 1.0547,
"step": 3900
},
{
"epoch": 2.0,
"learning_rate": 6.673472860302876e-06,
"loss": 1.0648,
"step": 3920
},
{
"epoch": 2.01,
"learning_rate": 6.605410923940787e-06,
"loss": 0.9251,
"step": 3940
},
{
"epoch": 2.02,
"learning_rate": 6.537348987578696e-06,
"loss": 0.9152,
"step": 3960
},
{
"epoch": 2.03,
"learning_rate": 6.469287051216607e-06,
"loss": 0.9066,
"step": 3980
},
{
"epoch": 2.04,
"learning_rate": 6.401225114854518e-06,
"loss": 0.8861,
"step": 4000
},
{
"epoch": 2.05,
"learning_rate": 6.333163178492428e-06,
"loss": 0.8827,
"step": 4020
},
{
"epoch": 2.06,
"learning_rate": 6.265101242130339e-06,
"loss": 0.9346,
"step": 4040
},
{
"epoch": 2.07,
"learning_rate": 6.1970393057682494e-06,
"loss": 0.9073,
"step": 4060
},
{
"epoch": 2.08,
"learning_rate": 6.12897736940616e-06,
"loss": 0.9338,
"step": 4080
},
{
"epoch": 2.09,
"learning_rate": 6.060915433044071e-06,
"loss": 0.9163,
"step": 4100
},
{
"epoch": 2.1,
"learning_rate": 5.992853496681981e-06,
"loss": 0.9198,
"step": 4120
},
{
"epoch": 2.11,
"learning_rate": 5.924791560319892e-06,
"loss": 0.9399,
"step": 4140
},
{
"epoch": 2.12,
"learning_rate": 5.856729623957802e-06,
"loss": 0.8875,
"step": 4160
},
{
"epoch": 2.13,
"learning_rate": 5.788667687595713e-06,
"loss": 0.926,
"step": 4180
},
{
"epoch": 2.14,
"learning_rate": 5.720605751233624e-06,
"loss": 0.9618,
"step": 4200
},
{
"epoch": 2.15,
"learning_rate": 5.652543814871534e-06,
"loss": 0.9256,
"step": 4220
},
{
"epoch": 2.16,
"learning_rate": 5.587884975327549e-06,
"loss": 0.9011,
"step": 4240
},
{
"epoch": 2.17,
"learning_rate": 5.519823038965459e-06,
"loss": 0.9066,
"step": 4260
},
{
"epoch": 2.18,
"learning_rate": 5.451761102603369e-06,
"loss": 0.8843,
"step": 4280
},
{
"epoch": 2.19,
"learning_rate": 5.383699166241279e-06,
"loss": 0.9288,
"step": 4300
},
{
"epoch": 2.21,
"learning_rate": 5.31563722987919e-06,
"loss": 0.9352,
"step": 4320
},
{
"epoch": 2.22,
"learning_rate": 5.247575293517101e-06,
"loss": 0.937,
"step": 4340
},
{
"epoch": 2.23,
"learning_rate": 5.1795133571550115e-06,
"loss": 0.9036,
"step": 4360
},
{
"epoch": 2.24,
"learning_rate": 5.1114514207929224e-06,
"loss": 0.8833,
"step": 4380
},
{
"epoch": 2.25,
"learning_rate": 5.0433894844308325e-06,
"loss": 0.9567,
"step": 4400
},
{
"epoch": 2.26,
"learning_rate": 4.975327548068743e-06,
"loss": 0.9222,
"step": 4420
},
{
"epoch": 2.27,
"learning_rate": 4.907265611706654e-06,
"loss": 0.9528,
"step": 4440
},
{
"epoch": 2.28,
"learning_rate": 4.839203675344564e-06,
"loss": 0.9287,
"step": 4460
},
{
"epoch": 2.29,
"learning_rate": 4.771141738982475e-06,
"loss": 0.9269,
"step": 4480
},
{
"epoch": 2.3,
"learning_rate": 4.703079802620385e-06,
"loss": 0.8977,
"step": 4500
},
{
"epoch": 2.3,
"eval_gen_len": 12.9261,
"eval_loss": 1.3723982572555542,
"eval_rouge1": 4.2774,
"eval_rouge2": 2.4929,
"eval_rougeL": 3.9376,
"eval_rougeLsum": 3.9429,
"eval_runtime": 142.026,
"eval_samples_per_second": 35.444,
"eval_steps_per_second": 1.479,
"step": 4500
},
{
"epoch": 2.31,
"learning_rate": 4.635017866258296e-06,
"loss": 0.913,
"step": 4520
},
{
"epoch": 2.32,
"learning_rate": 4.566955929896206e-06,
"loss": 0.9012,
"step": 4540
},
{
"epoch": 2.33,
"learning_rate": 4.498893993534116e-06,
"loss": 0.9077,
"step": 4560
},
{
"epoch": 2.34,
"learning_rate": 4.430832057172027e-06,
"loss": 0.9181,
"step": 4580
},
{
"epoch": 2.35,
"learning_rate": 4.362770120809938e-06,
"loss": 0.9234,
"step": 4600
},
{
"epoch": 2.36,
"learning_rate": 4.294708184447848e-06,
"loss": 0.9014,
"step": 4620
},
{
"epoch": 2.37,
"learning_rate": 4.226646248085758e-06,
"loss": 0.8963,
"step": 4640
},
{
"epoch": 2.38,
"learning_rate": 4.158584311723669e-06,
"loss": 0.9097,
"step": 4660
},
{
"epoch": 2.39,
"learning_rate": 4.090522375361579e-06,
"loss": 0.903,
"step": 4680
},
{
"epoch": 2.4,
"learning_rate": 4.0224604389994894e-06,
"loss": 0.8938,
"step": 4700
},
{
"epoch": 2.41,
"learning_rate": 3.9543985026374e-06,
"loss": 0.9029,
"step": 4720
},
{
"epoch": 2.42,
"learning_rate": 3.886336566275311e-06,
"loss": 0.9338,
"step": 4740
},
{
"epoch": 2.43,
"learning_rate": 3.8182746299132215e-06,
"loss": 0.9295,
"step": 4760
},
{
"epoch": 2.44,
"learning_rate": 3.7502126935511316e-06,
"loss": 0.8946,
"step": 4780
},
{
"epoch": 2.45,
"learning_rate": 3.6821507571890426e-06,
"loss": 0.891,
"step": 4800
},
{
"epoch": 2.46,
"learning_rate": 3.6140888208269527e-06,
"loss": 0.9123,
"step": 4820
},
{
"epoch": 2.47,
"learning_rate": 3.5460268844648632e-06,
"loss": 0.9518,
"step": 4840
},
{
"epoch": 2.48,
"learning_rate": 3.477964948102774e-06,
"loss": 0.9378,
"step": 4860
},
{
"epoch": 2.49,
"learning_rate": 3.4099030117406843e-06,
"loss": 0.9089,
"step": 4880
},
{
"epoch": 2.5,
"learning_rate": 3.3418410753785945e-06,
"loss": 0.9241,
"step": 4900
},
{
"epoch": 2.51,
"learning_rate": 3.2737791390165054e-06,
"loss": 0.9281,
"step": 4920
},
{
"epoch": 2.52,
"learning_rate": 3.205717202654416e-06,
"loss": 0.9082,
"step": 4940
},
{
"epoch": 2.53,
"learning_rate": 3.1376552662923265e-06,
"loss": 0.915,
"step": 4960
},
{
"epoch": 2.54,
"learning_rate": 3.0695933299302366e-06,
"loss": 0.882,
"step": 4980
},
{
"epoch": 2.55,
"learning_rate": 3.001531393568147e-06,
"loss": 0.9182,
"step": 5000
},
{
"epoch": 2.56,
"learning_rate": 2.9334694572060577e-06,
"loss": 0.9369,
"step": 5020
},
{
"epoch": 2.57,
"learning_rate": 2.865407520843968e-06,
"loss": 0.9324,
"step": 5040
},
{
"epoch": 2.58,
"learning_rate": 2.797345584481879e-06,
"loss": 0.9069,
"step": 5060
},
{
"epoch": 2.59,
"learning_rate": 2.7292836481197893e-06,
"loss": 0.9079,
"step": 5080
},
{
"epoch": 2.6,
"learning_rate": 2.6612217117577e-06,
"loss": 0.8965,
"step": 5100
},
{
"epoch": 2.61,
"learning_rate": 2.59315977539561e-06,
"loss": 0.9312,
"step": 5120
},
{
"epoch": 2.62,
"learning_rate": 2.5250978390335206e-06,
"loss": 0.9454,
"step": 5140
},
{
"epoch": 2.63,
"learning_rate": 2.4570359026714315e-06,
"loss": 0.9115,
"step": 5160
},
{
"epoch": 2.64,
"learning_rate": 2.3889739663093416e-06,
"loss": 0.9265,
"step": 5180
},
{
"epoch": 2.65,
"learning_rate": 2.320912029947252e-06,
"loss": 0.9381,
"step": 5200
},
{
"epoch": 2.66,
"learning_rate": 2.2528500935851627e-06,
"loss": 0.9334,
"step": 5220
},
{
"epoch": 2.67,
"learning_rate": 2.1847881572230733e-06,
"loss": 0.914,
"step": 5240
},
{
"epoch": 2.69,
"learning_rate": 2.116726220860984e-06,
"loss": 0.8805,
"step": 5260
},
{
"epoch": 2.7,
"learning_rate": 2.048664284498894e-06,
"loss": 0.9417,
"step": 5280
},
{
"epoch": 2.71,
"learning_rate": 1.980602348136805e-06,
"loss": 0.8974,
"step": 5300
},
{
"epoch": 2.72,
"learning_rate": 1.912540411774715e-06,
"loss": 0.9722,
"step": 5320
},
{
"epoch": 2.73,
"learning_rate": 1.8444784754126258e-06,
"loss": 0.9155,
"step": 5340
},
{
"epoch": 2.74,
"learning_rate": 1.7764165390505361e-06,
"loss": 0.9088,
"step": 5360
},
{
"epoch": 2.75,
"learning_rate": 1.7083546026884464e-06,
"loss": 0.894,
"step": 5380
},
{
"epoch": 2.76,
"learning_rate": 1.6402926663263572e-06,
"loss": 0.936,
"step": 5400
},
{
"epoch": 2.77,
"learning_rate": 1.5722307299642675e-06,
"loss": 0.8867,
"step": 5420
},
{
"epoch": 2.78,
"learning_rate": 1.504168793602178e-06,
"loss": 0.9142,
"step": 5440
},
{
"epoch": 2.79,
"learning_rate": 1.4361068572400886e-06,
"loss": 0.9394,
"step": 5460
},
{
"epoch": 2.8,
"learning_rate": 1.3680449208779992e-06,
"loss": 0.9313,
"step": 5480
},
{
"epoch": 2.81,
"learning_rate": 1.2999829845159095e-06,
"loss": 0.9197,
"step": 5500
}
],
"max_steps": 5877,
"num_train_epochs": 3,
"total_flos": 1.5087907085549568e+16,
"trial_name": null,
"trial_params": null
}