{
  "best_metric": 2.0125675201416016,
  "best_model_checkpoint": "/home/datta0/models/lora_final/Qwen2-7B_pct_reverse/checkpoint-8",
  "epoch": 0.9996779388083736,
  "eval_steps": 8,
  "global_step": 388,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0025764895330112722,
      "grad_norm": 5.605652332305908,
      "learning_rate": 3.75e-05,
      "loss": 2.0287,
      "step": 1
    },
    {
      "epoch": 0.010305958132045089,
      "grad_norm": 3.3669660091400146,
      "learning_rate": 0.00015,
      "loss": 2.1016,
      "step": 4
    },
    {
      "epoch": 0.020611916264090178,
      "grad_norm": 3.2068347930908203,
      "learning_rate": 0.0003,
      "loss": 2.1146,
      "step": 8
    },
    {
      "epoch": 0.020611916264090178,
      "eval_loss": 2.0125675201416016,
      "eval_runtime": 10.2437,
      "eval_samples_per_second": 23.917,
      "eval_steps_per_second": 3.026,
      "step": 8
    },
    {
      "epoch": 0.030917874396135265,
      "grad_norm": 4.330184459686279,
      "learning_rate": 0.0002999179886011389,
      "loss": 2.0531,
      "step": 12
    },
    {
      "epoch": 0.041223832528180356,
      "grad_norm": 3.5907206535339355,
      "learning_rate": 0.00029967204408281613,
      "loss": 2.1241,
      "step": 16
    },
    {
      "epoch": 0.041223832528180356,
      "eval_loss": 2.0399129390716553,
      "eval_runtime": 10.2349,
      "eval_samples_per_second": 23.938,
      "eval_steps_per_second": 3.029,
      "step": 16
    },
    {
      "epoch": 0.05152979066022544,
      "grad_norm": 4.796765327453613,
      "learning_rate": 0.0002992624353817517,
      "loss": 2.0772,
      "step": 20
    },
    {
      "epoch": 0.06183574879227053,
      "grad_norm": 5.596073627471924,
      "learning_rate": 0.00029868961039904624,
      "loss": 2.1297,
      "step": 24
    },
    {
      "epoch": 0.06183574879227053,
      "eval_loss": 2.0522220134735107,
      "eval_runtime": 10.2415,
      "eval_samples_per_second": 23.922,
      "eval_steps_per_second": 3.027,
      "step": 24
    },
    {
      "epoch": 0.07214170692431562,
      "grad_norm": 4.186759948730469,
      "learning_rate": 0.00029795419551040833,
      "loss": 2.1422,
      "step": 28
    },
    {
      "epoch": 0.08244766505636071,
      "grad_norm": 3.64916729927063,
      "learning_rate": 0.0002970569948812214,
      "loss": 2.0751,
      "step": 32
    },
    {
      "epoch": 0.08244766505636071,
      "eval_loss": 2.0552845001220703,
      "eval_runtime": 10.2352,
      "eval_samples_per_second": 23.937,
      "eval_steps_per_second": 3.029,
      "step": 32
    },
    {
      "epoch": 0.0927536231884058,
      "grad_norm": 3.8289270401000977,
      "learning_rate": 0.0002959989895872009,
      "loss": 2.1251,
      "step": 36
    },
    {
      "epoch": 0.10305958132045089,
      "grad_norm": 2.9287831783294678,
      "learning_rate": 0.0002947813365416023,
      "loss": 2.1182,
      "step": 40
    },
    {
      "epoch": 0.10305958132045089,
      "eval_loss": 2.06056547164917,
      "eval_runtime": 10.2327,
      "eval_samples_per_second": 23.943,
      "eval_steps_per_second": 3.029,
      "step": 40
    },
    {
      "epoch": 0.11336553945249597,
      "grad_norm": 2.9628751277923584,
      "learning_rate": 0.0002934053672301536,
      "loss": 2.06,
      "step": 44
    },
    {
      "epoch": 0.12367149758454106,
      "grad_norm": 4.328015327453613,
      "learning_rate": 0.00029187258625509513,
      "loss": 2.1169,
      "step": 48
    },
    {
      "epoch": 0.12367149758454106,
      "eval_loss": 2.0665557384490967,
      "eval_runtime": 10.2388,
      "eval_samples_per_second": 23.929,
      "eval_steps_per_second": 3.028,
      "step": 48
    },
    {
      "epoch": 0.13397745571658615,
      "grad_norm": 3.2509281635284424,
      "learning_rate": 0.0002901846696899191,
      "loss": 2.084,
      "step": 52
    },
    {
      "epoch": 0.14428341384863125,
      "grad_norm": 3.328118085861206,
      "learning_rate": 0.0002883434632466077,
      "loss": 2.1253,
      "step": 56
    },
    {
      "epoch": 0.14428341384863125,
      "eval_loss": 2.0729715824127197,
      "eval_runtime": 10.2357,
      "eval_samples_per_second": 23.936,
      "eval_steps_per_second": 3.029,
      "step": 56
    },
    {
      "epoch": 0.15458937198067632,
      "grad_norm": 3.165828227996826,
      "learning_rate": 0.00028635098025737434,
      "loss": 2.1193,
      "step": 60
    },
    {
      "epoch": 0.16489533011272142,
      "grad_norm": 2.8887364864349365,
      "learning_rate": 0.0002842093994731145,
      "loss": 2.1136,
      "step": 64
    },
    {
      "epoch": 0.16489533011272142,
      "eval_loss": 2.071445941925049,
      "eval_runtime": 10.2149,
      "eval_samples_per_second": 23.985,
      "eval_steps_per_second": 3.035,
      "step": 64
    },
    {
      "epoch": 0.1752012882447665,
      "grad_norm": 2.8911757469177246,
      "learning_rate": 0.00028192106268097334,
      "loss": 2.1775,
      "step": 68
    },
    {
      "epoch": 0.1855072463768116,
      "grad_norm": 4.058267116546631,
      "learning_rate": 0.0002794884721436361,
      "loss": 2.127,
      "step": 72
    },
    {
      "epoch": 0.1855072463768116,
      "eval_loss": 2.083888292312622,
      "eval_runtime": 10.2223,
      "eval_samples_per_second": 23.967,
      "eval_steps_per_second": 3.033,
      "step": 72
    },
    {
      "epoch": 0.19581320450885667,
      "grad_norm": 3.4438698291778564,
      "learning_rate": 0.0002769142878631403,
      "loss": 2.1427,
      "step": 76
    },
    {
      "epoch": 0.20611916264090177,
      "grad_norm": 2.7667691707611084,
      "learning_rate": 0.000274201324672203,
      "loss": 2.1835,
      "step": 80
    },
    {
      "epoch": 0.20611916264090177,
      "eval_loss": 2.080258369445801,
      "eval_runtime": 10.2065,
      "eval_samples_per_second": 24.004,
      "eval_steps_per_second": 3.037,
      "step": 80
    },
    {
      "epoch": 0.21642512077294687,
      "grad_norm": 3.604095220565796,
      "learning_rate": 0.0002713525491562421,
      "loss": 2.2259,
      "step": 84
    },
    {
      "epoch": 0.22673107890499195,
      "grad_norm": 3.442725896835327,
      "learning_rate": 0.00026837107640945905,
      "loss": 2.1556,
      "step": 88
    },
    {
      "epoch": 0.22673107890499195,
      "eval_loss": 2.090332269668579,
      "eval_runtime": 10.1927,
      "eval_samples_per_second": 24.037,
      "eval_steps_per_second": 3.041,
      "step": 88
    },
    {
      "epoch": 0.23703703703703705,
      "grad_norm": 2.6903796195983887,
      "learning_rate": 0.00026526016662852886,
      "loss": 2.1015,
      "step": 92
    },
    {
      "epoch": 0.24734299516908212,
      "grad_norm": 3.1680166721343994,
      "learning_rate": 0.0002620232215476231,
      "loss": 2.1396,
      "step": 96
    },
    {
      "epoch": 0.24734299516908212,
      "eval_loss": 2.0886850357055664,
      "eval_runtime": 10.1623,
      "eval_samples_per_second": 24.109,
      "eval_steps_per_second": 3.05,
      "step": 96
    },
    {
      "epoch": 0.2576489533011272,
      "grad_norm": 2.664879560470581,
      "learning_rate": 0.00025866378071866334,
      "loss": 2.1838,
      "step": 100
    },
    {
      "epoch": 0.2679549114331723,
      "grad_norm": 3.213966131210327,
      "learning_rate": 0.00025518551764087326,
      "loss": 2.1656,
      "step": 104
    },
    {
      "epoch": 0.2679549114331723,
      "eval_loss": 2.092794418334961,
      "eval_runtime": 10.1287,
      "eval_samples_per_second": 24.189,
      "eval_steps_per_second": 3.061,
      "step": 104
    },
    {
      "epoch": 0.2782608695652174,
      "grad_norm": 3.1961517333984375,
      "learning_rate": 0.00025159223574386114,
      "loss": 2.1535,
      "step": 108
    },
    {
      "epoch": 0.2885668276972625,
      "grad_norm": 2.4013512134552,
      "learning_rate": 0.00024788786422862526,
      "loss": 2.0821,
      "step": 112
    },
    {
      "epoch": 0.2885668276972625,
      "eval_loss": 2.0880165100097656,
      "eval_runtime": 32.1246,
      "eval_samples_per_second": 7.627,
      "eval_steps_per_second": 0.965,
      "step": 112
    },
    {
      "epoch": 0.29887278582930754,
      "grad_norm": 2.3859596252441406,
      "learning_rate": 0.00024407645377103054,
      "loss": 2.1672,
      "step": 116
    },
    {
      "epoch": 0.30917874396135264,
      "grad_norm": 2.644300699234009,
      "learning_rate": 0.00024016217209245374,
      "loss": 2.1287,
      "step": 120
    },
    {
      "epoch": 0.30917874396135264,
      "eval_loss": 2.092282772064209,
      "eval_runtime": 32.2089,
      "eval_samples_per_second": 7.607,
      "eval_steps_per_second": 0.962,
      "step": 120
    },
    {
      "epoch": 0.31948470209339774,
      "grad_norm": 2.3782858848571777,
      "learning_rate": 0.0002361492994024415,
      "loss": 2.1545,
      "step": 124
    },
    {
      "epoch": 0.32979066022544284,
      "grad_norm": 2.4614486694335938,
      "learning_rate": 0.00023204222371836405,
      "loss": 2.1298,
      "step": 128
    },
    {
      "epoch": 0.32979066022544284,
      "eval_loss": 2.096928834915161,
      "eval_runtime": 32.3953,
      "eval_samples_per_second": 7.563,
      "eval_steps_per_second": 0.957,
      "step": 128
    },
    {
      "epoch": 0.34009661835748795,
      "grad_norm": 2.94144606590271,
      "learning_rate": 0.00022784543606718227,
      "loss": 2.1464,
      "step": 132
    },
    {
      "epoch": 0.350402576489533,
      "grad_norm": 2.5649967193603516,
      "learning_rate": 0.0002235635255745762,
      "loss": 2.1317,
      "step": 136
    },
    {
      "epoch": 0.350402576489533,
      "eval_loss": 2.096112012863159,
      "eval_runtime": 32.6415,
      "eval_samples_per_second": 7.506,
      "eval_steps_per_second": 0.95,
      "step": 136
    },
    {
      "epoch": 0.3607085346215781,
      "grad_norm": 2.5300047397613525,
      "learning_rate": 0.00021920117444680317,
      "loss": 2.107,
      "step": 140
    },
    {
      "epoch": 0.3710144927536232,
      "grad_norm": 2.83783221244812,
      "learning_rate": 0.0002147631528507739,
      "loss": 2.1325,
      "step": 144
    },
    {
      "epoch": 0.3710144927536232,
      "eval_loss": 2.0906081199645996,
      "eval_runtime": 32.902,
      "eval_samples_per_second": 7.446,
      "eval_steps_per_second": 0.942,
      "step": 144
    },
    {
      "epoch": 0.3813204508856683,
      "grad_norm": 3.4615397453308105,
      "learning_rate": 0.0002102543136979454,
      "loss": 2.1376,
      "step": 148
    },
    {
      "epoch": 0.39162640901771334,
      "grad_norm": 2.862887144088745,
      "learning_rate": 0.0002056795873377331,
      "loss": 2.1398,
      "step": 152
    },
    {
      "epoch": 0.39162640901771334,
      "eval_loss": 2.090609312057495,
      "eval_runtime": 32.9273,
      "eval_samples_per_second": 7.441,
      "eval_steps_per_second": 0.941,
      "step": 152
    },
    {
      "epoch": 0.40193236714975844,
      "grad_norm": 2.8273372650146484,
      "learning_rate": 0.00020104397616624645,
      "loss": 2.1514,
      "step": 156
    },
    {
      "epoch": 0.41223832528180354,
      "grad_norm": 2.578927516937256,
      "learning_rate": 0.0001963525491562421,
      "loss": 2.1569,
      "step": 160
    },
    {
      "epoch": 0.41223832528180354,
      "eval_loss": 2.088578701019287,
      "eval_runtime": 33.0266,
      "eval_samples_per_second": 7.418,
      "eval_steps_per_second": 0.939,
      "step": 160
    },
    {
      "epoch": 0.42254428341384864,
      "grad_norm": 2.9263362884521484,
      "learning_rate": 0.00019161043631427666,
      "loss": 2.1761,
      "step": 164
    },
    {
      "epoch": 0.43285024154589374,
      "grad_norm": 2.4806456565856934,
      "learning_rate": 0.00018682282307111987,
      "loss": 2.195,
      "step": 168
    },
    {
      "epoch": 0.43285024154589374,
      "eval_loss": 2.086169481277466,
      "eval_runtime": 10.2506,
      "eval_samples_per_second": 23.901,
      "eval_steps_per_second": 3.024,
      "step": 168
    },
    {
      "epoch": 0.4431561996779388,
      "grad_norm": 2.7219581604003906,
      "learning_rate": 0.00018199494461156203,
      "loss": 2.1314,
      "step": 172
    },
    {
      "epoch": 0.4534621578099839,
      "grad_norm": 3.6275038719177246,
      "learning_rate": 0.00017713208014981648,
      "loss": 2.1038,
      "step": 176
    },
    {
      "epoch": 0.4534621578099839,
      "eval_loss": 2.0899009704589844,
      "eval_runtime": 10.2299,
      "eval_samples_per_second": 23.949,
      "eval_steps_per_second": 3.03,
      "step": 176
    },
    {
      "epoch": 0.463768115942029,
      "grad_norm": 3.646092176437378,
      "learning_rate": 0.00017223954715677627,
      "loss": 2.1985,
      "step": 180
    },
    {
      "epoch": 0.4740740740740741,
      "grad_norm": 3.2789456844329834,
      "learning_rate": 0.00016732269554543794,
      "loss": 2.1159,
      "step": 184
    },
    {
      "epoch": 0.4740740740740741,
      "eval_loss": 2.0844767093658447,
      "eval_runtime": 10.2405,
      "eval_samples_per_second": 23.924,
      "eval_steps_per_second": 3.027,
      "step": 184
    },
    {
      "epoch": 0.48438003220611914,
      "grad_norm": 2.5345401763916016,
      "learning_rate": 0.00016238690182084986,
      "loss": 2.1215,
      "step": 188
    },
    {
      "epoch": 0.49468599033816424,
      "grad_norm": 2.662414073944092,
      "learning_rate": 0.00015743756320098332,
      "loss": 2.1605,
      "step": 192
    },
    {
      "epoch": 0.49468599033816424,
      "eval_loss": 2.080495595932007,
      "eval_runtime": 10.2407,
      "eval_samples_per_second": 23.924,
      "eval_steps_per_second": 3.027,
      "step": 192
    },
    {
      "epoch": 0.5049919484702093,
      "grad_norm": 3.060046672821045,
      "learning_rate": 0.00015248009171495378,
      "loss": 2.0823,
      "step": 196
    },
    {
      "epoch": 0.5152979066022544,
      "grad_norm": 2.4873805046081543,
      "learning_rate": 0.00014751990828504622,
      "loss": 2.0894,
      "step": 200
    },
    {
      "epoch": 0.5152979066022544,
      "eval_loss": 2.0765280723571777,
      "eval_runtime": 10.227,
      "eval_samples_per_second": 23.956,
      "eval_steps_per_second": 3.031,
      "step": 200
    },
    {
      "epoch": 0.5256038647342995,
      "grad_norm": 2.515195608139038,
      "learning_rate": 0.00014256243679901663,
      "loss": 2.1746,
      "step": 204
    },
    {
      "epoch": 0.5359098228663446,
      "grad_norm": 2.422018051147461,
      "learning_rate": 0.00013761309817915014,
      "loss": 2.1368,
      "step": 208
    },
    {
      "epoch": 0.5359098228663446,
      "eval_loss": 2.074774742126465,
      "eval_runtime": 10.2435,
      "eval_samples_per_second": 23.918,
      "eval_steps_per_second": 3.026,
      "step": 208
    },
    {
      "epoch": 0.5462157809983897,
      "grad_norm": 2.5775578022003174,
      "learning_rate": 0.00013267730445456208,
      "loss": 2.1201,
      "step": 212
    },
    {
      "epoch": 0.5565217391304348,
      "grad_norm": 2.872375249862671,
      "learning_rate": 0.00012776045284322368,
      "loss": 2.1626,
      "step": 216
    },
    {
      "epoch": 0.5565217391304348,
      "eval_loss": 2.0714917182922363,
      "eval_runtime": 10.2294,
      "eval_samples_per_second": 23.951,
      "eval_steps_per_second": 3.03,
      "step": 216
    },
    {
      "epoch": 0.5668276972624798,
      "grad_norm": 3.076042652130127,
      "learning_rate": 0.00012286791985018355,
      "loss": 2.1489,
      "step": 220
    },
    {
      "epoch": 0.577133655394525,
      "grad_norm": 2.7333149909973145,
      "learning_rate": 0.00011800505538843798,
      "loss": 2.0765,
      "step": 224
    },
    {
      "epoch": 0.577133655394525,
      "eval_loss": 2.0629920959472656,
      "eval_runtime": 10.2284,
      "eval_samples_per_second": 23.953,
      "eval_steps_per_second": 3.031,
      "step": 224
    },
    {
      "epoch": 0.58743961352657,
      "grad_norm": 2.5191431045532227,
      "learning_rate": 0.00011317717692888012,
      "loss": 2.1448,
      "step": 228
    },
    {
      "epoch": 0.5977455716586151,
      "grad_norm": 2.869346857070923,
      "learning_rate": 0.00010838956368572334,
      "loss": 2.0879,
      "step": 232
    },
    {
      "epoch": 0.5977455716586151,
      "eval_loss": 2.0677056312561035,
      "eval_runtime": 10.2216,
      "eval_samples_per_second": 23.969,
      "eval_steps_per_second": 3.033,
      "step": 232
    },
    {
      "epoch": 0.6080515297906602,
      "grad_norm": 2.3626813888549805,
      "learning_rate": 0.0001036474508437579,
      "loss": 2.1169,
      "step": 236
    },
    {
      "epoch": 0.6183574879227053,
      "grad_norm": 2.478994369506836,
      "learning_rate": 9.895602383375353e-05,
      "loss": 2.0851,
      "step": 240
    },
    {
      "epoch": 0.6183574879227053,
      "eval_loss": 2.0554275512695312,
      "eval_runtime": 10.1901,
      "eval_samples_per_second": 24.043,
      "eval_steps_per_second": 3.042,
      "step": 240
    },
    {
      "epoch": 0.6286634460547504,
      "grad_norm": 2.3692219257354736,
      "learning_rate": 9.432041266226686e-05,
      "loss": 2.0926,
      "step": 244
    },
    {
      "epoch": 0.6389694041867955,
      "grad_norm": 2.496814012527466,
      "learning_rate": 8.97456863020546e-05,
      "loss": 2.0731,
      "step": 248
    },
    {
      "epoch": 0.6389694041867955,
      "eval_loss": 2.054917812347412,
      "eval_runtime": 10.1928,
      "eval_samples_per_second": 24.037,
      "eval_steps_per_second": 3.041,
      "step": 248
    },
    {
      "epoch": 0.6492753623188405,
      "grad_norm": 2.299753427505493,
      "learning_rate": 8.523684714922608e-05,
      "loss": 2.0903,
      "step": 252
    },
    {
      "epoch": 0.6595813204508857,
      "grad_norm": 2.7625904083251953,
      "learning_rate": 8.079882555319684e-05,
      "loss": 2.113,
      "step": 256
    },
    {
      "epoch": 0.6595813204508857,
      "eval_loss": 2.0516955852508545,
      "eval_runtime": 10.1568,
      "eval_samples_per_second": 24.122,
      "eval_steps_per_second": 3.052,
      "step": 256
    },
    {
      "epoch": 0.6698872785829307,
      "grad_norm": 2.4187095165252686,
      "learning_rate": 7.643647442542382e-05,
      "loss": 2.1152,
      "step": 260
    },
    {
      "epoch": 0.6801932367149759,
      "grad_norm": 2.1766324043273926,
      "learning_rate": 7.215456393281776e-05,
      "loss": 2.0796,
      "step": 264
    },
    {
      "epoch": 0.6801932367149759,
      "eval_loss": 2.0484488010406494,
      "eval_runtime": 10.1383,
      "eval_samples_per_second": 24.166,
      "eval_steps_per_second": 3.058,
      "step": 264
    },
    {
      "epoch": 0.6904991948470209,
      "grad_norm": 2.2651991844177246,
      "learning_rate": 6.795777628163599e-05,
      "loss": 2.1202,
      "step": 268
    },
    {
      "epoch": 0.700805152979066,
      "grad_norm": 1.8196227550506592,
      "learning_rate": 6.385070059755846e-05,
      "loss": 2.1406,
      "step": 272
    },
    {
      "epoch": 0.700805152979066,
      "eval_loss": 2.0456597805023193,
      "eval_runtime": 32.4805,
      "eval_samples_per_second": 7.543,
      "eval_steps_per_second": 0.954,
      "step": 272
    },
    {
      "epoch": 0.7111111111111111,
      "grad_norm": 2.1418817043304443,
      "learning_rate": 5.983782790754623e-05,
      "loss": 2.0593,
      "step": 276
    },
    {
      "epoch": 0.7214170692431562,
      "grad_norm": 2.5641324520111084,
      "learning_rate": 5.592354622896944e-05,
      "loss": 2.0454,
      "step": 280
    },
    {
      "epoch": 0.7214170692431562,
      "eval_loss": 2.042100667953491,
      "eval_runtime": 32.5534,
      "eval_samples_per_second": 7.526,
      "eval_steps_per_second": 0.952,
      "step": 280
    },
    {
      "epoch": 0.7317230273752013,
      "grad_norm": 2.242624282836914,
      "learning_rate": 5.211213577137469e-05,
      "loss": 2.1192,
      "step": 284
    },
    {
      "epoch": 0.7420289855072464,
      "grad_norm": 2.441951274871826,
      "learning_rate": 4.840776425613886e-05,
      "loss": 2.1278,
      "step": 288
    },
    {
      "epoch": 0.7420289855072464,
      "eval_loss": 2.037829637527466,
      "eval_runtime": 32.37,
      "eval_samples_per_second": 7.569,
      "eval_steps_per_second": 0.958,
      "step": 288
    },
    {
      "epoch": 0.7523349436392914,
      "grad_norm": 2.0311145782470703,
      "learning_rate": 4.481448235912671e-05,
      "loss": 2.1032,
      "step": 292
    },
    {
      "epoch": 0.7626409017713366,
      "grad_norm": 2.180769205093384,
      "learning_rate": 4.133621928133665e-05,
      "loss": 2.0616,
      "step": 296
    },
    {
      "epoch": 0.7626409017713366,
      "eval_loss": 2.035311222076416,
      "eval_runtime": 32.434,
      "eval_samples_per_second": 7.554,
      "eval_steps_per_second": 0.956,
      "step": 296
    },
    {
      "epoch": 0.7729468599033816,
      "grad_norm": 2.1823863983154297,
      "learning_rate": 3.797677845237696e-05,
      "loss": 2.1213,
      "step": 300
    },
    {
      "epoch": 0.7832528180354267,
      "grad_norm": 2.1388301849365234,
      "learning_rate": 3.473983337147118e-05,
      "loss": 2.0697,
      "step": 304
    },
    {
      "epoch": 0.7832528180354267,
      "eval_loss": 2.033958673477173,
      "eval_runtime": 32.6119,
      "eval_samples_per_second": 7.513,
      "eval_steps_per_second": 0.951,
      "step": 304
    },
    {
      "epoch": 0.7935587761674718,
      "grad_norm": 2.2005107402801514,
      "learning_rate": 3.162892359054098e-05,
      "loss": 2.029,
      "step": 308
    },
    {
      "epoch": 0.8038647342995169,
      "grad_norm": 2.344057321548462,
      "learning_rate": 2.8647450843757897e-05,
      "loss": 2.0557,
      "step": 312
    },
    {
      "epoch": 0.8038647342995169,
      "eval_loss": 2.029961585998535,
      "eval_runtime": 31.6467,
      "eval_samples_per_second": 7.742,
      "eval_steps_per_second": 0.98,
      "step": 312
    },
    {
      "epoch": 0.814170692431562,
      "grad_norm": 2.2549006938934326,
      "learning_rate": 2.5798675327796993e-05,
      "loss": 2.085,
      "step": 316
    },
    {
      "epoch": 0.8244766505636071,
      "grad_norm": 2.0386688709259033,
      "learning_rate": 2.3085712136859668e-05,
      "loss": 2.0954,
      "step": 320
    },
    {
      "epoch": 0.8244766505636071,
      "eval_loss": 2.030390501022339,
      "eval_runtime": 32.6796,
      "eval_samples_per_second": 7.497,
      "eval_steps_per_second": 0.949,
      "step": 320
    },
    {
      "epoch": 0.8347826086956521,
      "grad_norm": 1.9037587642669678,
      "learning_rate": 2.0511527856363912e-05,
      "loss": 2.114,
      "step": 324
    },
    {
      "epoch": 0.8450885668276973,
      "grad_norm": 1.6996440887451172,
      "learning_rate": 1.8078937319026654e-05,
      "loss": 2.094,
      "step": 328
    },
    {
      "epoch": 0.8450885668276973,
      "eval_loss": 2.0296928882598877,
      "eval_runtime": 10.2274,
      "eval_samples_per_second": 23.955,
      "eval_steps_per_second": 3.031,
      "step": 328
    },
    {
      "epoch": 0.8553945249597423,
      "grad_norm": 2.2876534461975098,
      "learning_rate": 1.579060052688548e-05,
      "loss": 2.0518,
      "step": 332
    },
    {
      "epoch": 0.8657004830917875,
      "grad_norm": 2.0373549461364746,
      "learning_rate": 1.3649019742625623e-05,
      "loss": 2.0539,
      "step": 336
    },
    {
      "epoch": 0.8657004830917875,
      "eval_loss": 2.0265724658966064,
      "eval_runtime": 10.2427,
      "eval_samples_per_second": 23.919,
      "eval_steps_per_second": 3.027,
      "step": 336
    },
    {
      "epoch": 0.8760064412238325,
      "grad_norm": 2.0661487579345703,
      "learning_rate": 1.1656536753392287e-05,
      "loss": 2.0917,
      "step": 340
    },
    {
      "epoch": 0.8863123993558776,
      "grad_norm": 2.622814655303955,
      "learning_rate": 9.815330310080887e-06,
      "loss": 2.0866,
      "step": 344
    },
    {
      "epoch": 0.8863123993558776,
      "eval_loss": 2.0249547958374023,
      "eval_runtime": 10.2333,
      "eval_samples_per_second": 23.941,
      "eval_steps_per_second": 3.029,
      "step": 344
    },
    {
      "epoch": 0.8966183574879227,
      "grad_norm": 1.9814549684524536,
      "learning_rate": 8.127413744904804e-06,
      "loss": 2.0342,
      "step": 348
    },
    {
      "epoch": 0.9069243156199678,
      "grad_norm": 2.146690607070923,
      "learning_rate": 6.594632769846353e-06,
      "loss": 2.061,
      "step": 352
    },
    {
      "epoch": 0.9069243156199678,
      "eval_loss": 2.0226657390594482,
      "eval_runtime": 10.24,
      "eval_samples_per_second": 23.926,
      "eval_steps_per_second": 3.027,
      "step": 352
    },
    {
      "epoch": 0.9172302737520129,
      "grad_norm": 2.2478368282318115,
      "learning_rate": 5.218663458397715e-06,
      "loss": 2.1308,
      "step": 356
    },
    {
      "epoch": 0.927536231884058,
      "grad_norm": 1.9740926027297974,
      "learning_rate": 4.001010412799138e-06,
      "loss": 2.126,
      "step": 360
    },
    {
      "epoch": 0.927536231884058,
      "eval_loss": 2.0219550132751465,
      "eval_runtime": 10.2389,
      "eval_samples_per_second": 23.928,
      "eval_steps_per_second": 3.028,
      "step": 360
    },
    {
      "epoch": 0.937842190016103,
      "grad_norm": 2.0188748836517334,
      "learning_rate": 2.9430051187785962e-06,
      "loss": 2.0966,
      "step": 364
    },
    {
      "epoch": 0.9481481481481482,
      "grad_norm": 2.021127939224243,
      "learning_rate": 2.0458044895916513e-06,
      "loss": 2.0616,
      "step": 368
    },
    {
      "epoch": 0.9481481481481482,
      "eval_loss": 2.022151470184326,
      "eval_runtime": 10.2382,
      "eval_samples_per_second": 23.93,
      "eval_steps_per_second": 3.028,
      "step": 368
    },
    {
      "epoch": 0.9584541062801932,
      "grad_norm": 2.2153139114379883,
      "learning_rate": 1.3103896009537207e-06,
      "loss": 2.0186,
      "step": 372
    },
    {
      "epoch": 0.9687600644122383,
      "grad_norm": 2.1602697372436523,
      "learning_rate": 7.375646182482875e-07,
      "loss": 2.106,
      "step": 376
    },
    {
      "epoch": 0.9687600644122383,
      "eval_loss": 2.0219268798828125,
      "eval_runtime": 10.2317,
      "eval_samples_per_second": 23.945,
      "eval_steps_per_second": 3.03,
      "step": 376
    },
    {
      "epoch": 0.9790660225442834,
      "grad_norm": 2.1220812797546387,
      "learning_rate": 3.2795591718381975e-07,
      "loss": 2.0164,
      "step": 380
    },
    {
      "epoch": 0.9893719806763285,
      "grad_norm": 2.2961068153381348,
      "learning_rate": 8.201139886109264e-08,
      "loss": 2.0596,
      "step": 384
    },
    {
      "epoch": 0.9893719806763285,
      "eval_loss": 2.021911859512329,
      "eval_runtime": 10.235,
      "eval_samples_per_second": 23.937,
      "eval_steps_per_second": 3.029,
      "step": 384
    },
    {
      "epoch": 0.9996779388083736,
      "grad_norm": 2.0847766399383545,
      "learning_rate": 0.0,
      "loss": 2.1024,
      "step": 388
    }
  ],
  "logging_steps": 4,
  "max_steps": 388,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 8,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.611644452078223e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}