{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 20.0,
  "eval_steps": 500,
  "global_step": 371900,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.05, "grad_norm": 1.0875967741012573, "learning_rate": 3.125e-06, "loss": 7.5939, "step": 1000 },
    { "epoch": 0.11, "grad_norm": 0.8077093362808228, "learning_rate": 6.25e-06, "loss": 5.8309, "step": 2000 },
    { "epoch": 0.16, "grad_norm": 0.8327828049659729, "learning_rate": 9.375000000000001e-06, "loss": 5.3685, "step": 3000 },
    { "epoch": 0.22, "grad_norm": 0.9203261137008667, "learning_rate": 1.25e-05, "loss": 5.1581, "step": 4000 },
    { "epoch": 0.27, "grad_norm": 0.9855815768241882, "learning_rate": 1.5625e-05, "loss": 5.0109, "step": 5000 },
    { "epoch": 0.32, "grad_norm": 1.1521941423416138, "learning_rate": 1.8750000000000002e-05, "loss": 4.8668, "step": 6000 },
    { "epoch": 0.38, "grad_norm": 1.1727802753448486, "learning_rate": 2.1875e-05, "loss": 4.759, "step": 7000 },
    { "epoch": 0.43, "grad_norm": 1.1063674688339233, "learning_rate": 2.5e-05, "loss": 4.6561, "step": 8000 },
    { "epoch": 0.48, "grad_norm": 1.178877592086792, "learning_rate": 2.8125000000000003e-05, "loss": 4.5671, "step": 9000 },
    { "epoch": 0.54, "grad_norm": 1.0879851579666138, "learning_rate": 3.125e-05, "loss": 4.4872, "step": 10000 },
    { "epoch": 0.59, "grad_norm": 1.1763601303100586, "learning_rate": 3.4371875e-05, "loss": 4.4182, "step": 11000 },
    { "epoch": 0.65, "grad_norm": 1.0679717063903809, "learning_rate": 3.7496875e-05, "loss": 4.3542, "step": 12000 },
    { "epoch": 0.7, "grad_norm": 1.051241159439087, "learning_rate": 4.061875e-05, "loss": 4.2985, "step": 13000 },
    { "epoch": 0.75, "grad_norm": 1.0657079219818115, "learning_rate": 4.374375e-05, "loss": 4.2397, "step": 14000 },
    { "epoch": 0.81, "grad_norm": 1.0396409034729004, "learning_rate": 4.686875e-05, "loss": 4.1923, "step": 15000 },
    { "epoch": 0.86, "grad_norm": 0.9875408411026001, "learning_rate": 4.999375e-05, "loss": 4.1418, "step": 16000 },
    { "epoch": 0.91, "grad_norm": 1.003056526184082, "learning_rate": 5.3115625000000005e-05, "loss": 4.091, "step": 17000 },
    { "epoch": 0.97, "grad_norm": 0.9668901562690735, "learning_rate": 5.62375e-05, "loss": 4.0553, "step": 18000 },
    { "epoch": 1.0, "eval_accuracy": 0.3076893208292497, "eval_loss": 4.2751569747924805, "eval_runtime": 153.8934, "eval_samples_per_second": 376.371, "eval_steps_per_second": 5.887, "step": 18595 },
    { "epoch": 1.02, "grad_norm": 1.0316466093063354, "learning_rate": 5.93625e-05, "loss": 4.0151, "step": 19000 },
    { "epoch": 1.08, "grad_norm": 1.0109944343566895, "learning_rate": 6.24875e-05, "loss": 3.9733, "step": 20000 },
    { "epoch": 1.13, "grad_norm": 1.0080300569534302, "learning_rate": 6.56125e-05, "loss": 3.9328, "step": 21000 },
    { "epoch": 1.18, "grad_norm": 1.0754659175872803, "learning_rate": 6.873125000000001e-05, "loss": 3.8969, "step": 22000 },
    { "epoch": 1.24, "grad_norm": 1.018548607826233, "learning_rate": 7.185625e-05, "loss": 3.8657, "step": 23000 },
    { "epoch": 1.29, "grad_norm": 0.9804821014404297, "learning_rate": 7.4975e-05, "loss": 3.8338, "step": 24000 },
    { "epoch": 1.34, "grad_norm": 1.0100795030593872, "learning_rate": 7.81e-05, "loss": 3.7995, "step": 25000 },
    { "epoch": 1.4, "grad_norm": 0.9405222535133362, "learning_rate": 8.1221875e-05, "loss": 3.773, "step": 26000 },
    { "epoch": 1.45, "grad_norm": 0.9475222826004028, "learning_rate": 8.434687500000001e-05, "loss": 3.751, "step": 27000 },
    { "epoch": 1.51, "grad_norm": 0.890032172203064, "learning_rate": 8.746875e-05, "loss": 3.7279, "step": 28000 },
    { "epoch": 1.56, "grad_norm": 0.9058054685592651, "learning_rate": 9.059375e-05, "loss": 3.7031, "step": 29000 },
    { "epoch": 1.61, "grad_norm": 0.8877508044242859, "learning_rate": 9.3715625e-05, "loss": 3.6828, "step": 30000 },
    { "epoch": 1.67, "grad_norm": 0.8418898582458496, "learning_rate": 9.684062500000001e-05, "loss": 3.6672, "step": 31000 },
    { "epoch": 1.72, "grad_norm": 0.8882458806037903, "learning_rate": 9.9965625e-05, "loss": 3.6463, "step": 32000 },
    { "epoch": 1.77, "grad_norm": 0.8538274168968201, "learning_rate": 9.970903206825538e-05, "loss": 3.6331, "step": 33000 },
    { "epoch": 1.83, "grad_norm": 0.8449269533157349, "learning_rate": 9.941512209473375e-05, "loss": 3.6114, "step": 34000 },
    { "epoch": 1.88, "grad_norm": 0.8731977343559265, "learning_rate": 9.912150632538982e-05, "loss": 3.5938, "step": 35000 },
    { "epoch": 1.94, "grad_norm": 0.8085331916809082, "learning_rate": 9.88273021476905e-05, "loss": 3.5792, "step": 36000 },
    { "epoch": 1.99, "grad_norm": 0.8539865612983704, "learning_rate": 9.853309796999118e-05, "loss": 3.5587, "step": 37000 },
    { "epoch": 2.0, "eval_accuracy": 0.3633328691293394, "eval_loss": 3.7501370906829834, "eval_runtime": 155.089, "eval_samples_per_second": 373.47, "eval_steps_per_second": 5.842, "step": 37190 },
    { "epoch": 2.04, "grad_norm": 0.8294937610626221, "learning_rate": 9.823889379229185e-05, "loss": 3.5228, "step": 38000 },
    { "epoch": 2.1, "grad_norm": 0.7889467477798462, "learning_rate": 9.794468961459253e-05, "loss": 3.5132, "step": 39000 },
    { "epoch": 2.15, "grad_norm": 0.7929906249046326, "learning_rate": 9.765048543689321e-05, "loss": 3.495, "step": 40000 },
    { "epoch": 2.2, "grad_norm": 0.8031061887741089, "learning_rate": 9.735657546337158e-05, "loss": 3.4887, "step": 41000 },
    { "epoch": 2.26, "grad_norm": 0.8079201579093933, "learning_rate": 9.706266548984995e-05, "loss": 3.4837, "step": 42000 },
    { "epoch": 2.31, "grad_norm": 0.8008145689964294, "learning_rate": 9.676846131215064e-05, "loss": 3.471, "step": 43000 },
    { "epoch": 2.37, "grad_norm": 0.7869082689285278, "learning_rate": 9.647425713445132e-05, "loss": 3.4643, "step": 44000 },
    { "epoch": 2.42, "grad_norm": 0.8000088930130005, "learning_rate": 9.618005295675198e-05, "loss": 3.4542, "step": 45000 },
    { "epoch": 2.47, "grad_norm": 0.7834064960479736, "learning_rate": 9.588614298323037e-05, "loss": 3.4491, "step": 46000 },
    { "epoch": 2.53, "grad_norm": 0.7652507424354553, "learning_rate": 9.559193880553105e-05, "loss": 3.4387, "step": 47000 },
    { "epoch": 2.58, "grad_norm": 0.8015010356903076, "learning_rate": 9.529802883200942e-05, "loss": 3.4331, "step": 48000 },
    { "epoch": 2.64, "grad_norm": 0.8380176424980164, "learning_rate": 9.500382465431009e-05, "loss": 3.4261, "step": 49000 },
    { "epoch": 2.69, "grad_norm": 0.7744593024253845, "learning_rate": 9.470962047661077e-05, "loss": 3.4181, "step": 50000 },
    { "epoch": 2.74, "grad_norm": 0.7821061015129089, "learning_rate": 9.441541629891145e-05, "loss": 3.412, "step": 51000 },
    { "epoch": 2.8, "grad_norm": 0.7764970660209656, "learning_rate": 9.412150632538983e-05, "loss": 3.4048, "step": 52000 },
    { "epoch": 2.85, "grad_norm": 0.7456904649734497, "learning_rate": 9.38273021476905e-05, "loss": 3.3998, "step": 53000 },
    { "epoch": 2.9, "grad_norm": 0.7432152032852173, "learning_rate": 9.353339217416887e-05, "loss": 3.394, "step": 54000 },
    { "epoch": 2.96, "grad_norm": 0.7415820956230164, "learning_rate": 9.323948220064726e-05, "loss": 3.3865, "step": 55000 },
    { "epoch": 3.0, "eval_accuracy": 0.3815021224736739, "eval_loss": 3.5883822441101074, "eval_runtime": 154.6982, "eval_samples_per_second": 374.413, "eval_steps_per_second": 5.857, "step": 55785 },
    { "epoch": 3.01, "grad_norm": 0.7807851433753967, "learning_rate": 9.294527802294794e-05, "loss": 3.3755, "step": 56000 },
    { "epoch": 3.07, "grad_norm": 0.7891095876693726, "learning_rate": 9.26510738452486e-05, "loss": 3.3311, "step": 57000 },
    { "epoch": 3.12, "grad_norm": 0.7911957502365112, "learning_rate": 9.235686966754929e-05, "loss": 3.3342, "step": 58000 },
    { "epoch": 3.17, "grad_norm": 0.7487536072731018, "learning_rate": 9.206295969402766e-05, "loss": 3.3276, "step": 59000 },
    { "epoch": 3.23, "grad_norm": 0.7860606908798218, "learning_rate": 9.176875551632832e-05, "loss": 3.3254, "step": 60000 },
    { "epoch": 3.28, "grad_norm": 0.7669396996498108, "learning_rate": 9.147484554280671e-05, "loss": 3.3237, "step": 61000 },
    { "epoch": 3.33, "grad_norm": 0.7570671439170837, "learning_rate": 9.118064136510739e-05, "loss": 3.3181, "step": 62000 },
    { "epoch": 3.39, "grad_norm": 0.764175295829773, "learning_rate": 9.088643718740807e-05, "loss": 3.3194, "step": 63000 },
    { "epoch": 3.44, "grad_norm": 0.723421573638916, "learning_rate": 9.059252721388644e-05, "loss": 3.3182, "step": 64000 },
    { "epoch": 3.5, "grad_norm": 0.7632123231887817, "learning_rate": 9.029861724036481e-05, "loss": 3.3158, "step": 65000 },
    { "epoch": 3.55, "grad_norm": 0.7372169494628906, "learning_rate": 9.00044130626655e-05, "loss": 3.3099, "step": 66000 },
    { "epoch": 3.6, "grad_norm": 0.7337743639945984, "learning_rate": 8.971050308914388e-05, "loss": 3.3064, "step": 67000 },
    { "epoch": 3.66, "grad_norm": 0.7390604019165039, "learning_rate": 8.941629891144455e-05, "loss": 3.3011, "step": 68000 },
    { "epoch": 3.71, "grad_norm": 0.7338905334472656, "learning_rate": 8.912238893792292e-05, "loss": 3.3003, "step": 69000 },
    { "epoch": 3.76, "grad_norm": 0.7679952383041382, "learning_rate": 8.88281847602236e-05, "loss": 3.2998, "step": 70000 },
    { "epoch": 3.82, "grad_norm": 0.7354390025138855, "learning_rate": 8.853398058252428e-05, "loss": 3.2903, "step": 71000 },
    { "epoch": 3.87, "grad_norm": 0.769103467464447, "learning_rate": 8.823977640482495e-05, "loss": 3.2916, "step": 72000 },
    { "epoch": 3.93, "grad_norm": 0.7130835652351379, "learning_rate": 8.794616063548102e-05, "loss": 3.2913, "step": 73000 },
    { "epoch": 3.98, "grad_norm": 0.7574236989021301, "learning_rate": 8.76519564577817e-05, "loss": 3.2906, "step": 74000 },
    { "epoch": 4.0, "eval_accuracy": 0.389078925897793, "eval_loss": 3.505403757095337, "eval_runtime": 154.7175, "eval_samples_per_second": 374.366, "eval_steps_per_second": 5.856, "step": 74380 },
    { "epoch": 4.03, "grad_norm": 0.7582383155822754, "learning_rate": 8.735775228008238e-05, "loss": 3.2494, "step": 75000 },
    { "epoch": 4.09, "grad_norm": 0.7709288597106934, "learning_rate": 8.706354810238306e-05, "loss": 3.2266, "step": 76000 },
    { "epoch": 4.14, "grad_norm": 0.7542001605033875, "learning_rate": 8.676934392468373e-05, "loss": 3.2335, "step": 77000 },
    { "epoch": 4.19, "grad_norm": 0.7459376454353333, "learning_rate": 8.647513974698441e-05, "loss": 3.2389, "step": 78000 },
    { "epoch": 4.25, "grad_norm": 0.7790886163711548, "learning_rate": 8.61812297734628e-05, "loss": 3.2343, "step": 79000 },
    { "epoch": 4.3, "grad_norm": 0.7570230960845947, "learning_rate": 8.588702559576346e-05, "loss": 3.236, "step": 80000 },
    { "epoch": 4.36, "grad_norm": 0.7441847324371338, "learning_rate": 8.559311562224183e-05, "loss": 3.2345, "step": 81000 },
    { "epoch": 4.41, "grad_norm": 0.7472123503684998, "learning_rate": 8.529891144454252e-05, "loss": 3.2338, "step": 82000 },
    { "epoch": 4.46, "grad_norm": 0.8000940680503845, "learning_rate": 8.50050014710209e-05, "loss": 3.2329, "step": 83000 },
    { "epoch": 4.52, "grad_norm": 0.763556957244873, "learning_rate": 8.471079729332157e-05, "loss": 3.2309, "step": 84000 },
    { "epoch": 4.57, "grad_norm": 0.7414514422416687, "learning_rate": 8.441659311562225e-05, "loss": 3.2346, "step": 85000 },
    { "epoch": 4.62, "grad_norm": 0.7634936571121216, "learning_rate": 8.412238893792293e-05, "loss": 3.228, "step": 86000 },
    { "epoch": 4.68, "grad_norm": 0.7772691249847412, "learning_rate": 8.38284789644013e-05, "loss": 3.2234, "step": 87000 },
    { "epoch": 4.73, "grad_norm": 0.7693132758140564, "learning_rate": 8.353427478670197e-05, "loss": 3.2299, "step": 88000 },
    { "epoch": 4.79, "grad_norm": 0.7822285294532776, "learning_rate": 8.324007060900265e-05, "loss": 3.2258, "step": 89000 },
    { "epoch": 4.84, "grad_norm": 0.7286272644996643, "learning_rate": 8.294616063548103e-05, "loss": 3.2252, "step": 90000 },
    { "epoch": 4.89, "grad_norm": 0.7409440279006958, "learning_rate": 8.265195645778171e-05, "loss": 3.2183, "step": 91000 },
    { "epoch": 4.95, "grad_norm": 0.7559691667556763, "learning_rate": 8.235775228008238e-05, "loss": 3.2202, "step": 92000 },
    { "epoch": 5.0, "eval_accuracy": 0.39429668627542264, "eval_loss": 3.4769208431243896, "eval_runtime": 155.051, "eval_samples_per_second": 373.561, "eval_steps_per_second": 5.843, "step": 92975 },
    { "epoch": 5.0, "grad_norm": 0.7476261854171753, "learning_rate": 8.206384230656075e-05, "loss": 3.2131, "step": 93000 },
    { "epoch": 5.06, "grad_norm": 0.7305045127868652, "learning_rate": 8.176993233303914e-05, "loss": 3.1643, "step": 94000 },
    { "epoch": 5.11, "grad_norm": 0.758934497833252, "learning_rate": 8.147572815533982e-05, "loss": 3.1671, "step": 95000 },
    { "epoch": 5.16, "grad_norm": 0.7448575496673584, "learning_rate": 8.118152397764049e-05, "loss": 3.1674, "step": 96000 },
    { "epoch": 5.22, "grad_norm": 0.7979652881622314, "learning_rate": 8.088731979994117e-05, "loss": 3.1695, "step": 97000 },
    { "epoch": 5.27, "grad_norm": 0.7594915628433228, "learning_rate": 8.059311562224185e-05, "loss": 3.1731, "step": 98000 },
    { "epoch": 5.32, "grad_norm": 0.7524967789649963, "learning_rate": 8.02992056487202e-05, "loss": 3.1724, "step": 99000 },
    { "epoch": 5.38, "grad_norm": 0.7075839042663574, "learning_rate": 8.000500147102089e-05, "loss": 3.1744, "step": 100000 },
    { "epoch": 5.43, "grad_norm": 0.7107969522476196, "learning_rate": 7.971109149749927e-05, "loss": 3.1691, "step": 101000 },
    { "epoch": 5.49, "grad_norm": 0.7338487505912781, "learning_rate": 7.941688731979995e-05, "loss": 3.1714, "step": 102000 },
    { "epoch": 5.54, "grad_norm": 0.7563539743423462, "learning_rate": 7.912268314210062e-05, "loss": 3.1702, "step": 103000 },
    { "epoch": 5.59, "grad_norm": 0.7359240055084229, "learning_rate": 7.882877316857899e-05, "loss": 3.1703, "step": 104000 },
    { "epoch": 5.65, "grad_norm": 0.7448987364768982, "learning_rate": 7.853456899087967e-05, "loss": 3.172, "step": 105000 },
    { "epoch": 5.7, "grad_norm": 0.7606428265571594, "learning_rate": 7.824036481318035e-05, "loss": 3.1726, "step": 106000 },
    { "epoch": 5.75, "grad_norm": 0.7235444784164429, "learning_rate": 7.794616063548102e-05, "loss": 3.1733, "step": 107000 },
    { "epoch": 5.81, "grad_norm": 0.7561464905738831, "learning_rate": 7.76522506619594e-05, "loss": 3.1696, "step": 108000 },
    { "epoch": 5.86, "grad_norm": 0.7387487292289734, "learning_rate": 7.735804648426009e-05, "loss": 3.1705, "step": 109000 },
    { "epoch": 5.92, "grad_norm": 0.7437654137611389, "learning_rate": 7.706443071491616e-05, "loss": 3.1642, "step": 110000 },
    { "epoch": 5.97, "grad_norm": 0.7192903161048889, "learning_rate": 7.677022653721683e-05, "loss": 3.1699, "step": 111000 },
    { "epoch": 6.0, "eval_accuracy": 0.3977428802964349, "eval_loss": 3.4425718784332275, "eval_runtime": 154.7206, "eval_samples_per_second": 374.359, "eval_steps_per_second": 5.856, "step": 111570 },
    { "epoch": 6.02, "grad_norm": 0.7664169073104858, "learning_rate": 7.647602235951751e-05, "loss": 3.1431, "step": 112000 },
    { "epoch": 6.08, "grad_norm": 0.7941229343414307, "learning_rate": 7.618181818181819e-05, "loss": 3.1107, "step": 113000 },
    { "epoch": 6.13, "grad_norm": 0.7529132962226868, "learning_rate": 7.588761400411886e-05, "loss": 3.1141, "step": 114000 },
    { "epoch": 6.18, "grad_norm": 0.7258307933807373, "learning_rate": 7.559370403059724e-05, "loss": 3.1197, "step": 115000 },
    { "epoch": 6.24, "grad_norm": 0.7766565680503845, "learning_rate": 7.529949985289791e-05, "loss": 3.1211, "step": 116000 },
    { "epoch": 6.29, "grad_norm": 0.7603769302368164, "learning_rate": 7.5005884083554e-05, "loss": 3.1226, "step": 117000 },
    { "epoch": 6.35, "grad_norm": 0.7358362674713135, "learning_rate": 7.471167990585468e-05, "loss": 3.1224, "step": 118000 },
    { "epoch": 6.4, "grad_norm": 0.7426795363426208, "learning_rate": 7.441747572815534e-05, "loss": 3.1258, "step": 119000 },
    { "epoch": 6.45, "grad_norm": 0.760974645614624, "learning_rate": 7.412327155045603e-05, "loss": 3.1284, "step": 120000 },
    { "epoch": 6.51, "grad_norm": 0.7280532717704773, "learning_rate": 7.38293615769344e-05, "loss": 3.1269, "step": 121000 },
    { "epoch": 6.56, "grad_norm": 0.7262037396430969, "learning_rate": 7.353515739923508e-05, "loss": 3.1293, "step": 122000 },
    { "epoch": 6.61, "grad_norm": 0.7488662600517273, "learning_rate": 7.324124742571345e-05, "loss": 3.1244, "step": 123000 },
    { "epoch": 6.67, "grad_norm": 0.7436386346817017, "learning_rate": 7.294704324801413e-05, "loss": 3.1273, "step": 124000 },
    { "epoch": 6.72, "grad_norm": 0.7284784913063049, "learning_rate": 7.265283907031481e-05, "loss": 3.1301, "step": 125000 },
    { "epoch": 6.78, "grad_norm": 0.7190147638320923, "learning_rate": 7.235863489261548e-05, "loss": 3.1281, "step": 126000 },
    { "epoch": 6.83, "grad_norm": 0.7366654276847839, "learning_rate": 7.206472491909385e-05, "loss": 3.1286, "step": 127000 },
    { "epoch": 6.88, "grad_norm": 0.7567622065544128, "learning_rate": 7.177081494557223e-05, "loss": 3.1262, "step": 128000 },
    { "epoch": 6.94, "grad_norm": 0.7190493941307068, "learning_rate": 7.147661076787291e-05, "loss": 3.1309, "step": 129000 },
    { "epoch": 6.99, "grad_norm": 0.7200164794921875, "learning_rate": 7.11824065901736e-05, "loss": 3.1232, "step": 130000 },
    { "epoch": 7.0, "eval_accuracy": 0.399363026131729, "eval_loss": 3.447763442993164, "eval_runtime": 155.0052, "eval_samples_per_second": 373.671, "eval_steps_per_second": 5.845, "step": 130165 },
    { "epoch": 7.04, "grad_norm": 0.7389765977859497, "learning_rate": 7.088820241247426e-05, "loss": 3.0769, "step": 131000 },
    { "epoch": 7.1, "grad_norm": 0.7703937888145447, "learning_rate": 7.059429243895263e-05, "loss": 3.0708, "step": 132000 },
    { "epoch": 7.15, "grad_norm": 0.7399336099624634, "learning_rate": 7.030008826125331e-05, "loss": 3.0763, "step": 133000 },
    { "epoch": 7.21, "grad_norm": 0.7627936005592346, "learning_rate": 7.000588408355398e-05, "loss": 3.0798, "step": 134000 },
    { "epoch": 7.26, "grad_norm": 0.7476488351821899, "learning_rate": 6.971167990585466e-05, "loss": 3.0854, "step": 135000 },
    { "epoch": 7.31, "grad_norm": 0.7593668103218079, "learning_rate": 6.941776993233305e-05, "loss": 3.0837, "step": 136000 },
    { "epoch": 7.37, "grad_norm": 0.7753123044967651, "learning_rate": 6.912356575463373e-05, "loss": 3.0842, "step": 137000 },
    { "epoch": 7.42, "grad_norm": 0.7626084089279175, "learning_rate": 6.882965578111209e-05, "loss": 3.0883, "step": 138000 },
    { "epoch": 7.48, "grad_norm": 0.7539852857589722, "learning_rate": 6.853545160341277e-05, "loss": 3.0873, "step": 139000 },
    { "epoch": 7.53, "grad_norm": 0.7760306000709534, "learning_rate": 6.824154162989115e-05, "loss": 3.0932, "step": 140000 },
    { "epoch": 7.58, "grad_norm": 0.7804057598114014, "learning_rate": 6.794733745219183e-05, "loss": 3.0908, "step": 141000 },
    { "epoch": 7.64, "grad_norm": 0.7768170833587646, "learning_rate": 6.76531332744925e-05, "loss": 3.0889, "step": 142000 },
    { "epoch": 7.69, "grad_norm": 0.7796506881713867, "learning_rate": 6.735892909679318e-05, "loss": 3.0864, "step": 143000 },
    { "epoch": 7.74, "grad_norm": 0.7837468981742859, "learning_rate": 6.706472491909386e-05, "loss": 3.0948, "step": 144000 },
    { "epoch": 7.8, "grad_norm": 0.7560127377510071, "learning_rate": 6.677081494557223e-05, "loss": 3.0946, "step": 145000 },
    { "epoch": 7.85, "grad_norm": 0.7457463145256042, "learning_rate": 6.64769049720506e-05, "loss": 3.092, "step": 146000 },
    { "epoch": 7.91, "grad_norm": 0.7344046235084534, "learning_rate": 6.618270079435128e-05, "loss": 3.0904, "step": 147000 },
    { "epoch": 7.96, "grad_norm": 0.7419469356536865, "learning_rate": 6.588849661665197e-05, "loss": 3.091, "step": 148000 },
    { "epoch": 8.0, "eval_accuracy": 0.4014489882469057, "eval_loss": 3.42431378364563, "eval_runtime": 154.6897, "eval_samples_per_second": 374.433, "eval_steps_per_second": 5.857, "step": 148760 },
    { "epoch": 8.01, "grad_norm": 0.7680034041404724, "learning_rate": 6.559458664313034e-05, "loss": 3.0783, "step": 149000 },
    { "epoch": 8.07, "grad_norm": 0.7723122239112854, "learning_rate": 6.5300382465431e-05, "loss": 3.0382, "step": 150000 },
    { "epoch": 8.12, "grad_norm": 0.8067519068717957, "learning_rate": 6.500617828773169e-05, "loss": 3.0417, "step": 151000 },
    { "epoch": 8.17, "grad_norm": 0.7817720770835876, "learning_rate": 6.471197411003237e-05, "loss": 3.0428, "step": 152000 },
    { "epoch": 8.23, "grad_norm": 0.785193145275116, "learning_rate": 6.441806413651074e-05, "loss": 3.0471, "step": 153000 },
    { "epoch": 8.28, "grad_norm": 0.7578865885734558, "learning_rate": 6.412385995881142e-05, "loss": 3.049, "step": 154000 },
    { "epoch": 8.34, "grad_norm": 0.75961834192276, "learning_rate": 6.382994998528979e-05, "loss": 3.0465, "step": 155000 },
    { "epoch": 8.39, "grad_norm": 0.7525945901870728, "learning_rate": 6.353574580759047e-05, "loss": 3.0529, "step": 156000 },
    { "epoch": 8.44, "grad_norm": 0.7648568153381348, "learning_rate": 6.324183583406885e-05, "loss": 3.055, "step": 157000 },
    { "epoch": 8.5, "grad_norm": 0.7817012071609497, "learning_rate": 6.294763165636952e-05, "loss": 3.059, "step": 158000 },
    { "epoch": 8.55, "grad_norm": 0.7443410158157349, "learning_rate": 6.26534274786702e-05, "loss": 3.0597, "step": 159000 },
    { "epoch": 8.6, "grad_norm": 0.7928184866905212, "learning_rate": 6.235951750514857e-05, "loss": 3.0572, "step": 160000 },
    { "epoch": 8.66, "grad_norm": 0.7541539669036865, "learning_rate": 6.206531332744924e-05, "loss": 3.0582, "step": 161000 },
    { "epoch": 8.71, "grad_norm": 0.7730678915977478, "learning_rate": 6.177110914974992e-05, "loss": 3.0563, "step": 162000 },
    { "epoch": 8.77, "grad_norm": 0.7646264433860779, "learning_rate": 6.14769049720506e-05, "loss": 3.0607, "step": 163000 },
    { "epoch": 8.82, "grad_norm": 0.7786790728569031, "learning_rate": 6.118270079435127e-05, "loss": 3.0632, "step": 164000 },
    { "epoch": 8.87, "grad_norm": 0.7602713704109192, "learning_rate": 6.0888790820829656e-05, "loss": 3.0601, "step": 165000 },
    { "epoch": 8.93, "grad_norm": 0.7597308158874512, "learning_rate": 6.059458664313034e-05, "loss": 3.0615, "step": 166000 },
    { "epoch": 8.98, "grad_norm": 0.8023492693901062, "learning_rate": 6.030038246543102e-05, "loss": 3.0613, "step": 167000 },
    { "epoch": 9.0, "eval_accuracy": 0.4030179440035626, "eval_loss": 3.4169299602508545, "eval_runtime": 154.797, "eval_samples_per_second": 374.174, "eval_steps_per_second": 5.853, "step": 167355 },
    { "epoch": 9.03, "grad_norm": 0.7707279324531555, "learning_rate": 6.000676669608709e-05, "loss": 3.0235, "step": 168000 },
    { "epoch": 9.09, "grad_norm": 0.8100076913833618, "learning_rate": 5.971256251838776e-05, "loss": 3.0067, "step": 169000 },
    { "epoch": 9.14, "grad_norm": 0.761734127998352, "learning_rate": 5.941835834068844e-05, "loss": 3.0091, "step": 170000 },
    { "epoch": 9.2, "grad_norm": 0.8010926842689514, "learning_rate": 5.912415416298912e-05, "loss": 3.0168, "step": 171000 },
    { "epoch": 9.25, "grad_norm": 0.8410947322845459, "learning_rate": 5.88302441894675e-05, "loss": 3.014, "step": 172000 },
    { "epoch": 9.3, "grad_norm": 0.7763057351112366, "learning_rate": 5.853633421594586e-05, "loss": 3.019, "step": 173000 },
    { "epoch": 9.36, "grad_norm": 0.7941156625747681, "learning_rate": 5.8242130038246544e-05, "loss": 3.0273, "step": 174000 },
    { "epoch": 9.41, "grad_norm": 0.821797251701355, "learning_rate": 5.7947925860547225e-05, "loss": 3.0265, "step": 175000 },
    { "epoch": 9.46, "grad_norm": 0.7570799589157104, "learning_rate": 5.76540158870256e-05, "loss": 3.0298, "step": 176000 },
    { "epoch": 9.52, "grad_norm": 0.7869284152984619, "learning_rate": 5.735981170932627e-05, "loss": 3.0312, "step": 177000 },
    { "epoch": 9.57, "grad_norm": 0.8196205496788025, "learning_rate": 5.706560753162695e-05, "loss": 3.0273, "step": 178000 },
    { "epoch": 9.63, "grad_norm": 0.7756869792938232, "learning_rate": 5.677169755810533e-05, "loss": 3.0253, "step": 179000 },
    { "epoch": 9.68, "grad_norm": 0.8128073811531067, "learning_rate": 5.6477493380406e-05, "loss": 3.0288, "step": 180000 },
    { "epoch": 9.73, "grad_norm": 0.7797793745994568, "learning_rate": 5.618328920270668e-05, "loss": 3.0322, "step": 181000 },
    { "epoch": 9.79, "grad_norm": 0.789199948310852, "learning_rate": 5.588908502500736e-05, "loss": 3.036, "step": 182000 },
    { "epoch": 9.84, "grad_norm": 0.7418550848960876, "learning_rate": 5.5595175051485736e-05, "loss": 3.0368, "step": 183000 },
    { "epoch": 9.9, "grad_norm": 0.8195221424102783, "learning_rate": 5.5300970873786404e-05, "loss": 3.0334, "step": 184000 },
    { "epoch": 9.95, "grad_norm": 0.7761098742485046, "learning_rate": 5.5006766696087085e-05, "loss": 3.0322, "step": 185000 },
    { "epoch": 10.0, "eval_accuracy": 0.4050055566763182, "eval_loss": 3.4141781330108643, "eval_runtime": 154.9579, "eval_samples_per_second": 373.785, "eval_steps_per_second": 5.847, "step": 185950 },
    { "epoch": 10.0, "grad_norm": 0.7680907249450684, "learning_rate": 5.471256251838777e-05, "loss": 3.0339, "step": 186000 },
    { "epoch": 10.06, "grad_norm": 0.7804239392280579, "learning_rate": 5.441894674904384e-05, "loss": 2.9809, "step": 187000 },
    { "epoch": 10.11, "grad_norm": 0.8002775311470032, "learning_rate": 5.4124742571344514e-05, "loss": 2.9844, "step": 188000 },
    { "epoch": 10.16, "grad_norm": 0.8047283291816711, "learning_rate": 5.383083259782289e-05, "loss": 2.9912, "step": 189000 },
    { "epoch": 10.22, "grad_norm": 0.8343998193740845, "learning_rate": 5.353692262430127e-05, "loss": 2.989, "step": 190000 },
    { "epoch": 10.27, "grad_norm": 0.798310399055481, "learning_rate": 5.3242718446601943e-05, "loss": 2.9925, "step": 191000 },
    { "epoch": 10.33, "grad_norm": 0.8054454326629639, "learning_rate": 5.294880847308033e-05, "loss": 2.9986, "step": 192000 },
    { "epoch": 10.38, "grad_norm": 0.810845136642456, "learning_rate": 5.2654604295380995e-05, "loss": 2.9996, "step": 193000 },
    { "epoch": 10.43, "grad_norm": 0.7934587597846985, "learning_rate": 5.2360400117681677e-05, "loss": 3.0, "step": 194000 },
    { "epoch": 10.49, "grad_norm": 0.8145301938056946, "learning_rate": 5.206619593998235e-05, "loss": 2.9977, "step": 195000 },
    { "epoch": 10.54, "grad_norm": 0.7856894135475159, "learning_rate": 5.177228596646072e-05, "loss": 3.0025, "step": 196000 },
    { "epoch": 10.59, "grad_norm": 0.8008938431739807, "learning_rate": 5.14780817887614e-05, "loss": 3.0059, "step": 197000 },
    { "epoch": 10.65, "grad_norm": 0.8000041246414185, "learning_rate": 5.1183877611062084e-05, "loss": 3.0025, "step": 198000 },
    { "epoch": 10.7, "grad_norm": 0.8157011866569519, "learning_rate": 5.088967343336276e-05, "loss": 3.0018, "step": 199000 },
    { "epoch": 10.76, "grad_norm": 0.7584720253944397, "learning_rate": 5.059576345984113e-05, "loss": 3.0051, "step": 200000 },
    { "epoch": 10.81, "grad_norm": 0.7773544192314148, "learning_rate": 5.030155928214181e-05, "loss": 3.0052, "step": 201000 },
    { "epoch": 10.86, "grad_norm": 0.7875356078147888, "learning_rate": 5.000764930862019e-05, "loss": 3.0137, "step": 202000 },
    { "epoch": 10.92, "grad_norm": 0.7761446833610535, "learning_rate": 4.971344513092086e-05, "loss": 3.0143, "step": 203000 },
    { "epoch": 10.97, "grad_norm": 0.8193157315254211, "learning_rate": 4.941924095322154e-05, "loss": 3.0107, "step": 204000 },
    { "epoch": 11.0, "eval_accuracy": 0.40485742790545853, "eval_loss": 3.4051809310913086, "eval_runtime": 154.8665, "eval_samples_per_second": 374.006, "eval_steps_per_second": 5.85, "step": 204545 },
    { "epoch": 11.02, "grad_norm": 0.8065283298492432, "learning_rate": 4.9125330979699914e-05, "loss": 2.9828, "step": 205000 },
    { "epoch": 11.08, "grad_norm": 0.8200777769088745, "learning_rate": 4.8831126802000595e-05, "loss": 2.96, "step": 206000 },
    { "epoch": 11.13, "grad_norm": 0.8049452900886536, "learning_rate": 4.8537216828478965e-05, "loss": 2.9624, "step": 207000 },
    { "epoch": 11.19, "grad_norm": 0.8497307300567627, "learning_rate": 4.824301265077965e-05, "loss": 2.971, "step": 208000 },
    { "epoch": 11.24, "grad_norm": 0.815825343132019, "learning_rate": 4.794880847308032e-05, "loss": 2.9686, "step": 209000 },
    { "epoch": 11.29, "grad_norm": 0.8359244465827942, "learning_rate": 4.7654604295380996e-05, "loss": 2.972, "step": 210000 },
    { "epoch": 11.35, "grad_norm": 0.804949164390564, "learning_rate": 4.736069432185937e-05, "loss": 2.9756, "step": 211000 },
    { "epoch": 11.4, "grad_norm": 0.8300769925117493, "learning_rate": 4.706678434833775e-05, "loss": 2.9777, "step": 212000 },
    { "epoch": 11.45, "grad_norm": 0.8034248352050781, "learning_rate": 4.6772580170638425e-05, "loss": 2.9736, "step": 213000 },
    { "epoch": 11.51, "grad_norm": 0.7853664755821228, "learning_rate": 4.64783759929391e-05, "loss": 2.9769, "step": 214000 },
    { "epoch": 11.56, "grad_norm": 0.8291251063346863, "learning_rate": 4.618417181523978e-05, "loss": 2.9748, "step": 215000 },
    { "epoch": 11.62, "grad_norm": 0.8467937111854553, "learning_rate": 4.5890556045895854e-05, "loss": 2.9849, "step": 216000 },
    { "epoch": 11.67, "grad_norm": 0.8191725015640259, "learning_rate": 4.559635186819653e-05, "loss": 2.983, "step": 217000 },
    { "epoch": 11.72, "grad_norm": 0.7857011556625366, "learning_rate": 4.530214769049721e-05, "loss": 2.9797, "step": 218000 },
    { "epoch": 11.78, "grad_norm": 0.8048397302627563, "learning_rate": 4.500823771697558e-05, "loss": 2.9844, "step": 219000 },
    { "epoch": 11.83, "grad_norm": 0.816554069519043, "learning_rate": 4.471403353927626e-05, "loss": 2.986, "step": 220000 },
    { "epoch": 11.88, "grad_norm": 0.8050833940505981, "learning_rate": 4.442012356575463e-05, "loss": 2.9887, "step": 221000 },
    { "epoch": 11.94, "grad_norm": 0.8382700681686401, "learning_rate": 4.412621359223301e-05, "loss": 2.9892, "step": 222000 },
    { "epoch": 11.99, "grad_norm": 0.8233689665794373, "learning_rate": 4.3832009414533684e-05, "loss": 2.9844, "step": 223000 },
    { "epoch": 12.0, "eval_accuracy": 0.4053066510758616, "eval_loss": 3.4128060340881348, "eval_runtime": 155.24, "eval_samples_per_second": 373.106, "eval_steps_per_second": 5.836, "step": 223140 },
    { "epoch": 12.05, "grad_norm": 0.8400596976280212, "learning_rate": 4.353809944101207e-05, "loss": 2.9383, "step": 224000 },
    { "epoch": 12.1, "grad_norm": 0.8800355792045593, "learning_rate": 4.324389526331274e-05, "loss": 2.9441, "step": 225000 },
    { "epoch": 12.15, "grad_norm": 0.8398945927619934, "learning_rate": 4.2949691085613417e-05, "loss": 2.9484, "step": 226000 },
    { "epoch": 12.21, "grad_norm": 0.8549145460128784, "learning_rate": 4.265548690791409e-05, "loss": 2.9443, "step": 227000 },
    { "epoch": 12.26, "grad_norm": 0.8680046796798706, "learning_rate": 4.2361282730214765e-05, "loss": 2.9485, "step": 228000 },
    { "epoch": 12.32, "grad_norm": 0.8534417748451233, "learning_rate": 4.206737275669315e-05, "loss": 2.9479, "step": 229000 },
    { "epoch": 12.37, "grad_norm": 0.8083067536354065, "learning_rate": 4.1773168578993824e-05, "loss": 2.9497, "step": 230000 },
    { "epoch": 12.42, "grad_norm": 0.810694694519043, "learning_rate": 4.14789644012945e-05, "loss": 2.959, "step": 231000 },
    { "epoch": 12.48, "grad_norm": 0.8079540133476257, "learning_rate": 4.1185054427772876e-05, "loss": 2.9501, "step": 232000 },
    { "epoch": 12.53, "grad_norm": 0.8427674174308777, "learning_rate": 4.089085025007355e-05, "loss": 2.9553, "step": 233000 },
    { "epoch": 12.58, "grad_norm": 0.8193045854568481, "learning_rate": 4.0596646072374225e-05, "loss": 2.9604, "step": 234000 },
    { "epoch": 12.64, "grad_norm": 0.8305484652519226, "learning_rate": 4.030273609885261e-05, "loss": 2.9623, "step": 235000 },
    { "epoch": 12.69, "grad_norm": 0.8170352578163147, "learning_rate": 4.0008826125330986e-05, "loss": 2.9616, "step": 236000 },
    { "epoch": 12.75, "grad_norm": 0.8222061395645142, "learning_rate": 3.971462194763166e-05, "loss": 2.9659, "step": 237000 },
    { "epoch": 12.8, "grad_norm": 0.8384312987327576, "learning_rate": 3.9420417769932335e-05, "loss": 2.9645, "step": 238000 },
    { "epoch": 12.85, "grad_norm": 0.8427971005439758, "learning_rate": 3.9126213592233016e-05, "loss": 2.9628, "step": 239000 },
    { "epoch": 12.91, "grad_norm": 0.8048679232597351, "learning_rate": 3.883200941453369e-05, "loss": 2.9675, "step": 240000 },
    { "epoch": 12.96, "grad_norm": 0.8325705528259277, "learning_rate": 3.853809944101207e-05, "loss": 2.9671, "step": 241000 },
    { "epoch": 13.0, "eval_accuracy": 0.4061510186590524, "eval_loss": 3.414961576461792, "eval_runtime": 154.7616, "eval_samples_per_second": 374.26, "eval_steps_per_second": 5.854, "step": 241735 },
    { "epoch": 13.01, "grad_norm": 0.856779158115387, "learning_rate": 3.824418946749044e-05, "loss": 2.9534, "step": 242000 },
    { "epoch": 13.07, "grad_norm": 0.8315408229827881, "learning_rate": 3.794998528979112e-05, "loss": 2.9192, "step": 243000 },
    { "epoch": 13.12, "grad_norm": 0.8712647557258606, "learning_rate": 3.7655781112091794e-05, "loss": 2.9236, "step": 244000 },
    { "epoch": 13.18, "grad_norm": 0.8575468063354492, "learning_rate": 3.7361576934392476e-05, "loss": 2.9209, "step": 245000 },
    { "epoch": 13.23, "grad_norm": 0.848013699054718, "learning_rate": 3.7067666960870846e-05, "loss": 2.9224, "step": 246000 },
    { "epoch": 13.28, "grad_norm": 0.8788365721702576, "learning_rate": 3.677346278317153e-05, "loss": 2.9313, "step": 247000 },
    { "epoch": 13.34, "grad_norm": 0.8671311140060425, "learning_rate": 3.64792586054722e-05, "loss": 2.9354, "step": 248000 },
    { "epoch": 13.39, "grad_norm": 0.8452686667442322, "learning_rate": 3.618534863195058e-05, "loss": 2.9335, "step": 249000 },
    { "epoch": 13.44, "grad_norm": 0.8493407368659973, "learning_rate": 3.5891144454251254e-05, "loss": 2.9352, "step": 250000 },
    { "epoch": 13.5, "grad_norm": 0.8630385398864746, "learning_rate": 3.559694027655193e-05, "loss": 2.9366, "step": 251000 },
    { "epoch": 13.55, "grad_norm": 0.8659392595291138, "learning_rate": 3.5303030303030305e-05, "loss": 2.9396, "step": 252000 },
    { "epoch": 13.61, "grad_norm": 0.8576540350914001, "learning_rate": 3.500912032950868e-05, "loss": 2.9371, "step": 253000 },
    { "epoch": 13.66, "grad_norm": 0.839272141456604, "learning_rate": 3.471491615180936e-05, "loss": 2.9399, "step": 254000 },
    { "epoch": 13.71, "grad_norm": 0.8816892504692078, "learning_rate": 3.4421006178287734e-05, "loss": 2.9414, "step": 255000 },
    { "epoch": 13.77, "grad_norm": 0.8923898339271545, "learning_rate": 3.412680200058841e-05, "loss": 2.9429, "step": 256000 },
    { "epoch": 13.82, "grad_norm": 0.84392249584198, "learning_rate": 3.383259782288909e-05, "loss": 2.9459, "step": 257000 },
    { "epoch": 13.87, "grad_norm": 0.8665987849235535, "learning_rate": 3.3538393645189765e-05, "loss": 2.9467, "step": 258000 },
    { "epoch": 13.93, "grad_norm": 0.8305437564849854, "learning_rate": 3.324448367166814e-05, "loss": 2.9475, "step": 259000 },
    { "epoch": 13.98, "grad_norm": 0.8880412578582764, "learning_rate": 3.2950279493968816e-05, "loss": 2.9477, "step": 260000 },
    { "epoch": 14.0, "eval_accuracy": 0.4062352605994869, "eval_loss": 3.417443037033081, "eval_runtime": 154.6446, "eval_samples_per_second": 374.543, "eval_steps_per_second": 5.859, "step": 260330 },
    { "epoch": 14.04, "grad_norm": 0.8674964308738708, "learning_rate": 3.265607531626949e-05, "loss": 2.9136, "step": 261000 },
    { "epoch": 14.09, "grad_norm": 0.8518526554107666, "learning_rate": 3.236216534274787e-05, "loss": 2.9045, "step": 262000 },
    { "epoch": 14.14, "grad_norm": 0.8985535502433777, "learning_rate": 3.206796116504854e-05, "loss": 2.9094, "step": 263000 },
    { "epoch": 14.2, "grad_norm": 0.9025124311447144, "learning_rate": 3.177405119152692e-05, "loss": 2.9072, "step": 264000 },
    { "epoch": 14.25, "grad_norm": 0.866775631904602, "learning_rate": 3.1479847013827594e-05, "loss": 2.912, "step": 265000 },
    { "epoch": 14.3, "grad_norm": 0.875120222568512, "learning_rate": 3.1185642836128276e-05, "loss": 2.9127, "step": 266000 },
    { "epoch": 14.36, "grad_norm": 0.8747627139091492, "learning_rate": 3.089143865842895e-05, "loss": 2.9133, "step": 267000 },
    { "epoch": 14.41, "grad_norm": 0.8658396005630493, "learning_rate": 3.059752868490733e-05, "loss": 2.9134, "step": 268000 },
    { "epoch": 14.47, "grad_norm": 0.8496713638305664, "learning_rate": 3.0303324507208002e-05, "loss": 2.9196, "step": 269000 },
    { "epoch": 14.52, "grad_norm": 0.8578941822052002, "learning_rate": 3.0009120329508683e-05, "loss": 2.92, "step": 270000 },
    { "epoch": 14.57, "grad_norm": 0.8853529691696167, "learning_rate": 2.9715504560164753e-05, "loss": 2.9222, "step": 271000 },
    { "epoch": 14.63, "grad_norm": 0.8526709079742432, "learning_rate": 2.942159458664313e-05, "loss": 2.9165, "step": 272000 },
    { "epoch": 14.68, "grad_norm": 0.8756287693977356, "learning_rate": 2.912739040894381e-05, "loss": 2.926, "step": 273000 },
    { "epoch": 14.74, "grad_norm": 0.8718807697296143, "learning_rate": 2.8833186231244486e-05, "loss": 2.924, "step": 274000 },
    { "epoch": 14.79, "grad_norm": 0.874072790145874, "learning_rate": 2.853898205354516e-05, "loss": 2.9244, "step": 275000 },
    { "epoch": 14.84, "grad_norm": 0.8762756586074829, "learning_rate": 2.824477787584584e-05, "loss": 2.9211, "step": 276000 },
    { "epoch": 14.9, "grad_norm": 0.9252573251724243, "learning_rate": 2.7950867902324212e-05, "loss": 2.9225, "step": 277000 },
    { "epoch": 14.95, "grad_norm": 0.8735837936401367, "learning_rate": 2.7656663724624893e-05, "loss": 2.9272, "step": 278000 },
    { "epoch": 15.0, "eval_accuracy": 0.4066799828049704, "eval_loss": 3.427541494369507, "eval_runtime": 155.041, "eval_samples_per_second": 373.585, "eval_steps_per_second": 5.844, "step": 278925 },
    { "epoch": 15.0, "grad_norm": 0.8828846216201782, "learning_rate": 2.7362459546925568e-05, "loss": 2.9242, "step": 279000 },
    { "epoch": 15.06, "grad_norm": 0.8483282923698425, "learning_rate": 2.7068255369226242e-05, "loss": 2.8881, "step": 280000 },
    { "epoch": 15.11, "grad_norm": 0.8667340278625488, "learning_rate": 2.6774051191526924e-05, "loss": 2.8852, "step": 281000 },
    { "epoch": 15.17, "grad_norm": 0.8646563291549683, "learning_rate": 2.6480435422182997e-05, "loss": 2.8886, "step": 282000 },
    { "epoch": 15.22, "grad_norm": 0.8643233776092529, "learning_rate": 2.618623124448367e-05, "loss": 2.8935, "step": 283000 },
    { "epoch": 15.27, "grad_norm": 0.9026147127151489, "learning_rate": 2.5892027066784353e-05, "loss": 2.8939, "step": 284000 },
    { "epoch": 15.33, "grad_norm": 0.8915610313415527, "learning_rate": 2.5597822889085027e-05, "loss": 2.8964, "step": 285000 },
    { "epoch": 15.38, "grad_norm": 0.8528517484664917, "learning_rate": 2.53036187113857e-05, "loss": 2.8976, "step": 286000 },
    { "epoch": 15.43, "grad_norm": 0.8766651749610901, "learning_rate": 2.500970873786408e-05, "loss": 2.9009, "step": 287000 },
    { "epoch": 15.49, "grad_norm": 0.897165060043335, "learning_rate": 2.4715504560164757e-05, "loss": 2.9008, "step": 288000 },
    { "epoch": 15.54, "grad_norm": 0.8595399260520935, "learning_rate": 2.442159458664313e-05, "loss": 2.9063, "step": 289000 },
    { "epoch": 15.6, "grad_norm": 0.8562242984771729, "learning_rate": 2.412739040894381e-05, "loss": 2.8988, "step": 290000 },
    { "epoch": 15.65, "grad_norm": 0.8943966627120972, "learning_rate": 2.3833186231244483e-05, "loss": 2.905, "step": 291000 },
    { "epoch": 15.7, "grad_norm": 0.8769930601119995, "learning_rate": 2.353927625772286e-05, "loss": 2.9032, "step": 292000 },
    { "epoch": 15.76, "grad_norm": 0.8928766250610352, "learning_rate": 2.3245072080023535e-05, "loss": 2.907, "step": 293000 },
    { "epoch": 15.81, "grad_norm": 0.8999136686325073, "learning_rate": 2.2950867902324213e-05, "loss": 2.9099, "step": 294000 },
    { "epoch": 15.86, "grad_norm": 0.8893632888793945, "learning_rate": 2.265695792880259e-05, "loss": 2.9039, "step": 295000 },
    { "epoch": 15.92, "grad_norm": 0.9111586213111877, "learning_rate": 2.2362753751103264e-05, "loss": 2.9071, "step": 296000 },
    { "epoch": 15.97, "grad_norm": 0.8878467082977295, "learning_rate": 2.2068549573403942e-05, "loss": 2.9088, "step": 297000 },
    { "epoch": 16.0, "eval_accuracy": 0.4068374493985737, "eval_loss": 3.427147626876831, "eval_runtime": 154.7655, "eval_samples_per_second": 374.25, "eval_steps_per_second": 5.854, "step": 297520 },
    { "epoch": 16.03, "grad_norm": 0.9069207310676575, "learning_rate": 2.177434539570462e-05, "loss": 2.8909, "step": 298000 },
    { "epoch": 16.08, "grad_norm": 0.8874634504318237, "learning_rate": 2.1480435422182994e-05, "loss": 2.8723, "step": 299000 },
    { "epoch": 16.13, "grad_norm": 0.8833279609680176, "learning_rate": 2.1186231244483672e-05, "loss": 2.876, "step": 300000 },
    { "epoch": 16.19, "grad_norm": 0.9021320939064026, "learning_rate": 2.089202706678435e-05, "loss": 2.8793, "step": 301000 },
    { "epoch": 16.24, "grad_norm": 0.8948860764503479, "learning_rate": 2.0598117093262727e-05, "loss": 2.8789, "step": 302000 },
    { "epoch": 16.29, "grad_norm": 0.8972275257110596, "learning_rate": 2.03039129155634e-05, "loss": 2.8778, "step": 303000 },
    { "epoch": 16.35, "grad_norm": 0.8987729549407959, "learning_rate": 2.0009708737864076e-05, "loss": 2.8827, "step": 304000 },
    { "epoch": 16.4, "grad_norm": 0.8773179650306702, "learning_rate": 1.9715798764342457e-05, "loss": 2.8866, "step": 305000 },
    { "epoch": 16.46, "grad_norm": 0.9324170351028442, "learning_rate": 1.942159458664313e-05, "loss": 2.8842, "step": 306000 },
    { "epoch": 16.51, "grad_norm": 0.9289742708206177, "learning_rate": 1.9127390408943806e-05, "loss": 2.8813, "step": 307000 },
    { "epoch": 16.56, "grad_norm": 0.9093849062919617, "learning_rate": 1.8833480435422186e-05, "loss": 2.8824, "step": 308000 },
    { "epoch": 16.62, "grad_norm": 0.9236767888069153, "learning_rate": 1.853927625772286e-05, "loss": 2.8845, "step": 309000 },
    { "epoch": 16.67, "grad_norm": 0.8720325827598572, "learning_rate": 1.8245366284201238e-05, "loss": 2.8859, "step": 310000 },
    { "epoch": 16.72, "grad_norm": 0.9025481939315796, "learning_rate": 1.7951456310679612e-05, "loss": 2.8921, "step": 311000 },
    { "epoch": 16.78, "grad_norm": 0.9536554217338562, "learning_rate": 1.765725213298029e-05, "loss": 2.8898, "step": 312000 },
    { "epoch": 16.83, "grad_norm": 0.909812867641449, "learning_rate": 1.7363047955280968e-05, "loss": 2.8901, "step": 313000 },
    { "epoch": 16.89, "grad_norm": 0.9195666313171387, "learning_rate": 1.7068843777581642e-05, "loss": 2.8897, "step": 314000 },
    { "epoch": 16.94, "grad_norm": 0.8690800666809082, "learning_rate": 1.677463959988232e-05, "loss": 2.888, "step": 315000 },
    { "epoch": 16.99, "grad_norm": 0.8886986374855042, "learning_rate": 1.6480435422182998e-05, "loss": 2.8915, "step": 316000 },
    { "epoch": 17.0, "eval_accuracy": 0.4071312885113811, "eval_loss": 3.424515962600708, "eval_runtime": 155.1079, "eval_samples_per_second": 373.424, "eval_steps_per_second": 5.841, "step": 316115 },
    { "epoch": 17.05, "grad_norm": 0.9326255321502686, "learning_rate": 1.6186525448661372e-05, "loss": 2.8626, "step": 317000 },
    { "epoch": 17.1, "grad_norm": 0.9530662298202515, "learning_rate": 1.5892615475139746e-05, "loss": 2.8605, "step": 318000 },
    { "epoch": 17.16, "grad_norm": 0.9299532771110535, "learning_rate": 1.5598411297440423e-05, "loss": 2.8598, "step": 319000 },
    { "epoch": 17.21, "grad_norm": 0.9452057480812073, "learning_rate": 1.53042071197411e-05, "loss": 2.8637, "step": 320000 },
    { "epoch": 17.26, "grad_norm": 0.9302668571472168, "learning_rate": 1.5010591350397176e-05, "loss": 2.8636, "step": 321000 },
    { "epoch": 17.32, "grad_norm": 0.9411625266075134, "learning_rate": 1.4716387172697854e-05, "loss": 2.8669, "step": 322000 },
    { "epoch": 17.37, "grad_norm": 0.9255250096321106, "learning_rate": 1.4422182994998529e-05, "loss": 2.8643, "step": 323000 },
    { "epoch": 17.42, "grad_norm": 0.9399036765098572, "learning_rate": 1.4127978817299207e-05, "loss": 2.8686, "step": 324000 },
    { "epoch": 17.48, "grad_norm": 0.9294721484184265, "learning_rate": 1.3833774639599883e-05, "loss": 2.8693, "step": 325000 },
    { "epoch": 17.53, "grad_norm": 0.9378616213798523, "learning_rate": 1.3539864666078258e-05, "loss": 2.8689, "step": 326000 },
    { "epoch": 17.59, "grad_norm": 0.9260066747665405, "learning_rate": 1.3245954692556634e-05, "loss": 2.8697, "step": 327000 },
    { "epoch": 17.64, "grad_norm": 0.9042669534683228, "learning_rate": 1.2951750514857312e-05, "loss": 2.8717, "step": 328000 },
    { "epoch": 17.69, "grad_norm": 0.9497202634811401, "learning_rate": 1.2657546337157988e-05, "loss": 2.8715, "step": 329000 },
    { "epoch": 17.75, "grad_norm": 0.9359462857246399, "learning_rate": 1.2363342159458664e-05, "loss": 2.8724, "step": 330000 },
    { "epoch": 17.8, "grad_norm": 0.9586021900177002, "learning_rate": 1.2069432185937041e-05, "loss": 2.8724, "step": 331000 },
    { "epoch": 17.85, "grad_norm": 0.92441725730896, "learning_rate": 1.1775228008237718e-05, "loss": 2.8739, "step": 332000 },
    { "epoch": 17.91, "grad_norm": 0.9491459727287292, "learning_rate": 1.1481023830538394e-05, "loss": 2.8785, "step": 333000 },
    { "epoch": 17.96, "grad_norm": 0.9118024110794067, "learning_rate": 1.1187113857016771e-05, "loss": 2.872, "step": 334000 },
    { "epoch": 18.0, "eval_accuracy": 0.4070309908901142, "eval_loss": 3.426220417022705, "eval_runtime": 155.4694, "eval_samples_per_second": 372.556, "eval_steps_per_second": 5.828, "step": 334710 },
    { "epoch": 18.02, "grad_norm": 0.9377329349517822, "learning_rate": 1.0892909679317447e-05, "loss": 2.8661, "step": 335000 },
    { "epoch": 18.07, "grad_norm": 0.9395567178726196, "learning_rate": 1.0598705501618123e-05, "loss": 2.8488, "step": 336000 },
    { "epoch": 18.12, "grad_norm": 0.9508826732635498, "learning_rate": 1.0304795528096499e-05, "loss": 2.8486, "step": 337000 },
    { "epoch": 18.18, "grad_norm": 0.9117801785469055, "learning_rate": 1.0010591350397177e-05, "loss": 2.8487, "step": 338000 },
    { "epoch": 18.23, "grad_norm": 0.932178795337677, "learning_rate": 9.716681376875552e-06, "loss": 2.8528, "step": 339000 },
    { "epoch": 18.28, "grad_norm": 0.9204062223434448, "learning_rate": 9.422477199176229e-06, "loss": 2.8579, "step": 340000 },
    { "epoch": 18.34, "grad_norm": 0.9516026377677917, "learning_rate": 9.128273021476906e-06, "loss": 2.8507, "step": 341000 },
    { "epoch": 18.39, "grad_norm": 0.9432997703552246, "learning_rate": 8.834068843777583e-06, "loss": 2.8571, "step": 342000 },
    { "epoch": 18.45, "grad_norm": 0.9799352288246155, "learning_rate": 8.539864666078259e-06, "loss": 2.8517, "step": 343000 },
    { "epoch": 18.5, "grad_norm": 0.9541887044906616, "learning_rate": 8.245954692556634e-06, "loss": 2.8557, "step": 344000 },
    { "epoch": 18.55, "grad_norm": 0.945749044418335, "learning_rate": 7.951750514857312e-06, "loss": 2.86, "step": 345000 },
    { "epoch": 18.61, "grad_norm": 0.9562386274337769, "learning_rate": 7.657546337157988e-06, "loss": 2.8559, "step": 346000 },
    { "epoch": 18.66, "grad_norm": 0.9685288071632385, "learning_rate": 7.363930567814063e-06, "loss": 2.8546, "step": 347000 },
    { "epoch": 18.71, "grad_norm": 0.9271679520606995, "learning_rate": 7.06972639011474e-06, "loss": 2.8533, "step": 348000 },
    { "epoch": 18.77, "grad_norm": 0.9129212498664856, "learning_rate": 6.775522212415417e-06, "loss": 2.8612, "step": 349000 },
    { "epoch": 18.82, "grad_norm": 0.9596994519233704, "learning_rate": 6.481318034716093e-06, "loss": 2.8562, "step": 350000 },
    { "epoch": 18.88, "grad_norm": 0.9252175092697144, "learning_rate": 6.18711385701677e-06, "loss": 2.8577, "step": 351000 },
    { "epoch": 18.93, "grad_norm": 0.9257832169532776, "learning_rate": 5.893203883495145e-06, "loss": 2.8585, "step": 352000 },
    { "epoch": 18.98, "grad_norm": 0.9196575880050659, "learning_rate": 5.598999705795822e-06, "loss": 2.8514, "step": 353000 },
    { "epoch": 19.0, "eval_accuracy": 0.4072752522102257, "eval_loss": 3.4321706295013428, "eval_runtime": 155.3795, "eval_samples_per_second": 372.771, "eval_steps_per_second": 5.831, "step": 353305 },
    { "epoch": 19.04, "grad_norm": 0.9688698649406433, "learning_rate": 5.305089732274199e-06, "loss": 2.8452, "step": 354000 },
    { "epoch": 19.09, "grad_norm": 0.9426762461662292, "learning_rate": 5.010885554574876e-06, "loss": 2.8401, "step": 355000 },
    { "epoch": 19.14, "grad_norm": 0.9447112679481506, "learning_rate": 4.716975581053251e-06, "loss": 2.839, "step": 356000 },
    { "epoch": 19.2, "grad_norm": 0.9574055671691895, "learning_rate": 4.423065607531627e-06, "loss": 2.8447, "step": 357000 },
    { "epoch": 19.25, "grad_norm": 0.9574621319770813, "learning_rate": 4.128861429832304e-06, "loss": 2.8391, "step": 358000 },
    { "epoch": 19.31, "grad_norm": 0.9345229268074036, "learning_rate": 3.83465725213298e-06, "loss": 2.8444, "step": 359000 },
    { "epoch": 19.36, "grad_norm": 0.9420320391654968, "learning_rate": 3.5404530744336568e-06, "loss": 2.8459, "step": 360000 },
    { "epoch": 19.41, "grad_norm": 0.9578850865364075, "learning_rate": 3.2465431009120332e-06, "loss": 2.8409, "step": 361000 },
    { "epoch": 19.47, "grad_norm": 0.963036060333252, "learning_rate": 2.95233892321271e-06, "loss": 2.8392, "step": 362000 },
    { "epoch": 19.52, "grad_norm": 0.9696721434593201, "learning_rate": 2.6581347455133864e-06, "loss": 2.8451, "step": 363000 },
    { "epoch": 19.58, "grad_norm": 0.9655833840370178, "learning_rate": 2.3642247719917624e-06, "loss": 2.8446, "step": 364000 },
    { "epoch": 19.63, "grad_norm": 0.9735895395278931, "learning_rate": 2.070314798470138e-06, "loss": 2.8436, "step": 365000 },
    { "epoch": 19.68, "grad_norm": 0.9257269501686096, "learning_rate": 1.7761106207708148e-06, "loss": 2.8413, "step": 366000 },
    { "epoch": 19.74, "grad_norm": 0.9528219103813171, "learning_rate": 1.482200647249191e-06, "loss": 2.8403, "step": 367000 },
    { "epoch": 19.79, "grad_norm": 0.9575457572937012, "learning_rate": 1.1879964695498676e-06, "loss": 2.8421, "step": 368000 },
    { "epoch": 19.84, "grad_norm": 0.9463661909103394, "learning_rate": 8.937922918505443e-07, "loss": 2.8405, "step": 369000 },
    { "epoch": 19.9, "grad_norm": 0.9508777856826782, "learning_rate": 5.99588114151221e-07, "loss": 2.842, "step": 370000 },
    { "epoch": 19.95, "grad_norm": 0.93899005651474, "learning_rate": 3.0567814062959694e-07, "loss": 2.8467, "step": 371000 },
    { "epoch": 20.0, "eval_accuracy": 0.40723568402608223, "eval_loss": 3.4373319149017334, "eval_runtime": 154.9871, "eval_samples_per_second": 373.715, "eval_steps_per_second": 5.846, "step": 371900 },
    { "epoch": 20.0, "step": 371900, "total_flos": 1.5669257538816e+18, "train_loss": 3.1537178330448374, "train_runtime": 81513.9803, "train_samples_per_second": 145.996, "train_steps_per_second": 4.562 }
  ],
  "logging_steps": 1000,
  "max_steps": 371900,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 5000,
  "total_flos": 1.5669257538816e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}