{ "best_metric": null, "best_model_checkpoint": null, "epoch": 9.906542056074766, "eval_steps": 500, "global_step": 530, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.018691588785046728, "grad_norm": 456.0, "learning_rate": 3.7735849056603773e-06, "loss": 46.1216, "step": 1 }, { "epoch": 0.09345794392523364, "grad_norm": 438.0, "learning_rate": 1.8867924528301888e-05, "loss": 43.7935, "step": 5 }, { "epoch": 0.18691588785046728, "grad_norm": 302.0, "learning_rate": 3.7735849056603776e-05, "loss": 37.6584, "step": 10 }, { "epoch": 0.2803738317757009, "grad_norm": 41.0, "learning_rate": 5.660377358490566e-05, "loss": 21.0859, "step": 15 }, { "epoch": 0.37383177570093457, "grad_norm": 30.125, "learning_rate": 7.547169811320755e-05, "loss": 17.8237, "step": 20 }, { "epoch": 0.4672897196261682, "grad_norm": 14.5, "learning_rate": 9.433962264150944e-05, "loss": 15.4732, "step": 25 }, { "epoch": 0.5607476635514018, "grad_norm": 6.1875, "learning_rate": 0.00011320754716981132, "loss": 14.2199, "step": 30 }, { "epoch": 0.6542056074766355, "grad_norm": 6.8125, "learning_rate": 0.0001320754716981132, "loss": 13.4105, "step": 35 }, { "epoch": 0.7476635514018691, "grad_norm": 18.75, "learning_rate": 0.0001509433962264151, "loss": 12.0055, "step": 40 }, { "epoch": 0.8411214953271028, "grad_norm": 42.0, "learning_rate": 0.000169811320754717, "loss": 9.0376, "step": 45 }, { "epoch": 0.9345794392523364, "grad_norm": 5.125, "learning_rate": 0.00018867924528301889, "loss": 2.8367, "step": 50 }, { "epoch": 0.9906542056074766, "eval_loss": 2.7326531410217285, "eval_runtime": 1.0199, "eval_samples_per_second": 4.902, "eval_steps_per_second": 1.961, "step": 53 }, { "epoch": 1.02803738317757, "grad_norm": 2.90625, "learning_rate": 0.00019999132465602527, "loss": 1.6892, "step": 55 }, { "epoch": 1.1214953271028036, "grad_norm": 1.875, "learning_rate": 0.0001998937443221316, "loss": 1.4752, "step": 60 }, { "epoch": 1.2149532710280373, "grad_norm": 1.3671875, "learning_rate": 0.00019968784563700586, "loss": 1.3666, "step": 65 }, { "epoch": 1.308411214953271, "grad_norm": 1.171875, "learning_rate": 0.00019937385186393888, "loss": 1.251, "step": 70 }, { "epoch": 1.4018691588785046, "grad_norm": 1.5078125, "learning_rate": 0.0001989521034775823, "loss": 1.2121, "step": 75 }, { "epoch": 1.4953271028037383, "grad_norm": 1.7734375, "learning_rate": 0.00019842305779475968, "loss": 1.1493, "step": 80 }, { "epoch": 1.588785046728972, "grad_norm": 6.40625, "learning_rate": 0.0001977872884785815, "loss": 1.149, "step": 85 }, { "epoch": 1.6822429906542056, "grad_norm": 1.7109375, "learning_rate": 0.00019704548491640192, "loss": 1.0847, "step": 90 }, { "epoch": 1.7757009345794392, "grad_norm": 2.75, "learning_rate": 0.00019619845147229138, "loss": 1.0808, "step": 95 }, { "epoch": 1.8691588785046729, "grad_norm": 10.75, "learning_rate": 0.00019524710661483592, "loss": 1.011, "step": 100 }, { "epoch": 1.9626168224299065, "grad_norm": 5.25, "learning_rate": 0.00019419248192120898, "loss": 1.0189, "step": 105 }, { "epoch": 2.0, "eval_loss": 2.324605703353882, "eval_runtime": 1.0183, "eval_samples_per_second": 4.91, "eval_steps_per_second": 1.964, "step": 107 }, { "epoch": 2.05607476635514, "grad_norm": 1.8046875, "learning_rate": 0.00019303572095859546, "loss": 0.964, "step": 110 }, { "epoch": 2.149532710280374, "grad_norm": 1.9453125, "learning_rate": 0.000191778078044181, "loss": 0.9563, "step": 115 }, { "epoch": 2.2429906542056073, "grad_norm": 2.5625, 
"learning_rate": 0.00019042091688505105, "loss": 0.9415, "step": 120 }, { "epoch": 2.336448598130841, "grad_norm": 3.265625, "learning_rate": 0.00018896570909947475, "loss": 0.9123, "step": 125 }, { "epoch": 2.4299065420560746, "grad_norm": 4.75, "learning_rate": 0.00018741403262117657, "loss": 0.9691, "step": 130 }, { "epoch": 2.5233644859813085, "grad_norm": 0.93359375, "learning_rate": 0.00018576756998832665, "loss": 0.8991, "step": 135 }, { "epoch": 2.616822429906542, "grad_norm": 2.71875, "learning_rate": 0.00018402810651910442, "loss": 0.8936, "step": 140 }, { "epoch": 2.710280373831776, "grad_norm": 3.671875, "learning_rate": 0.00018219752837581466, "loss": 0.8844, "step": 145 }, { "epoch": 2.803738317757009, "grad_norm": 1.859375, "learning_rate": 0.00018027782051965407, "loss": 0.9217, "step": 150 }, { "epoch": 2.897196261682243, "grad_norm": 0.875, "learning_rate": 0.00017827106455834733, "loss": 0.9125, "step": 155 }, { "epoch": 2.9906542056074765, "grad_norm": 0.74609375, "learning_rate": 0.0001761794364889855, "loss": 0.8788, "step": 160 }, { "epoch": 2.9906542056074765, "eval_loss": 2.2786262035369873, "eval_runtime": 1.0188, "eval_samples_per_second": 4.908, "eval_steps_per_second": 1.963, "step": 160 }, { "epoch": 3.0841121495327104, "grad_norm": 0.609375, "learning_rate": 0.00017400520433851456, "loss": 0.8796, "step": 165 }, { "epoch": 3.177570093457944, "grad_norm": 1.0703125, "learning_rate": 0.00017175072570443312, "loss": 0.8224, "step": 170 }, { "epoch": 3.2710280373831777, "grad_norm": 1.1953125, "learning_rate": 0.00016941844519836508, "loss": 0.8814, "step": 175 }, { "epoch": 3.364485981308411, "grad_norm": 0.703125, "learning_rate": 0.00016701089179528032, "loss": 0.8648, "step": 180 }, { "epoch": 3.457943925233645, "grad_norm": 1.1796875, "learning_rate": 0.00016453067609123654, "loss": 0.804, "step": 185 }, { "epoch": 3.5514018691588785, "grad_norm": 0.70703125, "learning_rate": 0.00016198048747261709, "loss": 0.8115, "step": 190 }, { "epoch": 3.6448598130841123, "grad_norm": 0.93359375, "learning_rate": 0.0001593630911999333, "loss": 0.7971, "step": 195 }, { "epoch": 3.7383177570093458, "grad_norm": 1.3203125, "learning_rate": 0.0001566813254093538, "loss": 0.8242, "step": 200 }, { "epoch": 3.831775700934579, "grad_norm": 2.359375, "learning_rate": 0.00015393809803521214, "loss": 0.8288, "step": 205 }, { "epoch": 3.925233644859813, "grad_norm": 2.0, "learning_rate": 0.00015113638365682995, "loss": 0.8104, "step": 210 }, { "epoch": 4.0, "eval_loss": 2.2753615379333496, "eval_runtime": 1.0175, "eval_samples_per_second": 4.914, "eval_steps_per_second": 1.966, "step": 214 }, { "epoch": 4.018691588785047, "grad_norm": 1.078125, "learning_rate": 0.00014827922027307451, "loss": 0.82, "step": 215 }, { "epoch": 4.11214953271028, "grad_norm": 0.8125, "learning_rate": 0.00014536970600814787, "loss": 0.7523, "step": 220 }, { "epoch": 4.205607476635514, "grad_norm": 1.1171875, "learning_rate": 0.0001424109957521806, "loss": 0.772, "step": 225 }, { "epoch": 4.299065420560748, "grad_norm": 1.515625, "learning_rate": 0.0001394062977402717, "loss": 0.7393, "step": 230 }, { "epoch": 4.392523364485982, "grad_norm": 4.0625, "learning_rate": 0.00013635887007368467, "loss": 0.7492, "step": 235 }, { "epoch": 4.485981308411215, "grad_norm": 1.3203125, "learning_rate": 0.0001332720171869723, "loss": 0.7598, "step": 240 }, { "epoch": 4.579439252336448, "grad_norm": 0.87109375, "learning_rate": 0.0001301490862648603, "loss": 0.7433, "step": 245 }, { "epoch": 4.672897196261682, 
"grad_norm": 1.5, "learning_rate": 0.0001269934636127754, "loss": 0.7615, "step": 250 }, { "epoch": 4.766355140186916, "grad_norm": 1.1875, "learning_rate": 0.00012380857098495356, "loss": 0.7309, "step": 255 }, { "epoch": 4.859813084112149, "grad_norm": 0.921875, "learning_rate": 0.00012059786187410984, "loss": 0.7343, "step": 260 }, { "epoch": 4.953271028037383, "grad_norm": 0.9296875, "learning_rate": 0.00011736481776669306, "loss": 0.7626, "step": 265 }, { "epoch": 4.990654205607477, "eval_loss": 2.320525884628296, "eval_runtime": 1.018, "eval_samples_per_second": 4.912, "eval_steps_per_second": 1.965, "step": 267 }, { "epoch": 5.046728971962617, "grad_norm": 0.671875, "learning_rate": 0.00011411294436778561, "loss": 0.6804, "step": 270 }, { "epoch": 5.140186915887851, "grad_norm": 0.7265625, "learning_rate": 0.00011084576779974257, "loss": 0.662, "step": 275 }, { "epoch": 5.233644859813084, "grad_norm": 0.8203125, "learning_rate": 0.00010756683077869132, "loss": 0.6635, "step": 280 }, { "epoch": 5.327102803738318, "grad_norm": 0.76953125, "learning_rate": 0.00010427968877303808, "loss": 0.6852, "step": 285 }, { "epoch": 5.420560747663552, "grad_norm": 1.0546875, "learning_rate": 0.00010098790614814658, "loss": 0.6885, "step": 290 }, { "epoch": 5.5140186915887845, "grad_norm": 1.046875, "learning_rate": 9.769505230136962e-05, "loss": 0.6665, "step": 295 }, { "epoch": 5.607476635514018, "grad_norm": 0.9453125, "learning_rate": 9.440469779162407e-05, "loss": 0.6704, "step": 300 }, { "epoch": 5.700934579439252, "grad_norm": 0.7890625, "learning_rate": 9.112041046770653e-05, "loss": 0.7145, "step": 305 }, { "epoch": 5.794392523364486, "grad_norm": 0.8203125, "learning_rate": 8.784575159954748e-05, "loss": 0.6567, "step": 310 }, { "epoch": 5.88785046728972, "grad_norm": 0.79296875, "learning_rate": 8.458427201659927e-05, "loss": 0.6752, "step": 315 }, { "epoch": 5.981308411214953, "grad_norm": 1.046875, "learning_rate": 8.13395082575451e-05, "loss": 0.6665, "step": 320 }, { "epoch": 6.0, "eval_loss": 2.3902792930603027, "eval_runtime": 1.0171, "eval_samples_per_second": 4.916, "eval_steps_per_second": 1.966, "step": 321 }, { "epoch": 6.074766355140187, "grad_norm": 0.859375, "learning_rate": 7.81149787355039e-05, "loss": 0.6076, "step": 325 }, { "epoch": 6.168224299065421, "grad_norm": 1.0625, "learning_rate": 7.491417992288927e-05, "loss": 0.5788, "step": 330 }, { "epoch": 6.261682242990654, "grad_norm": 0.8359375, "learning_rate": 7.174058256006012e-05, "loss": 0.6007, "step": 335 }, { "epoch": 6.355140186915888, "grad_norm": 0.79296875, "learning_rate": 6.859762789187259e-05, "loss": 0.6096, "step": 340 }, { "epoch": 6.4485981308411215, "grad_norm": 0.8671875, "learning_rate": 6.548872393621578e-05, "loss": 0.5978, "step": 345 }, { "epoch": 6.542056074766355, "grad_norm": 0.79296875, "learning_rate": 6.24172417885762e-05, "loss": 0.6354, "step": 350 }, { "epoch": 6.635514018691588, "grad_norm": 0.76171875, "learning_rate": 5.938651196663865e-05, "loss": 0.6094, "step": 355 }, { "epoch": 6.728971962616822, "grad_norm": 0.859375, "learning_rate": 5.639982079888726e-05, "loss": 0.5985, "step": 360 }, { "epoch": 6.822429906542056, "grad_norm": 0.765625, "learning_rate": 5.3460406861121894e-05, "loss": 0.5846, "step": 365 }, { "epoch": 6.91588785046729, "grad_norm": 0.9140625, "learning_rate": 5.0571457464755226e-05, "loss": 0.6015, "step": 370 }, { "epoch": 6.990654205607477, "eval_loss": 2.4630489349365234, "eval_runtime": 1.0189, "eval_samples_per_second": 4.907, "eval_steps_per_second": 
1.963, "step": 374 }, { "epoch": 7.009345794392523, "grad_norm": 0.87109375, "learning_rate": 4.7736105200697056e-05, "loss": 0.6055, "step": 375 }, { "epoch": 7.102803738317757, "grad_norm": 0.93359375, "learning_rate": 4.495742454257418e-05, "loss": 0.5464, "step": 380 }, { "epoch": 7.196261682242991, "grad_norm": 0.93359375, "learning_rate": 4.2238428512969064e-05, "loss": 0.5556, "step": 385 }, { "epoch": 7.289719626168225, "grad_norm": 0.83984375, "learning_rate": 3.9582065416291926e-05, "loss": 0.548, "step": 390 }, { "epoch": 7.383177570093458, "grad_norm": 1.0390625, "learning_rate": 3.69912156418289e-05, "loss": 0.5074, "step": 395 }, { "epoch": 7.4766355140186915, "grad_norm": 0.80859375, "learning_rate": 3.4468688540433425e-05, "loss": 0.5497, "step": 400 }, { "epoch": 7.570093457943925, "grad_norm": 0.8359375, "learning_rate": 3.201721937824673e-05, "loss": 0.5436, "step": 405 }, { "epoch": 7.663551401869158, "grad_norm": 0.86328125, "learning_rate": 2.9639466370751068e-05, "loss": 0.5627, "step": 410 }, { "epoch": 7.757009345794392, "grad_norm": 0.890625, "learning_rate": 2.7338007800372024e-05, "loss": 0.566, "step": 415 }, { "epoch": 7.850467289719626, "grad_norm": 0.9296875, "learning_rate": 2.5115339220754797e-05, "loss": 0.557, "step": 420 }, { "epoch": 7.94392523364486, "grad_norm": 0.84765625, "learning_rate": 2.297387075074625e-05, "loss": 0.5699, "step": 425 }, { "epoch": 8.0, "eval_loss": 2.583679676055908, "eval_runtime": 1.0173, "eval_samples_per_second": 4.915, "eval_steps_per_second": 1.966, "step": 428 }, { "epoch": 8.037383177570094, "grad_norm": 0.796875, "learning_rate": 2.09159244610172e-05, "loss": 0.5156, "step": 430 }, { "epoch": 8.130841121495328, "grad_norm": 0.828125, "learning_rate": 1.89437318561583e-05, "loss": 0.509, "step": 435 }, { "epoch": 8.22429906542056, "grad_norm": 0.87109375, "learning_rate": 1.7059431454979824e-05, "loss": 0.5226, "step": 440 }, { "epoch": 8.317757009345794, "grad_norm": 0.8046875, "learning_rate": 1.52650664716397e-05, "loss": 0.5088, "step": 445 }, { "epoch": 8.411214953271028, "grad_norm": 0.82421875, "learning_rate": 1.3562582600113294e-05, "loss": 0.4948, "step": 450 }, { "epoch": 8.504672897196262, "grad_norm": 0.8203125, "learning_rate": 1.1953825904408034e-05, "loss": 0.5357, "step": 455 }, { "epoch": 8.598130841121495, "grad_norm": 0.8203125, "learning_rate": 1.0440540816810395e-05, "loss": 0.551, "step": 460 }, { "epoch": 8.69158878504673, "grad_norm": 0.91796875, "learning_rate": 9.024368246335735e-06, "loss": 0.5092, "step": 465 }, { "epoch": 8.785046728971963, "grad_norm": 0.83203125, "learning_rate": 7.706843799431984e-06, "loss": 0.5244, "step": 470 }, { "epoch": 8.878504672897197, "grad_norm": 0.8671875, "learning_rate": 6.489396114866941e-06, "loss": 0.5235, "step": 475 }, { "epoch": 8.97196261682243, "grad_norm": 0.8203125, "learning_rate": 5.373345314604205e-06, "loss": 0.5017, "step": 480 }, { "epoch": 8.990654205607477, "eval_loss": 2.6300225257873535, "eval_runtime": 1.0187, "eval_samples_per_second": 4.908, "eval_steps_per_second": 1.963, "step": 481 }, { "epoch": 9.065420560747663, "grad_norm": 0.81640625, "learning_rate": 4.359901572347758e-06, "loss": 0.5185, "step": 485 }, { "epoch": 9.158878504672897, "grad_norm": 0.84765625, "learning_rate": 3.450163801307582e-06, "loss": 0.5366, "step": 490 }, { "epoch": 9.25233644859813, "grad_norm": 0.796875, "learning_rate": 2.6451184626087644e-06, "loss": 0.5114, "step": 495 }, { "epoch": 9.345794392523365, "grad_norm": 0.78125, "learning_rate": 
1.945638495636515e-06, "loss": 0.5138, "step": 500 }, { "epoch": 9.439252336448599, "grad_norm": 0.8203125, "learning_rate": 1.3524823714768375e-06, "loss": 0.486, "step": 505 }, { "epoch": 9.532710280373832, "grad_norm": 0.84375, "learning_rate": 8.662932704792792e-07, "loss": 0.5139, "step": 510 }, { "epoch": 9.626168224299064, "grad_norm": 0.7890625, "learning_rate": 4.875983848335874e-07, "loss": 0.5092, "step": 515 }, { "epoch": 9.719626168224298, "grad_norm": 0.7890625, "learning_rate": 2.1680834691628626e-07, "loss": 0.4957, "step": 520 }, { "epoch": 9.813084112149532, "grad_norm": 0.8125, "learning_rate": 5.421678402741659e-08, "loss": 0.5253, "step": 525 }, { "epoch": 9.906542056074766, "grad_norm": 0.82421875, "learning_rate": 0.0, "loss": 0.4969, "step": 530 }, { "epoch": 9.906542056074766, "eval_loss": 2.6219420433044434, "eval_runtime": 1.0178, "eval_samples_per_second": 4.912, "eval_steps_per_second": 1.965, "step": 530 }, { "epoch": 9.906542056074766, "step": 530, "total_flos": 8.09000149933097e+17, "train_loss": 2.438210007829486, "train_runtime": 3271.6973, "train_samples_per_second": 2.613, "train_steps_per_second": 0.162 } ], "logging_steps": 5, "max_steps": 530, "num_input_tokens_seen": 0, "num_train_epochs": 10, "save_steps": 100, "total_flos": 8.09000149933097e+17, "train_batch_size": 4, "trial_name": null, "trial_params": null }
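
The JSON above appears to be the trainer_state.json written by the Hugging Face Transformers Trainer for this run (max_steps 530, num_train_epochs 10, logging every 5 steps, with an eval entry per epoch in log_history). Below is a minimal sketch of how such a file could be turned into train/eval loss curves; the filename "trainer_state.json", the output path "loss_curves.png", and the use of matplotlib are assumptions for illustration, not part of the run itself.

# Sketch: plot the loss curves recorded in a Trainer state file like the one above.
# Assumptions: the JSON is saved as "trainer_state.json" in the working directory,
# and matplotlib is installed.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]

# Training-loss entries carry a "loss" key; per-epoch evaluation entries carry "eval_loss".
train_steps = [e["step"] for e in history if "loss" in e]
train_loss = [e["loss"] for e in history if "loss" in e]
eval_steps = [e["step"] for e in history if "eval_loss" in e]
eval_loss = [e["eval_loss"] for e in history if "eval_loss" in e]

plt.plot(train_steps, train_loss, label="train loss")
plt.plot(eval_steps, eval_loss, marker="o", label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curves.png")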