{
  "best_metric": 0.5924356579780579,
  "best_model_checkpoint": "ckpt/llama2_13b_other/linguistics_puzzles_no_sys/checkpoint-500",
  "epoch": 5.0,
  "eval_steps": 100,
  "global_step": 950,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05263157894736842,
      "grad_norm": 1.5050264596939087,
      "learning_rate": 2.5e-05,
      "loss": 2.5922,
      "step": 10
    },
    {
      "epoch": 0.10526315789473684,
      "grad_norm": 1.5525988340377808,
      "learning_rate": 5e-05,
      "loss": 2.3206,
      "step": 20
    },
    {
      "epoch": 0.15789473684210525,
      "grad_norm": 1.7404705286026,
      "learning_rate": 4.998573727324295e-05,
      "loss": 1.7229,
      "step": 30
    },
    {
      "epoch": 0.21052631578947367,
      "grad_norm": 1.8962088823318481,
      "learning_rate": 4.994296536700177e-05,
      "loss": 1.3729,
      "step": 40
    },
    {
      "epoch": 0.2631578947368421,
      "grad_norm": 1.776729941368103,
      "learning_rate": 4.987173308479738e-05,
      "loss": 1.3635,
      "step": 50
    },
    {
      "epoch": 0.3157894736842105,
      "grad_norm": 11.020795822143555,
      "learning_rate": 4.977212170395598e-05,
      "loss": 1.3315,
      "step": 60
    },
    {
      "epoch": 0.3684210526315789,
      "grad_norm": 2.192176580429077,
      "learning_rate": 4.964424488287009e-05,
      "loss": 1.2515,
      "step": 70
    },
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 2.4063496589660645,
      "learning_rate": 4.948824853131236e-05,
      "loss": 1.1872,
      "step": 80
    },
    {
      "epoch": 0.47368421052631576,
      "grad_norm": 2.7862613201141357,
      "learning_rate": 4.930431064394977e-05,
      "loss": 1.1552,
      "step": 90
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 3.5330026149749756,
      "learning_rate": 4.909264109724853e-05,
      "loss": 1.1276,
      "step": 100
    },
    {
      "epoch": 0.5263157894736842,
      "eval_loss": 1.0876480340957642,
      "eval_runtime": 1.9022,
      "eval_samples_per_second": 42.057,
      "eval_steps_per_second": 5.257,
      "step": 100
    },
    {
      "epoch": 0.5789473684210527,
      "grad_norm": 2.4774415493011475,
      "learning_rate": 4.885348141000122e-05,
      "loss": 1.1756,
      "step": 110
    },
    {
      "epoch": 0.631578947368421,
      "grad_norm": 2.380500555038452,
      "learning_rate": 4.858710446774951e-05,
      "loss": 1.1106,
      "step": 120
    },
    {
      "epoch": 0.6842105263157895,
      "grad_norm": 3.0656540393829346,
      "learning_rate": 4.829381421141671e-05,
      "loss": 1.0175,
      "step": 130
    },
    {
      "epoch": 0.7368421052631579,
      "grad_norm": 5.256251811981201,
      "learning_rate": 4.7973945290505766e-05,
      "loss": 0.9733,
      "step": 140
    },
    {
      "epoch": 0.7894736842105263,
      "grad_norm": 2.674135446548462,
      "learning_rate": 4.7627862681258037e-05,
      "loss": 0.9907,
      "step": 150
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 3.5206069946289062,
      "learning_rate": 4.725596127020879e-05,
      "loss": 0.9312,
      "step": 160
    },
    {
      "epoch": 0.8947368421052632,
      "grad_norm": 3.4086978435516357,
      "learning_rate": 4.685866540361456e-05,
      "loss": 0.9586,
      "step": 170
    },
    {
      "epoch": 0.9473684210526315,
      "grad_norm": 4.591642379760742,
      "learning_rate": 4.643642840326627e-05,
      "loss": 0.9595,
      "step": 180
    },
    {
      "epoch": 1.0,
      "grad_norm": 2.8823249340057373,
      "learning_rate": 4.598973204924097e-05,
      "loss": 0.8331,
      "step": 190
    },
    {
      "epoch": 1.0526315789473684,
      "grad_norm": 3.7064428329467773,
      "learning_rate": 4.551908603018191e-05,
      "loss": 0.8128,
      "step": 200
    },
    {
      "epoch": 1.0526315789473684,
      "eval_loss": 0.8153461217880249,
      "eval_runtime": 1.9192,
      "eval_samples_per_second": 41.684,
      "eval_steps_per_second": 5.21,
      "step": 200
    },
    {
      "epoch": 1.1052631578947367,
      "grad_norm": 4.2386274337768555,
      "learning_rate": 4.502502736173462e-05,
      "loss": 0.8186,
      "step": 210
    },
    {
      "epoch": 1.1578947368421053,
      "grad_norm": 3.1767256259918213,
      "learning_rate": 4.45081197738023e-05,
      "loss": 0.6895,
      "step": 220
    },
    {
      "epoch": 1.2105263157894737,
      "grad_norm": 3.748518466949463,
      "learning_rate": 4.3968953067319777e-05,
      "loss": 0.7901,
      "step": 230
    },
    {
      "epoch": 1.263157894736842,
      "grad_norm": 3.807053565979004,
      "learning_rate": 4.340814244127993e-05,
      "loss": 0.704,
      "step": 240
    },
    {
      "epoch": 1.3157894736842106,
      "grad_norm": 5.013542175292969,
      "learning_rate": 4.282632779078051e-05,
      "loss": 0.6879,
      "step": 250
    },
    {
      "epoch": 1.368421052631579,
      "grad_norm": 4.752715110778809,
      "learning_rate": 4.222417297689217e-05,
      "loss": 0.7563,
      "step": 260
    },
    {
      "epoch": 1.4210526315789473,
      "grad_norm": 3.6476950645446777,
      "learning_rate": 4.160236506918098e-05,
      "loss": 0.6846,
      "step": 270
    },
    {
      "epoch": 1.4736842105263157,
      "grad_norm": 3.8758108615875244,
      "learning_rate": 4.096161356174959e-05,
      "loss": 0.7155,
      "step": 280
    },
    {
      "epoch": 1.526315789473684,
      "grad_norm": 4.166601657867432,
      "learning_rate": 4.030264956369157e-05,
      "loss": 0.8037,
      "step": 290
    },
    {
      "epoch": 1.5789473684210527,
      "grad_norm": 4.603171348571777,
      "learning_rate": 3.962622496488269e-05,
      "loss": 0.6705,
      "step": 300
    },
    {
      "epoch": 1.5789473684210527,
      "eval_loss": 0.6891714930534363,
      "eval_runtime": 1.9174,
      "eval_samples_per_second": 41.724,
      "eval_steps_per_second": 5.216,
      "step": 300
    },
    {
      "epoch": 1.631578947368421,
      "grad_norm": 3.820142984390259,
      "learning_rate": 3.893311157806091e-05,
      "loss": 0.6389,
      "step": 310
    },
    {
      "epoch": 1.6842105263157894,
      "grad_norm": 5.900814533233643,
      "learning_rate": 3.822410025817406e-05,
      "loss": 0.7223,
      "step": 320
    },
    {
      "epoch": 1.736842105263158,
      "grad_norm": 4.315140724182129,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.6948,
      "step": 330
    },
    {
      "epoch": 1.7894736842105263,
      "grad_norm": 4.747324466705322,
      "learning_rate": 3.67616370150689e-05,
      "loss": 0.6658,
      "step": 340
    },
    {
      "epoch": 1.8421052631578947,
      "grad_norm": 3.504014492034912,
      "learning_rate": 3.600985378894086e-05,
      "loss": 0.643,
      "step": 350
    },
    {
      "epoch": 1.8947368421052633,
      "grad_norm": 5.181077480316162,
      "learning_rate": 3.5245508119914687e-05,
      "loss": 0.6537,
      "step": 360
    },
    {
      "epoch": 1.9473684210526314,
      "grad_norm": 5.073149681091309,
      "learning_rate": 3.44694721402644e-05,
      "loss": 0.641,
      "step": 370
    },
    {
      "epoch": 2.0,
      "grad_norm": 5.070895671844482,
      "learning_rate": 3.3682631321120504e-05,
      "loss": 0.6708,
      "step": 380
    },
    {
      "epoch": 2.0526315789473686,
      "grad_norm": 5.305852890014648,
      "learning_rate": 3.2885883462131394e-05,
      "loss": 0.5061,
      "step": 390
    },
    {
      "epoch": 2.1052631578947367,
      "grad_norm": 6.452213287353516,
      "learning_rate": 3.2080137667057595e-05,
      "loss": 0.4876,
      "step": 400
    },
    {
      "epoch": 2.1052631578947367,
      "eval_loss": 0.6224929690361023,
      "eval_runtime": 1.9167,
      "eval_samples_per_second": 41.739,
      "eval_steps_per_second": 5.217,
      "step": 400
    },
    {
      "epoch": 2.1578947368421053,
      "grad_norm": 3.6080775260925293,
      "learning_rate": 3.126631330646802e-05,
      "loss": 0.485,
      "step": 410
    },
    {
      "epoch": 2.2105263157894735,
      "grad_norm": 2.2630574703216553,
      "learning_rate": 3.0445338968721287e-05,
      "loss": 0.536,
      "step": 420
    },
    {
      "epoch": 2.263157894736842,
      "grad_norm": 4.616273880004883,
      "learning_rate": 2.961815140042974e-05,
      "loss": 0.4493,
      "step": 430
    },
    {
      "epoch": 2.3157894736842106,
      "grad_norm": 4.5297956466674805,
      "learning_rate": 2.878569443761442e-05,
      "loss": 0.4806,
      "step": 440
    },
    {
      "epoch": 2.3684210526315788,
      "grad_norm": 4.910376071929932,
      "learning_rate": 2.7948917928771158e-05,
      "loss": 0.4642,
      "step": 450
    },
    {
      "epoch": 2.4210526315789473,
      "grad_norm": 4.3276801109313965,
      "learning_rate": 2.7108776651076118e-05,
      "loss": 0.4857,
      "step": 460
    },
    {
      "epoch": 2.473684210526316,
      "grad_norm": 3.657116413116455,
      "learning_rate": 2.6266229220967818e-05,
      "loss": 0.4604,
      "step": 470
    },
    {
      "epoch": 2.526315789473684,
      "grad_norm": 4.7539896965026855,
      "learning_rate": 2.5422237000348276e-05,
      "loss": 0.4294,
      "step": 480
    },
    {
      "epoch": 2.5789473684210527,
      "grad_norm": 4.227921962738037,
      "learning_rate": 2.4577762999651726e-05,
      "loss": 0.436,
      "step": 490
    },
    {
      "epoch": 2.6315789473684212,
      "grad_norm": 6.821872234344482,
      "learning_rate": 2.3733770779032184e-05,
      "loss": 0.4435,
      "step": 500
    },
    {
      "epoch": 2.6315789473684212,
      "eval_loss": 0.5924356579780579,
      "eval_runtime": 1.9193,
      "eval_samples_per_second": 41.683,
      "eval_steps_per_second": 5.21,
      "step": 500
    },
    {
      "epoch": 2.6842105263157894,
      "grad_norm": 4.023755073547363,
      "learning_rate": 2.2891223348923884e-05,
      "loss": 0.4128,
      "step": 510
    },
    {
      "epoch": 2.736842105263158,
      "grad_norm": 4.245009899139404,
      "learning_rate": 2.2051082071228854e-05,
      "loss": 0.4201,
      "step": 520
    },
    {
      "epoch": 2.7894736842105265,
      "grad_norm": 7.485212326049805,
      "learning_rate": 2.1214305562385592e-05,
      "loss": 0.4144,
      "step": 530
    },
    {
      "epoch": 2.8421052631578947,
      "grad_norm": 3.890044689178467,
      "learning_rate": 2.0381848599570276e-05,
      "loss": 0.4325,
      "step": 540
    },
    {
      "epoch": 2.8947368421052633,
      "grad_norm": 5.785126686096191,
      "learning_rate": 1.9554661031278712e-05,
      "loss": 0.4539,
      "step": 550
    },
    {
      "epoch": 2.9473684210526314,
      "grad_norm": 3.959681272506714,
      "learning_rate": 1.8733686693531985e-05,
      "loss": 0.3898,
      "step": 560
    },
    {
      "epoch": 3.0,
      "grad_norm": 6.1470160484313965,
      "learning_rate": 1.79198623329424e-05,
      "loss": 0.4347,
      "step": 570
    },
    {
      "epoch": 3.0526315789473686,
      "grad_norm": 6.080893039703369,
      "learning_rate": 1.711411653786861e-05,
      "loss": 0.2771,
      "step": 580
    },
    {
      "epoch": 3.1052631578947367,
      "grad_norm": 3.995936155319214,
      "learning_rate": 1.6317368678879495e-05,
      "loss": 0.2786,
      "step": 590
    },
    {
      "epoch": 3.1578947368421053,
      "grad_norm": 4.9943084716796875,
      "learning_rate": 1.55305278597356e-05,
      "loss": 0.2743,
      "step": 600
    },
    {
      "epoch": 3.1578947368421053,
      "eval_loss": 0.6151354908943176,
      "eval_runtime": 1.9185,
      "eval_samples_per_second": 41.7,
      "eval_steps_per_second": 5.212,
      "step": 600
    },
    {
      "epoch": 3.2105263157894735,
      "grad_norm": 3.650193452835083,
      "learning_rate": 1.475449188008532e-05,
      "loss": 0.2611,
      "step": 610
    },
    {
      "epoch": 3.263157894736842,
      "grad_norm": 3.5425643920898438,
      "learning_rate": 1.399014621105914e-05,
      "loss": 0.237,
      "step": 620
    },
    {
      "epoch": 3.3157894736842106,
      "grad_norm": 4.187167644500732,
      "learning_rate": 1.3238362984931113e-05,
      "loss": 0.2319,
      "step": 630
    },
    {
      "epoch": 3.3684210526315788,
      "grad_norm": 3.7174108028411865,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 0.2785,
      "step": 640
    },
    {
      "epoch": 3.4210526315789473,
      "grad_norm": 4.665218353271484,
      "learning_rate": 1.1775899741825947e-05,
      "loss": 0.3323,
      "step": 650
    },
    {
      "epoch": 3.473684210526316,
      "grad_norm": 6.711315631866455,
      "learning_rate": 1.1066888421939093e-05,
      "loss": 0.2762,
      "step": 660
    },
    {
      "epoch": 3.526315789473684,
      "grad_norm": 4.101406097412109,
      "learning_rate": 1.0373775035117305e-05,
      "loss": 0.2982,
      "step": 670
    },
    {
      "epoch": 3.5789473684210527,
      "grad_norm": 3.3571157455444336,
      "learning_rate": 9.697350436308427e-06,
      "loss": 0.2338,
      "step": 680
    },
    {
      "epoch": 3.6315789473684212,
      "grad_norm": 7.152629852294922,
      "learning_rate": 9.038386438250415e-06,
      "loss": 0.2962,
      "step": 690
    },
    {
      "epoch": 3.6842105263157894,
      "grad_norm": 5.147871971130371,
      "learning_rate": 8.397634930819021e-06,
      "loss": 0.2846,
      "step": 700
    },
    {
      "epoch": 3.6842105263157894,
      "eval_loss": 0.6083844900131226,
      "eval_runtime": 1.9199,
      "eval_samples_per_second": 41.67,
      "eval_steps_per_second": 5.209,
      "step": 700
    },
    {
      "epoch": 3.736842105263158,
      "grad_norm": 3.984264373779297,
      "learning_rate": 7.775827023107835e-06,
      "loss": 0.2895,
      "step": 710
    },
    {
      "epoch": 3.7894736842105265,
      "grad_norm": 6.230710983276367,
      "learning_rate": 7.173672209219495e-06,
      "loss": 0.3261,
      "step": 720
    },
    {
      "epoch": 3.8421052631578947,
      "grad_norm": 3.685063362121582,
      "learning_rate": 6.591857558720071e-06,
      "loss": 0.2358,
      "step": 730
    },
    {
      "epoch": 3.8947368421052633,
      "grad_norm": 4.337435245513916,
      "learning_rate": 6.031046932680229e-06,
      "loss": 0.2723,
      "step": 740
    },
    {
      "epoch": 3.9473684210526314,
      "grad_norm": 4.504445552825928,
      "learning_rate": 5.491880226197707e-06,
      "loss": 0.2941,
      "step": 750
    },
    {
      "epoch": 4.0,
      "grad_norm": 4.7959442138671875,
      "learning_rate": 4.9749726382653905e-06,
      "loss": 0.2721,
      "step": 760
    },
    {
      "epoch": 4.052631578947368,
      "grad_norm": 2.663322925567627,
      "learning_rate": 4.480913969818098e-06,
      "loss": 0.1677,
      "step": 770
    },
    {
      "epoch": 4.105263157894737,
      "grad_norm": 5.704188346862793,
      "learning_rate": 4.010267950759025e-06,
      "loss": 0.2291,
      "step": 780
    },
    {
      "epoch": 4.157894736842105,
      "grad_norm": 4.857370853424072,
      "learning_rate": 3.5635715967337223e-06,
      "loss": 0.1991,
      "step": 790
    },
    {
      "epoch": 4.2105263157894735,
      "grad_norm": 2.6290528774261475,
      "learning_rate": 3.141334596385448e-06,
      "loss": 0.2069,
      "step": 800
    },
    {
      "epoch": 4.2105263157894735,
      "eval_loss": 0.6427180767059326,
      "eval_runtime": 1.9195,
      "eval_samples_per_second": 41.677,
      "eval_steps_per_second": 5.21,
      "step": 800
    },
    {
      "epoch": 4.2631578947368425,
      "grad_norm": 6.7939558029174805,
      "learning_rate": 2.7440387297912123e-06,
      "loss": 0.2213,
      "step": 810
    },
    {
      "epoch": 4.315789473684211,
      "grad_norm": 5.425328731536865,
      "learning_rate": 2.372137318741968e-06,
      "loss": 0.2008,
      "step": 820
    },
    {
      "epoch": 4.368421052631579,
      "grad_norm": 3.0159809589385986,
      "learning_rate": 2.026054709494235e-06,
      "loss": 0.2178,
      "step": 830
    },
    {
      "epoch": 4.421052631578947,
      "grad_norm": 4.54276704788208,
      "learning_rate": 1.7061857885832893e-06,
      "loss": 0.1878,
      "step": 840
    },
    {
      "epoch": 4.473684210526316,
      "grad_norm": 4.1157755851745605,
      "learning_rate": 1.4128955322504966e-06,
      "loss": 0.1733,
      "step": 850
    },
    {
      "epoch": 4.526315789473684,
      "grad_norm": 4.860106945037842,
      "learning_rate": 1.1465185899987797e-06,
      "loss": 0.193,
      "step": 860
    },
    {
      "epoch": 4.578947368421053,
      "grad_norm": 4.945047378540039,
      "learning_rate": 9.073589027514789e-07,
      "loss": 0.1802,
      "step": 870
    },
    {
      "epoch": 4.631578947368421,
      "grad_norm": 2.316741943359375,
      "learning_rate": 6.956893560502359e-07,
      "loss": 0.1736,
      "step": 880
    },
    {
      "epoch": 4.684210526315789,
      "grad_norm": 4.012813091278076,
      "learning_rate": 5.117514686876379e-07,
      "loss": 0.1761,
      "step": 890
    },
    {
      "epoch": 4.7368421052631575,
      "grad_norm": 5.301681995391846,
      "learning_rate": 3.557551171299051e-07,
      "loss": 0.172,
      "step": 900
    },
    {
      "epoch": 4.7368421052631575,
      "eval_loss": 0.6494551301002502,
      "eval_runtime": 1.9201,
      "eval_samples_per_second": 41.665,
      "eval_steps_per_second": 5.208,
      "step": 900
    },
    {
      "epoch": 4.7894736842105265,
      "grad_norm": 3.559140205383301,
      "learning_rate": 2.27878296044029e-07,
      "loss": 0.1734,
      "step": 910
    },
    {
      "epoch": 4.842105263157895,
      "grad_norm": 7.743849277496338,
      "learning_rate": 1.2826691520262114e-07,
      "loss": 0.1954,
      "step": 920
    },
    {
      "epoch": 4.894736842105263,
      "grad_norm": 3.5408854484558105,
      "learning_rate": 5.7034632998231865e-08,
      "loss": 0.1744,
      "step": 930
    },
    {
      "epoch": 4.947368421052632,
      "grad_norm": 2.413121461868286,
      "learning_rate": 1.4262726757049982e-08,
      "loss": 0.1778,
      "step": 940
    },
    {
      "epoch": 5.0,
      "grad_norm": 2.56962513923645,
      "learning_rate": 0.0,
      "loss": 0.1836,
      "step": 950
    },
    {
      "epoch": 5.0,
      "step": 950,
      "total_flos": 2.0275085174217114e+17,
      "train_loss": 0.5822552880487945,
      "train_runtime": 660.0352,
      "train_samples_per_second": 11.515,
      "train_steps_per_second": 1.439
    }
  ],
  "logging_steps": 10,
  "max_steps": 950,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 100,
  "total_flos": 2.0275085174217114e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}