{
  "best_metric": 2.9153051376342773,
  "best_model_checkpoint": "checkpoints-mistral-300M-FA2-3/checkpoint-12000",
  "epoch": 7.5649913344887345,
  "eval_steps": 500,
  "global_step": 12000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.01, "learning_rate": 0.0002999998940072441, "loss": 9.6899, "step": 10 },
    { "epoch": 0.01, "learning_rate": 0.0002999992462742786, "loss": 8.3056, "step": 20 },
    { "epoch": 0.02, "learning_rate": 0.00029999800969575163, "loss": 7.6302, "step": 30 },
    { "epoch": 0.03, "learning_rate": 0.00029999618427651757, "loss": 7.3792, "step": 40 },
    { "epoch": 0.03, "learning_rate": 0.00029999377002374244, "loss": 7.2716, "step": 50 },
    { "epoch": 0.04, "learning_rate": 0.00029999076694690364, "loss": 7.1973, "step": 60 },
    { "epoch": 0.04, "learning_rate": 0.0002999871750577904, "loss": 7.1332, "step": 70 },
    { "epoch": 0.05, "learning_rate": 0.0002999829943705031, "loss": 7.0465, "step": 80 },
    { "epoch": 0.06, "learning_rate": 0.00029997822490145375, "loss": 6.9584, "step": 90 },
    { "epoch": 0.06, "learning_rate": 0.0002999728666693657, "loss": 6.8312, "step": 100 },
    { "epoch": 0.07, "learning_rate": 0.00029996691969527337, "loss": 6.7483, "step": 110 },
    { "epoch": 0.08, "learning_rate": 0.00029996038400252265, "loss": 6.643, "step": 120 },
    { "epoch": 0.08, "learning_rate": 0.0002999532596167704, "loss": 6.57, "step": 130 },
    { "epoch": 0.09, "learning_rate": 0.00029994554656598456, "loss": 6.4822, "step": 140 },
    { "epoch": 0.09, "learning_rate": 0.0002999372448804439, "loss": 6.42, "step": 150 },
    { "epoch": 0.1, "learning_rate": 0.00029992835459273796, "loss": 6.3358, "step": 160 },
    { "epoch": 0.11, "learning_rate": 0.00029991887573776695, "loss": 6.269, "step": 170 },
    { "epoch": 0.11, "learning_rate": 0.00029990880835274167, "loss": 6.1999, "step": 180 },
    { "epoch": 0.12, "learning_rate": 0.0002998981524771831, "loss": 6.133, "step": 190 },
    { "epoch": 0.13, "learning_rate": 0.0002998869081529227, "loss": 6.0829, "step": 200 },
    { "epoch": 0.13, "learning_rate": 0.00029987507542410165, "loss": 6.0279, "step": 210 },
    { "epoch": 0.14, "learning_rate": 0.00029986265433717133, "loss": 5.9759, "step": 220 },
    { "epoch": 0.14, "learning_rate": 0.00029984964494089266, "loss": 5.9344, "step": 230 },
    { "epoch": 0.15, "learning_rate": 0.0002998360472863359, "loss": 5.8819, "step": 240 },
    { "epoch": 0.16, "learning_rate": 0.00029982186142688103, "loss": 5.8322, "step": 250 },
    { "epoch": 0.16, "learning_rate": 0.00029980708741821673, "loss": 5.7839, "step": 260 },
    { "epoch": 0.17, "learning_rate": 0.00029979172531834064, "loss": 5.7294, "step": 270 },
    { "epoch": 0.18, "learning_rate": 0.0002997757751875591, "loss": 5.7153, "step": 280 },
    { "epoch": 0.18, "learning_rate": 0.00029975923708848695, "loss": 5.6566, "step": 290 },
    { "epoch": 0.19, "learning_rate": 0.000299742111086047, "loss": 5.6366, "step": 300 },
    { "epoch": 0.2, "learning_rate": 0.0002997243972474701, "loss": 5.5897, "step": 310 },
    { "epoch": 0.2, "learning_rate": 0.0002997060956422947, "loss": 5.5639, "step": 320 },
    { "epoch": 0.21, "learning_rate": 0.0002996872063423667, "loss": 5.5376, "step": 330 },
    { "epoch": 0.21, "learning_rate": 0.0002996677294218388, "loss": 5.5143, "step": 340 },
    { "epoch": 0.22, "learning_rate": 0.00029964766495717087, "loss": 5.487, "step": 350 },
    { "epoch": 0.23, "learning_rate": 0.000299627013027129, "loss": 5.4502, "step": 360 },
    { "epoch": 0.23, "learning_rate": 0.0002996057737127858, "loss": 5.4142, "step": 370 },
    { "epoch": 0.24, "learning_rate": 0.00029958394709751925, "loss": 5.3994, "step": 380 },
    { "epoch": 0.25, "learning_rate": 0.0002995615332670133, "loss": 5.3587, "step": 390 },
    { "epoch": 0.25, "learning_rate": 0.00029953853230925697, "loss": 5.3274, "step": 400 },
    { "epoch": 0.26, "learning_rate": 0.000299514944314544, "loss": 5.3148, "step": 410 },
    { "epoch": 0.26, "learning_rate": 0.00029949076937547297, "loss": 5.2685, "step": 420 },
    { "epoch": 0.27, "learning_rate": 0.00029946600758694623, "loss": 5.2593, "step": 430 },
    { "epoch": 0.28, "learning_rate": 0.00029944065904617006, "loss": 5.2311, "step": 440 },
    { "epoch": 0.28, "learning_rate": 0.00029941472385265427, "loss": 5.2198, "step": 450 },
    { "epoch": 0.29, "learning_rate": 0.00029938820210821134, "loss": 5.1929, "step": 460 },
    { "epoch": 0.3, "learning_rate": 0.0002993610939169567, "loss": 5.1693, "step": 470 },
    { "epoch": 0.3, "learning_rate": 0.00029933339938530775, "loss": 5.1466, "step": 480 },
    { "epoch": 0.31, "learning_rate": 0.0002993051186219837, "loss": 5.1009, "step": 490 },
    { "epoch": 0.32, "learning_rate": 0.00029927625173800513, "loss": 5.1178, "step": 500 },
    { "epoch": 0.32, "eval_loss": 5.128925323486328, "eval_runtime": 362.8452, "eval_samples_per_second": 209.828, "eval_steps_per_second": 3.748, "step": 500 },
    { "epoch": 0.32, "learning_rate": 0.00029924679884669347, "loss": 5.0697, "step": 510 },
    { "epoch": 0.33, "learning_rate": 0.0002992167600636708, "loss": 5.0626, "step": 520 },
    { "epoch": 0.33, "learning_rate": 0.0002991861355068589, "loss": 5.0449, "step": 530 },
    { "epoch": 0.34, "learning_rate": 0.0002991549252964794, "loss": 5.0268, "step": 540 },
    { "epoch": 0.35, "learning_rate": 0.0002991231295550528, "loss": 5.0151, "step": 550 },
    { "epoch": 0.35, "learning_rate": 0.00029909074840739835, "loss": 5.0032, "step": 560 },
    { "epoch": 0.36, "learning_rate": 0.0002990577819806332, "loss": 4.971, "step": 570 },
    { "epoch": 0.37, "learning_rate": 0.00029902423040417236, "loss": 4.9582, "step": 580 },
    { "epoch": 0.37, "learning_rate": 0.0002989900938097277, "loss": 4.938, "step": 590 },
    { "epoch": 0.38, "learning_rate": 0.0002989553723313078, "loss": 4.9201, "step": 600 },
    { "epoch": 0.38, "learning_rate": 0.0002989200661052174, "loss": 4.8989, "step": 610 },
    { "epoch": 0.39, "learning_rate": 0.0002988841752700564, "loss": 4.8805, "step": 620 },
    { "epoch": 0.4, "learning_rate": 0.00029884769996672004, "loss": 4.8578, "step": 630 },
    { "epoch": 0.4, "learning_rate": 0.0002988106403383978, "loss": 4.8442, "step": 640 },
    { "epoch": 0.41, "learning_rate": 0.0002987729965305731, "loss": 4.8243, "step": 650 },
    { "epoch": 0.42, "learning_rate": 0.00029873476869102255, "loss": 4.803, "step": 660 },
    { "epoch": 0.42, "learning_rate": 0.0002986959569698156, "loss": 4.8042, "step": 670 },
    { "epoch": 0.43, "learning_rate": 0.00029865656151931375, "loss": 4.7661, "step": 680 },
    { "epoch": 0.43, "learning_rate": 0.00029861658249416995, "loss": 4.764, "step": 690 },
    { "epoch": 0.44, "learning_rate": 0.0002985760200513281, "loss": 4.7372, "step": 700 },
    { "epoch": 0.45, "learning_rate": 0.00029853487435002254, "loss": 4.7294, "step": 710 },
    { "epoch": 0.45, "learning_rate": 0.0002984931455517771, "loss": 4.7145, "step": 720 },
    { "epoch": 0.46, "learning_rate": 0.0002984508338204048, "loss": 4.6965, "step": 730 },
    { "epoch": 0.47, "learning_rate": 0.0002984079393220069, "loss": 4.6641, "step": 740 },
    { "epoch": 0.47, "learning_rate": 0.0002983644622249725, "loss": 4.6533, "step": 750 },
    { "epoch": 0.48, "learning_rate": 0.0002983204026999778, "loss": 4.6306, "step": 760 },
    { "epoch": 0.49, "learning_rate": 0.0002982757609199853, "loss": 4.6291, "step": 770 },
    { "epoch": 0.49, "learning_rate": 0.0002982305370602433, "loss": 4.6066, "step": 780 },
    { "epoch": 0.5, "learning_rate": 0.0002981847312982853, "loss": 4.585, "step": 790 },
    { "epoch": 0.5, "learning_rate": 0.00029813834381392875, "loss": 4.5656, "step": 800 },
    { "epoch": 0.51, "learning_rate": 0.00029809137478927525, "loss": 4.5646, "step": 810 },
    { "epoch": 0.52, "learning_rate": 0.00029804382440870895, "loss": 4.5447, "step": 820 },
    { "epoch": 0.52, "learning_rate": 0.0002979956928588963, "loss": 4.5335, "step": 830 },
    { "epoch": 0.53, "learning_rate": 0.0002979469803287853, "loss": 4.5072, "step": 840 },
    { "epoch": 0.54, "learning_rate": 0.0002978976870096047, "loss": 4.4861, "step": 850 },
    { "epoch": 0.54, "learning_rate": 0.0002978478130948629, "loss": 4.4802, "step": 860 },
    { "epoch": 0.55, "learning_rate": 0.000297797358780348, "loss": 4.4456, "step": 870 },
    { "epoch": 0.55, "learning_rate": 0.00029774632426412634, "loss": 4.4483, "step": 880 },
    { "epoch": 0.56, "learning_rate": 0.00029769470974654174, "loss": 4.419, "step": 890 },
    { "epoch": 0.57, "learning_rate": 0.0002976425154302152, "loss": 4.4263, "step": 900 },
    { "epoch": 0.57, "learning_rate": 0.0002975897415200437, "loss": 4.3959, "step": 910 },
    { "epoch": 0.58, "learning_rate": 0.0002975363882231994, "loss": 4.384, "step": 920 },
    { "epoch": 0.59, "learning_rate": 0.00029748245574912915, "loss": 4.3722, "step": 930 },
    { "epoch": 0.59, "learning_rate": 0.00029742794430955325, "loss": 4.3491, "step": 940 },
    { "epoch": 0.6, "learning_rate": 0.000297372854118465, "loss": 4.347, "step": 950 },
    { "epoch": 0.61, "learning_rate": 0.00029731718539212945, "loss": 4.3346, "step": 960 },
    { "epoch": 0.61, "learning_rate": 0.000297260938349083, "loss": 4.3087, "step": 970 },
    { "epoch": 0.62, "learning_rate": 0.00029720411321013226, "loss": 4.3229, "step": 980 },
    { "epoch": 0.62, "learning_rate": 0.0002971467101983531, "loss": 4.3127, "step": 990 },
    { "epoch": 0.63, "learning_rate": 0.0002970887295390902, "loss": 4.3028, "step": 1000 },
    { "epoch": 0.63, "eval_loss": 4.317672252655029, "eval_runtime": 232.0131, "eval_samples_per_second": 328.15, "eval_steps_per_second": 5.862, "step": 1000 },
    { "epoch": 0.64, "learning_rate": 0.00029703017145995566, "loss": 4.2698, "step": 1010 },
    { "epoch": 0.64, "learning_rate": 0.0002969710361908284, "loss": 4.2621, "step": 1020 },
    { "epoch": 0.65, "learning_rate": 0.0002969113239638532, "loss": 4.2548, "step": 1030 },
    { "epoch": 0.66, "learning_rate": 0.00029685103501343973, "loss": 4.2319, "step": 1040 },
    { "epoch": 0.66, "learning_rate": 0.00029679016957626173, "loss": 4.2401, "step": 1050 },
    { "epoch": 0.67, "learning_rate": 0.0002967287278912561, "loss": 4.2409, "step": 1060 },
    { "epoch": 0.67, "learning_rate": 0.0002966671019962180, "loss": 4.2014, "step": 1070 },
    { "epoch": 0.68, "learning_rate": 0.00029660411674481884, "loss": 4.1969, "step": 1080 },
    { "epoch": 0.69, "learning_rate": 0.00029654094777256776, "loss": 4.1922, "step": 1090 },
    { "epoch": 0.69, "learning_rate": 0.0002964772035308483, "loss": 4.1694, "step": 1100 },
    { "epoch": 0.7, "learning_rate": 0.0002964128842698984, "loss": 4.1732, "step": 1110 },
    { "epoch": 0.71, "learning_rate": 0.00029634799024221345, "loss": 4.1665, "step": 1120 },
    { "epoch": 0.71, "learning_rate": 0.0002962825217025451, "loss": 4.161, "step": 1130 },
    { "epoch": 0.72, "learning_rate": 0.0002962164789079004, "loss": 4.1565, "step": 1140 },
    { "epoch": 0.72, "learning_rate": 0.00029614986211754066, "loss": 4.1453, "step": 1150 },
    { "epoch": 0.73, "learning_rate": 0.00029608267159298064, "loss": 4.1329, "step": 1160 },
    { "epoch": 0.74, "learning_rate": 0.0002960149075979871, "loss": 4.1231, "step": 1170 },
    { "epoch": 0.74, "learning_rate": 0.00029594657039857836, "loss": 4.1111, "step": 1180 },
    { "epoch": 0.75, "learning_rate": 0.0002958776602630228, "loss": 4.0985, "step": 1190 },
    { "epoch": 0.76, "learning_rate": 0.0002958081774618379, "loss": 4.103, "step": 1200 },
    { "epoch": 0.76, "learning_rate": 0.0002957381222677894, "loss": 4.1013, "step": 1210 },
    { "epoch": 0.77, "learning_rate": 0.00029566749495588986, "loss": 4.0805, "step": 1220 },
    { "epoch": 0.78, "learning_rate": 0.0002955962958033978, "loss": 4.08, "step": 1230 },
    { "epoch": 0.78, "learning_rate": 0.00029552452508981687, "loss": 4.0518, "step": 1240 },
    { "epoch": 0.79, "learning_rate": 0.00029545218309689407, "loss": 4.0406, "step": 1250 },
    { "epoch": 0.79, "learning_rate": 0.0002953792701086193, "loss": 4.0475, "step": 1260 },
    { "epoch": 0.8, "learning_rate": 0.00029530578641122395, "loss": 4.0357, "step": 1270 },
    { "epoch": 0.81, "learning_rate": 0.00029523173229317973, "loss": 4.0333, "step": 1280 },
    { "epoch": 0.81, "learning_rate": 0.0002951571080451978, "loss": 4.0324, "step": 1290 },
    { "epoch": 0.82, "learning_rate": 0.0002950819139602272, "loss": 4.008, "step": 1300 },
    { "epoch": 0.83, "learning_rate": 0.0002950061503334543, "loss": 4.0081, "step": 1310 },
    { "epoch": 0.83, "learning_rate": 0.0002949298174623009, "loss": 4.0138, "step": 1320 },
    { "epoch": 0.84, "learning_rate": 0.00029485291564642377, "loss": 3.9971, "step": 1330 },
    { "epoch": 0.84, "learning_rate": 0.000294775445187713, "loss": 4.0074, "step": 1340 },
    { "epoch": 0.85, "learning_rate": 0.0002946974063902911, "loss": 3.9783, "step": 1350 },
    { "epoch": 0.86, "learning_rate": 0.0002946187995605115, "loss": 3.9844, "step": 1360 },
    { "epoch": 0.86, "learning_rate": 0.0002945396250069576, "loss": 3.9594, "step": 1370 },
    { "epoch": 0.87, "learning_rate": 0.0002944598830404417, "loss": 3.9639, "step": 1380 },
    { "epoch": 0.88, "learning_rate": 0.0002943795739740033, "loss": 3.9662, "step": 1390 },
    { "epoch": 0.88, "learning_rate": 0.0002942986981229082, "loss": 3.9468, "step": 1400 },
    { "epoch": 0.89, "learning_rate": 0.0002942172558046473, "loss": 3.9549, "step": 1410 },
    { "epoch": 0.89, "learning_rate": 0.00029413524733893514, "loss": 3.9311, "step": 1420 },
    { "epoch": 0.9, "learning_rate": 0.0002940526730477088, "loss": 3.9261, "step": 1430 },
    { "epoch": 0.91, "learning_rate": 0.0002939695332551267, "loss": 3.9248, "step": 1440 },
    { "epoch": 0.91, "learning_rate": 0.000293885828287567, "loss": 3.9043, "step": 1450 },
    { "epoch": 0.92, "learning_rate": 0.0002938015584736267, "loss": 3.9027, "step": 1460 },
    { "epoch": 0.93, "learning_rate": 0.00029371672414412034, "loss": 3.906, "step": 1470 },
    { "epoch": 0.93, "learning_rate": 0.0002936313256320782, "loss": 3.9054, "step": 1480 },
    { "epoch": 0.94, "learning_rate": 0.0002935453632727456, "loss": 3.8934, "step": 1490 },
    { "epoch": 0.95, "learning_rate": 0.0002934588374035812, "loss": 3.9062, "step": 1500 },
    { "epoch": 0.95, "eval_loss": 3.932741165161133, "eval_runtime": 231.8207, "eval_samples_per_second": 328.422, "eval_steps_per_second": 5.867, "step": 1500 },
    { "epoch": 0.95, "learning_rate": 0.00029337174836425593, "loss": 3.8889, "step": 1510 },
    { "epoch": 0.96, "learning_rate": 0.0002932840964966515, "loss": 3.8737, "step": 1520 },
    { "epoch": 0.96, "learning_rate": 0.0002931958821448589, "loss": 3.8813, "step": 1530 },
    { "epoch": 0.97, "learning_rate": 0.0002931071056551776, "loss": 3.8551, "step": 1540 },
    { "epoch": 0.98, "learning_rate": 0.0002930177673761135, "loss": 3.8772, "step": 1550 },
    { "epoch": 0.98, "learning_rate": 0.00029292786765837815, "loss": 3.867, "step": 1560 },
    { "epoch": 0.99, "learning_rate": 0.0002928374068548868, "loss": 3.857, "step": 1570 },
    { "epoch": 1.0, "learning_rate": 0.0002927463853207577, "loss": 3.8386, "step": 1580 },
    { "epoch": 1.0, "learning_rate": 0.00029265480341331005, "loss": 3.8578, "step": 1590 },
    { "epoch": 1.01, "learning_rate": 0.000292562661492063, "loss": 3.8435, "step": 1600 },
    { "epoch": 1.01, "learning_rate": 0.0002924699599187341, "loss": 3.8417, "step": 1610 },
    { "epoch": 1.02, "learning_rate": 0.000292376699057238, "loss": 3.8288, "step": 1620 },
    { "epoch": 1.03, "learning_rate": 0.0002922828792736847, "loss": 3.8254, "step": 1630 },
    { "epoch": 1.03, "learning_rate": 0.0002921885009363786, "loss": 3.8248, "step": 1640 },
    { "epoch": 1.04, "learning_rate": 0.0002920935644158166, "loss": 3.8104, "step": 1650 },
    { "epoch": 1.05, "learning_rate": 0.0002919980700846869, "loss": 3.8048, "step": 1660 },
    { "epoch": 1.05, "learning_rate": 0.00029190201831786743, "loss": 3.792, "step": 1670 },
    { "epoch": 1.06, "learning_rate": 0.0002918054094924246, "loss": 3.7815, "step": 1680 },
    { "epoch": 1.07, "learning_rate": 0.0002917082439876113, "loss": 3.7978, "step": 1690 },
    { "epoch": 1.07, "learning_rate": 0.00029161052218486613, "loss": 3.7963, "step": 1700 },
    { "epoch": 1.08, "learning_rate": 0.00029151224446781124, "loss": 3.781, "step": 1710 },
    { "epoch": 1.08, "learning_rate": 0.0002914134112222513, "loss": 3.7736, "step": 1720 },
    { "epoch": 1.09, "learning_rate": 0.0002913140228361717, "loss": 3.7792, "step": 1730 },
    { "epoch": 1.1, "learning_rate": 0.000291214079699737, "loss": 3.774, "step": 1740 },
    { "epoch": 1.1, "learning_rate": 0.0002911135822052899, "loss": 3.7752, "step": 1750 },
    { "epoch": 1.11, "learning_rate": 0.00029101253074734885, "loss": 3.7709, "step": 1760 },
    { "epoch": 1.12, "learning_rate": 0.00029091092572260726, "loss": 3.7504, "step": 1770 },
    { "epoch": 1.12, "learning_rate": 0.00029080876752993163, "loss": 3.7567, "step": 1780 },
    { "epoch": 1.13, "learning_rate": 0.00029070605657035995, "loss": 3.7666, "step": 1790 },
    { "epoch": 1.13, "learning_rate": 0.0002906027932471002, "loss": 3.7396, "step": 1800 },
    { "epoch": 1.14, "learning_rate": 0.0002904989779655287, "loss": 3.7363, "step": 1810 },
    { "epoch": 1.15, "learning_rate": 0.0002903946111331886, "loss": 3.7365, "step": 1820 },
    { "epoch": 1.15, "learning_rate": 0.00029028969315978846, "loss": 3.7414, "step": 1830 },
    { "epoch": 1.16, "learning_rate": 0.0002901842244572, "loss": 3.7447, "step": 1840 },
    { "epoch": 1.17, "learning_rate": 0.00029007820543945726, "loss": 3.7083, "step": 1850 },
    { "epoch": 1.17, "learning_rate": 0.00028997163652275446, "loss": 3.7141, "step": 1860 },
    { "epoch": 1.18, "learning_rate": 0.00028986451812544465, "loss": 3.719, "step": 1870 },
    { "epoch": 1.18, "learning_rate": 0.00028975685066803786, "loss": 3.6981, "step": 1880 },
    { "epoch": 1.19, "learning_rate": 0.0002896486345731996, "loss": 3.7213, "step": 1890 },
    { "epoch": 1.2, "learning_rate": 0.0002895398702657491, "loss": 3.7031, "step": 1900 },
    { "epoch": 1.2, "learning_rate": 0.00028943055817265775, "loss": 3.697, "step": 1910 },
    { "epoch": 1.21, "learning_rate": 0.0002893206987230472, "loss": 3.6925, "step": 1920 },
    { "epoch": 1.22, "learning_rate": 0.0002892102923481881, "loss": 3.6913, "step": 1930 },
    { "epoch": 1.22, "learning_rate": 0.0002890993394814979, "loss": 3.694, "step": 1940 },
    { "epoch": 1.23, "learning_rate": 0.0002889878405585395, "loss": 3.6847, "step": 1950 },
    { "epoch": 1.24, "learning_rate": 0.00028887579601701935, "loss": 3.6871, "step": 1960 },
    { "epoch": 1.24, "learning_rate": 0.0002887632062967859, "loss": 3.6958, "step": 1970 },
    { "epoch": 1.25, "learning_rate": 0.00028865007183982776, "loss": 3.6716, "step": 1980 },
    { "epoch": 1.25, "learning_rate": 0.000288536393090272, "loss": 3.6803, "step": 1990 },
    { "epoch": 1.26, "learning_rate": 0.0002884221704943823, "loss": 3.6796, "step": 2000 },
    { "epoch": 1.26, "eval_loss": 3.7106287479400635, "eval_runtime": 231.452, "eval_samples_per_second": 328.945, "eval_steps_per_second": 5.876, "step": 2000 },
    { "epoch": 1.27, "learning_rate": 0.00028830740450055747, "loss": 3.6717, "step": 2010 },
    { "epoch": 1.27, "learning_rate": 0.00028819209555932943, "loss": 3.671, "step": 2020 },
    { "epoch": 1.28, "learning_rate": 0.00028807624412336135, "loss": 3.6582, "step": 2030 },
    { "epoch": 1.29, "learning_rate": 0.0002879598506474463, "loss": 3.6543, "step": 2040 },
    { "epoch": 1.29, "learning_rate": 0.0002878429155885052, "loss": 3.6701, "step": 2050 },
    { "epoch": 1.3, "learning_rate": 0.0002877254394055849, "loss": 3.6581, "step": 2060 },
    { "epoch": 1.3, "learning_rate": 0.0002876074225598565, "loss": 3.6328, "step": 2070 },
    { "epoch": 1.31, "learning_rate": 0.00028748886551461367, "loss": 3.6513, "step": 2080 },
    { "epoch": 1.32, "learning_rate": 0.00028736976873527074, "loss": 3.635, "step": 2090 },
    { "epoch": 1.32, "learning_rate": 0.00028725013268936076, "loss": 3.6369, "step": 2100 },
    { "epoch": 1.33, "learning_rate": 0.0002871299578465338, "loss": 3.6442, "step": 2110 },
    { "epoch": 1.34, "learning_rate": 0.000287009244678555, "loss": 3.6478, "step": 2120 },
    { "epoch": 1.34, "learning_rate": 0.0002868879936593029, "loss": 3.6244, "step": 2130 },
    { "epoch": 1.35, "learning_rate": 0.0002867662052647674, "loss": 3.6349, "step": 2140 },
    { "epoch": 1.35, "learning_rate": 0.00028664387997304777, "loss": 3.6069, "step": 2150 },
    { "epoch": 1.36, "learning_rate": 0.0002865210182643514, "loss": 3.6206, "step": 2160 },
    { "epoch": 1.37, "learning_rate": 0.0002863976206209909, "loss": 3.6199, "step": 2170 },
    { "epoch": 1.37, "learning_rate": 0.00028627368752738325, "loss": 3.6152, "step": 2180 },
    { "epoch": 1.38, "learning_rate": 0.0002861492194700472, "loss": 3.6227, "step": 2190 },
    { "epoch": 1.39, "learning_rate": 0.00028602421693760163, "loss": 3.6011, "step": 2200 },
    { "epoch": 1.39, "learning_rate": 0.0002858986804207636, "loss": 3.6167, "step": 2210 },
    { "epoch": 1.4, "learning_rate": 0.00028577261041234627, "loss": 3.58, "step": 2220 },
    { "epoch": 1.41, "learning_rate": 0.00028564600740725745, "loss": 3.6072, "step": 2230 },
    { "epoch": 1.41, "learning_rate": 0.0002855188719024969, "loss": 3.5932, "step": 2240 },
    { "epoch": 1.42, "learning_rate": 0.00028539120439715504, "loss": 3.5748, "step": 2250 },
    { "epoch": 1.42, "learning_rate": 0.00028526300539241065, "loss": 3.5769, "step": 2260 },
    { "epoch": 1.43, "learning_rate": 0.00028513427539152906, "loss": 3.5949, "step": 2270 },
    { "epoch": 1.44, "learning_rate": 0.0002850050148998601, "loss": 3.59, "step": 2280 },
    { "epoch": 1.44, "learning_rate": 0.0002848752244248361, "loss": 3.5819, "step": 2290 },
    { "epoch": 1.45, "learning_rate": 0.0002847449044759699, "loss": 3.5742, "step": 2300 },
    { "epoch": 1.46, "learning_rate": 0.00028461405556485283, "loss": 3.5698, "step": 2310 },
    { "epoch": 1.46, "learning_rate": 0.00028448267820515286, "loss": 3.5552, "step": 2320 },
    { "epoch": 1.47, "learning_rate": 0.00028435077291261244, "loss": 3.5663, "step": 2330 },
    { "epoch": 1.47, "learning_rate": 0.00028421834020504646, "loss": 3.5596, "step": 2340 },
    { "epoch": 1.48, "learning_rate": 0.00028408538060234023, "loss": 3.5519, "step": 2350 },
    { "epoch": 1.49, "learning_rate": 0.0002839518946264476, "loss": 3.559, "step": 2360 },
    { "epoch": 1.49, "learning_rate": 0.00028381788280138864, "loss": 3.5509, "step": 2370 },
    { "epoch": 1.5, "learning_rate": 0.0002836833456532478, "loss": 3.5612, "step": 2380 },
    { "epoch": 1.51, "learning_rate": 0.0002835482837101718, "loss": 3.538, "step": 2390 },
    { "epoch": 1.51, "learning_rate": 0.0002834126975023674, "loss": 3.536, "step": 2400 },
    { "epoch": 1.52, "learning_rate": 0.00028327658756209956, "loss": 3.5435, "step": 2410 },
    { "epoch": 1.53, "learning_rate": 0.0002831399544236892, "loss": 3.5472, "step": 2420 },
    { "epoch": 1.53, "learning_rate": 0.0002830027986235111, "loss": 3.5376, "step": 2430 },
    { "epoch": 1.54, "learning_rate": 0.00028286512069999187, "loss": 3.5466, "step": 2440 },
    { "epoch": 1.54, "learning_rate": 0.0002827269211936078, "loss": 3.5272, "step": 2450 },
    { "epoch": 1.55, "learning_rate": 0.0002825882006468827, "loss": 3.5275, "step": 2460 },
    { "epoch": 1.56, "learning_rate": 0.00028244895960438584, "loss": 3.5263, "step": 2470 },
    { "epoch": 1.56, "learning_rate": 0.0002823091986127298, "loss": 3.536, "step": 2480 },
    { "epoch": 1.57, "learning_rate": 0.00028216891822056817, "loss": 3.5218, "step": 2490 },
    { "epoch": 1.58, "learning_rate": 0.0002820281189785937, "loss": 3.5212, "step": 2500 },
    { "epoch": 1.58, "eval_loss": 3.564971685409546, "eval_runtime": 231.4549, "eval_samples_per_second": 328.941, "eval_steps_per_second": 5.876, "step": 2500 },
    { "epoch": 1.58, "learning_rate": 0.0002818868014395359, "loss": 3.516, "step": 2510 },
    { "epoch": 1.59, "learning_rate": 0.0002817449661581589, "loss": 3.513, "step": 2520 },
    { "epoch": 1.59, "learning_rate": 0.00028160261369125937, "loss": 3.5122, "step": 2530 },
    { "epoch": 1.6, "learning_rate": 0.0002814597445976642, "loss": 3.4902, "step": 2540 },
    { "epoch": 1.61, "learning_rate": 0.00028131635943822846, "loss": 3.5033, "step": 2550 },
    { "epoch": 1.61, "learning_rate": 0.000281172458775833, "loss": 3.5199, "step": 2560 },
    { "epoch": 1.62, "learning_rate": 0.00028102804317538255, "loss": 3.5023, "step": 2570 },
    { "epoch": 1.63, "learning_rate": 0.00028088311320380305, "loss": 3.4981, "step": 2580 },
    { "epoch": 1.63, "learning_rate": 0.00028073766943003997, "loss": 3.4965, "step": 2590 },
    { "epoch": 1.64, "learning_rate": 0.0002805917124250556, "loss": 3.5009, "step": 2600 },
    { "epoch": 1.64, "learning_rate": 0.000280445242761827, "loss": 3.499, "step": 2610 },
    { "epoch": 1.65, "learning_rate": 0.0002802982610153439, "loss": 3.4942, "step": 2620 },
    { "epoch": 1.66, "learning_rate": 0.00028015076776260605, "loss": 3.4787, "step": 2630 },
    { "epoch": 1.66, "learning_rate": 0.0002800027635826214, "loss": 3.4983, "step": 2640 },
    { "epoch": 1.67, "learning_rate": 0.0002798542490564037, "loss": 3.4939, "step": 2650 },
    { "epoch": 1.68, "learning_rate": 0.00027970522476696984, "loss": 3.4799, "step": 2660 },
    { "epoch": 1.68, "learning_rate": 0.00027955569129933805, "loss": 3.461, "step": 2670 },
    { "epoch": 1.69, "learning_rate": 0.00027940564924052553, "loss": 3.4817, "step": 2680 },
    { "epoch": 1.7, "learning_rate": 0.00027925509917954577, "loss": 3.4738, "step": 2690 },
    { "epoch": 1.7, "learning_rate": 0.0002791040417074067, "loss": 3.4764, "step": 2700 },
    { "epoch": 1.71, "learning_rate": 0.00027895247741710813, "loss": 3.4696, "step": 2710 },
    { "epoch": 1.71, "learning_rate": 0.0002788004069036394, "loss": 3.4622, "step": 2720 },
    { "epoch": 1.72, "learning_rate": 0.00027864783076397723, "loss": 3.4669, "step": 2730 },
    { "epoch": 1.73, "learning_rate": 0.0002784947495970831, "loss": 3.4712, "step": 2740 },
    { "epoch": 1.73, "learning_rate": 0.0002783411640039012, "loss": 3.4702, "step": 2750 },
    { "epoch": 1.74, "learning_rate": 0.0002781870745873558, "loss": 3.4626, "step": 2760 },
    { "epoch": 1.75, "learning_rate": 0.0002780324819523491, "loss": 3.444, "step": 2770 },
    { "epoch": 1.75, "learning_rate": 0.0002778773867057587, "loss": 3.4455, "step": 2780 },
    { "epoch": 1.76, "learning_rate": 0.00027772178945643537, "loss": 3.4417, "step": 2790 },
    { "epoch": 1.76, "learning_rate": 0.00027756569081520047, "loss": 3.4368, "step": 2800 },
    { "epoch": 1.77, "learning_rate": 0.0002774090913948437, "loss": 3.4543, "step": 2810 },
    { "epoch": 1.78, "learning_rate": 0.0002772519918101206, "loss": 3.4426, "step": 2820 },
    { "epoch": 1.78, "learning_rate": 0.00027709439267775026, "loss": 3.4516, "step": 2830 },
    { "epoch": 1.79, "learning_rate": 0.0002769362946164128, "loss": 3.4368, "step": 2840 },
    { "epoch": 1.8, "learning_rate": 0.00027677769824674696, "loss": 3.4482, "step": 2850 },
    { "epoch": 1.8, "learning_rate": 0.0002766186041913476, "loss": 3.4555, "step": 2860 },
    { "epoch": 1.81, "learning_rate": 0.0002764590130747634, "loss": 3.4459, "step": 2870 },
    { "epoch": 1.82, "learning_rate": 0.00027629892552349434, "loss": 3.4408, "step": 2880 },
    { "epoch": 1.82, "learning_rate": 0.00027613834216598916, "loss": 3.4288, "step": 2890 },
    { "epoch": 1.83, "learning_rate": 0.00027597726363264306, "loss": 3.4429, "step": 2900 },
    { "epoch": 1.83, "learning_rate": 0.0002758156905557951, "loss": 3.4363, "step": 2910 },
    { "epoch": 1.84, "learning_rate": 0.0002756536235697256, "loss": 3.4278, "step": 2920 },
    { "epoch": 1.85, "learning_rate": 0.0002754910633106541, "loss": 3.4199, "step": 2930 },
    { "epoch": 1.85, "learning_rate": 0.00027532801041673624, "loss": 3.4193, "step": 2940 },
    { "epoch": 1.86, "learning_rate": 0.0002751644655280618, "loss": 3.416, "step": 2950 },
    { "epoch": 1.87, "learning_rate": 0.00027500042928665187, "loss": 3.4273, "step": 2960 },
    { "epoch": 1.87, "learning_rate": 0.0002748359023364565, "loss": 3.4276, "step": 2970 },
    { "epoch": 1.88, "learning_rate": 0.00027467088532335187, "loss": 3.4175, "step": 2980 },
    { "epoch": 1.88, "learning_rate": 0.0002745053788951383, "loss": 3.4092, "step": 2990 },
    { "epoch": 1.89, "learning_rate": 0.0002743393837015371, "loss": 3.4045, "step": 3000 },
    { "epoch": 1.89, "eval_loss": 3.44966721534729, "eval_runtime": 231.419, "eval_samples_per_second": 328.992, "eval_steps_per_second": 5.877, "step": 3000 },
    { "epoch": 1.9, "learning_rate": 0.0002741729003941885, "loss": 3.4111, "step": 3010 },
    { "epoch": 1.9, "learning_rate": 0.00027400592962664884, "loss": 3.4155, "step": 3020 },
    { "epoch": 1.91, "learning_rate": 0.000273838472054388, "loss": 3.403, "step": 3030 },
    { "epoch": 1.92, "learning_rate": 0.000273670528334787, "loss": 3.4067, "step": 3040 },
    { "epoch": 1.92, "learning_rate": 0.0002735020991271352, "loss": 3.3993, "step": 3050 },
    { "epoch": 1.93, "learning_rate": 0.0002733331850926279, "loss": 3.3928, "step": 3060 },
    { "epoch": 1.93, "learning_rate": 0.00027316378689436357, "loss": 3.4282, "step": 3070 },
    { "epoch": 1.94, "learning_rate": 0.0002729939051973415, "loss": 3.3903, "step": 3080 },
    { "epoch": 1.95, "learning_rate": 0.0002728235406684588, "loss": 3.4059, "step": 3090 },
    { "epoch": 1.95, "learning_rate": 0.0002726526939765082, "loss": 3.386, "step": 3100 },
    { "epoch": 1.96, "learning_rate": 0.0002724813657921752, "loss": 3.3977, "step": 3110 },
    { "epoch": 1.97, "learning_rate": 0.0002723095567880354, "loss": 3.3909, "step": 3120 },
    { "epoch": 1.97, "learning_rate": 0.0002721372676385521, "loss": 3.3833, "step": 3130 },
    { "epoch": 1.98, "learning_rate": 0.0002719644990200733, "loss": 3.383, "step": 3140 },
    { "epoch": 1.99, "learning_rate": 0.0002717912516108292, "loss": 3.3826, "step": 3150 },
    { "epoch": 1.99, "learning_rate": 0.0002716175260909298, "loss": 3.3754, "step": 3160 },
    { "epoch": 2.0, "learning_rate": 0.0002714433231423619, "loss": 3.3547, "step": 3170 },
    { "epoch": 2.0, "learning_rate": 0.00027126864344898633, "loss": 3.3545, "step": 3180 },
    { "epoch": 2.01, "learning_rate": 0.00027109348769653575, "loss": 3.3543, "step": 3190 },
    { "epoch": 2.02, "learning_rate": 0.00027091785657261153, "loss": 3.3571, "step": 3200 },
    { "epoch": 2.02, "learning_rate": 0.0002707417507666811, "loss": 3.3657, "step": 3210 },
    { "epoch": 2.03, "learning_rate": 0.0002705651709700755, "loss": 3.3608, "step": 3220 },
    { "epoch": 2.04, "learning_rate": 0.00027038811787598636, "loss": 3.3607, "step": 3230 },
    { "epoch": 2.04, "learning_rate": 0.00027021059217946335, "loss": 3.3625, "step": 3240 },
    { "epoch": 2.05, "learning_rate": 0.0002700325945774115, "loss": 3.3365, "step": 3250 },
    { "epoch": 2.05, "learning_rate": 0.00026985412576858823, "loss": 3.3552, "step": 3260 },
    { "epoch": 2.06, "learning_rate": 0.00026967518645360086, "loss": 3.3539, "step": 3270 },
    { "epoch": 2.07, "learning_rate": 0.00026949577733490377, "loss": 3.3554, "step": 3280 },
    { "epoch": 2.07, "learning_rate": 0.00026931589911679556, "loss": 3.3367, "step": 3290 },
    { "epoch": 2.08, "learning_rate": 0.0002691355525054163, "loss": 3.3529, "step": 3300 },
    { "epoch": 2.09, "learning_rate": 0.000268954738208745, "loss": 3.3458, "step": 3310 },
    { "epoch": 2.09, "learning_rate": 0.0002687734569365965, "loss": 3.3611, "step": 3320 },
    { "epoch": 2.1, "learning_rate": 0.0002685917094006189, "loss": 3.3554, "step": 3330 },
    { "epoch": 2.1, "learning_rate": 0.0002684094963142906, "loss": 3.3453, "step": 3340 },
    { "epoch": 2.11, "learning_rate": 0.0002682268183929176, "loss": 3.332, "step": 3350 },
    { "epoch": 2.12, "learning_rate": 0.0002680436763536308, "loss": 3.3356, "step": 3360 },
    { "epoch": 2.12, "learning_rate": 0.00026786007091538297, "loss": 3.3248, "step": 3370 },
    { "epoch": 2.13, "learning_rate": 0.000267676002798946, "loss": 3.3346, "step": 3380 },
    { "epoch": 2.14, "learning_rate": 0.00026749147272690825, "loss": 3.3365, "step": 3390 },
    { "epoch": 2.14, "learning_rate": 0.00026730648142367136, "loss": 3.3292, "step": 3400 },
    { "epoch": 2.15, "learning_rate": 0.00026712102961544773, "loss": 3.3306, "step": 3410 },
    { "epoch": 2.16, "learning_rate": 0.0002669351180302575, "loss": 3.3376, "step": 3420 },
    { "epoch": 2.16, "learning_rate": 0.0002667487473979258, "loss": 3.3219, "step": 3430 },
    { "epoch": 2.17, "learning_rate": 0.0002665619184500797, "loss": 3.3314, "step": 3440 },
    { "epoch": 2.17, "learning_rate": 0.00026637463192014555, "loss": 3.3386, "step": 3450 },
    { "epoch": 2.18, "learning_rate": 0.00026618688854334596, "loss": 3.3298, "step": 3460 },
    { "epoch": 2.19, "learning_rate": 0.0002659986890566971, "loss": 3.3246, "step": 3470 },
    { "epoch": 2.19, "learning_rate": 0.0002658100341990055, "loss": 3.328, "step": 3480 },
    { "epoch": 2.2, "learning_rate": 0.0002656209247108654, "loss": 3.3225, "step": 3490 },
    { "epoch": 2.21, "learning_rate": 0.00026543136133465566, "loss": 3.3238, "step": 3500 },
    { "epoch": 2.21, "eval_loss": 3.364104986190796, "eval_runtime": 231.353, "eval_samples_per_second": 329.086, "eval_steps_per_second": 5.878, "step": 3500 },
    { "epoch": 2.21, "learning_rate": 0.00026524134481453716, "loss": 3.3292, "step": 3510 },
    { "epoch": 2.22, "learning_rate": 0.00026505087589644946, "loss": 3.3076, "step": 3520 },
    { "epoch": 2.22, "learning_rate": 0.0002648599553281082, "loss": 3.3122, "step": 3530 },
    { "epoch": 2.23, "learning_rate": 0.0002646685838590021, "loss": 3.3057, "step": 3540 },
    { "epoch": 2.24, "learning_rate": 0.00026447676224038966, "loss": 3.2999, "step": 3550 },
    { "epoch": 2.24, "learning_rate": 0.00026428449122529687, "loss": 3.3088, "step": 3560 },
    { "epoch": 2.25, "learning_rate": 0.00026409177156851377, "loss": 3.2952, "step": 3570 },
    { "epoch": 2.26, "learning_rate": 0.0002638986040265915, "loss": 3.3009, "step": 3580 },
    { "epoch": 2.26, "learning_rate": 0.00026370498935783954, "loss": 3.3222, "step": 3590 },
    { "epoch": 2.27, "learning_rate": 0.00026351092832232266, "loss": 3.3066, "step": 3600 },
    { "epoch": 2.28, "learning_rate": 0.000263316421681858, "loss": 3.3035, "step": 3610 },
    { "epoch": 2.28, "learning_rate": 0.00026312147020001163, "loss": 3.3059, "step": 3620 },
    { "epoch": 2.29, "learning_rate": 0.00026292607464209635, "loss": 3.2897, "step": 3630 },
    { "epoch": 2.29, "learning_rate": 0.000262730235775168, "loss": 3.3011, "step": 3640 },
    { "epoch": 2.3, "learning_rate": 0.0002625339543680228, "loss": 3.3175, "step": 3650 },
    { "epoch": 2.31, "learning_rate": 0.00026233723119119414, "loss": 3.2998, "step": 3660 },
    { "epoch": 2.31, "learning_rate": 0.0002621400670169497, "loss": 3.3112, "step": 3670 },
    { "epoch": 2.32, "learning_rate": 0.0002619424626192884, "loss": 3.2962, "step": 3680 },
    { "epoch": 2.33, "learning_rate": 0.0002617444187739372, "loss": 3.2828, "step": 3690 },
    { "epoch": 2.33, "learning_rate": 0.0002615459362583484, "loss": 3.2919, "step": 3700 },
    { "epoch": 2.34, "learning_rate": 0.0002613470158516962, "loss": 3.2742, "step": 3710 },
    { "epoch": 2.34, "learning_rate": 0.00026114765833487374, "loss": 3.2876, "step": 3720 },
    { "epoch": 2.35, "learning_rate": 0.0002609478644904903, "loss": 3.2987, "step": 3730 },
    { "epoch": 2.36, "learning_rate": 0.000260747635102868, "loss": 3.2979, "step": 3740 },
    { "epoch": 2.36, "learning_rate": 0.00026054697095803853, "loss": 3.2827, "step": 3750 },
    { "epoch": 2.37, "learning_rate": 0.0002603458728437406, "loss": 3.2907, "step": 3760 },
    { "epoch": 2.38, "learning_rate": 0.0002601443415494164, "loss": 3.2716, "step": 3770 },
    { "epoch": 2.38, "learning_rate": 0.0002599423778662085, "loss": 3.2919, "step": 3780 },
    { "epoch": 2.39, "learning_rate": 0.00025973998258695715, "loss": 3.2793, "step": 3790 },
    { "epoch": 2.39, "learning_rate": 0.0002595371565061966, "loss": 3.2895, "step": 3800 },
    { "epoch": 2.4, "learning_rate": 0.0002593339004201526, "loss": 3.2629, "step": 3810 },
    { "epoch": 2.41, "learning_rate": 0.00025913021512673863, "loss": 3.281, "step": 3820 },
    { "epoch": 2.41, "learning_rate": 0.0002589261014255534, "loss": 3.2647, "step": 3830 },
    { "epoch": 2.42, "learning_rate": 0.0002587215601178771, "loss": 3.2834, "step": 3840 },
    { "epoch": 2.43, "learning_rate": 0.0002585165920066688, "loss": 3.27, "step": 3850 },
    { "epoch": 2.43, "learning_rate": 0.00025831119789656296, "loss": 3.2528, "step": 3860 },
    { "epoch": 2.44, "learning_rate": 0.00025810537859386634, "loss": 3.2714, "step": 3870 },
    { "epoch": 2.45, "learning_rate": 0.0002578991349065549, "loss": 3.2773, "step": 3880 },
    { "epoch": 2.45, "learning_rate": 0.00025769246764427054, "loss": 3.2661, "step": 3890 },
    { "epoch": 2.46, "learning_rate": 0.000257485377618318, "loss": 3.2672, "step": 3900 },
    { "epoch": 2.46, "learning_rate": 0.00025727786564166174, "loss": 3.2664, "step": 3910 },
    { "epoch": 2.47, "learning_rate": 0.0002570699325289225, "loss": 3.2444, "step": 3920 },
    { "epoch": 2.48, "learning_rate": 0.00025686157909637425, "loss": 3.2565, "step": 3930 },
    { "epoch": 2.48, "learning_rate": 0.0002566528061619412, "loss": 3.2536, "step": 3940 },
    { "epoch": 2.49, "learning_rate": 0.0002564436145451941, "loss": 3.2727, "step": 3950 },
    { "epoch": 2.5, "learning_rate": 0.00025623400506734757, "loss": 3.2585, "step": 3960 },
    { "epoch": 2.5, "learning_rate": 0.0002560239785512564, "loss": 3.2558, "step": 3970 },
    { "epoch": 2.51, "learning_rate": 0.00025581353582141255, "loss": 3.2512, "step": 3980 },
    { "epoch": 2.51, "learning_rate": 0.0002556026777039422, "loss": 3.2573, "step": 3990 },
    { "epoch": 2.52, "learning_rate": 0.0002553914050266017, "loss": 3.2587, "step": 4000 },
    { "epoch": 2.52, "eval_loss": 3.2957873344421387, "eval_runtime": 231.6662, "eval_samples_per_second": 328.641, "eval_steps_per_second": 5.871, "step": 4000 },
    { "epoch": 2.53, "learning_rate": 0.0002551797186187751, "loss": 3.2439, "step": 4010 },
    { "epoch": 2.53, "learning_rate": 0.00025496761931147075, "loss": 3.2466, "step": 4020 },
    { "epoch": 2.54, "learning_rate": 0.0002547551079373176, "loss": 3.244, "step": 4030 },
    { "epoch": 2.55, "learning_rate": 0.0002545421853305625, "loss": 3.2359, "step": 4040 },
    { "epoch": 2.55, "learning_rate": 0.0002543288523270663, "loss": 3.2601, "step": 4050 },
    { "epoch": 2.56, "learning_rate": 0.0002541151097643014, "loss": 3.2408, "step": 4060 },
    { "epoch": 2.56, "learning_rate": 0.0002539009584813476, "loss": 3.2462, "step": 4070 },
    { "epoch": 2.57, "learning_rate": 0.0002536863993188894, "loss": 3.2459, "step": 4080 },
    { "epoch": 2.58, "learning_rate": 0.0002534714331192124, "loss": 3.2358, "step": 4090 },
    { "epoch": 2.58, "learning_rate": 0.00025325606072620015, "loss": 3.2436, "step": 4100 },
    { "epoch": 2.59, "learning_rate": 0.00025304028298533076, "loss": 3.2471, "step": 4110 },
    { "epoch": 2.6, "learning_rate": 0.00025282410074367357, "loss": 3.246, "step": 4120 },
    { "epoch": 2.6, "learning_rate": 0.00025260751484988583, "loss": 3.224, "step": 4130 },
    { "epoch": 2.61, "learning_rate": 0.0002523905261542095, "loss": 3.2352, "step": 4140 },
    { "epoch": 2.62, "learning_rate": 0.00025217313550846763, "loss": 3.2328, "step": 4150 },
    { "epoch": 2.62, "learning_rate": 0.0002519553437660614, "loss": 3.2436, "step": 4160 },
    { "epoch": 2.63, "learning_rate": 0.0002517371517819664, "loss": 3.2193, "step": 4170 },
    { "epoch": 2.63, "learning_rate": 0.00025151856041272946, "loss": 3.2364, "step": 4180 },
    { "epoch": 2.64, "learning_rate": 0.00025129957051646525, "loss": 3.2247, "step": 4190 },
    { "epoch": 2.65, "learning_rate": 0.00025108018295285305, "loss": 3.2206, "step": 4200 },
    { "epoch": 2.65, "learning_rate": 0.00025086039858313307, "loss": 3.2104, "step": 4210 },
    { "epoch": 2.66, "learning_rate": 0.00025064021827010327, "loss": 3.2223, "step": 4220 },
    { "epoch": 2.67, "learning_rate": 0.00025041964287811613, "loss": 3.2228, "step": 4230 },
    { "epoch": 2.67, "learning_rate": 0.00025019867327307475, "loss": 3.2256, "step": 4240 },
    { "epoch": 2.68, "learning_rate": 0.0002499773103224301, "loss": 3.2202, "step": 4250 },
    { "epoch": 2.68, "learning_rate": 0.0002497555548951771, "loss": 3.2146, "step": 4260 },
    { "epoch": 2.69, "learning_rate": 0.0002495334078618515, "loss": 3.2248, "step": 4270 },
    { "epoch": 2.7, "learning_rate": 0.0002493108700945263, "loss": 3.2263, "step": 4280 },
    { "epoch": 2.7, "learning_rate": 0.00024908794246680837, "loss": 3.2199, "step": 4290 },
    { "epoch": 2.71, "learning_rate": 0.0002488646258538351, "loss": 3.2165, "step": 4300 },
    { "epoch": 2.72, "learning_rate": 0.0002486409211322708, "loss": 3.2257, "step": 4310 },
    { "epoch": 2.72, "learning_rate": 0.00024841682918030347, "loss": 3.213, "step": 4320 },
    { "epoch": 2.73, "learning_rate": 0.00024819235087764123, "loss": 3.2143, "step": 4330 },
    { "epoch": 2.74, "learning_rate": 0.0002479674871055089, "loss": 3.2147, "step": 4340 },
    { "epoch": 2.74, "learning_rate": 0.0002477422387466443, "loss": 3.2156, "step": 4350 },
    { "epoch": 2.75, "learning_rate": 0.0002475166066852954, "loss": 3.1976, "step": 4360 },
    { "epoch": 2.75, "learning_rate": 0.00024729059180721613, "loss": 3.2241, "step": 4370 },
    { "epoch": 2.76, "learning_rate": 0.00024706419499966335, "loss": 3.2114, "step": 4380 },
    { "epoch": 2.77, "learning_rate": 0.0002468374171513932, "loss": 3.2148, "step": 4390 },
    { "epoch": 2.77, "learning_rate": 0.00024661025915265775, "loss": 3.2067, "step": 4400 },
    { "epoch": 2.78, "learning_rate": 0.00024638272189520124, "loss": 3.2173, "step": 4410 },
    { "epoch": 2.79, "learning_rate": 0.0002461548062722569, "loss": 3.2083, "step": 4420 },
    { "epoch": 2.79, "learning_rate": 0.0002459265131785433, "loss": 3.202, "step": 4430 },
    { "epoch": 2.8, "learning_rate": 0.00024569784351026073, "loss": 3.2152, "step": 4440 },
    { "epoch": 2.8, "learning_rate": 0.0002454687981650877, "loss": 3.1965, "step": 4450 },
    { "epoch": 2.81, "learning_rate": 0.0002452393780421779, "loss": 3.2036, "step": 4460 },
    { "epoch": 2.82, "learning_rate": 0.0002450095840421558, "loss": 3.2003, "step": 4470 },
    { "epoch": 2.82, "learning_rate": 0.00024477941706711385, "loss": 3.2016, "step": 4480 },
    { "epoch": 2.83, "learning_rate": 0.0002445488780206086, "loss": 3.1901, "step": 4490 },
    { "epoch": 2.84, "learning_rate": 0.0002443179678076572, "loss": 3.2023, "step": 4500 },
    { "epoch": 2.84, "eval_loss": 3.2410736083984375, "eval_runtime": 232.4819, "eval_samples_per_second": 327.488, "eval_steps_per_second": 5.85, "step": 4500 },
    { "epoch": 2.84, "learning_rate": 0.00024408668733473388, "loss": 3.205, "step": 4510 },
    { "epoch": 2.85, "learning_rate": 0.0002438550375097664, "loss": 3.2041, "step": 4520 },
    { "epoch": 2.85, "learning_rate": 0.00024362301924213257, "loss": 3.2007, "step": 4530 },
    { "epoch": 2.86, "learning_rate": 0.00024339063344265641, "loss": 3.1913, "step": 4540 },
    { "epoch": 2.87, "learning_rate": 0.00024315788102360477, "loss": 3.1823, "step": 4550 },
    { "epoch": 2.87, "learning_rate": 0.00024292476289868378, "loss": 3.1869, "step": 4560 },
    { "epoch": 2.88, "learning_rate": 0.0002426912799830353, "loss": 3.1804, "step": 4570 },
    { "epoch": 2.89, "learning_rate": 0.000242457433193233, "loss": 3.1816, "step": 4580 },
    { "epoch": 2.89, "learning_rate": 0.00024222322344727922, "loss": 3.1914, "step": 4590 },
    { "epoch": 2.9, "learning_rate": 0.00024198865166460097, "loss": 3.1816, "step": 4600 },
    { "epoch": 2.91, "learning_rate": 0.00024175371876604656, "loss": 3.1705, "step": 4610 },
    { "epoch": 2.91, "learning_rate": 0.000241518425673882, "loss": 3.1754, "step": 4620 },
    { "epoch": 2.92, "learning_rate": 0.00024128277331178714, "loss": 3.18, "step": 4630 },
    { "epoch": 2.92, "learning_rate": 0.00024104676260485228, "loss": 3.1763, "step": 4640 },
    { "epoch": 2.93, "learning_rate": 0.00024081039447957451, "loss": 3.1827, "step": 4650 },
    { "epoch": 2.94, "learning_rate": 0.00024057366986385392, "loss": 3.1719, "step": 4660 },
    { "epoch": 2.94, "learning_rate": 0.00024033658968699017, "loss": 3.1723, "step": 4670 },
    { "epoch": 2.95, "learning_rate": 0.0002400991548796786, "loss": 3.1831, "step": 4680 },
    { "epoch": 2.96, "learning_rate": 0.00023986136637400686, "loss": 3.1744, "step": 4690 },
    { "epoch": 2.96, "learning_rate": 0.00023962322510345086, "loss": 3.1742, "step": 4700 },
    { "epoch": 2.97, "learning_rate": 0.00023938473200287166, "loss": 3.1726, "step": 4710 },
    { "epoch": 2.97, "learning_rate": 0.00023914588800851128, "loss": 3.1819, "step": 4720 },
    { "epoch": 2.98, "learning_rate": 0.00023890669405798924, "loss": 3.1574, "step": 4730 },
    { "epoch": 2.99, "learning_rate": 0.00023866715109029885, "loss": 3.1578, "step": 4740 },
    { "epoch": 2.99, "learning_rate": 0.0002384272600458037, "loss": 3.1794, "step": 4750 },
    { "epoch": 3.0, "learning_rate": 0.00023818702186623365, "loss": 3.1638, "step": 4760 },
    { "epoch": 3.01, "learning_rate": 0.00023794643749468135, "loss": 3.1713, "step": 4770 },
    { "epoch": 3.01, "learning_rate": 0.00023770550787559846, "loss": 3.1672, "step": 4780 },
    { "epoch": 3.02, "learning_rate": 0.000237464233954792, "loss": 3.1633, "step": 4790 },
    { "epoch": 3.03, "learning_rate": 0.00023722261667942057, "loss": 3.1574, "step": 4800 },
    { "epoch": 3.03, "learning_rate": 0.00023698065699799065, "loss": 3.1555, "step": 4810 },
    { "epoch": 3.04, "learning_rate": 0.0002367383558603529, "loss": 3.1487, "step": 4820 },
    { "epoch": 3.04, "learning_rate": 0.00023649571421769852, "loss": 3.154, "step": 4830 },
    { "epoch": 3.05, "learning_rate": 0.0002362527330225552, "loss": 3.1571, "step": 4840 },
    { "epoch": 3.06, "learning_rate": 0.00023600941322878382, "loss": 3.1508, "step": 4850 },
    { "epoch": 3.06, "learning_rate": 0.0002357657579157432, "loss": 3.1512, "step": 4860 },
    { "epoch": 3.07, "learning_rate": 0.0002355217616674422, "loss": 3.1492, "step": 4870 },
    { "epoch": 3.08, "learning_rate": 0.0002352774318142247, "loss": 3.1457, "step": 4880 },
    { "epoch": 3.08, "learning_rate": 0.0002350327671910769, "loss": 3.1452, "step": 4890 },
    { "epoch": 3.09, "learning_rate": 0.00023478776875846815, "loss": 3.1326, "step": 4900 },
    { "epoch": 3.09, "learning_rate": 0.00023454243747817827, "loss": 3.1484, "step": 4910 },
    { "epoch": 3.1, "learning_rate": 0.0002342967743132936, "loss": 3.1389, "step": 4920 },
    { "epoch": 3.11, "learning_rate": 0.00023405078022820344, "loss": 3.1407, "step": 4930 },
    { "epoch": 3.11, "learning_rate": 0.00023380445618859612, "loss": 3.143, "step": 4940 },
    { "epoch": 3.12, "learning_rate": 0.00023355780316145535, "loss": 3.1251, "step": 4950 },
    { "epoch": 3.13, "learning_rate": 0.00023331082211505624, "loss": 3.1261, "step": 4960 },
    { "epoch": 3.13, "learning_rate": 0.00023306351401896158, "loss": 3.1344, "step": 4970 },
    { "epoch": 3.14, "learning_rate": 0.0002328158798440181, "loss": 3.1344, "step": 4980 },
    { "epoch": 3.14, "learning_rate": 0.00023256792056235254, "loss": 3.1245, "step": 4990 },
    { "epoch": 3.15, "learning_rate": 0.00023231963714736802, "loss": 3.1294, "step": 5000 },
    { "epoch": 3.15, "eval_loss": 3.191751718521118, "eval_runtime": 231.6631, "eval_samples_per_second": 328.645, "eval_steps_per_second": 5.871, "step": 5000 },
    { "epoch": 3.16, "learning_rate": 0.00023207103057374003, "loss": 3.1464, "step": 5010 },
    { "epoch": 3.16, "learning_rate": 0.0002318221018174125, "loss": 3.1494, "step": 5020 },
    { "epoch": 3.17, "learning_rate": 0.00023157285185559446, "loss": 3.1321, "step": 5030 },
    { "epoch": 3.18, "learning_rate": 0.00023132328166675561, "loss": 3.1534, "step": 5040 },
    { "epoch": 3.18, "learning_rate": 0.00023107339223062297, "loss": 3.1463, "step": 5050 },
    { "epoch": 3.19, "learning_rate": 0.00023082318452817663, "loss": 3.1359, "step": 5060 },
    { "epoch": 3.2, "learning_rate": 0.00023057265954164614, "loss": 3.1323, "step": 5070 },
    { "epoch": 3.2, "learning_rate": 0.00023032181825450668, "loss": 3.1299, "step": 5080 },
    { "epoch": 3.21, "learning_rate": 0.00023007066165147503, "loss": 3.146, "step": 5090 },
    { "epoch": 3.21, "learning_rate": 0.00022981919071850577, "loss": 3.1275, "step": 5100 },
    { "epoch": 3.22, "learning_rate": 0.00022956740644278754, "loss": 3.1321, "step": 5110 },
    { "epoch": 3.23, "learning_rate": 0.00022931530981273903, "loss": 3.1333, "step": 5120 },
    { "epoch": 3.23, "learning_rate": 0.00022906290181800494, "loss": 3.1364, "step": 5130 },
    { "epoch": 3.24, "learning_rate": 0.0002288101834494526, "loss": 3.1311, "step": 5140 },
    { "epoch": 3.25, "learning_rate": 0.00022855715569916745, "loss": 3.1292, "step": 5150 },
    { "epoch": 3.25,
"learning_rate": 0.00022830381956044974, |
|
"loss": 3.1364, |
|
"step": 5160 |
|
}, |
|
{ |
|
"epoch": 3.26, |
|
"learning_rate": 0.0002280501760278101, |
|
"loss": 3.1342, |
|
"step": 5170 |
|
}, |
|
{ |
|
"epoch": 3.26, |
|
"learning_rate": 0.00022779622609696607, |
|
"loss": 3.1139, |
|
"step": 5180 |
|
}, |
|
{ |
|
"epoch": 3.27, |
|
"learning_rate": 0.00022754197076483781, |
|
"loss": 3.1168, |
|
"step": 5190 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"learning_rate": 0.00022728741102954458, |
|
"loss": 3.1144, |
|
"step": 5200 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"learning_rate": 0.00022703254789040056, |
|
"loss": 3.1203, |
|
"step": 5210 |
|
}, |
|
{ |
|
"epoch": 3.29, |
|
"learning_rate": 0.0002267773823479109, |
|
"loss": 3.1184, |
|
"step": 5220 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 0.00022652191540376793, |
|
"loss": 3.115, |
|
"step": 5230 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 0.00022626614806084725, |
|
"loss": 3.116, |
|
"step": 5240 |
|
}, |
|
{ |
|
"epoch": 3.31, |
|
"learning_rate": 0.00022601008132320372, |
|
"loss": 3.1191, |
|
"step": 5250 |
|
}, |
|
{ |
|
"epoch": 3.31, |
|
"learning_rate": 0.00022575371619606738, |
|
"loss": 3.1217, |
|
"step": 5260 |
|
}, |
|
{ |
|
"epoch": 3.32, |
|
"learning_rate": 0.00022549705368583975, |
|
"loss": 3.1143, |
|
"step": 5270 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 0.00022524009480008977, |
|
"loss": 3.1103, |
|
"step": 5280 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 0.00022498284054754984, |
|
"loss": 3.0979, |
|
"step": 5290 |
|
}, |
|
{ |
|
"epoch": 3.34, |
|
"learning_rate": 0.00022472529193811187, |
|
"loss": 3.0993, |
|
"step": 5300 |
|
}, |
|
{ |
|
"epoch": 3.35, |
|
"learning_rate": 0.00022446744998282328, |
|
"loss": 3.1084, |
|
"step": 5310 |
|
}, |
|
{ |
|
"epoch": 3.35, |
|
"learning_rate": 0.00022420931569388315, |
|
"loss": 3.1025, |
|
"step": 5320 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"learning_rate": 0.0002239508900846381, |
|
"loss": 3.1086, |
|
"step": 5330 |
|
}, |
|
{ |
|
"epoch": 3.37, |
|
"learning_rate": 0.00022369217416957834, |
|
"loss": 3.106, |
|
"step": 5340 |
|
}, |
|
{ |
|
"epoch": 3.37, |
|
"learning_rate": 0.00022343316896433378, |
|
"loss": 3.0939, |
|
"step": 5350 |
|
}, |
|
{ |
|
"epoch": 3.38, |
|
"learning_rate": 0.00022317387548566995, |
|
"loss": 3.1205, |
|
"step": 5360 |
|
}, |
|
{ |
|
"epoch": 3.38, |
|
"learning_rate": 0.00022291429475148413, |
|
"loss": 3.1047, |
|
"step": 5370 |
|
}, |
|
{ |
|
"epoch": 3.39, |
|
"learning_rate": 0.00022265442778080112, |
|
"loss": 3.1108, |
|
"step": 5380 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"learning_rate": 0.00022239427559376946, |
|
"loss": 3.1116, |
|
"step": 5390 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"learning_rate": 0.00022213383921165737, |
|
"loss": 3.1005, |
|
"step": 5400 |
|
}, |
|
{ |
|
"epoch": 3.41, |
|
"learning_rate": 0.00022187311965684873, |
|
"loss": 3.0941, |
|
"step": 5410 |
|
}, |
|
{ |
|
"epoch": 3.42, |
|
"learning_rate": 0.00022161211795283903, |
|
"loss": 3.1077, |
|
"step": 5420 |
|
}, |
|
{ |
|
"epoch": 3.42, |
|
"learning_rate": 0.00022135083512423134, |
|
"loss": 3.1011, |
|
"step": 5430 |
|
}, |
|
{ |
|
"epoch": 3.43, |
|
"learning_rate": 0.00022108927219673244, |
|
"loss": 3.0966, |
|
"step": 5440 |
|
}, |
|
{ |
|
"epoch": 3.43, |
|
"learning_rate": 0.0002208274301971485, |
|
"loss": 3.0986, |
|
"step": 5450 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"learning_rate": 0.0002205653101533815, |
|
"loss": 3.1077, |
|
"step": 5460 |
|
}, |
|
{ |
|
"epoch": 3.45, |
|
"learning_rate": 0.00022030291309442472, |
|
"loss": 3.1003, |
|
"step": 5470 |
|
}, |
|
{ |
|
"epoch": 3.45, |
|
"learning_rate": 0.0002200402400503589, |
|
"loss": 3.1052, |
|
"step": 5480 |
|
}, |
|
{ |
|
"epoch": 3.46, |
|
"learning_rate": 0.00021977729205234828, |
|
"loss": 3.0985, |
|
"step": 5490 |
|
}, |
|
{ |
|
"epoch": 3.47, |
|
"learning_rate": 0.0002195140701326365, |
|
"loss": 3.1086, |
|
"step": 5500 |
|
}, |
|
{ |
|
"epoch": 3.47, |
|
"eval_loss": 3.1522881984710693, |
|
"eval_runtime": 231.5362, |
|
"eval_samples_per_second": 328.825, |
|
"eval_steps_per_second": 5.874, |
|
"step": 5500 |
|
}, |
|
{ |
|
"epoch": 3.47, |
|
"learning_rate": 0.00021925057532454242, |
|
"loss": 3.1143, |
|
"step": 5510 |
|
}, |
|
{ |
|
"epoch": 3.48, |
|
"learning_rate": 0.00021898680866245629, |
|
"loss": 3.0888, |
|
"step": 5520 |
|
}, |
|
{ |
|
"epoch": 3.49, |
|
"learning_rate": 0.00021872277118183546, |
|
"loss": 3.103, |
|
"step": 5530 |
|
}, |
|
{ |
|
"epoch": 3.49, |
|
"learning_rate": 0.0002184584639192005, |
|
"loss": 3.0973, |
|
"step": 5540 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 0.00021819388791213094, |
|
"loss": 3.1052, |
|
"step": 5550 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 0.00021792904419926145, |
|
"loss": 3.0999, |
|
"step": 5560 |
|
}, |
|
{ |
|
"epoch": 3.51, |
|
"learning_rate": 0.00021766393382027758, |
|
"loss": 3.1019, |
|
"step": 5570 |
|
}, |
|
{ |
|
"epoch": 3.52, |
|
"learning_rate": 0.0002173985578159116, |
|
"loss": 3.0909, |
|
"step": 5580 |
|
}, |
|
{ |
|
"epoch": 3.52, |
|
"learning_rate": 0.00021713291722793866, |
|
"loss": 3.0935, |
|
"step": 5590 |
|
}, |
|
{ |
|
"epoch": 3.53, |
|
"learning_rate": 0.00021686701309917258, |
|
"loss": 3.0937, |
|
"step": 5600 |
|
}, |
|
{ |
|
"epoch": 3.54, |
|
"learning_rate": 0.00021660084647346164, |
|
"loss": 3.1037, |
|
"step": 5610 |
|
}, |
|
{ |
|
"epoch": 3.54, |
|
"learning_rate": 0.00021633441839568476, |
|
"loss": 3.0861, |
|
"step": 5620 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"learning_rate": 0.00021606772991174708, |
|
"loss": 3.0893, |
|
"step": 5630 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"learning_rate": 0.00021580078206857606, |
|
"loss": 3.0782, |
|
"step": 5640 |
|
}, |
|
{ |
|
"epoch": 3.56, |
|
"learning_rate": 0.0002155335759141173, |
|
"loss": 3.0919, |
|
"step": 5650 |
|
}, |
|
{ |
|
"epoch": 3.57, |
|
"learning_rate": 0.00021526611249733052, |
|
"loss": 3.0914, |
|
"step": 5660 |
|
}, |
|
{ |
|
"epoch": 3.57, |
|
"learning_rate": 0.00021499839286818527, |
|
"loss": 3.0919, |
|
"step": 5670 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"learning_rate": 0.00021473041807765688, |
|
"loss": 3.0852, |
|
"step": 5680 |
|
}, |
|
{ |
|
"epoch": 3.59, |
|
"learning_rate": 0.00021446218917772249, |
|
"loss": 3.0811, |
|
"step": 5690 |
|
}, |
|
{ |
|
"epoch": 3.59, |
|
"learning_rate": 0.00021419370722135665, |
|
"loss": 3.0896, |
|
"step": 5700 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 0.00021392497326252737, |
|
"loss": 3.0909, |
|
"step": 5710 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 0.00021365598835619201, |
|
"loss": 3.087, |
|
"step": 5720 |
|
}, |
|
{ |
|
"epoch": 3.61, |
|
"learning_rate": 0.00021338675355829297, |
|
"loss": 3.0814, |
|
"step": 5730 |
|
}, |
|
{ |
|
"epoch": 3.62, |
|
"learning_rate": 0.00021311726992575362, |
|
"loss": 3.0856, |
|
"step": 5740 |
|
}, |
|
{ |
|
"epoch": 3.62, |
|
"learning_rate": 0.00021284753851647428, |
|
"loss": 3.0843, |
|
"step": 5750 |
|
}, |
|
{ |
|
"epoch": 3.63, |
|
"learning_rate": 0.00021257756038932783, |
|
"loss": 3.077, |
|
"step": 5760 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"learning_rate": 0.0002123073366041558, |
|
"loss": 3.0701, |
|
"step": 5770 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"learning_rate": 0.00021203686822176404, |
|
"loss": 3.0914, |
|
"step": 5780 |
|
}, |
|
{ |
|
"epoch": 3.65, |
|
"learning_rate": 0.0002117661563039185, |
|
"loss": 3.0674, |
|
"step": 5790 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"learning_rate": 0.00021149520191334132, |
|
"loss": 3.071, |
|
"step": 5800 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"learning_rate": 0.00021122400611370645, |
|
"loss": 3.0797, |
|
"step": 5810 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"learning_rate": 0.00021095256996963557, |
|
"loss": 3.0826, |
|
"step": 5820 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"learning_rate": 0.0002106808945466937, |
|
"loss": 3.0722, |
|
"step": 5830 |
|
}, |
|
{ |
|
"epoch": 3.68, |
|
"learning_rate": 0.00021040898091138544, |
|
"loss": 3.0675, |
|
"step": 5840 |
|
}, |
|
{ |
|
"epoch": 3.69, |
|
"learning_rate": 0.00021013683013115035, |
|
"loss": 3.0702, |
|
"step": 5850 |
|
}, |
|
{ |
|
"epoch": 3.69, |
|
"learning_rate": 0.00020986444327435896, |
|
"loss": 3.0713, |
|
"step": 5860 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"learning_rate": 0.00020959182141030866, |
|
"loss": 3.0685, |
|
"step": 5870 |
|
}, |
|
{ |
|
"epoch": 3.71, |
|
"learning_rate": 0.00020931896560921927, |
|
"loss": 3.0672, |
|
"step": 5880 |
|
}, |
|
{ |
|
"epoch": 3.71, |
|
"learning_rate": 0.00020904587694222908, |
|
"loss": 3.0757, |
|
"step": 5890 |
|
}, |
|
{ |
|
"epoch": 3.72, |
|
"learning_rate": 0.00020877255648139035, |
|
"loss": 3.074, |
|
"step": 5900 |
|
}, |
|
{ |
|
"epoch": 3.72, |
|
"learning_rate": 0.0002084990052996655, |
|
"loss": 3.0793, |
|
"step": 5910 |
|
}, |
|
{ |
|
"epoch": 3.73, |
|
"learning_rate": 0.00020822522447092262, |
|
"loss": 3.0852, |
|
"step": 5920 |
|
}, |
|
{ |
|
"epoch": 3.74, |
|
"learning_rate": 0.00020795121506993124, |
|
"loss": 3.0712, |
|
"step": 5930 |
|
}, |
|
{ |
|
"epoch": 3.74, |
|
"learning_rate": 0.00020767697817235817, |
|
"loss": 3.0675, |
|
"step": 5940 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"learning_rate": 0.00020740251485476345, |
|
"loss": 3.0627, |
|
"step": 5950 |
|
}, |
|
{ |
|
"epoch": 3.76, |
|
"learning_rate": 0.0002071278261945958, |
|
"loss": 3.0644, |
|
"step": 5960 |
|
}, |
|
{ |
|
"epoch": 3.76, |
|
"learning_rate": 0.00020685291327018866, |
|
"loss": 3.073, |
|
"step": 5970 |
|
}, |
|
{ |
|
"epoch": 3.77, |
|
"learning_rate": 0.00020657777716075589, |
|
"loss": 3.0741, |
|
"step": 5980 |
|
}, |
|
{ |
|
"epoch": 3.78, |
|
"learning_rate": 0.00020630241894638727, |
|
"loss": 3.0745, |
|
"step": 5990 |
|
}, |
|
{ |
|
"epoch": 3.78, |
|
"learning_rate": 0.00020602683970804474, |
|
"loss": 3.0731, |
|
"step": 6000 |
|
}, |
|
{ |
|
"epoch": 3.78, |
|
"eval_loss": 3.115403652191162, |
|
"eval_runtime": 231.5784, |
|
"eval_samples_per_second": 328.766, |
|
"eval_steps_per_second": 5.873, |
|
"step": 6000 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"learning_rate": 0.00020575104052755782, |
|
"loss": 3.0535, |
|
"step": 6010 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"learning_rate": 0.0002054750224876194, |
|
"loss": 3.0738, |
|
"step": 6020 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"learning_rate": 0.00020519878667178167, |
|
"loss": 3.0598, |
|
"step": 6030 |
|
}, |
|
{ |
|
"epoch": 3.81, |
|
"learning_rate": 0.00020492233416445152, |
|
"loss": 3.0621, |
|
"step": 6040 |
|
}, |
|
{ |
|
"epoch": 3.81, |
|
"learning_rate": 0.00020464566605088666, |
|
"loss": 3.0625, |
|
"step": 6050 |
|
}, |
|
{ |
|
"epoch": 3.82, |
|
"learning_rate": 0.00020436878341719116, |
|
"loss": 3.0692, |
|
"step": 6060 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"learning_rate": 0.00020409168735031124, |
|
"loss": 3.0621, |
|
"step": 6070 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"learning_rate": 0.0002038143789380309, |
|
"loss": 3.0681, |
|
"step": 6080 |
|
}, |
|
{ |
|
"epoch": 3.84, |
|
"learning_rate": 0.00020353685926896784, |
|
"loss": 3.054, |
|
"step": 6090 |
|
}, |
|
{ |
|
"epoch": 3.84, |
|
"learning_rate": 0.00020325912943256898, |
|
"loss": 3.0648, |
|
"step": 6100 |
|
}, |
|
{ |
|
"epoch": 3.85, |
|
"learning_rate": 0.0002029811905191064, |
|
"loss": 3.0569, |
|
"step": 6110 |
|
}, |
|
{ |
|
"epoch": 3.86, |
|
"learning_rate": 0.0002027030436196729, |
|
"loss": 3.0654, |
|
"step": 6120 |
|
}, |
|
{ |
|
"epoch": 3.86, |
|
"learning_rate": 0.00020242468982617775, |
|
"loss": 3.0641, |
|
"step": 6130 |
|
}, |
|
{ |
|
"epoch": 3.87, |
|
"learning_rate": 0.0002021461302313423, |
|
"loss": 3.0408, |
|
"step": 6140 |
|
}, |
|
{ |
|
"epoch": 3.88, |
|
"learning_rate": 0.00020186736592869606, |
|
"loss": 3.0616, |
|
"step": 6150 |
|
}, |
|
{ |
|
"epoch": 3.88, |
|
"learning_rate": 0.00020158839801257192, |
|
"loss": 3.0528, |
|
"step": 6160 |
|
}, |
|
{ |
|
"epoch": 3.89, |
|
"learning_rate": 0.00020130922757810222, |
|
"loss": 3.06, |
|
"step": 6170 |
|
}, |
|
{ |
|
"epoch": 3.89, |
|
"learning_rate": 0.00020102985572121427, |
|
"loss": 3.0514, |
|
"step": 6180 |
|
}, |
|
{ |
|
"epoch": 3.9, |
|
"learning_rate": 0.0002007502835386261, |
|
"loss": 3.0604, |
|
"step": 6190 |
|
}, |
|
{ |
|
"epoch": 3.91, |
|
"learning_rate": 0.0002004705121278421, |
|
"loss": 3.0602, |
|
"step": 6200 |
|
}, |
|
{ |
|
"epoch": 3.91, |
|
"learning_rate": 0.00020019054258714887, |
|
"loss": 3.0492, |
|
"step": 6210 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"learning_rate": 0.00019991037601561074, |
|
"loss": 3.0482, |
|
"step": 6220 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"learning_rate": 0.00019963001351306552, |
|
"loss": 3.054, |
|
"step": 6230 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"learning_rate": 0.00019934945618012016, |
|
"loss": 3.0552, |
|
"step": 6240 |
|
}, |
|
{ |
|
"epoch": 3.94, |
|
"learning_rate": 0.00019906870511814643, |
|
"loss": 3.0339, |
|
"step": 6250 |
|
}, |
|
{ |
|
"epoch": 3.95, |
|
"learning_rate": 0.0001987877614292767, |
|
"loss": 3.0507, |
|
"step": 6260 |
|
}, |
|
{ |
|
"epoch": 3.95, |
|
"learning_rate": 0.00019850662621639941, |
|
"loss": 3.0447, |
|
"step": 6270 |
|
}, |
|
{ |
|
"epoch": 3.96, |
|
"learning_rate": 0.000198225300583155, |
|
"loss": 3.0439, |
|
"step": 6280 |
|
}, |
|
{ |
|
"epoch": 3.96, |
|
"learning_rate": 0.00019794378563393135, |
|
"loss": 3.0676, |
|
"step": 6290 |
|
}, |
|
{ |
|
"epoch": 3.97, |
|
"learning_rate": 0.00019766208247385943, |
|
"loss": 3.0559, |
|
"step": 6300 |
|
}, |
|
{ |
|
"epoch": 3.98, |
|
"learning_rate": 0.0001973801922088093, |
|
"loss": 3.0542, |
|
"step": 6310 |
|
}, |
|
{ |
|
"epoch": 3.98, |
|
"learning_rate": 0.00019709811594538528, |
|
"loss": 3.0367, |
|
"step": 6320 |
|
}, |
|
{ |
|
"epoch": 3.99, |
|
"learning_rate": 0.00019681585479092208, |
|
"loss": 3.0419, |
|
"step": 6330 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 0.00019653340985348005, |
|
"loss": 3.0448, |
|
"step": 6340 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 0.00019625078224184113, |
|
"loss": 3.0384, |
|
"step": 6350 |
|
}, |
|
{ |
|
"epoch": 4.01, |
|
"learning_rate": 0.00019596797306550425, |
|
"loss": 3.0378, |
|
"step": 6360 |
|
}, |
|
{ |
|
"epoch": 4.01, |
|
"learning_rate": 0.00019568498343468124, |
|
"loss": 3.0153, |
|
"step": 6370 |
|
}, |
|
{ |
|
"epoch": 4.02, |
|
"learning_rate": 0.00019540181446029223, |
|
"loss": 3.0367, |
|
"step": 6380 |
|
}, |
|
{ |
|
"epoch": 4.03, |
|
"learning_rate": 0.00019511846725396145, |
|
"loss": 3.0358, |
|
"step": 6390 |
|
}, |
|
{ |
|
"epoch": 4.03, |
|
"learning_rate": 0.00019483494292801284, |
|
"loss": 3.0304, |
|
"step": 6400 |
|
}, |
|
{ |
|
"epoch": 4.04, |
|
"learning_rate": 0.0001945512425954655, |
|
"loss": 3.0338, |
|
"step": 6410 |
|
}, |
|
{ |
|
"epoch": 4.05, |
|
"learning_rate": 0.00019426736737002964, |
|
"loss": 3.0216, |
|
"step": 6420 |
|
}, |
|
{ |
|
"epoch": 4.05, |
|
"learning_rate": 0.00019398331836610193, |
|
"loss": 3.0279, |
|
"step": 6430 |
|
}, |
|
{ |
|
"epoch": 4.06, |
|
"learning_rate": 0.00019369909669876136, |
|
"loss": 3.0267, |
|
"step": 6440 |
|
}, |
|
{ |
|
"epoch": 4.06, |
|
"learning_rate": 0.00019341470348376453, |
|
"loss": 3.0294, |
|
"step": 6450 |
|
}, |
|
{ |
|
"epoch": 4.07, |
|
"learning_rate": 0.00019313013983754165, |
|
"loss": 3.0223, |
|
"step": 6460 |
|
}, |
|
{ |
|
"epoch": 4.08, |
|
"learning_rate": 0.00019284540687719196, |
|
"loss": 3.028, |
|
"step": 6470 |
|
}, |
|
{ |
|
"epoch": 4.08, |
|
"learning_rate": 0.0001925605057204793, |
|
"loss": 3.0214, |
|
"step": 6480 |
|
}, |
|
{ |
|
"epoch": 4.09, |
|
"learning_rate": 0.00019227543748582786, |
|
"loss": 3.0319, |
|
"step": 6490 |
|
}, |
|
{ |
|
"epoch": 4.1, |
|
"learning_rate": 0.00019199020329231767, |
|
"loss": 3.0129, |
|
"step": 6500 |
|
}, |
|
{ |
|
"epoch": 4.1, |
|
"eval_loss": 3.0856213569641113, |
|
"eval_runtime": 231.6823, |
|
"eval_samples_per_second": 328.618, |
|
"eval_steps_per_second": 5.87, |
|
"step": 6500 |
|
}, |
|
{ |
|
"epoch": 4.1, |
|
"learning_rate": 0.00019170480425968034, |
|
"loss": 3.0334, |
|
"step": 6510 |
|
}, |
|
{ |
|
"epoch": 4.11, |
|
"learning_rate": 0.00019141924150829445, |
|
"loss": 3.0273, |
|
"step": 6520 |
|
}, |
|
{ |
|
"epoch": 4.12, |
|
"learning_rate": 0.0001911335161591814, |
|
"loss": 3.0207, |
|
"step": 6530 |
|
}, |
|
{ |
|
"epoch": 4.12, |
|
"learning_rate": 0.00019084762933400083, |
|
"loss": 3.0216, |
|
"step": 6540 |
|
}, |
|
{ |
|
"epoch": 4.13, |
|
"learning_rate": 0.00019056158215504634, |
|
"loss": 3.0218, |
|
"step": 6550 |
|
}, |
|
{ |
|
"epoch": 4.13, |
|
"learning_rate": 0.00019027537574524097, |
|
"loss": 3.016, |
|
"step": 6560 |
|
}, |
|
{ |
|
"epoch": 4.14, |
|
"learning_rate": 0.0001899890112281328, |
|
"loss": 3.0082, |
|
"step": 6570 |
|
}, |
|
{ |
|
"epoch": 4.15, |
|
"learning_rate": 0.0001897024897278907, |
|
"loss": 3.0206, |
|
"step": 6580 |
|
}, |
|
{ |
|
"epoch": 4.15, |
|
"learning_rate": 0.00018941581236929975, |
|
"loss": 3.024, |
|
"step": 6590 |
|
}, |
|
{ |
|
"epoch": 4.16, |
|
"learning_rate": 0.00018912898027775683, |
|
"loss": 3.0257, |
|
"step": 6600 |
|
}, |
|
{ |
|
"epoch": 4.17, |
|
"learning_rate": 0.00018884199457926633, |
|
"loss": 3.0314, |
|
"step": 6610 |
|
}, |
|
{ |
|
"epoch": 4.17, |
|
"learning_rate": 0.0001885548564004356, |
|
"loss": 3.0197, |
|
"step": 6620 |
|
}, |
|
{ |
|
"epoch": 4.18, |
|
"learning_rate": 0.00018826756686847059, |
|
"loss": 3.0035, |
|
"step": 6630 |
|
}, |
|
{ |
|
"epoch": 4.18, |
|
"learning_rate": 0.0001879801271111714, |
|
"loss": 3.0193, |
|
"step": 6640 |
|
}, |
|
{ |
|
"epoch": 4.19, |
|
"learning_rate": 0.0001876925382569279, |
|
"loss": 3.009, |
|
"step": 6650 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"learning_rate": 0.00018740480143471518, |
|
"loss": 3.0146, |
|
"step": 6660 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"learning_rate": 0.00018711691777408926, |
|
"loss": 3.0116, |
|
"step": 6670 |
|
}, |
|
{ |
|
"epoch": 4.21, |
|
"learning_rate": 0.00018682888840518256, |
|
"loss": 3.0063, |
|
"step": 6680 |
|
}, |
|
{ |
|
"epoch": 4.22, |
|
"learning_rate": 0.00018654071445869965, |
|
"loss": 3.0291, |
|
"step": 6690 |
|
}, |
|
{ |
|
"epoch": 4.22, |
|
"learning_rate": 0.0001862523970659124, |
|
"loss": 3.0157, |
|
"step": 6700 |
|
}, |
|
{ |
|
"epoch": 4.23, |
|
"learning_rate": 0.00018596393735865606, |
|
"loss": 3.0168, |
|
"step": 6710 |
|
}, |
|
{ |
|
"epoch": 4.24, |
|
"learning_rate": 0.0001856753364693244, |
|
"loss": 3.0159, |
|
"step": 6720 |
|
}, |
|
{ |
|
"epoch": 4.24, |
|
"learning_rate": 0.00018538659553086546, |
|
"loss": 3.0221, |
|
"step": 6730 |
|
}, |
|
{ |
|
"epoch": 4.25, |
|
"learning_rate": 0.00018509771567677704, |
|
"loss": 3.0041, |
|
"step": 6740 |
|
}, |
|
{ |
|
"epoch": 4.25, |
|
"learning_rate": 0.00018480869804110236, |
|
"loss": 3.0101, |
|
"step": 6750 |
|
}, |
|
{ |
|
"epoch": 4.26, |
|
"learning_rate": 0.00018451954375842543, |
|
"loss": 3.0115, |
|
"step": 6760 |
|
}, |
|
{ |
|
"epoch": 4.27, |
|
"learning_rate": 0.00018423025396386672, |
|
"loss": 3.0159, |
|
"step": 6770 |
|
}, |
|
{ |
|
"epoch": 4.27, |
|
"learning_rate": 0.00018394082979307868, |
|
"loss": 3.0139, |
|
"step": 6780 |
|
}, |
|
{ |
|
"epoch": 4.28, |
|
"learning_rate": 0.00018365127238224134, |
|
"loss": 2.995, |
|
"step": 6790 |
|
}, |
|
{ |
|
"epoch": 4.29, |
|
"learning_rate": 0.00018336158286805763, |
|
"loss": 3.0203, |
|
"step": 6800 |
|
}, |
|
{ |
|
"epoch": 4.29, |
|
"learning_rate": 0.00018307176238774924, |
|
"loss": 3.0184, |
|
"step": 6810 |
|
}, |
|
{ |
|
"epoch": 4.3, |
|
"learning_rate": 0.00018278181207905184, |
|
"loss": 3.009, |
|
"step": 6820 |
|
}, |
|
{ |
|
"epoch": 4.3, |
|
"learning_rate": 0.00018249173308021089, |
|
"loss": 3.0187, |
|
"step": 6830 |
|
}, |
|
{ |
|
"epoch": 4.31, |
|
"learning_rate": 0.00018220152652997693, |
|
"loss": 3.011, |
|
"step": 6840 |
|
}, |
|
{ |
|
"epoch": 4.32, |
|
"learning_rate": 0.00018191119356760136, |
|
"loss": 3.0173, |
|
"step": 6850 |
|
}, |
|
{ |
|
"epoch": 4.32, |
|
"learning_rate": 0.0001816207353328316, |
|
"loss": 3.0101, |
|
"step": 6860 |
|
}, |
|
{ |
|
"epoch": 4.33, |
|
"learning_rate": 0.00018133015296590717, |
|
"loss": 3.0029, |
|
"step": 6870 |
|
}, |
|
{ |
|
"epoch": 4.34, |
|
"learning_rate": 0.0001810394476075546, |
|
"loss": 3.0139, |
|
"step": 6880 |
|
}, |
|
{ |
|
"epoch": 4.34, |
|
"learning_rate": 0.00018074862039898338, |
|
"loss": 3.0091, |
|
"step": 6890 |
|
}, |
|
{ |
|
"epoch": 4.35, |
|
"learning_rate": 0.00018045767248188138, |
|
"loss": 3.0094, |
|
"step": 6900 |
|
}, |
|
{ |
|
"epoch": 4.35, |
|
"learning_rate": 0.0001801666049984102, |
|
"loss": 3.0028, |
|
"step": 6910 |
|
}, |
|
{ |
|
"epoch": 4.36, |
|
"learning_rate": 0.0001798754190912009, |
|
"loss": 2.9853, |
|
"step": 6920 |
|
}, |
|
{ |
|
"epoch": 4.37, |
|
"learning_rate": 0.00017958411590334944, |
|
"loss": 3.0048, |
|
"step": 6930 |
|
}, |
|
{ |
|
"epoch": 4.37, |
|
"learning_rate": 0.00017929269657841212, |
|
"loss": 3.0061, |
|
"step": 6940 |
|
}, |
|
{ |
|
"epoch": 4.38, |
|
"learning_rate": 0.00017900116226040127, |
|
"loss": 2.9846, |
|
"step": 6950 |
|
}, |
|
{ |
|
"epoch": 4.39, |
|
"learning_rate": 0.0001787095140937805, |
|
"loss": 2.9917, |
|
"step": 6960 |
|
}, |
|
{ |
|
"epoch": 4.39, |
|
"learning_rate": 0.00017841775322346043, |
|
"loss": 3.0093, |
|
"step": 6970 |
|
}, |
|
{ |
|
"epoch": 4.4, |
|
"learning_rate": 0.0001781258807947941, |
|
"loss": 2.9916, |
|
"step": 6980 |
|
}, |
|
{ |
|
"epoch": 4.41, |
|
"learning_rate": 0.0001778338979535725, |
|
"loss": 2.999, |
|
"step": 6990 |
|
}, |
|
{ |
|
"epoch": 4.41, |
|
"learning_rate": 0.00017754180584602015, |
|
"loss": 3.0212, |
|
"step": 7000 |
|
}, |
|
{ |
|
"epoch": 4.41, |
|
"eval_loss": 3.0574326515197754, |
|
"eval_runtime": 231.6039, |
|
"eval_samples_per_second": 328.729, |
|
"eval_steps_per_second": 5.872, |
|
"step": 7000 |
|
}, |
|
{ |
|
"epoch": 4.42, |
|
"learning_rate": 0.00017724960561879026, |
|
"loss": 2.9958, |
|
"step": 7010 |
|
}, |
|
{ |
|
"epoch": 4.42, |
|
"learning_rate": 0.00017695729841896073, |
|
"loss": 2.9982, |
|
"step": 7020 |
|
}, |
|
{ |
|
"epoch": 4.43, |
|
"learning_rate": 0.0001766648853940293, |
|
"loss": 3.0031, |
|
"step": 7030 |
|
}, |
|
{ |
|
"epoch": 4.44, |
|
"learning_rate": 0.00017637236769190914, |
|
"loss": 3.0053, |
|
"step": 7040 |
|
}, |
|
{ |
|
"epoch": 4.44, |
|
"learning_rate": 0.00017607974646092437, |
|
"loss": 2.9969, |
|
"step": 7050 |
|
}, |
|
{ |
|
"epoch": 4.45, |
|
"learning_rate": 0.00017578702284980555, |
|
"loss": 3.0035, |
|
"step": 7060 |
|
}, |
|
{ |
|
"epoch": 4.46, |
|
"learning_rate": 0.00017549419800768498, |
|
"loss": 2.9902, |
|
"step": 7070 |
|
}, |
|
{ |
|
"epoch": 4.46, |
|
"learning_rate": 0.00017520127308409263, |
|
"loss": 2.991, |
|
"step": 7080 |
|
}, |
|
{ |
|
"epoch": 4.47, |
|
"learning_rate": 0.00017490824922895116, |
|
"loss": 2.9895, |
|
"step": 7090 |
|
}, |
|
{ |
|
"epoch": 4.47, |
|
"learning_rate": 0.00017461512759257172, |
|
"loss": 2.9887, |
|
"step": 7100 |
|
}, |
|
{ |
|
"epoch": 4.48, |
|
"learning_rate": 0.00017432190932564916, |
|
"loss": 2.9906, |
|
"step": 7110 |
|
}, |
|
{ |
|
"epoch": 4.49, |
|
"learning_rate": 0.00017402859557925782, |
|
"loss": 2.9885, |
|
"step": 7120 |
|
}, |
|
{ |
|
"epoch": 4.49, |
|
"learning_rate": 0.00017373518750484678, |
|
"loss": 2.993, |
|
"step": 7130 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 0.00017344168625423552, |
|
"loss": 2.9871, |
|
"step": 7140 |
|
}, |
|
{ |
|
"epoch": 4.51, |
|
"learning_rate": 0.0001731480929796091, |
|
"loss": 3.0084, |
|
"step": 7150 |
|
}, |
|
{ |
|
"epoch": 4.51, |
|
"learning_rate": 0.00017285440883351406, |
|
"loss": 2.9945, |
|
"step": 7160 |
|
}, |
|
{ |
|
"epoch": 4.52, |
|
"learning_rate": 0.0001725606349688535, |
|
"loss": 2.9958, |
|
"step": 7170 |
|
}, |
|
{ |
|
"epoch": 4.52, |
|
"learning_rate": 0.0001722667725388828, |
|
"loss": 3.003, |
|
"step": 7180 |
|
}, |
|
{ |
|
"epoch": 4.53, |
|
"learning_rate": 0.00017197282269720504, |
|
"loss": 2.9912, |
|
"step": 7190 |
|
}, |
|
{ |
|
"epoch": 4.54, |
|
"learning_rate": 0.00017167878659776637, |
|
"loss": 2.989, |
|
"step": 7200 |
|
}, |
|
{ |
|
"epoch": 4.54, |
|
"learning_rate": 0.0001713846653948517, |
|
"loss": 2.9867, |
|
"step": 7210 |
|
}, |
|
{ |
|
"epoch": 4.55, |
|
"learning_rate": 0.0001710904602430798, |
|
"loss": 2.9781, |
|
"step": 7220 |
|
}, |
|
{ |
|
"epoch": 4.56, |
|
"learning_rate": 0.00017079617229739922, |
|
"loss": 2.9936, |
|
"step": 7230 |
|
}, |
|
{ |
|
"epoch": 4.56, |
|
"learning_rate": 0.00017050180271308333, |
|
"loss": 2.98, |
|
"step": 7240 |
|
}, |
|
{ |
|
"epoch": 4.57, |
|
"learning_rate": 0.00017020735264572622, |
|
"loss": 2.9854, |
|
"step": 7250 |
|
}, |
|
{ |
|
"epoch": 4.58, |
|
"learning_rate": 0.00016991282325123778, |
|
"loss": 2.9879, |
|
"step": 7260 |
|
}, |
|
{ |
|
"epoch": 4.58, |
|
"learning_rate": 0.00016961821568583919, |
|
"loss": 2.9999, |
|
"step": 7270 |
|
}, |
|
{ |
|
"epoch": 4.59, |
|
"learning_rate": 0.0001693235311060588, |
|
"loss": 3.0009, |
|
"step": 7280 |
|
}, |
|
{ |
|
"epoch": 4.59, |
|
"learning_rate": 0.00016902877066872707, |
|
"loss": 2.986, |
|
"step": 7290 |
|
}, |
|
{ |
|
"epoch": 4.6, |
|
"learning_rate": 0.00016873393553097236, |
|
"loss": 2.9756, |
|
"step": 7300 |
|
}, |
|
{ |
|
"epoch": 4.61, |
|
"learning_rate": 0.0001684390268502162, |
|
"loss": 2.9828, |
|
"step": 7310 |
|
}, |
|
{ |
|
"epoch": 4.61, |
|
"learning_rate": 0.00016814404578416884, |
|
"loss": 2.9746, |
|
"step": 7320 |
|
}, |
|
{ |
|
"epoch": 4.62, |
|
"learning_rate": 0.00016784899349082472, |
|
"loss": 2.9849, |
|
"step": 7330 |
|
}, |
|
{ |
|
"epoch": 4.63, |
|
"learning_rate": 0.00016755387112845792, |
|
"loss": 2.9925, |
|
"step": 7340 |
|
}, |
|
{ |
|
"epoch": 4.63, |
|
"learning_rate": 0.00016725867985561748, |
|
"loss": 2.9901, |
|
"step": 7350 |
|
}, |
|
{ |
|
"epoch": 4.64, |
|
"learning_rate": 0.00016696342083112308, |
|
"loss": 2.9781, |
|
"step": 7360 |
|
}, |
|
{ |
|
"epoch": 4.64, |
|
"learning_rate": 0.0001666680952140603, |
|
"loss": 2.9703, |
|
"step": 7370 |
|
}, |
|
{ |
|
"epoch": 4.65, |
|
"learning_rate": 0.00016637270416377617, |
|
"loss": 2.9757, |
|
"step": 7380 |
|
}, |
|
{ |
|
"epoch": 4.66, |
|
"learning_rate": 0.00016607724883987456, |
|
"loss": 2.9868, |
|
"step": 7390 |
|
}, |
|
{ |
|
"epoch": 4.66, |
|
"learning_rate": 0.0001657817304022117, |
|
"loss": 2.9959, |
|
"step": 7400 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"learning_rate": 0.00016548615001089155, |
|
"loss": 2.9855, |
|
"step": 7410 |
|
}, |
|
{ |
|
"epoch": 4.68, |
|
"learning_rate": 0.00016519050882626127, |
|
"loss": 2.9812, |
|
"step": 7420 |
|
}, |
|
{ |
|
"epoch": 4.68, |
|
"learning_rate": 0.00016489480800890665, |
|
"loss": 2.9826, |
|
"step": 7430 |
|
}, |
|
{ |
|
"epoch": 4.69, |
|
"learning_rate": 0.00016459904871964767, |
|
"loss": 2.9794, |
|
"step": 7440 |
|
}, |
|
{ |
|
"epoch": 4.7, |
|
"learning_rate": 0.0001643032321195338, |
|
"loss": 2.9787, |
|
"step": 7450 |
|
}, |
|
{ |
|
"epoch": 4.7, |
|
"learning_rate": 0.00016400735936983945, |
|
"loss": 2.975, |
|
"step": 7460 |
|
}, |
|
{ |
|
"epoch": 4.71, |
|
"learning_rate": 0.00016371143163205947, |
|
"loss": 2.9893, |
|
"step": 7470 |
|
}, |
|
{ |
|
"epoch": 4.71, |
|
"learning_rate": 0.0001634154500679047, |
|
"loss": 2.9887, |
|
"step": 7480 |
|
}, |
|
{ |
|
"epoch": 4.72, |
|
"learning_rate": 0.00016311941583929706, |
|
"loss": 2.9831, |
|
"step": 7490 |
|
}, |
|
{ |
|
"epoch": 4.73, |
|
"learning_rate": 0.00016282333010836543, |
|
"loss": 2.9627, |
|
"step": 7500 |
|
}, |
|
{ |
|
"epoch": 4.73, |
|
"eval_loss": 3.032374858856201, |
|
"eval_runtime": 231.6607, |
|
"eval_samples_per_second": 328.649, |
|
"eval_steps_per_second": 5.871, |
|
"step": 7500 |
|
}, |
|
{ |
|
"epoch": 4.73, |
|
"learning_rate": 0.00016252719403744072, |
|
"loss": 2.9821, |
|
"step": 7510 |
|
}, |
|
{ |
|
"epoch": 4.74, |
|
"learning_rate": 0.00016223100878905153, |
|
"loss": 2.9728, |
|
"step": 7520 |
|
}, |
|
{ |
|
"epoch": 4.75, |
|
"learning_rate": 0.00016193477552591952, |
|
"loss": 2.9809, |
|
"step": 7530 |
|
}, |
|
{ |
|
"epoch": 4.75, |
|
"learning_rate": 0.00016163849541095472, |
|
"loss": 2.9836, |
|
"step": 7540 |
|
}, |
|
{ |
|
"epoch": 4.76, |
|
"learning_rate": 0.00016134216960725126, |
|
"loss": 2.9723, |
|
"step": 7550 |
|
}, |
|
{ |
|
"epoch": 4.76, |
|
"learning_rate": 0.00016104579927808256, |
|
"loss": 2.9781, |
|
"step": 7560 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"learning_rate": 0.00016074938558689674, |
|
"loss": 2.9796, |
|
"step": 7570 |
|
}, |
|
{ |
|
"epoch": 4.78, |
|
"learning_rate": 0.00016045292969731233, |
|
"loss": 2.9788, |
|
"step": 7580 |
|
}, |
|
{ |
|
"epoch": 4.78, |
|
"learning_rate": 0.00016015643277311327, |
|
"loss": 2.9822, |
|
"step": 7590 |
|
}, |
|
{ |
|
"epoch": 4.79, |
|
"learning_rate": 0.0001598598959782449, |
|
"loss": 2.9757, |
|
"step": 7600 |
|
}, |
|
{ |
|
"epoch": 4.8, |
|
"learning_rate": 0.00015956332047680874, |
|
"loss": 2.9686, |
|
"step": 7610 |
|
}, |
|
{ |
|
"epoch": 4.8, |
|
"learning_rate": 0.00015926670743305856, |
|
"loss": 2.9649, |
|
"step": 7620 |
|
}, |
|
{ |
|
"epoch": 4.81, |
|
"learning_rate": 0.00015897005801139528, |
|
"loss": 2.979, |
|
"step": 7630 |
|
}, |
|
{ |
|
"epoch": 4.81, |
|
"learning_rate": 0.00015867337337636278, |
|
"loss": 2.9611, |
|
"step": 7640 |
|
}, |
|
{ |
|
"epoch": 4.82, |
|
"learning_rate": 0.00015837665469264306, |
|
"loss": 2.9606, |
|
"step": 7650 |
|
}, |
|
{ |
|
"epoch": 4.83, |
|
"learning_rate": 0.00015807990312505186, |
|
"loss": 2.9722, |
|
"step": 7660 |
|
}, |
|
{ |
|
"epoch": 4.83, |
|
"learning_rate": 0.00015778311983853402, |
|
"loss": 2.9699, |
|
"step": 7670 |
|
}, |
|
{ |
|
"epoch": 4.84, |
|
"learning_rate": 0.0001574863059981588, |
|
"loss": 2.9647, |
|
"step": 7680 |
|
}, |
|
{ |
|
"epoch": 4.85, |
|
"learning_rate": 0.00015718946276911555, |
|
"loss": 2.9704, |
|
"step": 7690 |
|
}, |
|
{ |
|
"epoch": 4.85, |
|
"learning_rate": 0.00015689259131670887, |
|
"loss": 2.9684, |
|
"step": 7700 |
|
}, |
|
{ |
|
"epoch": 4.86, |
|
"learning_rate": 0.00015659569280635418, |
|
"loss": 2.9748, |
|
"step": 7710 |
|
}, |
|
{ |
|
"epoch": 4.87, |
|
"learning_rate": 0.00015629876840357322, |
|
"loss": 2.9708, |
|
"step": 7720 |
|
}, |
|
{ |
|
"epoch": 4.87, |
|
"learning_rate": 0.00015600181927398918, |
|
"loss": 2.9732, |
|
"step": 7730 |
|
}, |
|
{ |
|
"epoch": 4.88, |
|
"learning_rate": 0.00015570484658332249, |
|
"loss": 2.9672, |
|
"step": 7740 |
|
}, |
|
{ |
|
"epoch": 4.88, |
|
"learning_rate": 0.00015540785149738596, |
|
"loss": 2.9743, |
|
"step": 7750 |
|
}, |
|
{ |
|
"epoch": 4.89, |
|
"learning_rate": 0.00015511083518208053, |
|
"loss": 2.9541, |
|
"step": 7760 |
|
}, |
|
{ |
|
"epoch": 4.9, |
|
"learning_rate": 0.00015481379880339012, |
|
"loss": 2.9664, |
|
"step": 7770 |
|
}, |
|
{ |
|
"epoch": 4.9, |
|
"learning_rate": 0.00015451674352737768, |
|
"loss": 2.9756, |
|
"step": 7780 |
|
}, |
|
{ |
|
"epoch": 4.91, |
|
"learning_rate": 0.00015421967052018032, |
|
"loss": 2.965, |
|
"step": 7790 |
|
}, |
|
{ |
|
"epoch": 4.92, |
|
"learning_rate": 0.0001539225809480047, |
|
"loss": 2.9597, |
|
"step": 7800 |
|
}, |
|
{ |
|
"epoch": 4.92, |
|
"learning_rate": 0.00015362547597712247, |
|
"loss": 2.9646, |
|
"step": 7810 |
|
}, |
|
{ |
|
"epoch": 4.93, |
|
"learning_rate": 0.00015332835677386587, |
|
"loss": 2.9423, |
|
"step": 7820 |
|
}, |
|
{ |
|
"epoch": 4.93, |
|
"learning_rate": 0.0001530312245046228, |
|
"loss": 2.97, |
|
"step": 7830 |
|
}, |
|
{ |
|
"epoch": 4.94, |
|
"learning_rate": 0.0001527340803358326, |
|
"loss": 2.9503, |
|
"step": 7840 |
|
}, |
|
{ |
|
"epoch": 4.95, |
|
"learning_rate": 0.00015243692543398133, |
|
"loss": 2.9649, |
|
"step": 7850 |
|
}, |
|
{ |
|
"epoch": 4.95, |
|
"learning_rate": 0.0001521397609655972, |
|
"loss": 2.9552, |
|
"step": 7860 |
|
}, |
|
{ |
|
"epoch": 4.96, |
|
"learning_rate": 0.0001518425880972459, |
|
"loss": 2.9635, |
|
"step": 7870 |
|
}, |
|
{ |
|
"epoch": 4.97, |
|
"learning_rate": 0.0001515454079955261, |
|
"loss": 2.9696, |
|
"step": 7880 |
|
}, |
|
{ |
|
"epoch": 4.97, |
|
"learning_rate": 0.00015124822182706484, |
|
"loss": 2.9638, |
|
"step": 7890 |
|
}, |
|
{ |
|
"epoch": 4.98, |
|
"learning_rate": 0.0001509510307585131, |
|
"loss": 2.9675, |
|
"step": 7900 |
|
}, |
|
{ |
|
"epoch": 4.99, |
|
"learning_rate": 0.000150653835956541, |
|
"loss": 2.957, |
|
"step": 7910 |
|
}, |
|
{ |
|
"epoch": 4.99, |
|
"learning_rate": 0.00015035663858783336, |
|
"loss": 2.9649, |
|
"step": 7920 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 0.00015005943981908498, |
|
"loss": 2.959, |
|
"step": 7930 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 0.0001497622408169963, |
|
"loss": 2.9612, |
|
"step": 7940 |
|
}, |
|
{ |
|
"epoch": 5.01, |
|
"learning_rate": 0.00014946504274826854, |
|
"loss": 2.9554, |
|
"step": 7950 |
|
}, |
|
{ |
|
"epoch": 5.02, |
|
"learning_rate": 0.00014916784677959934, |
|
"loss": 2.9646, |
|
"step": 7960 |
|
}, |
|
{ |
|
"epoch": 5.02, |
|
"learning_rate": 0.00014887065407767814, |
|
"loss": 2.9496, |
|
"step": 7970 |
|
}, |
|
{ |
|
"epoch": 5.03, |
|
"learning_rate": 0.00014857346580918144, |
|
"loss": 2.9502, |
|
"step": 7980 |
|
}, |
|
{ |
|
"epoch": 5.04, |
|
"learning_rate": 0.00014827628314076837, |
|
"loss": 2.9422, |
|
"step": 7990 |
|
}, |
|
{ |
|
"epoch": 5.04, |
|
"learning_rate": 0.00014797910723907603, |
|
"loss": 2.9347, |
|
"step": 8000 |
|
}, |
|
{ |
|
"epoch": 5.04, |
|
"eval_loss": 3.01143479347229, |
|
"eval_runtime": 232.2812, |
|
"eval_samples_per_second": 327.771, |
|
"eval_steps_per_second": 5.855, |
|
"step": 8000 |
|
}, |
|
{ |
|
"epoch": 5.05, |
|
"learning_rate": 0.0001476819392707152, |
|
"loss": 2.9407, |
|
"step": 8010 |
|
}, |
|
{ |
|
"epoch": 5.05, |
|
"learning_rate": 0.00014738478040226524, |
|
"loss": 2.9393, |
|
"step": 8020 |
|
}, |
|
{ |
|
"epoch": 5.06, |
|
"learning_rate": 0.00014708763180026985, |
|
"loss": 2.9287, |
|
"step": 8030 |
|
}, |
|
{ |
|
"epoch": 5.07, |
|
"learning_rate": 0.0001467904946312325, |
|
"loss": 2.9391, |
|
"step": 8040 |
|
}, |
|
{ |
|
"epoch": 5.07, |
|
"learning_rate": 0.00014649337006161176, |
|
"loss": 2.9454, |
|
"step": 8050 |
|
}, |
|
{ |
|
"epoch": 5.08, |
|
"learning_rate": 0.00014619625925781665, |
|
"loss": 2.9597, |
|
"step": 8060 |
|
}, |
|
{ |
|
"epoch": 5.09, |
|
"learning_rate": 0.00014589916338620233, |
|
"loss": 2.9465, |
|
"step": 8070 |
|
}, |
|
{ |
|
"epoch": 5.09, |
|
"learning_rate": 0.0001456020836130651, |
|
"loss": 2.9496, |
|
"step": 8080 |
|
}, |
|
{ |
|
"epoch": 5.1, |
|
"learning_rate": 0.00014530502110463825, |
|
"loss": 2.9388, |
|
"step": 8090 |
|
}, |
|
{ |
|
"epoch": 5.1, |
|
"learning_rate": 0.00014500797702708725, |
|
"loss": 2.9439, |
|
"step": 8100 |
|
}, |
|
{ |
|
"epoch": 5.11, |
|
"learning_rate": 0.0001447109525465052, |
|
"loss": 2.9382, |
|
"step": 8110 |
|
}, |
|
{ |
|
"epoch": 5.12, |
|
"learning_rate": 0.00014441394882890824, |
|
"loss": 2.9319, |
|
"step": 8120 |
|
}, |
|
{ |
|
"epoch": 5.12, |
|
"learning_rate": 0.0001441169670402311, |
|
"loss": 2.9512, |
|
"step": 8130 |
|
}, |
|
{ |
|
"epoch": 5.13, |
|
"learning_rate": 0.00014382000834632227, |
|
"loss": 2.9437, |
|
"step": 8140 |
|
}, |
|
{ |
|
"epoch": 5.14, |
|
"learning_rate": 0.00014352307391293973, |
|
"loss": 2.9514, |
|
"step": 8150 |
|
}, |
|
{ |
|
"epoch": 5.14, |
|
"learning_rate": 0.00014322616490574614, |
|
"loss": 2.9437, |
|
"step": 8160 |
|
}, |
|
{ |
|
"epoch": 5.15, |
|
"learning_rate": 0.00014292928249030434, |
|
"loss": 2.9427, |
|
"step": 8170 |
|
}, |
|
{ |
|
"epoch": 5.16, |
|
"learning_rate": 0.00014263242783207284, |
|
"loss": 2.9398, |
|
"step": 8180 |
|
}, |
|
{ |
|
"epoch": 5.16, |
|
"learning_rate": 0.0001423356020964011, |
|
"loss": 2.9319, |
|
"step": 8190 |
|
}, |
|
{ |
|
"epoch": 5.17, |
|
"learning_rate": 0.00014203880644852518, |
|
"loss": 2.9489, |
|
"step": 8200 |
|
}, |
|
{ |
|
"epoch": 5.17, |
|
"learning_rate": 0.00014174204205356287, |
|
"loss": 2.9451, |
|
"step": 8210 |
|
}, |
|
{ |
|
"epoch": 5.18, |
|
"learning_rate": 0.0001414453100765094, |
|
"loss": 2.9535, |
|
"step": 8220 |
|
}, |
|
{ |
|
"epoch": 5.19, |
|
"learning_rate": 0.00014114861168223255, |
|
"loss": 2.9362, |
|
"step": 8230 |
|
}, |
|
{ |
|
"epoch": 5.19, |
|
"learning_rate": 0.00014085194803546846, |
|
"loss": 2.9405, |
|
"step": 8240 |
|
}, |
|
{ |
|
"epoch": 5.2, |
|
"learning_rate": 0.00014055532030081682, |
|
"loss": 2.943, |
|
"step": 8250 |
|
}, |
|
{ |
|
"epoch": 5.21, |
|
"learning_rate": 0.00014025872964273627, |
|
"loss": 2.9429, |
|
"step": 8260 |
|
}, |
|
{ |
|
"epoch": 5.21, |
|
"learning_rate": 0.00013996217722553995, |
|
"loss": 2.9638, |
|
"step": 8270 |
|
}, |
|
{ |
|
"epoch": 5.22, |
|
"learning_rate": 0.0001396656642133909, |
|
"loss": 2.938, |
|
"step": 8280 |
|
}, |
|
{ |
|
"epoch": 5.22, |
|
"learning_rate": 0.00013936919177029742, |
|
"loss": 2.931, |
|
"step": 8290 |
|
}, |
|
{ |
|
"epoch": 5.23, |
|
"learning_rate": 0.00013907276106010858, |
|
"loss": 2.9352, |
|
"step": 8300 |
|
}, |
|
{ |
|
"epoch": 5.24, |
|
"learning_rate": 0.00013877637324650961, |
|
"loss": 2.9381, |
|
"step": 8310 |
|
}, |
|
{ |
|
"epoch": 5.24, |
|
"learning_rate": 0.00013848002949301736, |
|
"loss": 2.9373, |
|
"step": 8320 |
|
}, |
|
{ |
|
"epoch": 5.25, |
|
"learning_rate": 0.00013818373096297567, |
|
"loss": 2.9279, |
|
"step": 8330 |
|
}, |
|
{ |
|
"epoch": 5.26, |
|
"learning_rate": 0.00013788747881955096, |
|
"loss": 2.936, |
|
"step": 8340 |
|
}, |
|
{ |
|
"epoch": 5.26, |
|
"learning_rate": 0.00013759127422572736, |
|
"loss": 2.9489, |
|
"step": 8350 |
|
}, |
|
{ |
|
"epoch": 5.27, |
|
"learning_rate": 0.00013729511834430252, |
|
"loss": 2.9451, |
|
"step": 8360 |
|
}, |
|
{ |
|
"epoch": 5.27, |
|
"learning_rate": 0.00013699901233788276, |
|
"loss": 2.9402, |
|
"step": 8370 |
|
}, |
|
{ |
|
"epoch": 5.28, |
|
"learning_rate": 0.0001367029573688786, |
|
"loss": 2.9519, |
|
"step": 8380 |
|
}, |
|
{ |
|
"epoch": 5.29, |
|
"learning_rate": 0.00013640695459950038, |
|
"loss": 2.9457, |
|
"step": 8390 |
|
}, |
|
{ |
|
"epoch": 5.29, |
|
"learning_rate": 0.00013611100519175326, |
|
"loss": 2.9177, |
|
"step": 8400 |
|
}, |
|
{ |
|
"epoch": 5.3, |
|
"learning_rate": 0.0001358151103074331, |
|
"loss": 2.928, |
|
"step": 8410 |
|
}, |
|
{ |
|
"epoch": 5.31, |
|
"learning_rate": 0.00013551927110812162, |
|
"loss": 2.9448, |
|
"step": 8420 |
|
}, |
|
{ |
|
"epoch": 5.31, |
|
"learning_rate": 0.00013522348875518207, |
|
"loss": 2.9362, |
|
"step": 8430 |
|
}, |
|
{ |
|
"epoch": 5.32, |
|
"learning_rate": 0.00013492776440975437, |
|
"loss": 2.9273, |
|
"step": 8440 |
|
}, |
|
{ |
|
"epoch": 5.33, |
|
"learning_rate": 0.00013463209923275085, |
|
"loss": 2.9254, |
|
"step": 8450 |
|
}, |
|
{ |
|
"epoch": 5.33, |
|
"learning_rate": 0.0001343364943848515, |
|
"loss": 2.9433, |
|
"step": 8460 |
|
}, |
|
{ |
|
"epoch": 5.34, |
|
"learning_rate": 0.0001340409510264995, |
|
"loss": 2.9455, |
|
"step": 8470 |
|
}, |
|
{ |
|
"epoch": 5.34, |
|
"learning_rate": 0.00013374547031789672, |
|
"loss": 2.9362, |
|
"step": 8480 |
|
}, |
|
{ |
|
"epoch": 5.35, |
|
"learning_rate": 0.0001334500534189989, |
|
"loss": 2.9327, |
|
"step": 8490 |
|
}, |
|
{ |
|
"epoch": 5.36, |
|
"learning_rate": 0.00013315470148951146, |
|
"loss": 2.9371, |
|
"step": 8500 |
|
}, |
|
{ |
|
"epoch": 5.36, |
|
"eval_loss": 2.9945642948150635, |
|
"eval_runtime": 231.6664, |
|
"eval_samples_per_second": 328.641, |
|
"eval_steps_per_second": 5.871, |
|
"step": 8500 |
|
}, |
|
{ |
|
"epoch": 5.36, |
|
"learning_rate": 0.0001328594156888847, |
|
"loss": 2.9253, |
|
"step": 8510 |
|
}, |
|
{ |
|
"epoch": 5.37, |
|
"learning_rate": 0.00013256419717630932, |
|
"loss": 2.9306, |
|
"step": 8520 |
|
}, |
|
{ |
|
"epoch": 5.38, |
|
"learning_rate": 0.00013226904711071191, |
|
"loss": 2.9248, |
|
"step": 8530 |
|
}, |
|
{ |
|
"epoch": 5.38, |
|
"learning_rate": 0.00013197396665075035, |
|
"loss": 2.9175, |
|
"step": 8540 |
|
}, |
|
{ |
|
"epoch": 5.39, |
|
"learning_rate": 0.0001316789569548092, |
|
"loss": 2.9245, |
|
"step": 8550 |
|
}, |
|
{ |
|
"epoch": 5.39, |
|
"learning_rate": 0.0001313840191809953, |
|
"loss": 2.9269, |
|
"step": 8560 |
|
}, |
|
{ |
|
"epoch": 5.4, |
|
"learning_rate": 0.00013108915448713313, |
|
"loss": 2.9211, |
|
"step": 8570 |
|
}, |
|
{ |
|
"epoch": 5.41, |
|
"learning_rate": 0.00013079436403076026, |
|
"loss": 2.9288, |
|
"step": 8580 |
|
}, |
|
{ |
|
"epoch": 5.41, |
|
"learning_rate": 0.0001304996489691229, |
|
"loss": 2.9245, |
|
"step": 8590 |
|
}, |
|
{ |
|
"epoch": 5.42, |
|
"learning_rate": 0.0001302050104591712, |
|
"loss": 2.9152, |
|
"step": 8600 |
|
}, |
|
{ |
|
"epoch": 5.43, |
|
"learning_rate": 0.00012991044965755482, |
|
"loss": 2.9415, |
|
"step": 8610 |
|
}, |
|
{ |
|
"epoch": 5.43, |
|
"learning_rate": 0.00012961596772061845, |
|
"loss": 2.9324, |
|
"step": 8620 |
|
}, |
|
{ |
|
"epoch": 5.44, |
|
"learning_rate": 0.00012932156580439705, |
|
"loss": 2.9423, |
|
"step": 8630 |
|
}, |
|
{ |
|
"epoch": 5.45, |
|
"learning_rate": 0.0001290272450646115, |
|
"loss": 2.9338, |
|
"step": 8640 |
|
}, |
|
{ |
|
"epoch": 5.45, |
|
"learning_rate": 0.00012873300665666398, |
|
"loss": 2.9298, |
|
"step": 8650 |
|
}, |
|
{ |
|
"epoch": 5.46, |
|
"learning_rate": 0.00012843885173563358, |
|
"loss": 2.9212, |
|
"step": 8660 |
|
}, |
|
{ |
|
"epoch": 5.46, |
|
"learning_rate": 0.0001281447814562715, |
|
"loss": 2.9332, |
|
"step": 8670 |
|
}, |
|
{ |
|
"epoch": 5.47, |
|
"learning_rate": 0.00012785079697299676, |
|
"loss": 2.9262, |
|
"step": 8680 |
|
}, |
|
{ |
|
"epoch": 5.48, |
|
"learning_rate": 0.0001275568994398916, |
|
"loss": 2.9117, |
|
"step": 8690 |
|
}, |
|
{ |
|
"epoch": 5.48, |
|
"learning_rate": 0.00012726309001069678, |
|
"loss": 2.9185, |
|
"step": 8700 |
|
}, |
|
{ |
|
"epoch": 5.49, |
|
"learning_rate": 0.0001269693698388073, |
|
"loss": 2.9324, |
|
"step": 8710 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 0.00012667574007726778, |
|
"loss": 2.918, |
|
"step": 8720 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 0.0001263822018787679, |
|
"loss": 2.9052, |
|
"step": 8730 |
|
}, |
|
{ |
|
"epoch": 5.51, |
|
"learning_rate": 0.00012608875639563786, |
|
"loss": 2.9041, |
|
"step": 8740 |
|
}, |
|
{ |
|
"epoch": 5.51, |
|
"learning_rate": 0.00012579540477984395, |
|
"loss": 2.9162, |
|
"step": 8750 |
|
}, |
|
{ |
|
"epoch": 5.52, |
|
"learning_rate": 0.0001255021481829839, |
|
"loss": 2.9222, |
|
"step": 8760 |
|
}, |
|
{ |
|
"epoch": 5.53, |
|
"learning_rate": 0.00012520898775628236, |
|
"loss": 2.9139, |
|
"step": 8770 |
|
}, |
|
{ |
|
"epoch": 5.53, |
|
"learning_rate": 0.00012491592465058683, |
|
"loss": 2.9275, |
|
"step": 8780 |
|
}, |
|
{ |
|
"epoch": 5.54, |
|
"learning_rate": 0.00012462296001636224, |
|
"loss": 2.9076, |
|
"step": 8790 |
|
}, |
|
{ |
|
"epoch": 5.55, |
|
"learning_rate": 0.0001243300950036873, |
|
"loss": 2.9258, |
|
"step": 8800 |
|
}, |
|
{ |
|
"epoch": 5.55, |
|
"learning_rate": 0.0001240373307622495, |
|
"loss": 2.9246, |
|
"step": 8810 |
|
}, |
|
{ |
|
"epoch": 5.56, |
|
"learning_rate": 0.0001237446684413408, |
|
"loss": 2.9109, |
|
"step": 8820 |
|
}, |
|
{ |
|
"epoch": 5.56, |
|
"learning_rate": 0.00012345210918985306, |
|
"loss": 2.9298, |
|
"step": 8830 |
|
}, |
|
{ |
|
"epoch": 5.57, |
|
"learning_rate": 0.00012315965415627347, |
|
"loss": 2.9193, |
|
"step": 8840 |
|
}, |
|
{ |
|
"epoch": 5.58, |
|
"learning_rate": 0.0001228673044886801, |
|
"loss": 2.9327, |
|
"step": 8850 |
|
}, |
|
{ |
|
"epoch": 5.58, |
|
"learning_rate": 0.0001225750613347374, |
|
"loss": 2.9271, |
|
"step": 8860 |
|
}, |
|
{ |
|
"epoch": 5.59, |
|
"learning_rate": 0.00012228292584169173, |
|
"loss": 2.9148, |
|
"step": 8870 |
|
}, |
|
{ |
|
"epoch": 5.6, |
|
"learning_rate": 0.00012199089915636677, |
|
"loss": 2.922, |
|
"step": 8880 |
|
}, |
|
{ |
|
"epoch": 5.6, |
|
"learning_rate": 0.00012169898242515907, |
|
"loss": 2.9128, |
|
"step": 8890 |
|
}, |
|
{ |
|
"epoch": 5.61, |
|
"learning_rate": 0.00012140717679403355, |
|
"loss": 2.9194, |
|
"step": 8900 |
|
}, |
|
{ |
|
"epoch": 5.62, |
|
"learning_rate": 0.0001211154834085189, |
|
"loss": 2.9179, |
|
"step": 8910 |
|
}, |
|
{ |
|
"epoch": 5.62, |
|
"learning_rate": 0.0001208239034137033, |
|
"loss": 2.9243, |
|
"step": 8920 |
|
}, |
|
{ |
|
"epoch": 5.63, |
|
"learning_rate": 0.0001205324379542297, |
|
"loss": 2.9277, |
|
"step": 8930 |
|
}, |
|
{ |
|
"epoch": 5.63, |
|
"learning_rate": 0.0001202410881742915, |
|
"loss": 2.9141, |
|
"step": 8940 |
|
}, |
|
{ |
|
"epoch": 5.64, |
|
"learning_rate": 0.00011994985521762797, |
|
"loss": 2.9172, |
|
"step": 8950 |
|
}, |
|
{ |
|
"epoch": 5.65, |
|
"learning_rate": 0.0001196587402275197, |
|
"loss": 2.9157, |
|
"step": 8960 |
|
}, |
|
{ |
|
"epoch": 5.65, |
|
"learning_rate": 0.00011936774434678416, |
|
"loss": 2.9087, |
|
"step": 8970 |
|
}, |
|
{ |
|
"epoch": 5.66, |
|
"learning_rate": 0.00011907686871777146, |
|
"loss": 2.9169, |
|
"step": 8980 |
|
}, |
|
{ |
|
"epoch": 5.67, |
|
"learning_rate": 0.00011878611448235945, |
|
"loss": 2.9167, |
|
"step": 8990 |
|
}, |
|
{ |
|
"epoch": 5.67, |
|
"learning_rate": 0.00011849548278194941, |
|
"loss": 2.9293, |
|
"step": 9000 |
|
}, |
|
{ |
|
"epoch": 5.67, |
|
"eval_loss": 2.9773881435394287, |
|
"eval_runtime": 231.746, |
|
"eval_samples_per_second": 328.528, |
|
"eval_steps_per_second": 5.868, |
|
"step": 9000 |
|
}, |
|
{ |
|
"epoch": 5.68, |
|
"learning_rate": 0.00011820497475746169, |
|
"loss": 2.9055, |
|
"step": 9010 |
|
}, |
|
{ |
|
"epoch": 5.69, |
|
"learning_rate": 0.0001179145915493311, |
|
"loss": 2.9215, |
|
"step": 9020 |
|
}, |
|
{ |
|
"epoch": 5.69, |
|
"learning_rate": 0.00011762433429750244, |
|
"loss": 2.8982, |
|
"step": 9030 |
|
}, |
|
{ |
|
"epoch": 5.7, |
|
"learning_rate": 0.00011733420414142608, |
|
"loss": 2.9057, |
|
"step": 9040 |
|
}, |
|
{ |
|
"epoch": 5.71, |
|
"learning_rate": 0.00011704420222005342, |
|
"loss": 2.9034, |
|
"step": 9050 |
|
}, |
|
{ |
|
"epoch": 5.71, |
|
"learning_rate": 0.00011675432967183248, |
|
"loss": 2.9133, |
|
"step": 9060 |
|
}, |
|
{ |
|
"epoch": 5.72, |
|
"learning_rate": 0.0001164645876347034, |
|
"loss": 2.9116, |
|
"step": 9070 |
|
}, |
|
{ |
|
"epoch": 5.72, |
|
"learning_rate": 0.00011617497724609403, |
|
"loss": 2.9133, |
|
"step": 9080 |
|
}, |
|
{ |
|
"epoch": 5.73, |
|
"learning_rate": 0.00011588549964291528, |
|
"loss": 2.9252, |
|
"step": 9090 |
|
}, |
|
{ |
|
"epoch": 5.74, |
|
"learning_rate": 0.00011559615596155694, |
|
"loss": 2.9056, |
|
"step": 9100 |
|
}, |
|
{ |
|
"epoch": 5.74, |
|
"learning_rate": 0.00011530694733788294, |
|
"loss": 2.9046, |
|
"step": 9110 |
|
}, |
|
{ |
|
"epoch": 5.75, |
|
"learning_rate": 0.00011501787490722712, |
|
"loss": 2.9103, |
|
"step": 9120 |
|
}, |
|
{ |
|
"epoch": 5.76, |
|
"learning_rate": 0.00011472893980438864, |
|
"loss": 2.905, |
|
"step": 9130 |
|
}, |
|
{ |
|
"epoch": 5.76, |
|
"learning_rate": 0.00011444014316362751, |
|
"loss": 2.9042, |
|
"step": 9140 |
|
}, |
|
{ |
|
"epoch": 5.77, |
|
"learning_rate": 0.00011415148611866027, |
|
"loss": 2.9129, |
|
"step": 9150 |
|
}, |
|
{ |
|
"epoch": 5.78, |
|
"learning_rate": 0.00011386296980265537, |
|
"loss": 2.9154, |
|
"step": 9160 |
|
}, |
|
{ |
|
"epoch": 5.78, |
|
"learning_rate": 0.0001135745953482289, |
|
"loss": 2.9139, |
|
"step": 9170 |
|
}, |
|
{ |
|
"epoch": 5.79, |
|
"learning_rate": 0.00011328636388743995, |
|
"loss": 2.908, |
|
"step": 9180 |
|
}, |
|
{ |
|
"epoch": 5.79, |
|
"learning_rate": 0.00011299827655178641, |
|
"loss": 2.9102, |
|
"step": 9190 |
|
}, |
|
{ |
|
"epoch": 5.8, |
|
"learning_rate": 0.00011271033447220016, |
|
"loss": 2.8991, |
|
"step": 9200 |
|
}, |
|
{ |
|
"epoch": 5.81, |
|
"learning_rate": 0.00011242253877904306, |
|
"loss": 2.9127, |
|
"step": 9210 |
|
}, |
|
{ |
|
"epoch": 5.81, |
|
"learning_rate": 0.00011213489060210219, |
|
"loss": 2.9128, |
|
"step": 9220 |
|
}, |
|
{ |
|
"epoch": 5.82, |
|
"learning_rate": 0.00011184739107058563, |
|
"loss": 2.9016, |
|
"step": 9230 |
|
}, |
|
{ |
|
"epoch": 5.83, |
|
"learning_rate": 0.00011156004131311779, |
|
"loss": 2.8966, |
|
"step": 9240 |
|
}, |
|
{ |
|
"epoch": 5.83, |
|
"learning_rate": 0.00011127284245773531, |
|
"loss": 2.8996, |
|
"step": 9250 |
|
}, |
|
{ |
|
"epoch": 5.84, |
|
"learning_rate": 0.0001109857956318822, |
|
"loss": 2.8952, |
|
"step": 9260 |
|
}, |
|
{ |
|
"epoch": 5.84, |
|
"learning_rate": 0.00011069890196240588, |
|
"loss": 2.901, |
|
"step": 9270 |
|
}, |
|
{ |
|
"epoch": 5.85, |
|
"learning_rate": 0.00011041216257555238, |
|
"loss": 2.9066, |
|
"step": 9280 |
|
}, |
|
{ |
|
"epoch": 5.86, |
|
"learning_rate": 0.00011012557859696219, |
|
"loss": 2.9108, |
|
"step": 9290 |
|
}, |
|
{ |
|
"epoch": 5.86, |
|
"learning_rate": 0.0001098391511516656, |
|
"loss": 2.9177, |
|
"step": 9300 |
|
}, |
|
{ |
|
"epoch": 5.87, |
|
"learning_rate": 0.00010955288136407848, |
|
"loss": 2.9016, |
|
"step": 9310 |
|
}, |
|
{ |
|
"epoch": 5.88, |
|
"learning_rate": 0.00010926677035799775, |
|
"loss": 2.9062, |
|
"step": 9320 |
|
}, |
|
{ |
|
"epoch": 5.88, |
|
"learning_rate": 0.00010898081925659703, |
|
"loss": 2.894, |
|
"step": 9330 |
|
}, |
|
{ |
|
"epoch": 5.89, |
|
"learning_rate": 0.00010869502918242223, |
|
"loss": 2.9177, |
|
"step": 9340 |
|
}, |
|
{ |
|
"epoch": 5.89, |
|
"learning_rate": 0.00010840940125738706, |
|
"loss": 2.8976, |
|
"step": 9350 |
|
}, |
|
{ |
|
"epoch": 5.9, |
|
"learning_rate": 0.00010812393660276874, |
|
"loss": 2.9123, |
|
"step": 9360 |
|
}, |
|
{ |
|
"epoch": 5.91, |
|
"learning_rate": 0.00010783863633920356, |
|
"loss": 2.9176, |
|
"step": 9370 |
|
}, |
|
{ |
|
"epoch": 5.91, |
|
"learning_rate": 0.00010755350158668245, |
|
"loss": 2.9107, |
|
"step": 9380 |
|
}, |
|
{ |
|
"epoch": 5.92, |
|
"learning_rate": 0.00010726853346454653, |
|
"loss": 2.9117, |
|
"step": 9390 |
|
}, |
|
{ |
|
"epoch": 5.93, |
|
"learning_rate": 0.00010698373309148293, |
|
"loss": 2.8976, |
|
"step": 9400 |
|
}, |
|
{ |
|
"epoch": 5.93, |
|
"learning_rate": 0.00010669910158552012, |
|
"loss": 2.8997, |
|
"step": 9410 |
|
}, |
|
{ |
|
"epoch": 5.94, |
|
"learning_rate": 0.00010641464006402368, |
|
"loss": 2.9049, |
|
"step": 9420 |
|
}, |
|
{ |
|
"epoch": 5.95, |
|
"learning_rate": 0.00010613034964369196, |
|
"loss": 2.8854, |
|
"step": 9430 |
|
}, |
|
{ |
|
"epoch": 5.95, |
|
"learning_rate": 0.00010584623144055157, |
|
"loss": 2.8868, |
|
"step": 9440 |
|
}, |
|
{ |
|
"epoch": 5.96, |
|
"learning_rate": 0.0001055622865699531, |
|
"loss": 2.9141, |
|
"step": 9450 |
|
}, |
|
{ |
|
"epoch": 5.96, |
|
"learning_rate": 0.0001052785161465666, |
|
"loss": 2.9023, |
|
"step": 9460 |
|
}, |
|
{ |
|
"epoch": 5.97, |
|
"learning_rate": 0.0001049949212843774, |
|
"loss": 2.9132, |
|
"step": 9470 |
|
}, |
|
{ |
|
"epoch": 5.98, |
|
"learning_rate": 0.00010471150309668156, |
|
"loss": 2.9008, |
|
"step": 9480 |
|
}, |
|
{ |
|
"epoch": 5.98, |
|
"learning_rate": 0.00010442826269608168, |
|
"loss": 2.9089, |
|
"step": 9490 |
|
}, |
|
{ |
|
"epoch": 5.99, |
|
"learning_rate": 0.00010414520119448234, |
|
"loss": 2.9053, |
|
"step": 9500 |
|
}, |
|
{ |
|
"epoch": 5.99, |
|
"eval_loss": 2.962033987045288, |
|
"eval_runtime": 232.0169, |
|
"eval_samples_per_second": 328.144, |
|
"eval_steps_per_second": 5.862, |
|
"step": 9500 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 0.00010386231970308587, |
|
"loss": 2.8956, |
|
"step": 9510 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 0.00010357961933238784, |
|
"loss": 2.9039, |
|
"step": 9520 |
|
}, |
|
{ |
|
"epoch": 6.01, |
|
"learning_rate": 0.00010329710119217296, |
|
"loss": 2.8843, |
|
"step": 9530 |
|
}, |
|
{ |
|
"epoch": 6.01, |
|
"learning_rate": 0.00010301476639151046, |
|
"loss": 2.8909, |
|
"step": 9540 |
|
}, |
|
{ |
|
"epoch": 6.02, |
|
"learning_rate": 0.00010273261603874986, |
|
"loss": 2.8955, |
|
"step": 9550 |
|
}, |
|
{ |
|
"epoch": 6.03, |
|
"learning_rate": 0.00010245065124151664, |
|
"loss": 2.8872, |
|
"step": 9560 |
|
}, |
|
{ |
|
"epoch": 6.03, |
|
"learning_rate": 0.00010216887310670784, |
|
"loss": 2.8843, |
|
"step": 9570 |
|
}, |
|
{ |
|
"epoch": 6.04, |
|
"learning_rate": 0.00010188728274048766, |
|
"loss": 2.8933, |
|
"step": 9580 |
|
}, |
|
{ |
|
"epoch": 6.05, |
|
"learning_rate": 0.00010160588124828328, |
|
"loss": 2.8857, |
|
"step": 9590 |
|
}, |
|
{ |
|
"epoch": 6.05, |
|
"learning_rate": 0.00010132466973478038, |
|
"loss": 2.8785, |
|
"step": 9600 |
|
}, |
|
{ |
|
"epoch": 6.06, |
|
"learning_rate": 0.0001010436493039188, |
|
"loss": 2.9006, |
|
"step": 9610 |
|
}, |
|
{ |
|
"epoch": 6.07, |
|
"learning_rate": 0.00010076282105888835, |
|
"loss": 2.8947, |
|
"step": 9620 |
|
}, |
|
{ |
|
"epoch": 6.07, |
|
"learning_rate": 0.00010048218610212434, |
|
"loss": 2.8959, |
|
"step": 9630 |
|
}, |
|
{ |
|
"epoch": 6.08, |
|
"learning_rate": 0.00010020174553530328, |
|
"loss": 2.895, |
|
"step": 9640 |
|
}, |
|
{ |
|
"epoch": 6.08, |
|
"learning_rate": 9.992150045933858e-05, |
|
"loss": 2.8819, |
|
"step": 9650 |
|
}, |
|
{ |
|
"epoch": 6.09, |
|
"learning_rate": 9.96414519743763e-05, |
|
"loss": 2.8811, |
|
"step": 9660 |
|
}, |
|
{ |
|
"epoch": 6.1, |
|
"learning_rate": 9.93616011797906e-05, |
|
"loss": 2.8857, |
|
"step": 9670 |
|
}, |
|
{ |
|
"epoch": 6.1, |
|
"learning_rate": 9.908194917417967e-05, |
|
"loss": 2.8937, |
|
"step": 9680 |
|
}, |
|
{ |
|
"epoch": 6.11, |
|
"learning_rate": 9.880249705536131e-05, |
|
"loss": 2.8767, |
|
"step": 9690 |
|
}, |
|
{ |
|
"epoch": 6.12, |
|
"learning_rate": 9.852324592036866e-05, |
|
"loss": 2.8863, |
|
"step": 9700 |
|
}, |
|
{ |
|
"epoch": 6.12, |
|
"learning_rate": 9.82441968654458e-05, |
|
"loss": 2.8859, |
|
"step": 9710 |
|
}, |
|
{ |
|
"epoch": 6.13, |
|
"learning_rate": 9.796535098604359e-05, |
|
"loss": 2.8737, |
|
"step": 9720 |
|
}, |
|
{ |
|
"epoch": 6.13, |
|
"learning_rate": 9.768670937681522e-05, |
|
"loss": 2.8882, |
|
"step": 9730 |
|
}, |
|
{ |
|
"epoch": 6.14, |
|
"learning_rate": 9.740827313161197e-05, |
|
"loss": 2.8795, |
|
"step": 9740 |
|
}, |
|
{ |
|
"epoch": 6.15, |
|
"learning_rate": 9.713004334347913e-05, |
|
"loss": 2.8915, |
|
"step": 9750 |
|
}, |
|
{ |
|
"epoch": 6.15, |
|
"learning_rate": 9.685202110465122e-05, |
|
"loss": 2.8816, |
|
"step": 9760 |
|
}, |
|
{ |
|
"epoch": 6.16, |
|
"learning_rate": 9.657420750654819e-05, |
|
"loss": 2.8877, |
|
"step": 9770 |
|
}, |
|
{ |
|
"epoch": 6.17, |
|
"learning_rate": 9.629660363977089e-05, |
|
"loss": 2.8866, |
|
"step": 9780 |
|
}, |
|
{ |
|
"epoch": 6.17, |
|
"learning_rate": 9.60192105940968e-05, |
|
"loss": 2.8851, |
|
"step": 9790 |
|
}, |
|
{ |
|
"epoch": 6.18, |
|
"learning_rate": 9.574202945847584e-05, |
|
"loss": 2.9005, |
|
"step": 9800 |
|
}, |
|
{ |
|
"epoch": 6.18, |
|
"learning_rate": 9.546506132102602e-05, |
|
"loss": 2.8955, |
|
"step": 9810 |
|
}, |
|
{ |
|
"epoch": 6.19, |
|
"learning_rate": 9.518830726902917e-05, |
|
"loss": 2.8784, |
|
"step": 9820 |
|
}, |
|
{ |
|
"epoch": 6.2, |
|
"learning_rate": 9.491176838892672e-05, |
|
"loss": 2.8888, |
|
"step": 9830 |
|
}, |
|
{ |
|
"epoch": 6.2, |
|
"learning_rate": 9.463544576631543e-05, |
|
"loss": 2.893, |
|
"step": 9840 |
|
}, |
|
{ |
|
"epoch": 6.21, |
|
"learning_rate": 9.435934048594305e-05, |
|
"loss": 2.8771, |
|
"step": 9850 |
|
}, |
|
{ |
|
"epoch": 6.22, |
|
"learning_rate": 9.408345363170418e-05, |
|
"loss": 2.8894, |
|
"step": 9860 |
|
}, |
|
{ |
|
"epoch": 6.22, |
|
"learning_rate": 9.380778628663594e-05, |
|
"loss": 2.8968, |
|
"step": 9870 |
|
}, |
|
{ |
|
"epoch": 6.23, |
|
"learning_rate": 9.353233953291368e-05, |
|
"loss": 2.8816, |
|
"step": 9880 |
|
}, |
|
{ |
|
"epoch": 6.24, |
|
"learning_rate": 9.325711445184682e-05, |
|
"loss": 2.8863, |
|
"step": 9890 |
|
}, |
|
{ |
|
"epoch": 6.24, |
|
"learning_rate": 9.29821121238746e-05, |
|
"loss": 2.895, |
|
"step": 9900 |
|
}, |
|
{ |
|
"epoch": 6.25, |
|
"learning_rate": 9.270733362856175e-05, |
|
"loss": 2.8737, |
|
"step": 9910 |
|
}, |
|
{ |
|
"epoch": 6.25, |
|
"learning_rate": 9.24327800445944e-05, |
|
"loss": 2.8894, |
|
"step": 9920 |
|
}, |
|
{ |
|
"epoch": 6.26, |
|
"learning_rate": 9.215845244977561e-05, |
|
"loss": 2.8835, |
|
"step": 9930 |
|
}, |
|
{ |
|
"epoch": 6.27, |
|
"learning_rate": 9.188435192102137e-05, |
|
"loss": 2.8817, |
|
"step": 9940 |
|
}, |
|
{ |
|
"epoch": 6.27, |
|
"learning_rate": 9.161047953435637e-05, |
|
"loss": 2.8659, |
|
"step": 9950 |
|
}, |
|
{ |
|
"epoch": 6.28, |
|
"learning_rate": 9.133683636490959e-05, |
|
"loss": 2.8942, |
|
"step": 9960 |
|
}, |
|
{ |
|
"epoch": 6.29, |
|
"learning_rate": 9.106342348691015e-05, |
|
"loss": 2.8824, |
|
"step": 9970 |
|
}, |
|
{ |
|
"epoch": 6.29, |
|
"learning_rate": 9.079024197368319e-05, |
|
"loss": 2.8895, |
|
"step": 9980 |
|
}, |
|
{ |
|
"epoch": 6.3, |
|
"learning_rate": 9.05172928976456e-05, |
|
"loss": 2.8905, |
|
"step": 9990 |
|
}, |
|
{ |
|
"epoch": 6.3, |
|
"learning_rate": 9.024457733030174e-05, |
|
"loss": 2.8819, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 6.3, |
|
"eval_loss": 2.95068359375, |
|
"eval_runtime": 232.6252, |
|
"eval_samples_per_second": 327.286, |
|
"eval_steps_per_second": 5.846, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 6.31, |
|
"learning_rate": 8.997209634223937e-05, |
|
"loss": 2.881, |
|
"step": 10010 |
|
}, |
|
{ |
|
"epoch": 6.32, |
|
"learning_rate": 8.969985100312532e-05, |
|
"loss": 2.8876, |
|
"step": 10020 |
|
}, |
|
{ |
|
"epoch": 6.32, |
|
"learning_rate": 8.942784238170134e-05, |
|
"loss": 2.8745, |
|
"step": 10030 |
|
}, |
|
{ |
|
"epoch": 6.33, |
|
"learning_rate": 8.915607154577993e-05, |
|
"loss": 2.8599, |
|
"step": 10040 |
|
}, |
|
{ |
|
"epoch": 6.34, |
|
"learning_rate": 8.888453956224013e-05, |
|
"loss": 2.8831, |
|
"step": 10050 |
|
}, |
|
{ |
|
"epoch": 6.34, |
|
"learning_rate": 8.861324749702332e-05, |
|
"loss": 2.8706, |
|
"step": 10060 |
|
}, |
|
{ |
|
"epoch": 6.35, |
|
"learning_rate": 8.834219641512906e-05, |
|
"loss": 2.8843, |
|
"step": 10070 |
|
}, |
|
{ |
|
"epoch": 6.35, |
|
"learning_rate": 8.807138738061083e-05, |
|
"loss": 2.8908, |
|
"step": 10080 |
|
}, |
|
{ |
|
"epoch": 6.36, |
|
"learning_rate": 8.780082145657197e-05, |
|
"loss": 2.8741, |
|
"step": 10090 |
|
}, |
|
{ |
|
"epoch": 6.37, |
|
"learning_rate": 8.75304997051615e-05, |
|
"loss": 2.8586, |
|
"step": 10100 |
|
}, |
|
{ |
|
"epoch": 6.37, |
|
"learning_rate": 8.726042318756976e-05, |
|
"loss": 2.8802, |
|
"step": 10110 |
|
}, |
|
{ |
|
"epoch": 6.38, |
|
"learning_rate": 8.699059296402449e-05, |
|
"loss": 2.8618, |
|
"step": 10120 |
|
}, |
|
{ |
|
"epoch": 6.39, |
|
"learning_rate": 8.672101009378648e-05, |
|
"loss": 2.8803, |
|
"step": 10130 |
|
}, |
|
{ |
|
"epoch": 6.39, |
|
"learning_rate": 8.64516756351457e-05, |
|
"loss": 2.8695, |
|
"step": 10140 |
|
}, |
|
{ |
|
"epoch": 6.4, |
|
"learning_rate": 8.618259064541675e-05, |
|
"loss": 2.8637, |
|
"step": 10150 |
|
}, |
|
{ |
|
"epoch": 6.41, |
|
"learning_rate": 8.591375618093491e-05, |
|
"loss": 2.8873, |
|
"step": 10160 |
|
}, |
|
{ |
|
"epoch": 6.41, |
|
"learning_rate": 8.564517329705207e-05, |
|
"loss": 2.885, |
|
"step": 10170 |
|
}, |
|
{ |
|
"epoch": 6.42, |
|
"learning_rate": 8.537684304813239e-05, |
|
"loss": 2.8809, |
|
"step": 10180 |
|
}, |
|
{ |
|
"epoch": 6.42, |
|
"learning_rate": 8.510876648754847e-05, |
|
"loss": 2.8691, |
|
"step": 10190 |
|
}, |
|
{ |
|
"epoch": 6.43, |
|
"learning_rate": 8.484094466767672e-05, |
|
"loss": 2.8832, |
|
"step": 10200 |
|
}, |
|
{ |
|
"epoch": 6.44, |
|
"learning_rate": 8.457337863989376e-05, |
|
"loss": 2.8705, |
|
"step": 10210 |
|
}, |
|
{ |
|
"epoch": 6.44, |
|
"learning_rate": 8.430606945457198e-05, |
|
"loss": 2.8769, |
|
"step": 10220 |
|
}, |
|
{ |
|
"epoch": 6.45, |
|
"learning_rate": 8.403901816107552e-05, |
|
"loss": 2.8736, |
|
"step": 10230 |
|
}, |
|
{ |
|
"epoch": 6.46, |
|
"learning_rate": 8.37722258077561e-05, |
|
"loss": 2.8717, |
|
"step": 10240 |
|
}, |
|
{ |
|
"epoch": 6.46, |
|
"learning_rate": 8.350569344194892e-05, |
|
"loss": 2.8683, |
|
"step": 10250 |
|
}, |
|
{ |
|
"epoch": 6.47, |
|
"learning_rate": 8.32394221099686e-05, |
|
"loss": 2.8738, |
|
"step": 10260 |
|
}, |
|
{ |
|
"epoch": 6.47, |
|
"learning_rate": 8.297341285710497e-05, |
|
"loss": 2.8846, |
|
"step": 10270 |
|
}, |
|
{ |
|
"epoch": 6.48, |
|
"learning_rate": 8.270766672761908e-05, |
|
"loss": 2.8783, |
|
"step": 10280 |
|
}, |
|
{ |
|
"epoch": 6.49, |
|
"learning_rate": 8.244218476473907e-05, |
|
"loss": 2.879, |
|
"step": 10290 |
|
}, |
|
{ |
|
"epoch": 6.49, |
|
"learning_rate": 8.217696801065596e-05, |
|
"loss": 2.8674, |
|
"step": 10300 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"learning_rate": 8.191201750651978e-05, |
|
"loss": 2.8727, |
|
"step": 10310 |
|
}, |
|
{ |
|
"epoch": 6.51, |
|
"learning_rate": 8.164733429243518e-05, |
|
"loss": 2.874, |
|
"step": 10320 |
|
}, |
|
{ |
|
"epoch": 6.51, |
|
"learning_rate": 8.13829194074576e-05, |
|
"loss": 2.8679, |
|
"step": 10330 |
|
}, |
|
{ |
|
"epoch": 6.52, |
|
"learning_rate": 8.111877388958924e-05, |
|
"loss": 2.8904, |
|
"step": 10340 |
|
}, |
|
{ |
|
"epoch": 6.53, |
|
"learning_rate": 8.08548987757747e-05, |
|
"loss": 2.8785, |
|
"step": 10350 |
|
}, |
|
{ |
|
"epoch": 6.53, |
|
"learning_rate": 8.059129510189713e-05, |
|
"loss": 2.8733, |
|
"step": 10360 |
|
}, |
|
{ |
|
"epoch": 6.54, |
|
"learning_rate": 8.032796390277408e-05, |
|
"loss": 2.8868, |
|
"step": 10370 |
|
}, |
|
{ |
|
"epoch": 6.54, |
|
"learning_rate": 8.006490621215347e-05, |
|
"loss": 2.8696, |
|
"step": 10380 |
|
}, |
|
{ |
|
"epoch": 6.55, |
|
"learning_rate": 7.980212306270948e-05, |
|
"loss": 2.8869, |
|
"step": 10390 |
|
}, |
|
{ |
|
"epoch": 6.56, |
|
"learning_rate": 7.953961548603865e-05, |
|
"loss": 2.884, |
|
"step": 10400 |
|
}, |
|
{ |
|
"epoch": 6.56, |
|
"learning_rate": 7.927738451265551e-05, |
|
"loss": 2.8794, |
|
"step": 10410 |
|
}, |
|
{ |
|
"epoch": 6.57, |
|
"learning_rate": 7.901543117198893e-05, |
|
"loss": 2.8789, |
|
"step": 10420 |
|
}, |
|
{ |
|
"epoch": 6.58, |
|
"learning_rate": 7.875375649237784e-05, |
|
"loss": 2.8817, |
|
"step": 10430 |
|
}, |
|
{ |
|
"epoch": 6.58, |
|
"learning_rate": 7.849236150106718e-05, |
|
"loss": 2.8697, |
|
"step": 10440 |
|
}, |
|
{ |
|
"epoch": 6.59, |
|
"learning_rate": 7.823124722420402e-05, |
|
"loss": 2.8961, |
|
"step": 10450 |
|
}, |
|
{ |
|
"epoch": 6.59, |
|
"learning_rate": 7.797041468683338e-05, |
|
"loss": 2.8773, |
|
"step": 10460 |
|
}, |
|
{ |
|
"epoch": 6.6, |
|
"learning_rate": 7.770986491289426e-05, |
|
"loss": 2.8721, |
|
"step": 10470 |
|
}, |
|
{ |
|
"epoch": 6.61, |
|
"learning_rate": 7.74495989252157e-05, |
|
"loss": 2.8515, |
|
"step": 10480 |
|
}, |
|
{ |
|
"epoch": 6.61, |
|
"learning_rate": 7.71896177455126e-05, |
|
"loss": 2.8621, |
|
"step": 10490 |
|
}, |
|
{ |
|
"epoch": 6.62, |
|
"learning_rate": 7.692992239438188e-05, |
|
"loss": 2.8776, |
|
"step": 10500 |
|
}, |
|
{ |
|
"epoch": 6.62, |
|
"eval_loss": 2.939117908477783, |
|
"eval_runtime": 232.8018, |
|
"eval_samples_per_second": 327.038, |
|
"eval_steps_per_second": 5.842, |
|
"step": 10500 |
|
}, |
|
{ |
|
"epoch": 6.63, |
|
"learning_rate": 7.667051389129841e-05, |
|
"loss": 2.8717, |
|
"step": 10510 |
|
}, |
|
{ |
|
"epoch": 6.63, |
|
"learning_rate": 7.641139325461076e-05, |
|
"loss": 2.8705, |
|
"step": 10520 |
|
}, |
|
{ |
|
"epoch": 6.64, |
|
"learning_rate": 7.615256150153781e-05, |
|
"loss": 2.8668, |
|
"step": 10530 |
|
}, |
|
{ |
|
"epoch": 6.64, |
|
"learning_rate": 7.589401964816412e-05, |
|
"loss": 2.8688, |
|
"step": 10540 |
|
}, |
|
{ |
|
"epoch": 6.65, |
|
"learning_rate": 7.563576870943629e-05, |
|
"loss": 2.8642, |
|
"step": 10550 |
|
}, |
|
{ |
|
"epoch": 6.66, |
|
"learning_rate": 7.537780969915886e-05, |
|
"loss": 2.8552, |
|
"step": 10560 |
|
}, |
|
{ |
|
"epoch": 6.66, |
|
"learning_rate": 7.512014362999039e-05, |
|
"loss": 2.8675, |
|
"step": 10570 |
|
}, |
|
{ |
|
"epoch": 6.67, |
|
"learning_rate": 7.48627715134394e-05, |
|
"loss": 2.8693, |
|
"step": 10580 |
|
}, |
|
{ |
|
"epoch": 6.68, |
|
"learning_rate": 7.460569435986052e-05, |
|
"loss": 2.8556, |
|
"step": 10590 |
|
}, |
|
{ |
|
"epoch": 6.68, |
|
"learning_rate": 7.434891317845051e-05, |
|
"loss": 2.8581, |
|
"step": 10600 |
|
}, |
|
{ |
|
"epoch": 6.69, |
|
"learning_rate": 7.4092428977244e-05, |
|
"loss": 2.8718, |
|
"step": 10610 |
|
}, |
|
{ |
|
"epoch": 6.7, |
|
"learning_rate": 7.383624276311e-05, |
|
"loss": 2.8615, |
|
"step": 10620 |
|
}, |
|
{ |
|
"epoch": 6.7, |
|
"learning_rate": 7.358035554174769e-05, |
|
"loss": 2.8594, |
|
"step": 10630 |
|
}, |
|
{ |
|
"epoch": 6.71, |
|
"learning_rate": 7.332476831768248e-05, |
|
"loss": 2.8669, |
|
"step": 10640 |
|
}, |
|
{ |
|
"epoch": 6.71, |
|
"learning_rate": 7.306948209426205e-05, |
|
"loss": 2.8654, |
|
"step": 10650 |
|
}, |
|
{ |
|
"epoch": 6.72, |
|
"learning_rate": 7.281449787365257e-05, |
|
"loss": 2.8794, |
|
"step": 10660 |
|
}, |
|
{ |
|
"epoch": 6.73, |
|
"learning_rate": 7.255981665683453e-05, |
|
"loss": 2.881, |
|
"step": 10670 |
|
}, |
|
{ |
|
"epoch": 6.73, |
|
"learning_rate": 7.2305439443599e-05, |
|
"loss": 2.8784, |
|
"step": 10680 |
|
}, |
|
{ |
|
"epoch": 6.74, |
|
"learning_rate": 7.20513672325436e-05, |
|
"loss": 2.88, |
|
"step": 10690 |
|
}, |
|
{ |
|
"epoch": 6.75, |
|
"learning_rate": 7.179760102106864e-05, |
|
"loss": 2.8688, |
|
"step": 10700 |
|
}, |
|
{ |
|
"epoch": 6.75, |
|
"learning_rate": 7.15441418053732e-05, |
|
"loss": 2.8732, |
|
"step": 10710 |
|
}, |
|
{ |
|
"epoch": 6.76, |
|
"learning_rate": 7.129099058045114e-05, |
|
"loss": 2.8554, |
|
"step": 10720 |
|
}, |
|
{ |
|
"epoch": 6.76, |
|
"learning_rate": 7.10381483400873e-05, |
|
"loss": 2.8797, |
|
"step": 10730 |
|
}, |
|
{ |
|
"epoch": 6.77, |
|
"learning_rate": 7.078561607685356e-05, |
|
"loss": 2.8754, |
|
"step": 10740 |
|
}, |
|
{ |
|
"epoch": 6.78, |
|
"learning_rate": 7.053339478210489e-05, |
|
"loss": 2.8756, |
|
"step": 10750 |
|
}, |
|
{ |
|
"epoch": 6.78, |
|
"learning_rate": 7.028148544597552e-05, |
|
"loss": 2.8688, |
|
"step": 10760 |
|
}, |
|
{ |
|
"epoch": 6.79, |
|
"learning_rate": 7.002988905737509e-05, |
|
"loss": 2.868, |
|
"step": 10770 |
|
}, |
|
{ |
|
"epoch": 6.8, |
|
"learning_rate": 6.977860660398465e-05, |
|
"loss": 2.8689, |
|
"step": 10780 |
|
}, |
|
{ |
|
"epoch": 6.8, |
|
"learning_rate": 6.952763907225288e-05, |
|
"loss": 2.8753, |
|
"step": 10790 |
|
}, |
|
{ |
|
"epoch": 6.81, |
|
"learning_rate": 6.927698744739217e-05, |
|
"loss": 2.8586, |
|
"step": 10800 |
|
}, |
|
{ |
|
"epoch": 6.82, |
|
"learning_rate": 6.902665271337484e-05, |
|
"loss": 2.8814, |
|
"step": 10810 |
|
}, |
|
{ |
|
"epoch": 6.82, |
|
"learning_rate": 6.877663585292903e-05, |
|
"loss": 2.8571, |
|
"step": 10820 |
|
}, |
|
{ |
|
"epoch": 6.83, |
|
"learning_rate": 6.852693784753517e-05, |
|
"loss": 2.8664, |
|
"step": 10830 |
|
}, |
|
{ |
|
"epoch": 6.83, |
|
"learning_rate": 6.827755967742196e-05, |
|
"loss": 2.8642, |
|
"step": 10840 |
|
}, |
|
{ |
|
"epoch": 6.84, |
|
"learning_rate": 6.802850232156247e-05, |
|
"loss": 2.8713, |
|
"step": 10850 |
|
}, |
|
{ |
|
"epoch": 6.85, |
|
"learning_rate": 6.777976675767046e-05, |
|
"loss": 2.874, |
|
"step": 10860 |
|
}, |
|
{ |
|
"epoch": 6.85, |
|
"learning_rate": 6.753135396219633e-05, |
|
"loss": 2.8779, |
|
"step": 10870 |
|
}, |
|
{ |
|
"epoch": 6.86, |
|
"learning_rate": 6.72832649103235e-05, |
|
"loss": 2.8744, |
|
"step": 10880 |
|
}, |
|
{ |
|
"epoch": 6.87, |
|
"learning_rate": 6.703550057596441e-05, |
|
"loss": 2.8613, |
|
"step": 10890 |
|
}, |
|
{ |
|
"epoch": 6.87, |
|
"learning_rate": 6.678806193175684e-05, |
|
"loss": 2.8682, |
|
"step": 10900 |
|
}, |
|
{ |
|
"epoch": 6.88, |
|
"learning_rate": 6.654094994905998e-05, |
|
"loss": 2.8658, |
|
"step": 10910 |
|
}, |
|
{ |
|
"epoch": 6.88, |
|
"learning_rate": 6.629416559795065e-05, |
|
"loss": 2.8592, |
|
"step": 10920 |
|
}, |
|
{ |
|
"epoch": 6.89, |
|
"learning_rate": 6.604770984721951e-05, |
|
"loss": 2.8659, |
|
"step": 10930 |
|
}, |
|
{ |
|
"epoch": 6.9, |
|
"learning_rate": 6.580158366436729e-05, |
|
"loss": 2.8727, |
|
"step": 10940 |
|
}, |
|
{ |
|
"epoch": 6.9, |
|
"learning_rate": 6.555578801560087e-05, |
|
"loss": 2.8689, |
|
"step": 10950 |
|
}, |
|
{ |
|
"epoch": 6.91, |
|
"learning_rate": 6.531032386582965e-05, |
|
"loss": 2.8724, |
|
"step": 10960 |
|
}, |
|
{ |
|
"epoch": 6.92, |
|
"learning_rate": 6.50651921786616e-05, |
|
"loss": 2.8648, |
|
"step": 10970 |
|
}, |
|
{ |
|
"epoch": 6.92, |
|
"learning_rate": 6.482039391639965e-05, |
|
"loss": 2.868, |
|
"step": 10980 |
|
}, |
|
{ |
|
"epoch": 6.93, |
|
"learning_rate": 6.457593004003771e-05, |
|
"loss": 2.8673, |
|
"step": 10990 |
|
}, |
|
{ |
|
"epoch": 6.93, |
|
"learning_rate": 6.433180150925708e-05, |
|
"loss": 2.8621, |
|
"step": 11000 |
|
}, |
|
{ |
|
"epoch": 6.93, |
|
"eval_loss": 2.929156541824341, |
|
"eval_runtime": 232.09, |
|
"eval_samples_per_second": 328.041, |
|
"eval_steps_per_second": 5.86, |
|
"step": 11000 |
|
}, |
|
{ |
|
"epoch": 6.94, |
|
"learning_rate": 6.408800928242264e-05, |
|
"loss": 2.857, |
|
"step": 11010 |
|
}, |
|
{ |
|
"epoch": 6.95, |
|
"learning_rate": 6.38445543165789e-05, |
|
"loss": 2.8705, |
|
"step": 11020 |
|
}, |
|
{ |
|
"epoch": 6.95, |
|
"learning_rate": 6.360143756744655e-05, |
|
"loss": 2.8602, |
|
"step": 11030 |
|
}, |
|
{ |
|
"epoch": 6.96, |
|
"learning_rate": 6.335865998941852e-05, |
|
"loss": 2.868, |
|
"step": 11040 |
|
}, |
|
{ |
|
"epoch": 6.97, |
|
"learning_rate": 6.311622253555625e-05, |
|
"loss": 2.8562, |
|
"step": 11050 |
|
}, |
|
{ |
|
"epoch": 6.97, |
|
"learning_rate": 6.287412615758597e-05, |
|
"loss": 2.8695, |
|
"step": 11060 |
|
}, |
|
{ |
|
"epoch": 6.98, |
|
"learning_rate": 6.263237180589501e-05, |
|
"loss": 2.862, |
|
"step": 11070 |
|
}, |
|
{ |
|
"epoch": 6.99, |
|
"learning_rate": 6.239096042952794e-05, |
|
"loss": 2.8519, |
|
"step": 11080 |
|
}, |
|
{ |
|
"epoch": 6.99, |
|
"learning_rate": 6.2149892976183e-05, |
|
"loss": 2.8517, |
|
"step": 11090 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 6.190917039220828e-05, |
|
"loss": 2.8547, |
|
"step": 11100 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 6.166879362259802e-05, |
|
"loss": 2.8427, |
|
"step": 11110 |
|
}, |
|
{ |
|
"epoch": 7.01, |
|
"learning_rate": 6.142876361098892e-05, |
|
"loss": 2.8593, |
|
"step": 11120 |
|
}, |
|
{ |
|
"epoch": 7.02, |
|
"learning_rate": 6.118908129965642e-05, |
|
"loss": 2.8549, |
|
"step": 11130 |
|
}, |
|
{ |
|
"epoch": 7.02, |
|
"learning_rate": 6.0949747629511036e-05, |
|
"loss": 2.8383, |
|
"step": 11140 |
|
}, |
|
{ |
|
"epoch": 7.03, |
|
"learning_rate": 6.07107635400946e-05, |
|
"loss": 2.8571, |
|
"step": 11150 |
|
}, |
|
{ |
|
"epoch": 7.04, |
|
"learning_rate": 6.0472129969576645e-05, |
|
"loss": 2.8464, |
|
"step": 11160 |
|
}, |
|
{ |
|
"epoch": 7.04, |
|
"learning_rate": 6.023384785475065e-05, |
|
"loss": 2.8512, |
|
"step": 11170 |
|
}, |
|
{ |
|
"epoch": 7.05, |
|
"learning_rate": 5.9995918131030436e-05, |
|
"loss": 2.8595, |
|
"step": 11180 |
|
}, |
|
{ |
|
"epoch": 7.05, |
|
"learning_rate": 5.975834173244644e-05, |
|
"loss": 2.8364, |
|
"step": 11190 |
|
}, |
|
{ |
|
"epoch": 7.06, |
|
"learning_rate": 5.9521119591642065e-05, |
|
"loss": 2.8371, |
|
"step": 11200 |
|
}, |
|
{ |
|
"epoch": 7.07, |
|
"learning_rate": 5.928425263987001e-05, |
|
"loss": 2.8571, |
|
"step": 11210 |
|
}, |
|
{ |
|
"epoch": 7.07, |
|
"learning_rate": 5.9047741806988705e-05, |
|
"loss": 2.8545, |
|
"step": 11220 |
|
}, |
|
{ |
|
"epoch": 7.08, |
|
"learning_rate": 5.881158802145838e-05, |
|
"loss": 2.8447, |
|
"step": 11230 |
|
}, |
|
{ |
|
"epoch": 7.09, |
|
"learning_rate": 5.857579221033784e-05, |
|
"loss": 2.8493, |
|
"step": 11240 |
|
}, |
|
{ |
|
"epoch": 7.09, |
|
"learning_rate": 5.8340355299280496e-05, |
|
"loss": 2.8389, |
|
"step": 11250 |
|
}, |
|
{ |
|
"epoch": 7.1, |
|
"learning_rate": 5.810527821253084e-05, |
|
"loss": 2.8658, |
|
"step": 11260 |
|
}, |
|
{ |
|
"epoch": 7.1, |
|
"learning_rate": 5.787056187292087e-05, |
|
"loss": 2.8495, |
|
"step": 11270 |
|
}, |
|
{ |
|
"epoch": 7.11, |
|
"learning_rate": 5.7636207201866364e-05, |
|
"loss": 2.8539, |
|
"step": 11280 |
|
}, |
|
{ |
|
"epoch": 7.12, |
|
"learning_rate": 5.740221511936332e-05, |
|
"loss": 2.849, |
|
"step": 11290 |
|
}, |
|
{ |
|
"epoch": 7.12, |
|
"learning_rate": 5.716858654398433e-05, |
|
"loss": 2.8382, |
|
"step": 11300 |
|
}, |
|
{ |
|
"epoch": 7.13, |
|
"learning_rate": 5.6935322392875115e-05, |
|
"loss": 2.8427, |
|
"step": 11310 |
|
}, |
|
{ |
|
"epoch": 7.14, |
|
"learning_rate": 5.6702423581750553e-05, |
|
"loss": 2.8463, |
|
"step": 11320 |
|
}, |
|
{ |
|
"epoch": 7.14, |
|
"learning_rate": 5.646989102489148e-05, |
|
"loss": 2.8453, |
|
"step": 11330 |
|
}, |
|
{ |
|
"epoch": 7.15, |
|
"learning_rate": 5.623772563514092e-05, |
|
"loss": 2.8443, |
|
"step": 11340 |
|
}, |
|
{ |
|
"epoch": 7.16, |
|
"learning_rate": 5.600592832390051e-05, |
|
"loss": 2.8633, |
|
"step": 11350 |
|
}, |
|
{ |
|
"epoch": 7.16, |
|
"learning_rate": 5.5774500001126926e-05, |
|
"loss": 2.8373, |
|
"step": 11360 |
|
}, |
|
{ |
|
"epoch": 7.17, |
|
"learning_rate": 5.5543441575328366e-05, |
|
"loss": 2.8572, |
|
"step": 11370 |
|
}, |
|
{ |
|
"epoch": 7.17, |
|
"learning_rate": 5.5312753953560894e-05, |
|
"loss": 2.8572, |
|
"step": 11380 |
|
}, |
|
{ |
|
"epoch": 7.18, |
|
"learning_rate": 5.5082438041424956e-05, |
|
"loss": 2.8412, |
|
"step": 11390 |
|
}, |
|
{ |
|
"epoch": 7.19, |
|
"learning_rate": 5.485249474306175e-05, |
|
"loss": 2.8455, |
|
"step": 11400 |
|
}, |
|
{ |
|
"epoch": 7.19, |
|
"learning_rate": 5.462292496114978e-05, |
|
"loss": 2.8576, |
|
"step": 11410 |
|
}, |
|
{ |
|
"epoch": 7.2, |
|
"learning_rate": 5.4393729596901265e-05, |
|
"loss": 2.8539, |
|
"step": 11420 |
|
}, |
|
{ |
|
"epoch": 7.21, |
|
"learning_rate": 5.4164909550058424e-05, |
|
"loss": 2.8403, |
|
"step": 11430 |
|
}, |
|
{ |
|
"epoch": 7.21, |
|
"learning_rate": 5.393646571889031e-05, |
|
"loss": 2.8586, |
|
"step": 11440 |
|
}, |
|
{ |
|
"epoch": 7.22, |
|
"learning_rate": 5.370839900018898e-05, |
|
"loss": 2.85, |
|
"step": 11450 |
|
}, |
|
{ |
|
"epoch": 7.22, |
|
"learning_rate": 5.348071028926611e-05, |
|
"loss": 2.8455, |
|
"step": 11460 |
|
}, |
|
{ |
|
"epoch": 7.23, |
|
"learning_rate": 5.325340047994945e-05, |
|
"loss": 2.8512, |
|
"step": 11470 |
|
}, |
|
{ |
|
"epoch": 7.24, |
|
"learning_rate": 5.302647046457927e-05, |
|
"loss": 2.8383, |
|
"step": 11480 |
|
}, |
|
{ |
|
"epoch": 7.24, |
|
"learning_rate": 5.279992113400489e-05, |
|
"loss": 2.8568, |
|
"step": 11490 |
|
}, |
|
{ |
|
"epoch": 7.25, |
|
"learning_rate": 5.257375337758134e-05, |
|
"loss": 2.8486, |
|
"step": 11500 |
|
}, |
|
{ |
|
"epoch": 7.25, |
|
"eval_loss": 2.922297716140747, |
|
"eval_runtime": 232.8422, |
|
"eval_samples_per_second": 326.981, |
|
"eval_steps_per_second": 5.841, |
|
"step": 11500 |
|
}, |
|
{ |
|
"epoch": 7.26, |
|
"learning_rate": 5.2347968083165605e-05, |
|
"loss": 2.8533, |
|
"step": 11510 |
|
}, |
|
{ |
|
"epoch": 7.26, |
|
"learning_rate": 5.212256613711317e-05, |
|
"loss": 2.8399, |
|
"step": 11520 |
|
}, |
|
{ |
|
"epoch": 7.27, |
|
"learning_rate": 5.189754842427475e-05, |
|
"loss": 2.8422, |
|
"step": 11530 |
|
}, |
|
{ |
|
"epoch": 7.28, |
|
"learning_rate": 5.1672915827992675e-05, |
|
"loss": 2.8355, |
|
"step": 11540 |
|
}, |
|
{ |
|
"epoch": 7.28, |
|
"learning_rate": 5.144866923009742e-05, |
|
"loss": 2.8508, |
|
"step": 11550 |
|
}, |
|
{ |
|
"epoch": 7.29, |
|
"learning_rate": 5.122480951090417e-05, |
|
"loss": 2.8484, |
|
"step": 11560 |
|
}, |
|
{ |
|
"epoch": 7.29, |
|
"learning_rate": 5.100133754920935e-05, |
|
"loss": 2.8483, |
|
"step": 11570 |
|
}, |
|
{ |
|
"epoch": 7.3, |
|
"learning_rate": 5.077825422228717e-05, |
|
"loss": 2.8538, |
|
"step": 11580 |
|
}, |
|
{ |
|
"epoch": 7.31, |
|
"learning_rate": 5.0555560405886253e-05, |
|
"loss": 2.8633, |
|
"step": 11590 |
|
}, |
|
{ |
|
"epoch": 7.31, |
|
"learning_rate": 5.0333256974226065e-05, |
|
"loss": 2.8479, |
|
"step": 11600 |
|
}, |
|
{ |
|
"epoch": 7.32, |
|
"learning_rate": 5.0111344799993585e-05, |
|
"loss": 2.8392, |
|
"step": 11610 |
|
}, |
|
{ |
|
"epoch": 7.33, |
|
"learning_rate": 4.9889824754339845e-05, |
|
"loss": 2.8392, |
|
"step": 11620 |
|
}, |
|
{ |
|
"epoch": 7.33, |
|
"learning_rate": 4.966869770687658e-05, |
|
"loss": 2.8422, |
|
"step": 11630 |
|
}, |
|
{ |
|
"epoch": 7.34, |
|
"learning_rate": 4.944796452567259e-05, |
|
"loss": 2.8465, |
|
"step": 11640 |
|
}, |
|
{ |
|
"epoch": 7.34, |
|
"learning_rate": 4.9227626077250625e-05, |
|
"loss": 2.8607, |
|
"step": 11650 |
|
}, |
|
{ |
|
"epoch": 7.35, |
|
"learning_rate": 4.900768322658383e-05, |
|
"loss": 2.8398, |
|
"step": 11660 |
|
}, |
|
{ |
|
"epoch": 7.36, |
|
"learning_rate": 4.8788136837092345e-05, |
|
"loss": 2.8458, |
|
"step": 11670 |
|
}, |
|
{ |
|
"epoch": 7.36, |
|
"learning_rate": 4.856898777063996e-05, |
|
"loss": 2.8606, |
|
"step": 11680 |
|
}, |
|
{ |
|
"epoch": 7.37, |
|
"learning_rate": 4.8350236887530624e-05, |
|
"loss": 2.8371, |
|
"step": 11690 |
|
}, |
|
{ |
|
"epoch": 7.38, |
|
"learning_rate": 4.8131885046505346e-05, |
|
"loss": 2.846, |
|
"step": 11700 |
|
}, |
|
{ |
|
"epoch": 7.38, |
|
"learning_rate": 4.791393310473846e-05, |
|
"loss": 2.8548, |
|
"step": 11710 |
|
}, |
|
{ |
|
"epoch": 7.39, |
|
"learning_rate": 4.7696381917834535e-05, |
|
"loss": 2.845, |
|
"step": 11720 |
|
}, |
|
{ |
|
"epoch": 7.39, |
|
"learning_rate": 4.7479232339824786e-05, |
|
"loss": 2.86, |
|
"step": 11730 |
|
}, |
|
{ |
|
"epoch": 7.4, |
|
"learning_rate": 4.726248522316395e-05, |
|
"loss": 2.8588, |
|
"step": 11740 |
|
}, |
|
{ |
|
"epoch": 7.41, |
|
"learning_rate": 4.704614141872684e-05, |
|
"loss": 2.8491, |
|
"step": 11750 |
|
}, |
|
{ |
|
"epoch": 7.41, |
|
"learning_rate": 4.6830201775805e-05, |
|
"loss": 2.8517, |
|
"step": 11760 |
|
}, |
|
{ |
|
"epoch": 7.42, |
|
"learning_rate": 4.661466714210333e-05, |
|
"loss": 2.8444, |
|
"step": 11770 |
|
}, |
|
{ |
|
"epoch": 7.43, |
|
"learning_rate": 4.639953836373685e-05, |
|
"loss": 2.8409, |
|
"step": 11780 |
|
}, |
|
{ |
|
"epoch": 7.43, |
|
"learning_rate": 4.6184816285227295e-05, |
|
"loss": 2.8288, |
|
"step": 11790 |
|
}, |
|
{ |
|
"epoch": 7.44, |
|
"learning_rate": 4.597050174949986e-05, |
|
"loss": 2.8427, |
|
"step": 11800 |
|
}, |
|
{ |
|
"epoch": 7.45, |
|
"learning_rate": 4.575659559787986e-05, |
|
"loss": 2.855, |
|
"step": 11810 |
|
}, |
|
{ |
|
"epoch": 7.45, |
|
"learning_rate": 4.554309867008943e-05, |
|
"loss": 2.8484, |
|
"step": 11820 |
|
}, |
|
{ |
|
"epoch": 7.46, |
|
"learning_rate": 4.533001180424425e-05, |
|
"loss": 2.8502, |
|
"step": 11830 |
|
}, |
|
{ |
|
"epoch": 7.46, |
|
"learning_rate": 4.511733583685027e-05, |
|
"loss": 2.8428, |
|
"step": 11840 |
|
}, |
|
{ |
|
"epoch": 7.47, |
|
"learning_rate": 4.490507160280022e-05, |
|
"loss": 2.8495, |
|
"step": 11850 |
|
}, |
|
{ |
|
"epoch": 7.48, |
|
"learning_rate": 4.469321993537074e-05, |
|
"loss": 2.8366, |
|
"step": 11860 |
|
}, |
|
{ |
|
"epoch": 7.48, |
|
"learning_rate": 4.448178166621872e-05, |
|
"loss": 2.8526, |
|
"step": 11870 |
|
}, |
|
{ |
|
"epoch": 7.49, |
|
"learning_rate": 4.427075762537822e-05, |
|
"loss": 2.8478, |
|
"step": 11880 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"learning_rate": 4.406014864125728e-05, |
|
"loss": 2.851, |
|
"step": 11890 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"learning_rate": 4.384995554063444e-05, |
|
"loss": 2.8435, |
|
"step": 11900 |
|
}, |
|
{ |
|
"epoch": 7.51, |
|
"learning_rate": 4.36401791486557e-05, |
|
"loss": 2.8464, |
|
"step": 11910 |
|
}, |
|
{ |
|
"epoch": 7.51, |
|
"learning_rate": 4.343082028883115e-05, |
|
"loss": 2.8501, |
|
"step": 11920 |
|
}, |
|
{ |
|
"epoch": 7.52, |
|
"learning_rate": 4.32218797830319e-05, |
|
"loss": 2.8499, |
|
"step": 11930 |
|
}, |
|
{ |
|
"epoch": 7.53, |
|
"learning_rate": 4.3013358451486554e-05, |
|
"loss": 2.8398, |
|
"step": 11940 |
|
}, |
|
{ |
|
"epoch": 7.53, |
|
"learning_rate": 4.280525711277835e-05, |
|
"loss": 2.8377, |
|
"step": 11950 |
|
}, |
|
{ |
|
"epoch": 7.54, |
|
"learning_rate": 4.259757658384172e-05, |
|
"loss": 2.8344, |
|
"step": 11960 |
|
}, |
|
{ |
|
"epoch": 7.55, |
|
"learning_rate": 4.239031767995913e-05, |
|
"loss": 2.8476, |
|
"step": 11970 |
|
}, |
|
{ |
|
"epoch": 7.55, |
|
"learning_rate": 4.218348121475794e-05, |
|
"loss": 2.848, |
|
"step": 11980 |
|
}, |
|
{ |
|
"epoch": 7.56, |
|
"learning_rate": 4.19770680002071e-05, |
|
"loss": 2.8589, |
|
"step": 11990 |
|
}, |
|
{ |
|
"epoch": 7.56, |
|
"learning_rate": 4.1771078846614056e-05, |
|
"loss": 2.8466, |
|
"step": 12000 |
|
}, |
|
{ |
|
"epoch": 7.56, |
|
"eval_loss": 2.9153051376342773, |
|
"eval_runtime": 233.3059, |
|
"eval_samples_per_second": 326.331, |
|
"eval_steps_per_second": 5.829, |
|
"step": 12000 |
|
} |
|
], |
|
"logging_steps": 10, |
|
"max_steps": 15860, |
|
"num_train_epochs": 10, |
|
"save_steps": 500, |
|
"total_flos": 2.007955407686861e+19, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
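The object above is the `trainer_state.json` state file written by the Hugging Face `Trainer`: `log_history` interleaves training records (one per `logging_steps = 10` optimizer steps, carrying `loss`) with evaluation records (one per 500 steps, carrying `eval_loss` plus runtime and throughput fields), and `max_steps = 15860` over `num_train_epochs = 10` implies roughly 1586 steps per epoch, consistent with step 12000 landing at epoch 7.56. A minimal sketch for inspecting such a file, assuming only that it is saved locally as `trainer_state.json` and that matplotlib is available (neither is implied by the log itself):

import json

import matplotlib.pyplot as plt

# Load the Trainer state; the file name is an assumption for illustration.
with open("trainer_state.json") as f:
    state = json.load(f)

# "log_history" mixes two record shapes: training records carry "loss",
# evaluation records carry "eval_loss"; split them by key presence.
train = [(r["step"], r["loss"]) for r in state["log_history"] if "loss" in r]
evals = [(r["step"], r["eval_loss"]) for r in state["log_history"] if "eval_loss" in r]

# Report the best evaluation seen so far in this log.
best_step, best_eval = min(evals, key=lambda se: se[1])
print(f"lowest eval_loss {best_eval:.4f} at step {best_step}")

plt.plot(*zip(*train), label="train loss")
plt.plot(*zip(*evals), marker="o", label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curves.png")

Run against the section above, this would report step 12000 with an eval_loss of about 2.9153, matching the steadily decreasing evaluation sequence 2.9620, 2.9507, 2.9392, 2.9223, 2.9153 over steps 9500 through 12000.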
|
|