Update BERT large uncased checkpoint after running phase 1 (SL 128) and phase 2 (SL 512)
387585b
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.035964035964036,
  "global_step": 2038,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.0, "learning_rate": 5.417624521072796e-05, "loss": 3.9478, "step": 5 },
    { "epoch": 0.01, "learning_rate": 0.00010835249042145592, "loss": 3.4956, "step": 10 },
    { "epoch": 0.01, "learning_rate": 0.00016252873563218388, "loss": 3.5834, "step": 15 },
    { "epoch": 0.02, "learning_rate": 0.00021670498084291185, "loss": 3.6582, "step": 20 },
    { "epoch": 0.02, "learning_rate": 0.00027088122605363983, "loss": 3.6931, "step": 25 },
    { "epoch": 0.03, "learning_rate": 0.00032505747126436777, "loss": 3.5358, "step": 30 },
    { "epoch": 0.03, "learning_rate": 0.00037923371647509576, "loss": 3.4064, "step": 35 },
    { "epoch": 0.04, "learning_rate": 0.0004334099616858237, "loss": 3.6459, "step": 40 },
    { "epoch": 0.04, "learning_rate": 0.00048758620689655173, "loss": 3.537, "step": 45 },
    { "epoch": 0.05, "learning_rate": 0.0005417624521072797, "loss": 3.5901, "step": 50 },
    { "epoch": 0.05, "learning_rate": 0.0005959386973180077, "loss": 3.354, "step": 55 },
    { "epoch": 0.06, "learning_rate": 0.0006501149425287355, "loss": 3.5718, "step": 60 },
    { "epoch": 0.06, "learning_rate": 0.0007042911877394635, "loss": 3.4404, "step": 65 },
    { "epoch": 0.07, "learning_rate": 0.0007584674329501915, "loss": 3.6794, "step": 70 },
    { "epoch": 0.07, "learning_rate": 0.0008126436781609194, "loss": 3.4177, "step": 75 },
    { "epoch": 0.08, "learning_rate": 0.0008668199233716474, "loss": 3.318, "step": 80 },
    { "epoch": 0.08, "learning_rate": 0.0009209961685823754, "loss": 3.2787, "step": 85 },
    { "epoch": 0.09, "learning_rate": 0.0009751724137931035, "loss": 3.4573, "step": 90 },
    { "epoch": 0.09, "learning_rate": 0.0010293486590038313, "loss": 3.5262, "step": 95 },
    { "epoch": 0.1, "learning_rate": 0.0010835249042145593, "loss": 3.5019, "step": 100 },
    { "epoch": 0.1, "learning_rate": 0.0011377011494252873, "loss": 3.4242, "step": 105 },
    { "epoch": 0.11, "learning_rate": 0.0011918773946360153, "loss": 3.321, "step": 110 },
    { "epoch": 0.11, "learning_rate": 0.0012460536398467433, "loss": 3.2642, "step": 115 },
    { "epoch": 0.12, "learning_rate": 0.001300229885057471, "loss": 3.4654, "step": 120 },
    { "epoch": 0.12, "learning_rate": 0.001354406130268199, "loss": 3.5731, "step": 125 },
    { "epoch": 0.13, "learning_rate": 0.001408582375478927, "loss": 3.5807, "step": 130 },
    { "epoch": 0.13, "learning_rate": 0.0014627586206896553, "loss": 3.3095, "step": 135 },
    { "epoch": 0.14, "learning_rate": 0.001516934865900383, "loss": 3.2068, "step": 140 },
    { "epoch": 0.14, "learning_rate": 0.001571111111111111, "loss": 3.3576, "step": 145 },
    { "epoch": 0.15, "learning_rate": 0.0016252873563218388, "loss": 3.1022, "step": 150 },
    { "epoch": 0.15, "learning_rate": 0.001679463601532567, "loss": 3.3052, "step": 155 },
    { "epoch": 0.16, "learning_rate": 0.0017336398467432948, "loss": 3.1333, "step": 160 },
    { "epoch": 0.16, "learning_rate": 0.001787816091954023, "loss": 3.1107, "step": 165 },
    { "epoch": 0.17, "learning_rate": 0.0018419923371647507, "loss": 3.2723, "step": 170 },
    { "epoch": 0.17, "learning_rate": 0.0018961685823754787, "loss": 3.0396, "step": 175 },
    { "epoch": 0.18, "learning_rate": 0.001950344827586207, "loss": 3.1269, "step": 180 },
    { "epoch": 0.18, "learning_rate": 0.0020045210727969347, "loss": 3.4973, "step": 185 },
    { "epoch": 0.19, "learning_rate": 0.0020586973180076627, "loss": 3.2676, "step": 190 },
    { "epoch": 0.19, "learning_rate": 0.0021128735632183907, "loss": 2.7068, "step": 195 },
    { "epoch": 0.2, "learning_rate": 0.0021670498084291187, "loss": 3.0751, "step": 200 },
    { "epoch": 0.2, "learning_rate": 0.0022212260536398467, "loss": 3.0416, "step": 205 },
    { "epoch": 0.21, "learning_rate": 0.0022754022988505746, "loss": 3.1006, "step": 210 },
    { "epoch": 0.21, "learning_rate": 0.002329578544061302, "loss": 2.8475, "step": 215 },
    { "epoch": 0.22, "learning_rate": 0.0023837547892720306, "loss": 3.1305, "step": 220 },
    { "epoch": 0.22, "learning_rate": 0.002437931034482758, "loss": 3.0844, "step": 225 },
    { "epoch": 0.23, "learning_rate": 0.0024921072796934866, "loss": 2.8136, "step": 230 },
    { "epoch": 0.23, "learning_rate": 0.0025462835249042146, "loss": 2.9123, "step": 235 },
    { "epoch": 0.24, "learning_rate": 0.002600459770114942, "loss": 3.0378, "step": 240 },
    { "epoch": 0.24, "learning_rate": 0.0026546360153256706, "loss": 2.6106, "step": 245 },
    { "epoch": 0.25, "learning_rate": 0.002708812260536398, "loss": 2.7494, "step": 250 },
    { "epoch": 0.25, "learning_rate": 0.002762988505747126, "loss": 2.8047, "step": 255 },
    { "epoch": 0.26, "learning_rate": 0.002817164750957854, "loss": 2.4465, "step": 260 },
    { "epoch": 0.26, "learning_rate": 0.002821634214969049, "loss": 2.6294, "step": 265 },
    { "epoch": 0.27, "learning_rate": 0.00281367698368036, "loss": 2.7954, "step": 270 },
    { "epoch": 0.27, "learning_rate": 0.002805719752391671, "loss": 2.3837, "step": 275 },
    { "epoch": 0.28, "learning_rate": 0.0027977625211029825, "loss": 2.0739, "step": 280 },
    { "epoch": 0.28, "learning_rate": 0.0027898052898142935, "loss": 2.08, "step": 285 },
    { "epoch": 0.29, "learning_rate": 0.002781848058525605, "loss": 2.2492, "step": 290 },
    { "epoch": 0.29, "learning_rate": 0.002773890827236916, "loss": 1.9265, "step": 295 },
    { "epoch": 0.3, "learning_rate": 0.002765933595948227, "loss": 1.9779, "step": 300 },
    { "epoch": 0.3, "learning_rate": 0.0027579763646595383, "loss": 1.8238, "step": 305 },
    { "epoch": 0.31, "learning_rate": 0.0027500191333708493, "loss": 1.6509, "step": 310 },
    { "epoch": 0.31, "learning_rate": 0.0027420619020821608, "loss": 1.5633, "step": 315 },
    { "epoch": 0.32, "learning_rate": 0.0027341046707934718, "loss": 1.5235, "step": 320 },
    { "epoch": 0.32, "learning_rate": 0.002726147439504783, "loss": 1.4549, "step": 325 },
    { "epoch": 0.33, "learning_rate": 0.002718190208216094, "loss": 1.4057, "step": 330 },
    { "epoch": 0.33, "learning_rate": 0.0027102329769274056, "loss": 1.4699, "step": 335 },
    { "epoch": 0.34, "learning_rate": 0.002702275745638717, "loss": 1.3995, "step": 340 },
    { "epoch": 0.34, "learning_rate": 0.002694318514350028, "loss": 1.2831, "step": 345 },
    { "epoch": 0.35, "learning_rate": 0.002686361283061339, "loss": 1.4057, "step": 350 },
    { "epoch": 0.35, "learning_rate": 0.0026784040517726505, "loss": 1.3694, "step": 355 },
    { "epoch": 0.36, "learning_rate": 0.0026704468204839615, "loss": 1.3262, "step": 360 },
    { "epoch": 0.36, "learning_rate": 0.002662489589195273, "loss": 1.3559, "step": 365 },
    { "epoch": 0.37, "learning_rate": 0.002654532357906584, "loss": 1.3316, "step": 370 },
    { "epoch": 0.37, "learning_rate": 0.002646575126617895, "loss": 1.4259, "step": 375 },
    { "epoch": 0.38, "learning_rate": 0.0026386178953292064, "loss": 1.3915, "step": 380 },
    { "epoch": 0.38, "learning_rate": 0.0026306606640405174, "loss": 1.3454, "step": 385 },
    { "epoch": 0.39, "learning_rate": 0.002622703432751829, "loss": 1.4387, "step": 390 },
    { "epoch": 0.39, "learning_rate": 0.00261474620146314, "loss": 1.3051, "step": 395 },
    { "epoch": 0.4, "learning_rate": 0.002606788970174451, "loss": 1.4805, "step": 400 },
    { "epoch": 0.4, "learning_rate": 0.0025988317388857623, "loss": 1.4211, "step": 405 },
    { "epoch": 0.41, "learning_rate": 0.0025908745075970733, "loss": 1.3394, "step": 410 },
    { "epoch": 0.41, "learning_rate": 0.0025829172763083847, "loss": 1.4346, "step": 415 },
    { "epoch": 0.42, "learning_rate": 0.002574960045019696, "loss": 1.4137, "step": 420 },
    { "epoch": 0.42, "learning_rate": 0.002567002813731007, "loss": 1.407, "step": 425 },
    { "epoch": 0.43, "learning_rate": 0.0025590455824423186, "loss": 1.4121, "step": 430 },
    { "epoch": 0.43, "learning_rate": 0.0025510883511536296, "loss": 1.3267, "step": 435 },
    { "epoch": 0.44, "learning_rate": 0.002543131119864941, "loss": 1.4152, "step": 440 },
    { "epoch": 0.44, "learning_rate": 0.002535173888576252, "loss": 1.3324, "step": 445 },
    { "epoch": 0.45, "learning_rate": 0.002527216657287563, "loss": 1.3688, "step": 450 },
    { "epoch": 0.45, "learning_rate": 0.0025192594259988745, "loss": 1.4605, "step": 455 },
    { "epoch": 0.46, "learning_rate": 0.0025113021947101855, "loss": 1.3467, "step": 460 },
    { "epoch": 0.46, "learning_rate": 0.002503344963421497, "loss": 1.3955, "step": 465 },
    { "epoch": 0.47, "learning_rate": 0.002495387732132808, "loss": 1.3075, "step": 470 },
    { "epoch": 0.47, "learning_rate": 0.002487430500844119, "loss": 1.4311, "step": 475 },
    { "epoch": 0.48, "learning_rate": 0.0024794732695554303, "loss": 1.454, "step": 480 },
    { "epoch": 0.48, "learning_rate": 0.0024715160382667413, "loss": 1.422, "step": 485 },
    { "epoch": 0.49, "learning_rate": 0.0024635588069780528, "loss": 1.2557, "step": 490 },
    { "epoch": 0.49, "learning_rate": 0.0024556015756893638, "loss": 1.3965, "step": 495 },
    { "epoch": 0.5, "learning_rate": 0.002447644344400675, "loss": 1.3865, "step": 500 },
    { "epoch": 0.5, "learning_rate": 0.002439687113111986, "loss": 1.3908, "step": 505 },
    { "epoch": 0.51, "learning_rate": 0.0024317298818232976, "loss": 1.349, "step": 510 },
    { "epoch": 0.51, "learning_rate": 0.0024237726505346086, "loss": 1.2927, "step": 515 },
    { "epoch": 0.52, "learning_rate": 0.00241581541924592, "loss": 1.3646, "step": 520 },
    { "epoch": 0.52, "learning_rate": 0.002407858187957231, "loss": 1.3899, "step": 525 },
    { "epoch": 0.53, "learning_rate": 0.0023999009566685425, "loss": 1.3826, "step": 530 },
    { "epoch": 0.53, "learning_rate": 0.0023919437253798535, "loss": 1.3454, "step": 535 },
    { "epoch": 0.54, "learning_rate": 0.002383986494091165, "loss": 1.2858, "step": 540 },
    { "epoch": 0.54, "learning_rate": 0.002376029262802476, "loss": 1.3471, "step": 545 },
    { "epoch": 0.55, "learning_rate": 0.002368072031513787, "loss": 1.3412, "step": 550 },
    { "epoch": 0.55, "learning_rate": 0.0023601148002250984, "loss": 1.4073, "step": 555 },
    { "epoch": 0.56, "learning_rate": 0.0023521575689364094, "loss": 1.4214, "step": 560 },
    { "epoch": 0.56, "learning_rate": 0.002344200337647721, "loss": 1.3379, "step": 565 },
    { "epoch": 0.57, "learning_rate": 0.002336243106359032, "loss": 1.4228, "step": 570 },
    { "epoch": 0.57, "learning_rate": 0.002328285875070343, "loss": 1.433, "step": 575 },
    { "epoch": 0.58, "learning_rate": 0.0023203286437816543, "loss": 1.3152, "step": 580 },
    { "epoch": 0.58, "learning_rate": 0.0023123714124929653, "loss": 1.3381, "step": 585 },
    { "epoch": 0.59, "learning_rate": 0.0023044141812042767, "loss": 1.3229, "step": 590 },
    { "epoch": 0.59, "learning_rate": 0.0022964569499155877, "loss": 1.331, "step": 595 },
    { "epoch": 0.6, "learning_rate": 0.002288499718626899, "loss": 1.3322, "step": 600 },
    { "epoch": 0.6, "learning_rate": 0.0022805424873382106, "loss": 1.284, "step": 605 },
    { "epoch": 0.61, "learning_rate": 0.0022725852560495216, "loss": 1.4003, "step": 610 },
    { "epoch": 0.61, "learning_rate": 0.002264628024760833, "loss": 1.2798, "step": 615 },
    { "epoch": 0.62, "learning_rate": 0.002256670793472144, "loss": 1.3699, "step": 620 },
    { "epoch": 0.62, "learning_rate": 0.002248713562183455, "loss": 1.2648, "step": 625 },
    { "epoch": 0.63, "learning_rate": 0.0022407563308947664, "loss": 1.2905, "step": 630 },
    { "epoch": 0.63, "learning_rate": 0.0022327990996060774, "loss": 1.3432, "step": 635 },
    { "epoch": 0.64, "learning_rate": 0.002224841868317389, "loss": 1.3604, "step": 640 },
    { "epoch": 0.64, "learning_rate": 0.0022168846370287, "loss": 1.3513, "step": 645 },
    { "epoch": 0.65, "learning_rate": 0.002208927405740011, "loss": 1.3222, "step": 650 },
    { "epoch": 0.65, "learning_rate": 0.0022009701744513223, "loss": 1.4611, "step": 655 },
    { "epoch": 0.66, "learning_rate": 0.0021930129431626333, "loss": 1.3717, "step": 660 },
    { "epoch": 0.66, "learning_rate": 0.0021850557118739448, "loss": 1.3321, "step": 665 },
    { "epoch": 0.67, "learning_rate": 0.0021770984805852558, "loss": 1.242, "step": 670 },
    { "epoch": 0.67, "learning_rate": 0.0021691412492965668, "loss": 1.3239, "step": 675 },
    { "epoch": 0.68, "learning_rate": 0.002161184018007878, "loss": 1.3978, "step": 680 },
    { "epoch": 0.68, "learning_rate": 0.0021532267867191896, "loss": 1.3098, "step": 685 },
    { "epoch": 0.69, "learning_rate": 0.0021452695554305006, "loss": 1.3272, "step": 690 },
    { "epoch": 0.69, "learning_rate": 0.002137312324141812, "loss": 1.3465, "step": 695 },
    { "epoch": 0.7, "learning_rate": 0.002129355092853123, "loss": 1.3863, "step": 700 },
    { "epoch": 0.7, "learning_rate": 0.0021213978615644345, "loss": 1.2466, "step": 705 },
    { "epoch": 0.71, "learning_rate": 0.0021134406302757455, "loss": 1.2819, "step": 710 },
    { "epoch": 0.71, "learning_rate": 0.002105483398987057, "loss": 1.2984, "step": 715 },
    { "epoch": 0.72, "learning_rate": 0.002097526167698368, "loss": 1.3937, "step": 720 },
    { "epoch": 0.72, "learning_rate": 0.002089568936409679, "loss": 1.3559, "step": 725 },
    { "epoch": 0.73, "learning_rate": 0.0020816117051209904, "loss": 1.3167, "step": 730 },
    { "epoch": 0.73, "learning_rate": 0.0020736544738323014, "loss": 1.312, "step": 735 },
    { "epoch": 0.74, "learning_rate": 0.002065697242543613, "loss": 1.4823, "step": 740 },
    { "epoch": 0.74, "learning_rate": 0.002057740011254924, "loss": 1.3505, "step": 745 },
    { "epoch": 0.75, "learning_rate": 0.002049782779966235, "loss": 1.3069, "step": 750 },
    { "epoch": 0.75, "learning_rate": 0.0020418255486775463, "loss": 1.2876, "step": 755 },
    { "epoch": 0.76, "learning_rate": 0.0020338683173888573, "loss": 1.3353, "step": 760 },
    { "epoch": 0.76, "learning_rate": 0.0020259110861001687, "loss": 1.3717, "step": 765 },
    { "epoch": 0.77, "learning_rate": 0.0020179538548114797, "loss": 1.3586, "step": 770 },
    { "epoch": 0.77, "learning_rate": 0.002009996623522791, "loss": 1.3533, "step": 775 },
    { "epoch": 0.78, "learning_rate": 0.002002039392234102, "loss": 1.237, "step": 780 },
    { "epoch": 0.78, "learning_rate": 0.0019940821609454136, "loss": 1.2833, "step": 785 },
    { "epoch": 0.79, "learning_rate": 0.001986124929656725, "loss": 1.3615, "step": 790 },
    { "epoch": 0.79, "learning_rate": 0.001978167698368036, "loss": 1.461, "step": 795 },
    { "epoch": 0.8, "learning_rate": 0.001970210467079347, "loss": 1.3725, "step": 800 },
    { "epoch": 0.8, "learning_rate": 0.0019622532357906584, "loss": 1.2839, "step": 805 },
    { "epoch": 0.81, "learning_rate": 0.0019542960045019694, "loss": 1.2933, "step": 810 },
    { "epoch": 0.81, "learning_rate": 0.0019463387732132809, "loss": 1.2416, "step": 815 },
    { "epoch": 0.82, "learning_rate": 0.0019383815419245919, "loss": 1.3515, "step": 820 },
    { "epoch": 0.82, "learning_rate": 0.0019304243106359029, "loss": 1.2728, "step": 825 },
    { "epoch": 0.83, "learning_rate": 0.0019224670793472143, "loss": 1.2913, "step": 830 },
    { "epoch": 0.83, "learning_rate": 0.0019145098480585253, "loss": 1.2836, "step": 835 },
    { "epoch": 0.84, "learning_rate": 0.0019065526167698368, "loss": 1.3717, "step": 840 },
    { "epoch": 0.84, "learning_rate": 0.0018985953854811478, "loss": 1.2558, "step": 845 },
    { "epoch": 0.85, "learning_rate": 0.001890638154192459, "loss": 1.3507, "step": 850 },
    { "epoch": 0.85, "learning_rate": 0.0018826809229037704, "loss": 1.3441, "step": 855 },
    { "epoch": 0.86, "learning_rate": 0.0018747236916150814, "loss": 1.307, "step": 860 },
    { "epoch": 0.86, "learning_rate": 0.0018667664603263928, "loss": 1.3997, "step": 865 },
    { "epoch": 0.87, "learning_rate": 0.0018588092290377038, "loss": 1.3367, "step": 870 },
    { "epoch": 0.87, "learning_rate": 0.0018508519977490148, "loss": 1.3763, "step": 875 },
    { "epoch": 0.88, "learning_rate": 0.0018428947664603263, "loss": 1.4019, "step": 880 },
    { "epoch": 0.88, "learning_rate": 0.0018349375351716373, "loss": 1.1833, "step": 885 },
    { "epoch": 0.89, "learning_rate": 0.0018269803038829487, "loss": 1.3296, "step": 890 },
    { "epoch": 0.89, "learning_rate": 0.00181902307259426, "loss": 1.3258, "step": 895 },
    { "epoch": 0.9, "learning_rate": 0.001811065841305571, "loss": 1.3643, "step": 900 },
    { "epoch": 0.9, "learning_rate": 0.0018031086100168824, "loss": 1.3896, "step": 905 },
    { "epoch": 0.91, "learning_rate": 0.0017951513787281934, "loss": 1.3775, "step": 910 },
    { "epoch": 0.91, "learning_rate": 0.0017871941474395048, "loss": 1.3878, "step": 915 },
    { "epoch": 0.92, "learning_rate": 0.0017792369161508158, "loss": 1.339, "step": 920 },
    { "epoch": 0.92, "learning_rate": 0.0017712796848621268, "loss": 1.3225, "step": 925 },
    { "epoch": 0.93, "learning_rate": 0.0017633224535734382, "loss": 1.2899, "step": 930 },
    { "epoch": 0.93, "learning_rate": 0.0017553652222847495, "loss": 1.3449, "step": 935 },
    { "epoch": 0.94, "learning_rate": 0.0017474079909960607, "loss": 1.3988, "step": 940 },
    { "epoch": 0.94, "learning_rate": 0.001739450759707372, "loss": 1.2508, "step": 945 },
    { "epoch": 0.95, "learning_rate": 0.001731493528418683, "loss": 1.2946, "step": 950 },
    { "epoch": 0.95, "learning_rate": 0.0017235362971299943, "loss": 1.2393, "step": 955 },
    { "epoch": 0.96, "learning_rate": 0.0017155790658413053, "loss": 1.3437, "step": 960 },
    { "epoch": 0.96, "learning_rate": 0.0017076218345526168, "loss": 1.2589, "step": 965 },
    { "epoch": 0.97, "learning_rate": 0.0016996646032639278, "loss": 1.3129, "step": 970 },
    { "epoch": 0.97, "learning_rate": 0.001691707371975239, "loss": 1.3064, "step": 975 },
    { "epoch": 0.98, "learning_rate": 0.0016837501406865502, "loss": 1.3121, "step": 980 },
    { "epoch": 0.98, "learning_rate": 0.0016757929093978614, "loss": 1.316, "step": 985 },
    { "epoch": 0.99, "learning_rate": 0.0016678356781091727, "loss": 1.3004, "step": 990 },
    { "epoch": 0.99, "learning_rate": 0.0016598784468204839, "loss": 1.3423, "step": 995 },
    { "epoch": 1.0, "learning_rate": 0.0016519212155317949, "loss": 1.2852, "step": 1000 },
    { "epoch": 1.0, "learning_rate": 0.0016439639842431063, "loss": 1.3119, "step": 1005 },
    { "epoch": 1.01, "learning_rate": 0.0016360067529544173, "loss": 1.2672, "step": 1010 },
    { "epoch": 1.01, "learning_rate": 0.0016280495216657287, "loss": 1.2331, "step": 1015 },
    { "epoch": 1.02, "learning_rate": 0.0016200922903770397, "loss": 1.2987, "step": 1020 },
    { "epoch": 1.02, "learning_rate": 0.001612135059088351, "loss": 1.3455, "step": 1025 },
    { "epoch": 1.03, "learning_rate": 0.0016041778277996622, "loss": 1.2899, "step": 1030 },
    { "epoch": 1.03, "learning_rate": 0.0015962205965109734, "loss": 1.2729, "step": 1035 },
    { "epoch": 1.04, "learning_rate": 0.0015882633652222848, "loss": 1.2688, "step": 1040 },
    { "epoch": 1.04, "learning_rate": 0.0015803061339335958, "loss": 1.2367, "step": 1045 },
    { "epoch": 1.05, "learning_rate": 0.0015723489026449068, "loss": 1.302, "step": 1050 },
    { "epoch": 1.05, "learning_rate": 0.0015643916713562183, "loss": 1.3786, "step": 1055 },
    { "epoch": 1.06, "learning_rate": 0.0015564344400675293, "loss": 1.2653, "step": 1060 },
    { "epoch": 1.06, "learning_rate": 0.0015484772087788407, "loss": 1.2689, "step": 1065 },
    { "epoch": 1.07, "learning_rate": 0.0015405199774901517, "loss": 1.2673, "step": 1070 },
    { "epoch": 1.07, "learning_rate": 0.001532562746201463, "loss": 1.261, "step": 1075 },
    { "epoch": 1.08, "learning_rate": 0.0015246055149127744, "loss": 1.1957, "step": 1080 },
    { "epoch": 1.08, "learning_rate": 0.0015166482836240854, "loss": 1.4158, "step": 1085 },
    { "epoch": 1.09, "learning_rate": 0.0015086910523353968, "loss": 1.1961, "step": 1090 },
    { "epoch": 1.09, "learning_rate": 0.0015007338210467078, "loss": 1.3019, "step": 1095 },
    { "epoch": 1.1, "learning_rate": 0.0014927765897580188, "loss": 1.2551, "step": 1100 },
    { "epoch": 1.1, "learning_rate": 0.0014848193584693302, "loss": 1.2763, "step": 1105 },
    { "epoch": 1.11, "learning_rate": 0.0014768621271806412, "loss": 1.2999, "step": 1110 },
    { "epoch": 1.11, "learning_rate": 0.0014689048958919527, "loss": 1.2929, "step": 1115 },
    { "epoch": 1.12, "learning_rate": 0.001460947664603264, "loss": 1.2321, "step": 1120 },
    { "epoch": 1.12, "learning_rate": 0.001452990433314575, "loss": 1.3635, "step": 1125 },
    { "epoch": 1.13, "learning_rate": 0.0014450332020258863, "loss": 1.2883, "step": 1130 },
    { "epoch": 1.13, "learning_rate": 0.0014370759707371973, "loss": 1.3663, "step": 1135 },
    { "epoch": 1.14, "learning_rate": 0.0014291187394485088, "loss": 1.268, "step": 1140 },
    { "epoch": 1.14, "learning_rate": 0.0014211615081598198, "loss": 1.2976, "step": 1145 },
    { "epoch": 1.15, "learning_rate": 0.001413204276871131, "loss": 1.3624, "step": 1150 },
    { "epoch": 1.15, "learning_rate": 0.0014052470455824422, "loss": 1.3659, "step": 1155 },
    { "epoch": 1.16, "learning_rate": 0.0013972898142937534, "loss": 1.2711, "step": 1160 },
    { "epoch": 1.16, "learning_rate": 0.0013893325830050646, "loss": 1.2882, "step": 1165 },
    { "epoch": 1.17, "learning_rate": 0.0013813753517163759, "loss": 1.321, "step": 1170 },
    { "epoch": 1.17, "learning_rate": 0.001373418120427687, "loss": 1.3505, "step": 1175 },
    { "epoch": 1.18, "learning_rate": 0.0013654608891389983, "loss": 1.2967, "step": 1180 },
    { "epoch": 1.18, "learning_rate": 0.0013575036578503093, "loss": 1.2868, "step": 1185 },
    { "epoch": 1.19, "learning_rate": 0.0013495464265616205, "loss": 1.2786, "step": 1190 },
    { "epoch": 1.19, "learning_rate": 0.0013415891952729317, "loss": 1.2839, "step": 1195 },
    { "epoch": 1.2, "learning_rate": 0.001333631963984243, "loss": 1.309, "step": 1200 },
    { "epoch": 1.2, "learning_rate": 0.0013256747326955542, "loss": 1.3182, "step": 1205 },
    { "epoch": 1.21, "learning_rate": 0.0013177175014068654, "loss": 1.3086, "step": 1210 },
    { "epoch": 1.21, "learning_rate": 0.0013097602701181766, "loss": 1.189, "step": 1215 },
    { "epoch": 1.22, "learning_rate": 0.0013018030388294878, "loss": 1.3253, "step": 1220 },
    { "epoch": 1.22, "learning_rate": 0.001293845807540799, "loss": 1.2408, "step": 1225 },
    { "epoch": 1.23, "learning_rate": 0.0012858885762521103, "loss": 1.3168, "step": 1230 },
    { "epoch": 1.23, "learning_rate": 0.0012779313449634215, "loss": 1.2883, "step": 1235 },
    { "epoch": 1.24, "learning_rate": 0.0012699741136747325, "loss": 1.3386, "step": 1240 },
    { "epoch": 1.24, "learning_rate": 0.0012620168823860437, "loss": 1.4053, "step": 1245 },
    { "epoch": 1.25, "learning_rate": 0.001254059651097355, "loss": 1.2768, "step": 1250 },
    { "epoch": 1.25, "learning_rate": 0.0012461024198086661, "loss": 1.2946, "step": 1255 },
    { "epoch": 1.26, "learning_rate": 0.0012381451885199776, "loss": 1.1944, "step": 1260 },
    { "epoch": 1.26, "learning_rate": 0.0012301879572312886, "loss": 1.1929, "step": 1265 },
    { "epoch": 1.27, "learning_rate": 0.0012222307259425998, "loss": 1.3793, "step": 1270 },
    { "epoch": 1.27, "learning_rate": 0.001214273494653911, "loss": 1.2986, "step": 1275 },
    { "epoch": 1.28, "learning_rate": 0.0012063162633652222, "loss": 1.3376, "step": 1280 },
    { "epoch": 1.28, "learning_rate": 0.0011983590320765335, "loss": 1.2406, "step": 1285 },
    { "epoch": 1.29, "learning_rate": 0.0011904018007878445, "loss": 1.3542, "step": 1290 },
    { "epoch": 1.29, "learning_rate": 0.0011824445694991557, "loss": 1.3042, "step": 1295 },
    { "epoch": 1.3, "learning_rate": 0.001174487338210467, "loss": 1.3189, "step": 1300 },
    { "epoch": 1.3, "learning_rate": 0.0011665301069217783, "loss": 1.2538, "step": 1305 },
    { "epoch": 1.31, "learning_rate": 0.0011585728756330895, "loss": 1.3152, "step": 1310 },
    { "epoch": 1.31, "learning_rate": 0.0011506156443444005, "loss": 1.2528, "step": 1315 },
    { "epoch": 1.32, "learning_rate": 0.0011426584130557118, "loss": 1.2752, "step": 1320 },
    { "epoch": 1.32, "learning_rate": 0.001134701181767023, "loss": 1.3363, "step": 1325 },
    { "epoch": 1.33, "learning_rate": 0.0011267439504783342, "loss": 1.2964, "step": 1330 },
    { "epoch": 1.33, "learning_rate": 0.0011187867191896454, "loss": 1.2031, "step": 1335 },
    { "epoch": 1.34, "learning_rate": 0.0011108294879009564, "loss": 1.3499, "step": 1340 },
    { "epoch": 1.34, "learning_rate": 0.0011028722566122679, "loss": 1.2842, "step": 1345 },
    { "epoch": 1.35, "learning_rate": 0.001094915025323579, "loss": 1.2108, "step": 1350 },
    { "epoch": 1.35, "learning_rate": 0.0010869577940348903, "loss": 1.3822, "step": 1355 },
    { "epoch": 1.36, "learning_rate": 0.0010790005627462015, "loss": 1.303, "step": 1360 },
    { "epoch": 1.36, "learning_rate": 0.0010710433314575125, "loss": 1.25, "step": 1365 },
    { "epoch": 1.37, "learning_rate": 0.0010630861001688237, "loss": 1.2506, "step": 1370 },
    { "epoch": 1.37, "learning_rate": 0.001055128868880135, "loss": 1.3144, "step": 1375 },
    { "epoch": 1.38, "learning_rate": 0.0010471716375914462, "loss": 1.3205, "step": 1380 },
    { "epoch": 1.38, "learning_rate": 0.0010392144063027574, "loss": 1.1656, "step": 1385 },
    { "epoch": 1.39, "learning_rate": 0.0010312571750140686, "loss": 1.2727, "step": 1390 },
    { "epoch": 1.39, "learning_rate": 0.0010232999437253798, "loss": 1.2887, "step": 1395 },
    { "epoch": 1.4, "learning_rate": 0.001015342712436691, "loss": 1.2759, "step": 1400 },
    { "epoch": 1.4, "learning_rate": 0.0010073854811480023, "loss": 1.2698, "step": 1405 },
    { "epoch": 1.41, "learning_rate": 0.0009994282498593135, "loss": 1.3167, "step": 1410 },
    { "epoch": 1.41, "learning_rate": 0.0009914710185706245, "loss": 1.2659, "step": 1415 },
    { "epoch": 1.42, "learning_rate": 0.0009835137872819357, "loss": 1.2209, "step": 1420 },
    { "epoch": 1.42, "learning_rate": 0.000975556555993247, "loss": 1.2778, "step": 1425 },
    { "epoch": 1.43, "learning_rate": 0.0009675993247045582, "loss": 1.2276, "step": 1430 },
    { "epoch": 1.43, "learning_rate": 0.0009596420934158695, "loss": 1.2704, "step": 1435 },
    { "epoch": 1.44, "learning_rate": 0.0009516848621271805, "loss": 1.2859, "step": 1440 },
    { "epoch": 1.44, "learning_rate": 0.0009437276308384918, "loss": 1.3544, "step": 1445 },
    { "epoch": 1.45, "learning_rate": 0.000935770399549803, "loss": 1.2112, "step": 1450 },
    { "epoch": 1.45, "learning_rate": 0.0009278131682611142, "loss": 1.2632, "step": 1455 },
    { "epoch": 1.46, "learning_rate": 0.0009198559369724254, "loss": 1.2354, "step": 1460 },
    { "epoch": 1.46, "learning_rate": 0.0009118987056837366, "loss": 1.2159, "step": 1465 },
    { "epoch": 1.47, "learning_rate": 0.0009039414743950478, "loss": 1.2772, "step": 1470 },
    { "epoch": 1.47, "learning_rate": 0.000895984243106359, "loss": 1.2689, "step": 1475 },
    { "epoch": 1.48, "learning_rate": 0.0008880270118176702, "loss": 1.2522, "step": 1480 },
    { "epoch": 1.48, "learning_rate": 0.0008800697805289814, "loss": 1.2642, "step": 1485 },
    { "epoch": 1.49, "learning_rate": 0.0008721125492402925, "loss": 1.3123, "step": 1490 },
    { "epoch": 1.49, "learning_rate": 0.0008641553179516038, "loss": 1.2526, "step": 1495 },
    { "epoch": 1.5, "learning_rate": 0.000856198086662915, "loss": 1.3556, "step": 1500 },
    { "epoch": 1.5, "learning_rate": 0.0008482408553742262, "loss": 1.1951, "step": 1505 },
    { "epoch": 1.51, "learning_rate": 0.0008402836240855374, "loss": 1.3322, "step": 1510 },
    { "epoch": 1.51, "learning_rate": 0.0008323263927968485, "loss": 1.2582, "step": 1515 },
    { "epoch": 1.52, "learning_rate": 0.0008243691615081597, "loss": 1.3049, "step": 1520 },
    { "epoch": 1.52, "learning_rate": 0.000816411930219471, "loss": 1.1639, "step": 1525 },
    { "epoch": 1.53, "learning_rate": 0.0008084546989307822, "loss": 1.2385, "step": 1530 },
    { "epoch": 1.53, "learning_rate": 0.0008004974676420934, "loss": 1.268, "step": 1535 },
    { "epoch": 1.54, "learning_rate": 0.0007925402363534045, "loss": 1.2466, "step": 1540 },
    { "epoch": 1.54, "learning_rate": 0.0007845830050647157, "loss": 1.2585, "step": 1545 },
    { "epoch": 1.55, "learning_rate": 0.000776625773776027, "loss": 1.2781, "step": 1550 },
    { "epoch": 1.55, "learning_rate": 0.0007686685424873382, "loss": 1.19, "step": 1555 },
    { "epoch": 1.56, "learning_rate": 0.0007607113111986494, "loss": 1.2718, "step": 1560 },
    { "epoch": 1.56, "learning_rate": 0.0007527540799099605, "loss": 1.2066, "step": 1565 },
    { "epoch": 1.57, "learning_rate": 0.0007447968486212717, "loss": 1.2605, "step": 1570 },
    { "epoch": 1.57, "learning_rate": 0.0007368396173325829, "loss": 1.2403, "step": 1575 },
    { "epoch": 1.58, "learning_rate": 0.0007288823860438941, "loss": 1.3096, "step": 1580 },
    { "epoch": 1.58, "learning_rate": 0.0007209251547552054, "loss": 1.152, "step": 1585 },
    { "epoch": 1.59, "learning_rate": 0.0007129679234665165, "loss": 1.282, "step": 1590 },
    { "epoch": 1.59, "learning_rate": 0.0007050106921778277, "loss": 1.2004, "step": 1595 },
    { "epoch": 1.6, "learning_rate": 0.0006970534608891389, "loss": 1.2049, "step": 1600 },
    { "epoch": 1.6, "learning_rate": 0.0006890962296004501, "loss": 1.1815, "step": 1605 },
    { "epoch": 1.61, "learning_rate": 0.0006811389983117614, "loss": 1.1898, "step": 1610 },
    { "epoch": 1.61, "learning_rate": 0.0006731817670230726, "loss": 1.244, "step": 1615 },
    { "epoch": 1.62, "learning_rate": 0.0006652245357343837, "loss": 1.2229, "step": 1620 },
    { "epoch": 1.62, "learning_rate": 0.0006572673044456949, "loss": 1.2084, "step": 1625 },
    { "epoch": 1.63, "learning_rate": 0.0006493100731570061, "loss": 1.3129, "step": 1630 },
    { "epoch": 1.63, "learning_rate": 0.0006413528418683173, "loss": 1.3901, "step": 1635 },
    { "epoch": 1.64, "learning_rate": 0.0006333956105796286, "loss": 1.2896, "step": 1640 },
    { "epoch": 1.64, "learning_rate": 0.0006254383792909397, "loss": 1.2866, "step": 1645 },
    { "epoch": 1.65, "learning_rate": 0.0006174811480022509, "loss": 1.3251, "step": 1650 },
    { "epoch": 1.65, "learning_rate": 0.0006095239167135622, "loss": 1.1864, "step": 1655 },
    { "epoch": 1.66, "learning_rate": 0.0006015666854248733, "loss": 1.2127, "step": 1660 },
    { "epoch": 1.66, "learning_rate": 0.0005936094541361845, "loss": 1.2944, "step": 1665 },
    { "epoch": 1.67, "learning_rate": 0.0005856522228474956, "loss": 1.3106, "step": 1670 },
    { "epoch": 1.67, "learning_rate": 0.000577694991558807, "loss": 1.3614, "step": 1675 },
    { "epoch": 1.68, "learning_rate": 0.0005697377602701182, "loss": 1.2786, "step": 1680 },
    { "epoch": 1.68, "learning_rate": 0.0005617805289814293, "loss": 1.2758, "step": 1685 },
    { "epoch": 1.69, "learning_rate": 0.0005538232976927405, "loss": 1.31, "step": 1690 },
    { "epoch": 1.69, "learning_rate": 0.0005458660664040517, "loss": 1.2246, "step": 1695 },
    { "epoch": 1.7, "learning_rate": 0.000537908835115363, "loss": 1.2295, "step": 1700 },
    { "epoch": 1.7, "learning_rate": 0.0005299516038266742, "loss": 1.2822, "step": 1705 },
    { "epoch": 1.71, "learning_rate": 0.0005219943725379853, "loss": 1.2582, "step": 1710 },
    { "epoch": 1.71, "learning_rate": 0.0005140371412492965, "loss": 1.1898, "step": 1715 },
    { "epoch": 1.72, "learning_rate": 0.0005060799099606077, "loss": 1.2697, "step": 1720 },
    { "epoch": 1.72, "learning_rate": 0.0004981226786719189, "loss": 1.2696, "step": 1725 },
    { "epoch": 1.73, "learning_rate": 0.0004901654473832302, "loss": 1.2383, "step": 1730 },
    { "epoch": 1.73, "learning_rate": 0.0004822082160945413, "loss": 1.2665, "step": 1735 },
    { "epoch": 1.74, "learning_rate": 0.00047425098480585254, "loss": 1.3193, "step": 1740 },
    { "epoch": 1.74, "learning_rate": 0.0004662937535171637, "loss": 1.2431, "step": 1745 },
    { "epoch": 1.75, "learning_rate": 0.0004583365222284749, "loss": 1.2243, "step": 1750 },
    { "epoch": 1.75, "learning_rate": 0.00045037929093978614, "loss": 1.2323, "step": 1755 },
    { "epoch": 1.76, "learning_rate": 0.0004424220596510973, "loss": 1.3318, "step": 1760 },
    { "epoch": 1.76, "learning_rate": 0.0004344648283624085, "loss": 1.2339, "step": 1765 },
    { "epoch": 1.77, "learning_rate": 0.0004265075970737197, "loss": 1.3316, "step": 1770 },
    { "epoch": 1.77, "learning_rate": 0.0004185503657850309, "loss": 1.2981, "step": 1775 },
    { "epoch": 1.78, "learning_rate": 0.00041059313449634213, "loss": 1.1703, "step": 1780 },
    { "epoch": 1.78, "learning_rate": 0.0004026359032076533, "loss": 1.2283, "step": 1785 },
    { "epoch": 1.79, "learning_rate": 0.0003946786719189645, "loss": 1.2554, "step": 1790 },
    { "epoch": 1.79, "learning_rate": 0.00038672144063027573, "loss": 1.3264, "step": 1795 },
    { "epoch": 1.8, "learning_rate": 0.0003787642093415869, "loss": 1.2297, "step": 1800 },
    { "epoch": 1.8, "learning_rate": 0.0003708069780528981, "loss": 1.3238, "step": 1805 },
    { "epoch": 1.81, "learning_rate": 0.0003628497467642093, "loss": 1.2793, "step": 1810 },
    { "epoch": 1.81, "learning_rate": 0.0003548925154755205, "loss": 1.2891, "step": 1815 },
    { "epoch": 1.82, "learning_rate": 0.0003469352841868317, "loss": 1.2713, "step": 1820 },
    { "epoch": 1.82, "learning_rate": 0.0003389780528981429, "loss": 1.218, "step": 1825 },
    { "epoch": 1.83, "learning_rate": 0.0003310208216094541, "loss": 1.29, "step": 1830 },
    { "epoch": 1.83, "learning_rate": 0.0003230635903207653, "loss": 1.2781, "step": 1835 },
    { "epoch": 1.84, "learning_rate": 0.00031510635903207653, "loss": 1.2553, "step": 1840 },
    { "epoch": 1.84, "learning_rate": 0.0003071491277433877, "loss": 1.3065, "step": 1845 },
    { "epoch": 1.85, "learning_rate": 0.0002991918964546989, "loss": 1.2249, "step": 1850 },
    { "epoch": 1.85, "learning_rate": 0.0002912346651660101, "loss": 1.1943, "step": 1855 },
    { "epoch": 1.86, "learning_rate": 0.0002832774338773213, "loss": 1.3517, "step": 1860 },
    { "epoch": 1.86, "learning_rate": 0.0002753202025886325, "loss": 1.2667, "step": 1865 },
    { "epoch": 1.87, "learning_rate": 0.0002673629712999437, "loss": 1.2597, "step": 1870 },
    { "epoch": 1.87, "learning_rate": 0.0002594057400112549, "loss": 1.2336, "step": 1875 },
    { "epoch": 1.88, "learning_rate": 0.0002514485087225661, "loss": 1.1898, "step": 1880 },
    { "epoch": 1.88, "learning_rate": 0.0002434912774338773, "loss": 1.1498, "step": 1885 },
    { "epoch": 1.89, "learning_rate": 0.0002355340461451885, "loss": 1.2345, "step": 1890 },
    { "epoch": 1.89, "learning_rate": 0.0002275768148564997, "loss": 1.2426, "step": 1895 },
    { "epoch": 1.9, "learning_rate": 0.00021961958356781088, "loss": 1.256, "step": 1900 },
    { "epoch": 1.9, "learning_rate": 0.00021166235227912213, "loss": 1.2133, "step": 1905 },
    { "epoch": 1.91, "learning_rate": 0.00020370512099043332, "loss": 1.1982, "step": 1910 },
    { "epoch": 1.91, "learning_rate": 0.0001957478897017445, "loss": 1.2487, "step": 1915 },
    { "epoch": 1.92, "learning_rate": 0.0001877906584130557, "loss": 1.3181, "step": 1920 },
    { "epoch": 1.92, "learning_rate": 0.0001798334271243669, "loss": 1.2821, "step": 1925 },
    { "epoch": 1.93, "learning_rate": 0.00017187619583567808, "loss": 1.2304, "step": 1930 },
    { "epoch": 1.93, "learning_rate": 0.0001639189645469893, "loss": 1.2659, "step": 1935 },
    { "epoch": 1.94, "learning_rate": 0.0001559617332583005, "loss": 1.2992, "step": 1940 },
    { "epoch": 1.94, "learning_rate": 0.00014800450196961168, "loss": 1.2244, "step": 1945 },
    { "epoch": 1.95, "learning_rate": 0.0001400472706809229, "loss": 1.2843, "step": 1950 },
    { "epoch": 1.95, "learning_rate": 0.0001320900393922341, "loss": 1.3562, "step": 1955 },
    { "epoch": 1.96, "learning_rate": 0.0001241328081035453, "loss": 1.2034, "step": 1960 },
    { "epoch": 1.96, "learning_rate": 0.00011617557681485649, "loss": 1.1652, "step": 1965 },
    { "epoch": 1.97, "learning_rate": 0.00010821834552616768, "loss": 1.3189, "step": 1970 },
    { "epoch": 1.97, "learning_rate": 0.00010026111423747889, "loss": 1.2199, "step": 1975 },
    { "epoch": 1.98, "learning_rate": 9.230388294879008e-05, "loss": 1.2195, "step": 1980 },
    { "epoch": 1.98, "learning_rate": 8.434665166010128e-05, "loss": 1.2418, "step": 1985 },
    { "epoch": 1.99, "learning_rate": 7.638942037141249e-05, "loss": 1.2555, "step": 1990 },
    { "epoch": 1.99, "learning_rate": 6.843218908272369e-05, "loss": 1.3079, "step": 1995 },
    { "epoch": 2.0, "learning_rate": 6.047495779403489e-05, "loss": 1.2959, "step": 2000 },
    { "epoch": 2.0, "learning_rate": 5.251772650534609e-05, "loss": 1.3329, "step": 2005 },
    { "epoch": 2.01, "learning_rate": 4.456049521665728e-05, "loss": 1.2211, "step": 2010 },
    { "epoch": 2.01, "learning_rate": 3.660326392796848e-05, "loss": 1.1615, "step": 2015 },
    { "epoch": 2.02, "learning_rate": 2.8646032639279683e-05, "loss": 1.1695, "step": 2020 },
    { "epoch": 2.02, "learning_rate": 2.068880135059088e-05, "loss": 1.1895, "step": 2025 },
    { "epoch": 2.03, "learning_rate": 1.2731570061902082e-05, "loss": 1.2908, "step": 2030 },
    { "epoch": 2.03, "learning_rate": 4.7743387732132805e-06, "loss": 1.2253, "step": 2035 },
    { "epoch": 2.04, "step": 2038, "total_flos": 2.5582587482196332e+23, "train_loss": 0.02294661615032911, "train_runtime": 3034.1773, "train_samples_per_second": 11004.826, "train_steps_per_second": 0.672 }
  ],
  "max_steps": 2038,
  "num_train_epochs": 3,
  "total_flos": 2.5582587482196332e+23,
  "trial_name": null,
  "trial_params": null
}
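
The `log_history` above is easiest to read programmatically. Below is a minimal sketch for doing so, assuming the file is saved locally as `trainer_state.json` (the path is an assumption, not part of this commit). It separates the per-step logging records from the final training summary and plots the loss and learning-rate curves, which makes the schedule visible directly: linear warmup to a peak learning rate of about 2.82e-03 around step 265, then linear decay toward zero by the final step 2038.

```python
import json

import matplotlib.pyplot as plt

# Assumed local path; point this at the trainer_state.json from this commit.
with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step records carry both "loss" and "learning_rate"; the last record
# in log_history is the end-of-training summary and is skipped here.
logs = [e for e in state["log_history"] if "loss" in e and "learning_rate" in e]
steps = [e["step"] for e in logs]

fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(steps, [e["loss"] for e in logs])
ax_loss.set(xlabel="step", ylabel="loss", title="Training loss")
ax_lr.plot(steps, [e["learning_rate"] for e in logs])
ax_lr.set(xlabel="step", ylabel="learning rate", title="LR schedule")
fig.tight_layout()
plt.show()
```

As a sanity check on the summary record, train_samples_per_second / train_steps_per_second = 11004.826 / 0.672 ≈ 16,376, i.e. a global batch of roughly 16k sequences per optimizer step, consistent with large-batch pretraining at this peak learning rate.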