{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 785,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.032,
      "grad_norm": 4.925816001371351,
      "learning_rate": 2.0253164556962026e-06,
      "loss": 0.6621,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.15316620469093323,
      "step": 5,
      "valid_targets_mean": 5615.4,
      "valid_targets_min": 1301
    },
    {
      "epoch": 0.064,
      "grad_norm": 3.6038645425133113,
      "learning_rate": 4.556962025316456e-06,
      "loss": 0.6885,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.1408233940601349,
      "step": 10,
      "valid_targets_mean": 5344.8,
      "valid_targets_min": 1072
    },
    {
      "epoch": 0.096,
      "grad_norm": 1.9219967246450544,
      "learning_rate": 7.08860759493671e-06,
      "loss": 0.6124,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.16160334646701813,
      "step": 15,
      "valid_targets_mean": 5692.6,
      "valid_targets_min": 2447
    },
    {
      "epoch": 0.128,
      "grad_norm": 0.9066524081071425,
      "learning_rate": 9.620253164556963e-06,
      "loss": 0.5859,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.1370714008808136,
      "step": 20,
      "valid_targets_mean": 4999.8,
      "valid_targets_min": 834
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.6858551047823068,
      "learning_rate": 1.2151898734177216e-05,
      "loss": 0.5577,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.1048346608877182,
      "step": 25,
      "valid_targets_mean": 3834.4,
      "valid_targets_min": 627
    },
    {
      "epoch": 0.192,
      "grad_norm": 0.4737056753916451,
      "learning_rate": 1.468354430379747e-05,
      "loss": 0.5357,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.11810168623924255,
      "step": 30,
      "valid_targets_mean": 5133.2,
      "valid_targets_min": 695
    },
    {
      "epoch": 0.224,
      "grad_norm": 0.3987307487395899,
      "learning_rate": 1.7215189873417723e-05,
      "loss": 0.5225,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.15136444568634033,
      "step": 35,
      "valid_targets_mean": 5572.9,
      "valid_targets_min": 1349
    },
    {
      "epoch": 0.256,
      "grad_norm": 0.30408841177006257,
      "learning_rate": 1.974683544303798e-05,
      "loss": 0.4856,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09305448830127716,
      "step": 40,
      "valid_targets_mean": 5469.4,
      "valid_targets_min": 1156
    },
    {
      "epoch": 0.288,
      "grad_norm": 0.28023584477307756,
      "learning_rate": 2.2278481012658228e-05,
      "loss": 0.4699,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.08279567956924438,
      "step": 45,
      "valid_targets_mean": 3771.6,
      "valid_targets_min": 505
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.2623299739038126,
      "learning_rate": 2.481012658227848e-05,
      "loss": 0.4638,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.0865197628736496,
      "step": 50,
      "valid_targets_mean": 4588.6,
      "valid_targets_min": 891
    },
    {
      "epoch": 0.352,
      "grad_norm": 0.23584052712257855,
      "learning_rate": 2.7341772151898737e-05,
      "loss": 0.4584,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.11883324384689331,
      "step": 55,
      "valid_targets_mean": 6565.4,
      "valid_targets_min": 1072
    },
    {
      "epoch": 0.384,
      "grad_norm": 0.22945467495730265,
      "learning_rate": 2.987341772151899e-05,
      "loss": 0.4345,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.10670436173677444,
      "step": 60,
      "valid_targets_mean": 5431.0,
      "valid_targets_min": 748
    },
    {
      "epoch": 0.416,
      "grad_norm": 0.25958034421769316,
      "learning_rate": 3.240506329113924e-05,
      "loss": 0.4568,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.10006660968065262,
      "step": 65,
      "valid_targets_mean": 4435.2,
      "valid_targets_min": 666
    },
    {
      "epoch": 0.448,
      "grad_norm": 0.25101114300254473,
      "learning_rate": 3.49367088607595e-05,
      "loss": 0.4288,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.0953449234366417,
      "step": 70,
      "valid_targets_mean": 5001.8,
      "valid_targets_min": 770
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.21760950700440734,
      "learning_rate": 3.746835443037975e-05,
      "loss": 0.4386,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09250706434249878,
      "step": 75,
      "valid_targets_mean": 5227.5,
      "valid_targets_min": 442
    },
    {
      "epoch": 0.512,
      "grad_norm": 0.26479413690674075,
      "learning_rate": 4e-05,
      "loss": 0.4307,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.10058815777301788,
      "step": 80,
      "valid_targets_mean": 4494.2,
      "valid_targets_min": 712
    },
    {
      "epoch": 0.544,
      "grad_norm": 0.2336713450215106,
      "learning_rate": 3.999504991751045e-05,
      "loss": 0.4131,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.0961797833442688,
      "step": 85,
      "valid_targets_mean": 6049.8,
      "valid_targets_min": 659
    },
    {
      "epoch": 0.576,
      "grad_norm": 0.24198160764095675,
      "learning_rate": 3.9980202120373464e-05,
      "loss": 0.4148,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.10415308177471161,
      "step": 90,
      "valid_targets_mean": 4485.3,
      "valid_targets_min": 670
    },
    {
      "epoch": 0.608,
      "grad_norm": 0.22408845086959975,
      "learning_rate": 3.995546395837111e-05,
      "loss": 0.4156,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.1032426655292511,
      "step": 95,
      "valid_targets_mean": 5694.4,
      "valid_targets_min": 974
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.36120176963915784,
      "learning_rate": 3.992084767709763e-05,
      "loss": 0.4079,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.10340757668018341,
      "step": 100,
      "valid_targets_mean": 5096.6,
      "valid_targets_min": 886
    },
    {
      "epoch": 0.672,
      "grad_norm": 0.24256095374026376,
      "learning_rate": 3.987637041189781e-05,
      "loss": 0.409,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.12146667391061783,
      "step": 105,
      "valid_targets_mean": 6735.6,
      "valid_targets_min": 760
    },
    {
      "epoch": 0.704,
      "grad_norm": 0.22862627049602116,
      "learning_rate": 3.982205417938482e-05,
      "loss": 0.4045,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.1170661598443985,
      "step": 110,
      "valid_targets_mean": 6922.1,
      "valid_targets_min": 1467
    },
    {
      "epoch": 0.736,
      "grad_norm": 0.25198968443722136,
      "learning_rate": 3.975792586654179e-05,
      "loss": 0.4027,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.11473138630390167,
      "step": 115,
      "valid_targets_mean": 5690.5,
      "valid_targets_min": 576
    },
    {
      "epoch": 0.768,
      "grad_norm": 0.22248181013285756,
      "learning_rate": 3.968401721741259e-05,
      "loss": 0.4025,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.10501393675804138,
      "step": 120,
      "valid_targets_mean": 5778.5,
      "valid_targets_min": 1069
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.2307629663338528,
      "learning_rate": 3.960036481738819e-05,
      "loss": 0.4056,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.08154725283384323,
      "step": 125,
      "valid_targets_mean": 5937.2,
      "valid_targets_min": 1019
    },
    {
      "epoch": 0.832,
      "grad_norm": 0.23384759922389925,
      "learning_rate": 3.950701007509667e-05,
      "loss": 0.4039,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.10147960484027863,
      "step": 130,
      "valid_targets_mean": 4729.8,
      "valid_targets_min": 687
    },
    {
      "epoch": 0.864,
      "grad_norm": 0.23008776759865265,
      "learning_rate": 3.940399920190552e-05,
      "loss": 0.4101,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09820076823234558,
      "step": 135,
      "valid_targets_mean": 4614.9,
      "valid_targets_min": 496
    },
    {
      "epoch": 0.896,
      "grad_norm": 0.23922899554829585,
      "learning_rate": 3.92913831890467e-05,
      "loss": 0.3919,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.08367419987916946,
      "step": 140,
      "valid_targets_mean": 5070.4,
      "valid_targets_min": 729
    },
    {
      "epoch": 0.928,
      "grad_norm": 0.2273633281215133,
      "learning_rate": 3.916921778237556e-05,
      "loss": 0.3775,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09413500875234604,
      "step": 145,
      "valid_targets_mean": 5735.8,
      "valid_targets_min": 812
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.26293258335241,
      "learning_rate": 3.903756345477612e-05,
      "loss": 0.3917,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.11875556409358978,
      "step": 150,
      "valid_targets_mean": 5658.6,
      "valid_targets_min": 987
    },
    {
      "epoch": 0.992,
      "grad_norm": 0.24870909247092857,
      "learning_rate": 3.889648537622657e-05,
      "loss": 0.3837,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09130781888961792,
      "step": 155,
      "valid_targets_mean": 5792.5,
      "valid_targets_min": 676
    },
    {
      "epoch": 1.0192,
      "grad_norm": 0.23656516386477458,
      "learning_rate": 3.874605338153952e-05,
      "loss": 0.3957,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09010981023311615,
      "step": 160,
      "valid_targets_mean": 4240.8,
      "valid_targets_min": 575
    },
    {
      "epoch": 1.0512,
      "grad_norm": 0.24052725165140795,
      "learning_rate": 3.8586341935793265e-05,
      "loss": 0.3859,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09425537288188934,
      "step": 165,
      "valid_targets_mean": 5526.6,
      "valid_targets_min": 683
    },
    {
      "epoch": 1.0832,
      "grad_norm": 0.22871310720091415,
      "learning_rate": 3.841743009747089e-05,
      "loss": 0.3889,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.08064226806163788,
      "step": 170,
      "valid_targets_mean": 4734.3,
      "valid_targets_min": 858
    },
    {
      "epoch": 1.1152,
      "grad_norm": 0.23207100949526205,
      "learning_rate": 3.8239401479325714e-05,
      "loss": 0.378,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09184914827346802,
      "step": 175,
      "valid_targets_mean": 4519.6,
      "valid_targets_min": 535
    },
    {
      "epoch": 1.1472,
      "grad_norm": 0.2268464194505428,
      "learning_rate": 3.8052344206992276e-05,
      "loss": 0.3799,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.1001918688416481,
      "step": 180,
      "valid_targets_mean": 5724.2,
      "valid_targets_min": 1237
    },
    {
      "epoch": 1.1792,
      "grad_norm": 0.2565054263960601,
      "learning_rate": 3.7856350875363396e-05,
      "loss": 0.383,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.0865120217204094,
      "step": 185,
      "valid_targets_mean": 4131.6,
      "valid_targets_min": 826
    },
    {
      "epoch": 1.2112,
      "grad_norm": 0.20879271728086604,
      "learning_rate": 3.765151850275497e-05,
      "loss": 0.3821,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.0937279611825943,
      "step": 190,
      "valid_targets_mean": 5937.2,
      "valid_targets_min": 735
    },
    {
      "epoch": 1.2432,
      "grad_norm": 0.23126357655080354,
      "learning_rate": 3.7437948482881104e-05,
      "loss": 0.3713,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.10395435243844986,
      "step": 195,
      "valid_targets_mean": 6906.5,
      "valid_targets_min": 2016
    },
    {
      "epoch": 1.2752,
      "grad_norm": 0.23717319419246105,
      "learning_rate": 3.721574653466336e-05,
      "loss": 0.3828,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09835539758205414,
      "step": 200,
      "valid_targets_mean": 5437.2,
      "valid_targets_min": 824
    },
    {
      "epoch": 1.3072,
      "grad_norm": 0.35968020546013296,
      "learning_rate": 3.698502264989903e-05,
      "loss": 0.3815,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.07530041038990021,
      "step": 205,
      "valid_targets_mean": 5013.6,
      "valid_targets_min": 712
    },
    {
      "epoch": 1.3392,
      "grad_norm": 0.24046800998054815,
      "learning_rate": 3.674589103881432e-05,
      "loss": 0.3878,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.11328396946191788,
      "step": 210,
      "valid_targets_mean": 6360.2,
      "valid_targets_min": 1310
    },
    {
      "epoch": 1.3712,
      "grad_norm": 0.24526162606684027,
      "learning_rate": 3.64984700735293e-05,
      "loss": 0.3818,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.07932275533676147,
      "step": 215,
      "valid_targets_mean": 4206.3,
      "valid_targets_min": 544
    },
    {
      "epoch": 1.4032,
      "grad_norm": 0.281741811074157,
      "learning_rate": 3.624288222946273e-05,
      "loss": 0.3799,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.10239493101835251,
      "step": 220,
      "valid_targets_mean": 6040.5,
      "valid_targets_min": 615
    },
    {
      "epoch": 1.4352,
      "grad_norm": 0.23918163077762844,
      "learning_rate": 3.597925402470578e-05,
      "loss": 0.377,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.10365035384893417,
      "step": 225,
      "valid_targets_mean": 5706.7,
      "valid_targets_min": 747
    },
    {
      "epoch": 1.4672,
      "grad_norm": 0.2307808416847612,
      "learning_rate": 3.570771595739445e-05,
      "loss": 0.3814,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09817519783973694,
      "step": 230,
      "valid_targets_mean": 6541.8,
      "valid_targets_min": 1951
    },
    {
      "epoch": 1.4992,
      "grad_norm": 0.28294813209329395,
      "learning_rate": 3.5428402441111964e-05,
      "loss": 0.3766,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.104357048869133,
      "step": 235,
      "valid_targets_mean": 5665.6,
      "valid_targets_min": 946
    },
    {
      "epoch": 1.5312000000000001,
      "grad_norm": 0.22957127344835665,
      "learning_rate": 3.5141451738352936e-05,
      "loss": 0.3798,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.07624883949756622,
      "step": 240,
      "valid_targets_mean": 4442.8,
      "valid_targets_min": 627
    },
    {
      "epoch": 1.5632000000000001,
      "grad_norm": 0.23157621260116973,
      "learning_rate": 3.4847005892082266e-05,
      "loss": 0.3768,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09372389316558838,
      "step": 245,
      "valid_targets_mean": 6213.5,
      "valid_targets_min": 1380
    },
    {
      "epoch": 1.5952,
      "grad_norm": 0.251557645562299,
      "learning_rate": 3.454521065542273e-05,
      "loss": 0.3978,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09636591374874115,
      "step": 250,
      "valid_targets_mean": 6230.4,
      "valid_targets_min": 988
    },
    {
      "epoch": 1.6272,
      "grad_norm": 0.22282170912257812,
      "learning_rate": 3.423621541950597e-05,
      "loss": 0.3775,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.07495754957199097,
      "step": 255,
      "valid_targets_mean": 5801.8,
      "valid_targets_min": 699
    },
    {
      "epoch": 1.6592,
      "grad_norm": 0.21437232283170424,
      "learning_rate": 3.3920173139522664e-05,
      "loss": 0.3728,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09263433516025543,
      "step": 260,
      "valid_targets_mean": 6159.1,
      "valid_targets_min": 840
    },
    {
      "epoch": 1.6912,
      "grad_norm": 0.23003116022099923,
      "learning_rate": 3.35972402590084e-05,
      "loss": 0.374,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.08770999312400818,
      "step": 265,
      "valid_targets_mean": 4599.5,
      "valid_targets_min": 717
    },
    {
      "epoch": 1.7231999999999998,
      "grad_norm": 0.22184131464205806,
      "learning_rate": 3.326757663240291e-05,
      "loss": 0.3655,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.10612646490335464,
      "step": 270,
      "valid_targets_mean": 6200.3,
      "valid_targets_min": 834
    },
    {
      "epoch": 1.7551999999999999,
      "grad_norm": 0.232368163371399,
      "learning_rate": 3.293134544592073e-05,
      "loss": 0.3652,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09963122755289078,
      "step": 275,
      "valid_targets_mean": 5060.5,
      "valid_targets_min": 1147
    },
    {
      "epoch": 1.7872,
      "grad_norm": 0.23732321351995295,
      "learning_rate": 3.258871313677274e-05,
      "loss": 0.3745,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.1050419732928276,
      "step": 280,
      "valid_targets_mean": 6499.1,
      "valid_targets_min": 1588
    },
    {
      "epoch": 1.8192,
      "grad_norm": 0.23570370078440928,
      "learning_rate": 3.2239849310778316e-05,
      "loss": 0.3684,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09922470897436142,
      "step": 285,
      "valid_targets_mean": 5659.1,
      "valid_targets_min": 994
    },
    {
      "epoch": 1.8512,
      "grad_norm": 0.2181429073249209,
      "learning_rate": 3.188492665840909e-05,
      "loss": 0.372,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.0864710733294487,
      "step": 290,
      "valid_targets_mean": 6055.6,
      "valid_targets_min": 672
    },
    {
      "epoch": 1.8832,
      "grad_norm": 0.22598138320758313,
      "learning_rate": 3.1524120869305726e-05,
      "loss": 0.3775,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09190896153450012,
      "step": 295,
      "valid_targets_mean": 5374.9,
      "valid_targets_min": 805
    },
    {
      "epoch": 1.9152,
      "grad_norm": 0.2194343909194051,
      "learning_rate": 3.11576105453101e-05,
      "loss": 0.3711,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.10858743637800217,
      "step": 300,
      "valid_targets_mean": 6487.9,
      "valid_targets_min": 718
    },
    {
      "epoch": 1.9472,
      "grad_norm": 0.21797248434187552,
      "learning_rate": 3.0785577112055916e-05,
      "loss": 0.3697,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.08281909674406052,
      "step": 305,
      "valid_targets_mean": 5484.2,
      "valid_targets_min": 605
    },
    {
      "epoch": 1.9792,
      "grad_norm": 0.2504302974310377,
      "learning_rate": 3.040820472916153e-05,
      "loss": 0.381,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.08115041255950928,
      "step": 310,
      "valid_targets_mean": 4204.3,
      "valid_targets_min": 663
    },
    {
      "epoch": 2.0064,
      "grad_norm": 0.2278583979366395,
      "learning_rate": 3.002568019906939e-05,
      "loss": 0.3751,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09375721216201782,
      "step": 315,
      "valid_targets_mean": 6225.1,
      "valid_targets_min": 1309
    },
    {
      "epoch": 2.0384,
      "grad_norm": 0.2302462717851301,
      "learning_rate": 2.963819287457733e-05,
      "loss": 0.3752,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.10975902527570724,
      "step": 320,
      "valid_targets_mean": 6777.1,
      "valid_targets_min": 842
    },
    {
      "epoch": 2.0704,
      "grad_norm": 0.2500340565679184,
      "learning_rate": 2.924593456510733e-05,
      "loss": 0.3667,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.10280564427375793,
      "step": 325,
      "valid_targets_mean": 6622.0,
      "valid_targets_min": 962
    },
    {
      "epoch": 2.1024,
      "grad_norm": 0.21337742816465596,
      "learning_rate": 2.8849099441758306e-05,
      "loss": 0.3569,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.07776390016078949,
      "step": 330,
      "valid_targets_mean": 5986.1,
      "valid_targets_min": 631
    },
    {
      "epoch": 2.1344,
      "grad_norm": 0.2508936239712304,
      "learning_rate": 2.844788394118979e-05,
      "loss": 0.361,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.0718369409441948,
      "step": 335,
      "valid_targets_mean": 4342.3,
      "valid_targets_min": 831
    },
    {
      "epoch": 2.1664,
      "grad_norm": 0.23955711979482625,
      "learning_rate": 2.8042486668384164e-05,
      "loss": 0.3623,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.11865168809890747,
      "step": 340,
      "valid_targets_mean": 6094.5,
      "valid_targets_min": 798
    },
    {
      "epoch": 2.1984,
      "grad_norm": 0.22032798211593765,
      "learning_rate": 2.7633108298335582e-05,
      "loss": 0.3607,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.08003910630941391,
      "step": 345,
      "valid_targets_mean": 6063.9,
      "valid_targets_min": 1315
    },
    {
      "epoch": 2.2304,
      "grad_norm": 0.23157157616125337,
      "learning_rate": 2.721995147671416e-05,
      "loss": 0.3621,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.10399216413497925,
      "step": 350,
      "valid_targets_mean": 6594.4,
      "valid_targets_min": 773
    },
    {
      "epoch": 2.2624,
      "grad_norm": 0.22988855886392837,
      "learning_rate": 2.68032207195547e-05,
      "loss": 0.3612,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.07251277565956116,
      "step": 355,
      "valid_targets_mean": 4493.4,
      "valid_targets_min": 779
    },
    {
      "epoch": 2.2944,
      "grad_norm": 0.25369176399383053,
      "learning_rate": 2.6383122312019604e-05,
      "loss": 0.3603,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.08884517848491669,
      "step": 360,
      "valid_targets_mean": 5150.9,
      "valid_targets_min": 720
    },
    {
      "epoch": 2.3264,
      "grad_norm": 0.23021442547864848,
      "learning_rate": 2.595986420628597e-05,
      "loss": 0.3695,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.08386367559432983,
      "step": 365,
      "valid_targets_mean": 5094.7,
      "valid_targets_min": 1164
    },
    {
      "epoch": 2.3584,
      "grad_norm": 0.21827176704737583,
      "learning_rate": 2.5533655918607573e-05,
      "loss": 0.3641,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09923890233039856,
      "step": 370,
      "valid_targets_mean": 6310.4,
      "valid_targets_min": 843
    },
    {
      "epoch": 2.3904,
      "grad_norm": 0.2142635583748864,
      "learning_rate": 2.510470842560259e-05,
      "loss": 0.3635,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09996455907821655,
      "step": 375,
      "valid_targets_mean": 6558.8,
      "valid_targets_min": 475
    },
    {
      "epoch": 2.4224,
      "grad_norm": 0.24122556224556901,
      "learning_rate": 2.467323405981841e-05,
      "loss": 0.3649,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.07641372084617615,
      "step": 380,
      "valid_targets_mean": 4674.8,
      "valid_targets_min": 724
    },
    {
      "epoch": 2.4544,
      "grad_norm": 0.22988548566979566,
      "learning_rate": 2.423944640462533e-05,
      "loss": 0.3597,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.11224524676799774,
      "step": 385,
      "valid_targets_mean": 6512.4,
      "valid_targets_min": 1490
    },
    {
      "epoch": 2.4864,
      "grad_norm": 0.2382366640328879,
      "learning_rate": 2.3803560188490968e-05,
      "loss": 0.3606,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.08308979868888855,
      "step": 390,
      "valid_targets_mean": 4992.9,
      "valid_targets_min": 603
    },
    {
      "epoch": 2.5183999999999997,
      "grad_norm": 0.231316806080095,
      "learning_rate": 2.336579117868789e-05,
      "loss": 0.3641,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09834016114473343,
      "step": 395,
      "valid_targets_mean": 6120.9,
      "valid_targets_min": 1146
    },
    {
      "epoch": 2.5504,
      "grad_norm": 0.2158330304056028,
      "learning_rate": 2.292635607448711e-05,
      "loss": 0.3621,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.06864991039037704,
      "step": 400,
      "valid_targets_mean": 4506.3,
      "valid_targets_min": 484
    },
    {
      "epoch": 2.5824,
      "grad_norm": 0.2366088070452868,
      "learning_rate": 2.248547239989008e-05,
      "loss": 0.3692,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.08551353216171265,
      "step": 405,
      "valid_targets_mean": 5304.5,
      "valid_targets_min": 589
    },
    {
      "epoch": 2.6144,
      "grad_norm": 0.2286577480849222,
      "learning_rate": 2.204335839595255e-05,
      "loss": 0.365,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.0804942399263382,
      "step": 410,
      "valid_targets_mean": 4903.5,
      "valid_targets_min": 861
    },
    {
      "epoch": 2.6464,
      "grad_norm": 0.23178373514592107,
      "learning_rate": 2.1600232912753452e-05,
      "loss": 0.3636,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.08426219969987869,
      "step": 415,
      "valid_targets_mean": 4515.4,
      "valid_targets_min": 615
    },
    {
      "epoch": 2.6784,
      "grad_norm": 0.23415850034103794,
      "learning_rate": 2.1156315301062293e-05,
      "loss": 0.367,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09100905060768127,
      "step": 420,
      "valid_targets_mean": 5502.7,
      "valid_targets_min": 644
    },
    {
      "epoch": 2.7104,
      "grad_norm": 0.25788346438164567,
      "learning_rate": 2.0711825303758712e-05,
      "loss": 0.3656,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.10013468563556671,
      "step": 425,
      "valid_targets_mean": 7158.4,
      "valid_targets_min": 920
    },
    {
      "epoch": 2.7424,
      "grad_norm": 0.2663168450026528,
      "learning_rate": 2.0266982947057962e-05,
      "loss": 0.3639,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.10933417081832886,
      "step": 430,
      "valid_targets_mean": 5575.8,
      "valid_targets_min": 585
    },
    {
      "epoch": 2.7744,
      "grad_norm": 0.22657666928978396,
      "learning_rate": 1.9822008431596083e-05,
      "loss": 0.3511,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.0875961184501648,
      "step": 435,
      "valid_targets_mean": 5409.7,
      "valid_targets_min": 619
    },
    {
      "epoch": 2.8064,
      "grad_norm": 0.20762446157137557,
      "learning_rate": 1.937712202342881e-05,
      "loss": 0.3531,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09684012830257416,
      "step": 440,
      "valid_targets_mean": 7491.9,
      "valid_targets_min": 997
    },
    {
      "epoch": 2.8384,
      "grad_norm": 0.2094923424751093,
      "learning_rate": 1.8932543944998037e-05,
      "loss": 0.3654,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.06848344206809998,
      "step": 445,
      "valid_targets_mean": 4123.4,
      "valid_targets_min": 972
    },
    {
      "epoch": 2.8704,
      "grad_norm": 0.238835533295405,
      "learning_rate": 1.8488494266119877e-05,
      "loss": 0.3622,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.07796338200569153,
      "step": 450,
      "valid_targets_mean": 4773.1,
      "valid_targets_min": 535
    },
    {
      "epoch": 2.9024,
      "grad_norm": 0.23198080879921099,
      "learning_rate": 1.804519279504834e-05,
      "loss": 0.3505,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09783093631267548,
      "step": 455,
      "valid_targets_mean": 4947.8,
      "valid_targets_min": 956
    },
    {
      "epoch": 2.9344,
      "grad_norm": 0.22481005467458817,
      "learning_rate": 1.7602858969668365e-05,
      "loss": 0.3586,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.0951041504740715,
      "step": 460,
      "valid_targets_mean": 5319.2,
      "valid_targets_min": 494
    },
    {
      "epoch": 2.9664,
      "grad_norm": 0.21909144022779042,
      "learning_rate": 1.716171174887231e-05,
      "loss": 0.355,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.08194763958454132,
      "step": 465,
      "valid_targets_mean": 6034.9,
      "valid_targets_min": 783
    },
    {
      "epoch": 2.9984,
      "grad_norm": 0.24361544866934678,
      "learning_rate": 1.6721969504173484e-05,
      "loss": 0.3651,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09759137034416199,
      "step": 470,
      "valid_targets_mean": 5008.9,
      "valid_targets_min": 924
    },
    {
      "epoch": 3.0256,
      "grad_norm": 0.2016601334929828,
      "learning_rate": 1.628384991161041e-05,
      "loss": 0.3607,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.10218866169452667,
      "step": 475,
      "valid_targets_mean": 7441.6,
      "valid_targets_min": 2449
    },
    {
      "epoch": 3.0576,
      "grad_norm": 0.23840750792843468,
      "learning_rate": 1.5847569843995452e-05,
      "loss": 0.362,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09875436127185822,
      "step": 480,
      "valid_targets_mean": 6337.4,
      "valid_targets_min": 597
    },
    {
      "epoch": 3.0896,
      "grad_norm": 0.24239915747456078,
      "learning_rate": 1.5413345263560922e-05,
      "loss": 0.349,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.0895923301577568,
      "step": 485,
      "valid_targets_mean": 6322.4,
      "valid_targets_min": 839
    },
    {
      "epoch": 3.1216,
      "grad_norm": 0.21242777049065,
      "learning_rate": 1.4981391115056032e-05,
      "loss": 0.3628,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.10297973453998566,
      "step": 490,
      "valid_targets_mean": 6493.8,
      "valid_targets_min": 560
    },
    {
      "epoch": 3.1536,
      "grad_norm": 0.22847407513101906,
      "learning_rate": 1.455192121934748e-05,
      "loss": 0.361,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.08375167101621628,
      "step": 495,
      "valid_targets_mean": 5043.3,
      "valid_targets_min": 926
    },
    {
      "epoch": 3.1856,
      "grad_norm": 0.24586460965020462,
      "learning_rate": 1.4125148167576303e-05,
      "loss": 0.3576,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.08625875413417816,
      "step": 500,
      "valid_targets_mean": 5513.8,
      "valid_targets_min": 761
    },
    {
      "epoch": 3.2176,
      "grad_norm": 0.2104796732926289,
      "learning_rate": 1.3701283215923563e-05,
      "loss": 0.3576,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09489600360393524,
      "step": 505,
      "valid_targets_mean": 6466.4,
      "valid_targets_min": 610
    },
    {
      "epoch": 3.2496,
      "grad_norm": 0.23165585722917922,
      "learning_rate": 1.328053618103677e-05,
      "loss": 0.3541,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.08994518220424652,
      "step": 510,
      "valid_targets_mean": 5873.0,
      "valid_targets_min": 580
    },
    {
      "epoch": 3.2816,
      "grad_norm": 0.22725684747524982,
      "learning_rate": 1.2863115336168916e-05,
      "loss": 0.355,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.0889921486377716,
      "step": 515,
      "valid_targets_mean": 4970.9,
      "valid_targets_min": 724
    },
    {
      "epoch": 3.3136,
      "grad_norm": 0.20619667973730407,
      "learning_rate": 1.2449227308081509e-05,
      "loss": 0.3597,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.10102991759777069,
      "step": 520,
      "valid_targets_mean": 7098.2,
      "valid_targets_min": 1160
    },
    {
      "epoch": 3.3456,
      "grad_norm": 0.21917910359499615,
      "learning_rate": 1.2039076974762587e-05,
      "loss": 0.344,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.08251196891069412,
      "step": 525,
      "valid_targets_mean": 4624.6,
      "valid_targets_min": 617
    },
    {
      "epoch": 3.3776,
      "grad_norm": 0.20650291768137613,
      "learning_rate": 1.163286736401044e-05,
      "loss": 0.3469,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.08979316800832748,
      "step": 530,
      "valid_targets_mean": 6418.4,
      "valid_targets_min": 607
    },
    {
      "epoch": 3.4096,
      "grad_norm": 0.216094547753801,
      "learning_rate": 1.123079955293322e-05,
      "loss": 0.3544,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.0932360589504242,
      "step": 535,
      "valid_targets_mean": 5597.5,
      "valid_targets_min": 760
    },
    {
      "epoch": 3.4416,
      "grad_norm": 0.21281946165230944,
      "learning_rate": 1.0833072568414037e-05,
      "loss": 0.3568,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.08613704890012741,
      "step": 540,
      "valid_targets_mean": 7236.2,
      "valid_targets_min": 855
    },
    {
      "epoch": 3.4736000000000002,
      "grad_norm": 0.2246368736111037,
      "learning_rate": 1.0439883288591057e-05,
      "loss": 0.3461,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.08083291351795197,
      "step": 545,
      "valid_targets_mean": 4563.4,
      "valid_targets_min": 631
    },
    {
      "epoch": 3.5056000000000003,
      "grad_norm": 0.2331858163667793,
      "learning_rate": 1.0051426345401202e-05,
      "loss": 0.3553,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.09777645766735077,
      "step": 550,
      "valid_targets_mean": 5753.4,
      "valid_targets_min": 1062
    },
    {
      "epoch": 3.5376,
      "grad_norm": 0.21721262332976055,
      "learning_rate": 9.667894028235704e-06,
      "loss": 0.3559,
      "loss_nan_ranks": 0,
      "loss_rank_avg": 0.08036057651042938,
| "step": 555, | |
| "valid_targets_mean": 4847.0, | |
| "valid_targets_min": 787 | |
| }, | |
| { | |
| "epoch": 3.5696, | |
| "grad_norm": 0.24113837427031543, | |
| "learning_rate": 9.289476188755315e-06, | |
| "loss": 0.3615, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.10509349405765533, | |
| "step": 560, | |
| "valid_targets_mean": 6220.2, | |
| "valid_targets_min": 544 | |
| }, | |
| { | |
| "epoch": 3.6016, | |
| "grad_norm": 0.3203309140334649, | |
| "learning_rate": 8.916360146912122e-06, | |
| "loss": 0.3511, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.08810634911060333, | |
| "step": 565, | |
| "valid_targets_mean": 5272.6, | |
| "valid_targets_min": 762 | |
| }, | |
| { | |
| "epoch": 3.6336, | |
| "grad_norm": 0.21080834516529504, | |
| "learning_rate": 8.548730598224646e-06, | |
| "loss": 0.3534, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.08992394804954529, | |
| "step": 570, | |
| "valid_targets_mean": 5580.8, | |
| "valid_targets_min": 627 | |
| }, | |
| { | |
| "epoch": 3.6656, | |
| "grad_norm": 0.23999804676893177, | |
| "learning_rate": 8.186769522352053e-06, | |
| "loss": 0.3533, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.11131718754768372, | |
| "step": 575, | |
| "valid_targets_mean": 6170.8, | |
| "valid_targets_min": 1551 | |
| }, | |
| { | |
| "epoch": 3.6976, | |
| "grad_norm": 0.22485406774402675, | |
| "learning_rate": 7.830656093012714e-06, | |
| "loss": 0.3585, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.07756373286247253, | |
| "step": 580, | |
| "valid_targets_mean": 5085.8, | |
| "valid_targets_min": 965 | |
| }, | |
| { | |
| "epoch": 3.7296, | |
| "grad_norm": 0.21291429043493842, | |
| "learning_rate": 7.480566589291696e-06, | |
| "loss": 0.353, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.09584388136863708, | |
| "step": 585, | |
| "valid_targets_mean": 5766.2, | |
| "valid_targets_min": 748 | |
| }, | |
| { | |
| "epoch": 3.7616, | |
| "grad_norm": 0.20777703805518813, | |
| "learning_rate": 7.1366743083812285e-06, | |
| "loss": 0.3413, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.08502918481826782, | |
| "step": 590, | |
| "valid_targets_mean": 6093.1, | |
| "valid_targets_min": 949 | |
| }, | |
| { | |
| "epoch": 3.7936, | |
| "grad_norm": 0.20736032883078803, | |
| "learning_rate": 6.799149479797101e-06, | |
| "loss": 0.355, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.07885292172431946, | |
| "step": 595, | |
| "valid_targets_mean": 5630.1, | |
| "valid_targets_min": 1145 | |
| }, | |
| { | |
| "epoch": 3.8256, | |
| "grad_norm": 0.21498651499573626, | |
| "learning_rate": 6.4681591811137e-06, | |
| "loss": 0.3556, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.06810902059078217, | |
| "step": 600, | |
| "valid_targets_mean": 4383.6, | |
| "valid_targets_min": 673 | |
| }, | |
| { | |
| "epoch": 3.8576, | |
| "grad_norm": 0.21297617917135236, | |
| "learning_rate": 6.143867255259197e-06, | |
| "loss": 0.3508, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.07745738327503204, | |
| "step": 605, | |
| "valid_targets_mean": 4404.9, | |
| "valid_targets_min": 672 | |
| }, | |
| { | |
| "epoch": 3.8895999999999997, | |
| "grad_norm": 0.20937815919913053, | |
| "learning_rate": 5.8264342294119504e-06, | |
| "loss": 0.3656, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.10144443064928055, | |
| "step": 610, | |
| "valid_targets_mean": 6201.3, | |
| "valid_targets_min": 633 | |
| }, | |
| { | |
| "epoch": 3.9215999999999998, | |
| "grad_norm": 0.20961932864285193, | |
| "learning_rate": 5.516017235538258e-06, | |
| "loss": 0.3497, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.08958933502435684, | |
| "step": 615, | |
| "valid_targets_mean": 6676.8, | |
| "valid_targets_min": 841 | |
| }, | |
| { | |
| "epoch": 3.9536, | |
| "grad_norm": 0.2132900632103475, | |
| "learning_rate": 5.212769932610695e-06, | |
| "loss": 0.3515, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.09926494210958481, | |
| "step": 620, | |
| "valid_targets_mean": 6056.0, | |
| "valid_targets_min": 1621 | |
| }, | |
| { | |
| "epoch": 3.9856, | |
| "grad_norm": 0.21969664998278665, | |
| "learning_rate": 4.916842430545681e-06, | |
| "loss": 0.346, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.09499312937259674, | |
| "step": 625, | |
| "valid_targets_mean": 6069.6, | |
| "valid_targets_min": 996 | |
| }, | |
| { | |
| "epoch": 4.0128, | |
| "grad_norm": 0.2271730718707198, | |
| "learning_rate": 4.628381215897837e-06, | |
| "loss": 0.3445, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.08944408595561981, | |
| "step": 630, | |
| "valid_targets_mean": 5608.9, | |
| "valid_targets_min": 1098 | |
| }, | |
| { | |
| "epoch": 4.0448, | |
| "grad_norm": 0.210249845105253, | |
| "learning_rate": 4.347529079347914e-06, | |
| "loss": 0.3524, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.09703782945871353, | |
| "step": 635, | |
| "valid_targets_mean": 5320.3, | |
| "valid_targets_min": 610 | |
| }, | |
| { | |
| "epoch": 4.0768, | |
| "grad_norm": 0.21425761952188718, | |
| "learning_rate": 4.074425045020247e-06, | |
| "loss": 0.3517, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.09806178510189056, | |
| "step": 640, | |
| "valid_targets_mean": 6156.8, | |
| "valid_targets_min": 1075 | |
| }, | |
| { | |
| "epoch": 4.1088, | |
| "grad_norm": 0.19296452759142185, | |
| "learning_rate": 3.8092043016646487e-06, | |
| "loss": 0.3462, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.08750709891319275, | |
| "step": 645, | |
| "valid_targets_mean": 5939.2, | |
| "valid_targets_min": 679 | |
| }, | |
| { | |
| "epoch": 4.1408, | |
| "grad_norm": 0.20817469362950297, | |
| "learning_rate": 3.551998135736867e-06, | |
| "loss": 0.3436, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.06587532162666321, | |
| "step": 650, | |
| "valid_targets_mean": 4388.8, | |
| "valid_targets_min": 524 | |
| }, | |
| { | |
| "epoch": 4.1728, | |
| "grad_norm": 0.28860134218752004, | |
| "learning_rate": 3.3029338664107267e-06, | |
| "loss": 0.3488, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.09858383238315582, | |
| "step": 655, | |
| "valid_targets_mean": 6164.9, | |
| "valid_targets_min": 1832 | |
| }, | |
| { | |
| "epoch": 4.2048, | |
| "grad_norm": 0.2274447884512814, | |
| "learning_rate": 3.0621347825540625e-06, | |
| "loss": 0.351, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.07915853708982468, | |
| "step": 660, | |
| "valid_targets_mean": 4616.7, | |
| "valid_targets_min": 900 | |
| }, | |
| { | |
| "epoch": 4.2368, | |
| "grad_norm": 0.22441955360635754, | |
| "learning_rate": 2.8297200816997183e-06, | |
| "loss": 0.3523, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.08973248302936554, | |
| "step": 665, | |
| "valid_targets_mean": 5245.9, | |
| "valid_targets_min": 689 | |
| }, | |
| { | |
| "epoch": 4.2688, | |
| "grad_norm": 0.22165020623426437, | |
| "learning_rate": 2.605804811041803e-06, | |
| "loss": 0.3467, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.07190017402172089, | |
| "step": 670, | |
| "valid_targets_mean": 4224.2, | |
| "valid_targets_min": 633 | |
| }, | |
| { | |
| "epoch": 4.3008, | |
| "grad_norm": 0.22088071426569952, | |
| "learning_rate": 2.390499810486351e-06, | |
| "loss": 0.3362, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.1008359044790268, | |
| "step": 675, | |
| "valid_targets_mean": 6355.0, | |
| "valid_targets_min": 814 | |
| }, | |
| { | |
| "epoch": 4.3328, | |
| "grad_norm": 0.19919434220748514, | |
| "learning_rate": 2.183911657784685e-06, | |
| "loss": 0.3522, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.08908769488334656, | |
| "step": 680, | |
| "valid_targets_mean": 6297.8, | |
| "valid_targets_min": 667 | |
| }, | |
| { | |
| "epoch": 4.3648, | |
| "grad_norm": 0.22342299709303468, | |
| "learning_rate": 1.986142615776532e-06, | |
| "loss": 0.353, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.07984830439090729, | |
| "step": 685, | |
| "valid_targets_mean": 4236.8, | |
| "valid_targets_min": 810 | |
| }, | |
| { | |
| "epoch": 4.3968, | |
| "grad_norm": 0.21685501342432004, | |
| "learning_rate": 1.7972905817690644e-06, | |
| "loss": 0.3539, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.09451751410961151, | |
| "step": 690, | |
| "valid_targets_mean": 5870.2, | |
| "valid_targets_min": 562 | |
| }, | |
| { | |
| "epoch": 4.4288, | |
| "grad_norm": 0.2288912352241636, | |
| "learning_rate": 1.617449039076955e-06, | |
| "loss": 0.3533, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.10398273169994354, | |
| "step": 695, | |
| "valid_targets_mean": 5586.2, | |
| "valid_targets_min": 697 | |
| }, | |
| { | |
| "epoch": 4.4608, | |
| "grad_norm": 0.23156579703452698, | |
| "learning_rate": 1.4467070107473413e-06, | |
| "loss": 0.3557, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.07878932356834412, | |
| "step": 700, | |
| "valid_targets_mean": 4835.4, | |
| "valid_targets_min": 693 | |
| }, | |
| { | |
| "epoch": 4.4928, | |
| "grad_norm": 0.21164725825560687, | |
| "learning_rate": 1.2851490154926816e-06, | |
| "loss": 0.3466, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.09743297100067139, | |
| "step": 705, | |
| "valid_targets_mean": 6111.4, | |
| "valid_targets_min": 989 | |
| }, | |
| { | |
| "epoch": 4.5248, | |
| "grad_norm": 0.21414053041752096, | |
| "learning_rate": 1.1328550258533211e-06, | |
| "loss": 0.3533, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.10428131371736526, | |
| "step": 710, | |
| "valid_targets_mean": 6066.4, | |
| "valid_targets_min": 832 | |
| }, | |
| { | |
| "epoch": 4.5568, | |
| "grad_norm": 0.22327848120991073, | |
| "learning_rate": 9.899004286103953e-07, | |
| "loss": 0.3554, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.10424737632274628, | |
| "step": 715, | |
| "valid_targets_mean": 5980.2, | |
| "valid_targets_min": 960 | |
| }, | |
| { | |
| "epoch": 4.5888, | |
| "grad_norm": 0.19247156612874025, | |
| "learning_rate": 8.5635598746876e-07, | |
| "loss": 0.3457, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.07965318858623505, | |
| "step": 720, | |
| "valid_targets_mean": 5630.6, | |
| "valid_targets_min": 1182 | |
| }, | |
| { | |
| "epoch": 4.6208, | |
| "grad_norm": 0.2104354863953018, | |
| "learning_rate": 7.32287808028389e-07, | |
| "loss": 0.357, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.10120499134063721, | |
| "step": 725, | |
| "valid_targets_mean": 5889.6, | |
| "valid_targets_min": 1024 | |
| }, | |
| { | |
| "epoch": 4.6528, | |
| "grad_norm": 0.21319413636231446, | |
| "learning_rate": 6.177573050615327e-07, | |
| "loss": 0.3446, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.07628591358661652, | |
| "step": 730, | |
| "valid_targets_mean": 5157.9, | |
| "valid_targets_min": 627 | |
| }, | |
| { | |
| "epoch": 4.6848, | |
| "grad_norm": 0.20962176596050827, | |
| "learning_rate": 5.128211721119213e-07, | |
| "loss": 0.3467, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.09128950536251068, | |
| "step": 735, | |
| "valid_targets_mean": 5631.8, | |
| "valid_targets_min": 1019 | |
| }, | |
| { | |
| "epoch": 4.7168, | |
| "grad_norm": 0.2126742923611891, | |
| "learning_rate": 4.175313534309755e-07, | |
| "loss": 0.3478, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.08647491037845612, | |
| "step": 740, | |
| "valid_targets_mean": 5357.8, | |
| "valid_targets_min": 719 | |
| }, | |
| { | |
| "epoch": 4.7488, | |
| "grad_norm": 0.1980996932188349, | |
| "learning_rate": 3.319350182649861e-07, | |
| "loss": 0.3547, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.0895913764834404, | |
| "step": 745, | |
| "valid_targets_mean": 6320.7, | |
| "valid_targets_min": 1813 | |
| }, | |
| { | |
| "epoch": 4.7808, | |
| "grad_norm": 0.1956495852319613, | |
| "learning_rate": 2.560745375059392e-07, | |
| "loss": 0.3488, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.08508864045143127, | |
| "step": 750, | |
| "valid_targets_mean": 5826.7, | |
| "valid_targets_min": 747 | |
| }, | |
| { | |
| "epoch": 4.8128, | |
| "grad_norm": 0.20326077871311263, | |
| "learning_rate": 1.8998746271758016e-07, | |
| "loss": 0.3469, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.08511250466108322, | |
| "step": 755, | |
| "valid_targets_mean": 6488.7, | |
| "valid_targets_min": 580 | |
| }, | |
| { | |
| "epoch": 4.8448, | |
| "grad_norm": 0.21531552385542027, | |
| "learning_rate": 1.337065075470778e-07, | |
| "loss": 0.3523, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.07421720027923584, | |
| "step": 760, | |
| "valid_targets_mean": 4237.1, | |
| "valid_targets_min": 762 | |
| }, | |
| { | |
| "epoch": 4.8768, | |
| "grad_norm": 0.209762808261598, | |
| "learning_rate": 8.725953153150279e-08, | |
| "loss": 0.3592, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.09930908679962158, | |
| "step": 765, | |
| "valid_targets_mean": 5956.4, | |
| "valid_targets_min": 1315 | |
| }, | |
| { | |
| "epoch": 4.9088, | |
| "grad_norm": 0.22824690040133508, | |
| "learning_rate": 5.066952630711886e-08, | |
| "loss": 0.3473, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.09878163784742355, | |
| "step": 770, | |
| "valid_targets_mean": 5813.5, | |
| "valid_targets_min": 668 | |
| }, | |
| { | |
| "epoch": 4.9408, | |
| "grad_norm": 0.20182630283024727, | |
| "learning_rate": 2.3954604228342283e-08, | |
| "loss": 0.3501, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.10115042328834534, | |
| "step": 775, | |
| "valid_targets_mean": 6430.5, | |
| "valid_targets_min": 1007 | |
| }, | |
| { | |
| "epoch": 4.9728, | |
| "grad_norm": 0.20561067266355393, | |
| "learning_rate": 7.12798940197601e-09, | |
| "loss": 0.3637, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.0818994864821434, | |
| "step": 780, | |
| "valid_targets_mean": 4864.9, | |
| "valid_targets_min": 580 | |
| }, | |
| { | |
| "epoch": 5.0, | |
| "grad_norm": 0.38709523228908527, | |
| "learning_rate": 1.9801114115480802e-10, | |
| "loss": 0.3485, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.3410820960998535, | |
| "step": 785, | |
| "valid_targets_mean": 5863.9, | |
| "valid_targets_min": 645 | |
| }, | |
| { | |
| "epoch": 5.0, | |
| "loss_nan_ranks": 0, | |
| "loss_rank_avg": 0.3410820960998535, | |
| "step": 785, | |
| "total_flos": 1.8466654367420252e+18, | |
| "train_loss": 0.38092176382708703, | |
| "train_runtime": 23853.1903, | |
| "train_samples_per_second": 2.096, | |
| "train_steps_per_second": 0.033, | |
| "valid_targets_mean": 5863.9, | |
| "valid_targets_min": 645 | |
| } | |
| ], | |
| "logging_steps": 5, | |
| "max_steps": 785, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 5, | |
| "save_steps": 500, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": false, | |
| "should_training_stop": false | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 1.8466654367420252e+18, | |
| "train_batch_size": 1, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |