|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 1.9774193548387098, |
|
"eval_steps": 78, |
|
"global_step": 620, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0, |
|
"grad_norm": 0.12883105396714237, |
|
"learning_rate": 1e-05, |
|
"loss": 0.4154, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"eval_loss": 0.4359087646007538, |
|
"eval_runtime": 45.0668, |
|
"eval_samples_per_second": 35.503, |
|
"eval_steps_per_second": 0.555, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"grad_norm": 0.13019894725030426, |
|
"learning_rate": 2e-05, |
|
"loss": 0.4102, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"grad_norm": 0.12448027718685753, |
|
"learning_rate": 3e-05, |
|
"loss": 0.4339, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"grad_norm": 0.13617024064519495, |
|
"learning_rate": 4e-05, |
|
"loss": 0.4463, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"grad_norm": 0.1192695514188061, |
|
"learning_rate": 5e-05, |
|
"loss": 0.4257, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"grad_norm": 0.12484938518090542, |
|
"learning_rate": 6e-05, |
|
"loss": 0.4476, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"grad_norm": 0.11589635616988343, |
|
"learning_rate": 7e-05, |
|
"loss": 0.4726, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"grad_norm": 0.11326282450028814, |
|
"learning_rate": 8e-05, |
|
"loss": 0.44, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"grad_norm": 0.10349552391227962, |
|
"learning_rate": 9e-05, |
|
"loss": 0.4623, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"grad_norm": 0.10545214328006507, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4116, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"grad_norm": 0.08723759174445953, |
|
"learning_rate": 0.00011000000000000002, |
|
"loss": 0.4216, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"grad_norm": 0.08139085000714291, |
|
"learning_rate": 0.00012, |
|
"loss": 0.39, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"grad_norm": 0.09205927275648706, |
|
"learning_rate": 0.00013000000000000002, |
|
"loss": 0.434, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"grad_norm": 0.10404439147125429, |
|
"learning_rate": 0.00014, |
|
"loss": 0.4511, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"grad_norm": 0.08585041684076992, |
|
"learning_rate": 0.00015000000000000001, |
|
"loss": 0.4048, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"grad_norm": 0.09399258996254131, |
|
"learning_rate": 0.00016, |
|
"loss": 0.4322, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"grad_norm": 0.10425830227796497, |
|
"learning_rate": 0.00017, |
|
"loss": 0.4174, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"grad_norm": 0.08675512504623259, |
|
"learning_rate": 0.00018, |
|
"loss": 0.4247, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"grad_norm": 0.08901871349393159, |
|
"learning_rate": 0.00019, |
|
"loss": 0.4369, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"grad_norm": 0.08060121651209443, |
|
"learning_rate": 0.0002, |
|
"loss": 0.4746, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"grad_norm": 0.0734185701687092, |
|
"learning_rate": 0.00019999940408195878, |
|
"loss": 0.4178, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"grad_norm": 0.07838461773593017, |
|
"learning_rate": 0.00019999761633493753, |
|
"loss": 0.4321, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"grad_norm": 0.07833972271815033, |
|
"learning_rate": 0.00019999463678024317, |
|
"loss": 0.4436, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"grad_norm": 0.07548486089915235, |
|
"learning_rate": 0.0001999904654533872, |
|
"loss": 0.4709, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"grad_norm": 0.08788637574838197, |
|
"learning_rate": 0.00019998510240408496, |
|
"loss": 0.3872, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"grad_norm": 0.0812532852957508, |
|
"learning_rate": 0.0001999785476962552, |
|
"loss": 0.4058, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"grad_norm": 0.08537862859222577, |
|
"learning_rate": 0.00019997080140801932, |
|
"loss": 0.4225, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"grad_norm": 0.08106258078139493, |
|
"learning_rate": 0.00019996186363170035, |
|
"loss": 0.4357, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"grad_norm": 0.08010684638053245, |
|
"learning_rate": 0.00019995173447382193, |
|
"loss": 0.4038, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"grad_norm": 0.08709110487516528, |
|
"learning_rate": 0.00019994041405510705, |
|
"loss": 0.4724, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"grad_norm": 0.07096829165155712, |
|
"learning_rate": 0.00019992790251047656, |
|
"loss": 0.4139, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"grad_norm": 0.06850018644329266, |
|
"learning_rate": 0.00019991419998904747, |
|
"loss": 0.4596, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"grad_norm": 0.07099101390794227, |
|
"learning_rate": 0.00019989930665413147, |
|
"loss": 0.4004, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"grad_norm": 0.07941015413576936, |
|
"learning_rate": 0.00019988322268323268, |
|
"loss": 0.4299, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"grad_norm": 0.07638794258403328, |
|
"learning_rate": 0.0001998659482680456, |
|
"loss": 0.4252, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"grad_norm": 0.07585182258910264, |
|
"learning_rate": 0.00019984748361445308, |
|
"loss": 0.4149, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"grad_norm": 0.07502482333755954, |
|
"learning_rate": 0.0001998278289425234, |
|
"loss": 0.438, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"grad_norm": 0.06605988872292592, |
|
"learning_rate": 0.00019980698448650804, |
|
"loss": 0.4252, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"grad_norm": 0.07295876342245719, |
|
"learning_rate": 0.00019978495049483884, |
|
"loss": 0.4132, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"grad_norm": 0.06948118376282203, |
|
"learning_rate": 0.0001997617272301248, |
|
"loss": 0.4054, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"grad_norm": 0.07729338055807149, |
|
"learning_rate": 0.00019973731496914914, |
|
"loss": 0.3968, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"grad_norm": 0.07332342728556553, |
|
"learning_rate": 0.000199711714002866, |
|
"loss": 0.4734, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"grad_norm": 0.07828161218125397, |
|
"learning_rate": 0.00019968492463639704, |
|
"loss": 0.4479, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"grad_norm": 0.07194198164352739, |
|
"learning_rate": 0.00019965694718902745, |
|
"loss": 0.438, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"grad_norm": 0.07115652880951175, |
|
"learning_rate": 0.00019962778199420265, |
|
"loss": 0.4227, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"grad_norm": 0.0775651962190502, |
|
"learning_rate": 0.00019959742939952392, |
|
"loss": 0.4058, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"grad_norm": 0.06438123421666499, |
|
"learning_rate": 0.00019956588976674443, |
|
"loss": 0.3703, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"grad_norm": 0.07812636493444186, |
|
"learning_rate": 0.00019953316347176488, |
|
"loss": 0.4644, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"grad_norm": 0.07727134848765603, |
|
"learning_rate": 0.00019949925090462909, |
|
"loss": 0.4314, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"grad_norm": 0.07027735759113077, |
|
"learning_rate": 0.0001994641524695193, |
|
"loss": 0.4238, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"grad_norm": 0.07736957822008628, |
|
"learning_rate": 0.00019942786858475126, |
|
"loss": 0.4785, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"grad_norm": 0.0681570396521893, |
|
"learning_rate": 0.0001993903996827694, |
|
"loss": 0.4374, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"grad_norm": 0.07143343715720442, |
|
"learning_rate": 0.00019935174621014173, |
|
"loss": 0.4194, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"grad_norm": 0.06299350036763142, |
|
"learning_rate": 0.00019931190862755417, |
|
"loss": 0.4492, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"grad_norm": 0.07366865611199422, |
|
"learning_rate": 0.0001992708874098054, |
|
"loss": 0.4214, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"grad_norm": 0.06347135008905311, |
|
"learning_rate": 0.00019922868304580118, |
|
"loss": 0.43, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"grad_norm": 0.08268927152964092, |
|
"learning_rate": 0.00019918529603854825, |
|
"loss": 0.4425, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"grad_norm": 0.07034294144963513, |
|
"learning_rate": 0.0001991407269051487, |
|
"loss": 0.4105, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"grad_norm": 0.06630993618399812, |
|
"learning_rate": 0.00019909497617679348, |
|
"loss": 0.3882, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"grad_norm": 0.07226317955725141, |
|
"learning_rate": 0.00019904804439875633, |
|
"loss": 0.4266, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"grad_norm": 0.06938919184806121, |
|
"learning_rate": 0.0001989999321303871, |
|
"loss": 0.4572, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"grad_norm": 0.06492831162679569, |
|
"learning_rate": 0.0001989506399451051, |
|
"loss": 0.4318, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"grad_norm": 0.06645996011351867, |
|
"learning_rate": 0.0001989001684303925, |
|
"loss": 0.417, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"grad_norm": 0.08653889274769928, |
|
"learning_rate": 0.00019884851818778693, |
|
"loss": 0.4353, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"grad_norm": 0.07059932526364451, |
|
"learning_rate": 0.00019879568983287467, |
|
"loss": 0.4175, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"grad_norm": 0.07931101113959307, |
|
"learning_rate": 0.00019874168399528305, |
|
"loss": 0.4187, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"grad_norm": 0.07125698961933573, |
|
"learning_rate": 0.0001986865013186732, |
|
"loss": 0.4159, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"grad_norm": 0.08050044278206689, |
|
"learning_rate": 0.00019863014246073214, |
|
"loss": 0.4353, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"grad_norm": 0.07318879088432853, |
|
"learning_rate": 0.0001985726080931651, |
|
"loss": 0.4202, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"grad_norm": 0.07082720625138651, |
|
"learning_rate": 0.0001985138989016874, |
|
"loss": 0.4464, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"grad_norm": 0.06698591714547134, |
|
"learning_rate": 0.00019845401558601634, |
|
"loss": 0.4251, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"grad_norm": 0.07603667866936462, |
|
"learning_rate": 0.00019839295885986296, |
|
"loss": 0.4298, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"grad_norm": 0.06997110349403513, |
|
"learning_rate": 0.00019833072945092334, |
|
"loss": 0.4205, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"grad_norm": 0.06648812435842759, |
|
"learning_rate": 0.00019826732810086998, |
|
"loss": 0.388, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"grad_norm": 0.08031943933680023, |
|
"learning_rate": 0.00019820275556534304, |
|
"loss": 0.4296, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"grad_norm": 0.07292849758364575, |
|
"learning_rate": 0.00019813701261394136, |
|
"loss": 0.4693, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"grad_norm": 0.06207113723375016, |
|
"learning_rate": 0.00019807010003021312, |
|
"loss": 0.4117, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"grad_norm": 0.07162805849513847, |
|
"learning_rate": 0.00019800201861164664, |
|
"loss": 0.4055, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"eval_loss": 0.4264196753501892, |
|
"eval_runtime": 45.9946, |
|
"eval_samples_per_second": 34.787, |
|
"eval_steps_per_second": 0.544, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"grad_norm": 0.07162623816355349, |
|
"learning_rate": 0.00019793276916966083, |
|
"loss": 0.4203, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"grad_norm": 0.06472836232949207, |
|
"learning_rate": 0.00019786235252959553, |
|
"loss": 0.4625, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"grad_norm": 0.06372740407034722, |
|
"learning_rate": 0.00019779076953070168, |
|
"loss": 0.4073, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"grad_norm": 0.06667839761368001, |
|
"learning_rate": 0.00019771802102613127, |
|
"loss": 0.4778, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"grad_norm": 0.059574939985801316, |
|
"learning_rate": 0.00019764410788292722, |
|
"loss": 0.407, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"grad_norm": 0.07648625055922231, |
|
"learning_rate": 0.00019756903098201308, |
|
"loss": 0.4295, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"grad_norm": 0.06038606924130534, |
|
"learning_rate": 0.00019749279121818235, |
|
"loss": 0.41, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"grad_norm": 0.07670919708474994, |
|
"learning_rate": 0.00019741538950008818, |
|
"loss": 0.4139, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"grad_norm": 0.07017791304733184, |
|
"learning_rate": 0.00019733682675023207, |
|
"loss": 0.4117, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"grad_norm": 0.0604723656482711, |
|
"learning_rate": 0.0001972571039049533, |
|
"loss": 0.4323, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"grad_norm": 0.08127405172847653, |
|
"learning_rate": 0.0001971762219144174, |
|
"loss": 0.4396, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"grad_norm": 0.07061782085169709, |
|
"learning_rate": 0.0001970941817426052, |
|
"loss": 0.3927, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"grad_norm": 0.06402819168379283, |
|
"learning_rate": 0.00019701098436730106, |
|
"loss": 0.4581, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"grad_norm": 0.07968710882894958, |
|
"learning_rate": 0.00019692663078008132, |
|
"loss": 0.4587, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"grad_norm": 0.061691070941232884, |
|
"learning_rate": 0.00019684112198630244, |
|
"loss": 0.3996, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"grad_norm": 0.07482749601200543, |
|
"learning_rate": 0.00019675445900508909, |
|
"loss": 0.4824, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"grad_norm": 0.062418664967878255, |
|
"learning_rate": 0.00019666664286932198, |
|
"loss": 0.4019, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"grad_norm": 0.07330319089356814, |
|
"learning_rate": 0.00019657767462562544, |
|
"loss": 0.4083, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"grad_norm": 0.07046344898906809, |
|
"learning_rate": 0.00019648755533435518, |
|
"loss": 0.4487, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"grad_norm": 0.06780433458947788, |
|
"learning_rate": 0.00019639628606958533, |
|
"loss": 0.428, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"grad_norm": 0.07589568906196692, |
|
"learning_rate": 0.00019630386791909602, |
|
"loss": 0.4351, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"grad_norm": 0.06426439686349943, |
|
"learning_rate": 0.00019621030198436006, |
|
"loss": 0.4144, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"grad_norm": 0.06946467483247702, |
|
"learning_rate": 0.00019611558938053002, |
|
"loss": 0.42, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"grad_norm": 0.06687266974088095, |
|
"learning_rate": 0.00019601973123642492, |
|
"loss": 0.4102, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"grad_norm": 0.06476195717860939, |
|
"learning_rate": 0.0001959227286945167, |
|
"loss": 0.4307, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"grad_norm": 0.06784624564247167, |
|
"learning_rate": 0.00019582458291091663, |
|
"loss": 0.437, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"grad_norm": 0.07092126591759794, |
|
"learning_rate": 0.0001957252950553616, |
|
"loss": 0.4086, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"grad_norm": 0.07528584467610183, |
|
"learning_rate": 0.00019562486631120006, |
|
"loss": 0.4131, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"grad_norm": 0.07372322110054848, |
|
"learning_rate": 0.00019552329787537805, |
|
"loss": 0.4021, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"grad_norm": 0.06862887663783342, |
|
"learning_rate": 0.00019542059095842485, |
|
"loss": 0.3967, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"grad_norm": 0.06913146575239316, |
|
"learning_rate": 0.00019531674678443853, |
|
"loss": 0.3912, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"grad_norm": 0.06645883203424491, |
|
"learning_rate": 0.00019521176659107142, |
|
"loss": 0.4251, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"grad_norm": 0.06416682030197016, |
|
"learning_rate": 0.00019510565162951537, |
|
"loss": 0.441, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"grad_norm": 0.06876092886215074, |
|
"learning_rate": 0.00019499840316448673, |
|
"loss": 0.4391, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"grad_norm": 0.06688267191994776, |
|
"learning_rate": 0.00019489002247421148, |
|
"loss": 0.4257, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"grad_norm": 0.06324346733094179, |
|
"learning_rate": 0.00019478051085040975, |
|
"loss": 0.4528, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"grad_norm": 0.06725879673299155, |
|
"learning_rate": 0.0001946698695982806, |
|
"loss": 0.4147, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"grad_norm": 0.07241340618039839, |
|
"learning_rate": 0.00019455810003648637, |
|
"loss": 0.4527, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"grad_norm": 0.06613334861150516, |
|
"learning_rate": 0.00019444520349713704, |
|
"loss": 0.4283, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"grad_norm": 0.06840872345753035, |
|
"learning_rate": 0.0001943311813257743, |
|
"loss": 0.4131, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"grad_norm": 0.0704617274405355, |
|
"learning_rate": 0.00019421603488135557, |
|
"loss": 0.4379, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"grad_norm": 0.06787816360755813, |
|
"learning_rate": 0.00019409976553623766, |
|
"loss": 0.4177, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"grad_norm": 0.06972714992884815, |
|
"learning_rate": 0.0001939823746761606, |
|
"loss": 0.3795, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"grad_norm": 0.06779189257040956, |
|
"learning_rate": 0.00019386386370023103, |
|
"loss": 0.4323, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"grad_norm": 0.07014006424788279, |
|
"learning_rate": 0.00019374423402090553, |
|
"loss": 0.3989, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"grad_norm": 0.07661659071907612, |
|
"learning_rate": 0.00019362348706397373, |
|
"loss": 0.4422, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"grad_norm": 0.07584620511227261, |
|
"learning_rate": 0.0001935016242685415, |
|
"loss": 0.4512, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"grad_norm": 0.07525185527526344, |
|
"learning_rate": 0.00019337864708701357, |
|
"loss": 0.4298, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"grad_norm": 0.0710375411666665, |
|
"learning_rate": 0.00019325455698507638, |
|
"loss": 0.3878, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"grad_norm": 0.0673112696784915, |
|
"learning_rate": 0.00019312935544168048, |
|
"loss": 0.3705, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"grad_norm": 0.0668828668657636, |
|
"learning_rate": 0.00019300304394902313, |
|
"loss": 0.4017, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"grad_norm": 0.07246208549885681, |
|
"learning_rate": 0.00019287562401253022, |
|
"loss": 0.3806, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"grad_norm": 0.06437628312475605, |
|
"learning_rate": 0.0001927470971508386, |
|
"loss": 0.4033, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"grad_norm": 0.062491146220490415, |
|
"learning_rate": 0.00019261746489577765, |
|
"loss": 0.4136, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"grad_norm": 0.06460575253319188, |
|
"learning_rate": 0.0001924867287923515, |
|
"loss": 0.4285, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"grad_norm": 0.06783309737426804, |
|
"learning_rate": 0.0001923548903987201, |
|
"loss": 0.4211, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"grad_norm": 0.07213052803073557, |
|
"learning_rate": 0.00019222195128618106, |
|
"loss": 0.4416, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"grad_norm": 0.0677809803143048, |
|
"learning_rate": 0.00019208791303915063, |
|
"loss": 0.4384, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"grad_norm": 0.07088268081293747, |
|
"learning_rate": 0.0001919527772551451, |
|
"loss": 0.4099, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"grad_norm": 0.06916606268106777, |
|
"learning_rate": 0.0001918165455447614, |
|
"loss": 0.4299, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"grad_norm": 0.07309072410021103, |
|
"learning_rate": 0.00019167921953165825, |
|
"loss": 0.4122, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"grad_norm": 0.07900186130531832, |
|
"learning_rate": 0.00019154080085253666, |
|
"loss": 0.4599, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"grad_norm": 0.06570679783619483, |
|
"learning_rate": 0.00019140129115712034, |
|
"loss": 0.3662, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"grad_norm": 0.08015148942504076, |
|
"learning_rate": 0.0001912606921081362, |
|
"loss": 0.4002, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"grad_norm": 0.06917459897842565, |
|
"learning_rate": 0.00019111900538129443, |
|
"loss": 0.4339, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"grad_norm": 0.07295609866871429, |
|
"learning_rate": 0.0001909762326652686, |
|
"loss": 0.4252, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"grad_norm": 0.0808022860773497, |
|
"learning_rate": 0.0001908323756616754, |
|
"loss": 0.4424, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"grad_norm": 0.06342264967015554, |
|
"learning_rate": 0.00019068743608505455, |
|
"loss": 0.4039, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"grad_norm": 0.06858427993056751, |
|
"learning_rate": 0.0001905414156628482, |
|
"loss": 0.4157, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"grad_norm": 0.06968670755842428, |
|
"learning_rate": 0.00019039431613538047, |
|
"loss": 0.413, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"grad_norm": 0.07867845357193032, |
|
"learning_rate": 0.00019024613925583652, |
|
"loss": 0.4086, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"grad_norm": 0.0737734252643262, |
|
"learning_rate": 0.0001900968867902419, |
|
"loss": 0.42, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"grad_norm": 0.06795299857235934, |
|
"learning_rate": 0.0001899465605174414, |
|
"loss": 0.4186, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"grad_norm": 0.07145917531667596, |
|
"learning_rate": 0.00018979516222907775, |
|
"loss": 0.4451, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"grad_norm": 0.07680401394397088, |
|
"learning_rate": 0.00018964269372957038, |
|
"loss": 0.4206, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"grad_norm": 0.06594889233352544, |
|
"learning_rate": 0.00018948915683609388, |
|
"loss": 0.4234, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"grad_norm": 0.07340962027871342, |
|
"learning_rate": 0.00018933455337855632, |
|
"loss": 0.4543, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"grad_norm": 0.07514208916494701, |
|
"learning_rate": 0.00018917888519957754, |
|
"loss": 0.4281, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"eval_loss": 0.42719101905822754, |
|
"eval_runtime": 31.4768, |
|
"eval_samples_per_second": 50.831, |
|
"eval_steps_per_second": 0.794, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"grad_norm": 0.06549409287056523, |
|
"learning_rate": 0.000189022154154467, |
|
"loss": 0.4324, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"grad_norm": 0.0682462164394273, |
|
"learning_rate": 0.00018886436211120193, |
|
"loss": 0.433, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"grad_norm": 0.08588011407859532, |
|
"learning_rate": 0.00018870551095040477, |
|
"loss": 0.4304, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"grad_norm": 0.06987370558027692, |
|
"learning_rate": 0.000188545602565321, |
|
"loss": 0.4482, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"grad_norm": 0.06240247030946795, |
|
"learning_rate": 0.00018838463886179644, |
|
"loss": 0.3907, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"grad_norm": 0.06891290467646147, |
|
"learning_rate": 0.00018822262175825462, |
|
"loss": 0.4267, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"grad_norm": 0.06542585845674868, |
|
"learning_rate": 0.0001880595531856738, |
|
"loss": 0.4284, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"grad_norm": 0.0629730206122836, |
|
"learning_rate": 0.00018789543508756408, |
|
"loss": 0.4003, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"grad_norm": 0.06871544541236552, |
|
"learning_rate": 0.0001877302694199442, |
|
"loss": 0.4216, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"grad_norm": 0.07149666774302382, |
|
"learning_rate": 0.00018756405815131813, |
|
"loss": 0.3836, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"grad_norm": 0.06997442761322878, |
|
"learning_rate": 0.0001873968032626518, |
|
"loss": 0.4188, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"grad_norm": 0.084225812122246, |
|
"learning_rate": 0.00018722850674734927, |
|
"loss": 0.4298, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"grad_norm": 0.06761216783540702, |
|
"learning_rate": 0.00018705917061122916, |
|
"loss": 0.361, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"grad_norm": 0.07234030808073647, |
|
"learning_rate": 0.00018688879687250067, |
|
"loss": 0.4097, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"grad_norm": 0.06626071479471723, |
|
"learning_rate": 0.00018671738756173944, |
|
"loss": 0.4082, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"grad_norm": 0.06855387266060617, |
|
"learning_rate": 0.0001865449447218635, |
|
"loss": 0.4192, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"grad_norm": 0.06828142558387336, |
|
"learning_rate": 0.00018637147040810885, |
|
"loss": 0.4425, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"grad_norm": 0.06573023022659233, |
|
"learning_rate": 0.00018619696668800492, |
|
"loss": 0.436, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"grad_norm": 0.07607545925103458, |
|
"learning_rate": 0.0001860214356413501, |
|
"loss": 0.4538, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"grad_norm": 0.06694123802468883, |
|
"learning_rate": 0.00018584487936018661, |
|
"loss": 0.4172, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"grad_norm": 0.06815569028292222, |
|
"learning_rate": 0.00018566729994877603, |
|
"loss": 0.4514, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"grad_norm": 0.10184702083851915, |
|
"learning_rate": 0.0001854886995235738, |
|
"loss": 0.4442, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"grad_norm": 0.07024340413477916, |
|
"learning_rate": 0.00018530908021320425, |
|
"loss": 0.4424, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"grad_norm": 0.07366377295970097, |
|
"learning_rate": 0.00018512844415843514, |
|
"loss": 0.4149, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"grad_norm": 0.07181140519657206, |
|
"learning_rate": 0.0001849467935121521, |
|
"loss": 0.4355, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"grad_norm": 0.06898359648739377, |
|
"learning_rate": 0.00018476413043933313, |
|
"loss": 0.4275, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"grad_norm": 0.06680808433374633, |
|
"learning_rate": 0.00018458045711702264, |
|
"loss": 0.4172, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"grad_norm": 0.06630828587681081, |
|
"learning_rate": 0.00018439577573430555, |
|
"loss": 0.3927, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"grad_norm": 0.06251103706062068, |
|
"learning_rate": 0.00018421008849228118, |
|
"loss": 0.4255, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"grad_norm": 0.06270748210458083, |
|
"learning_rate": 0.00018402339760403713, |
|
"loss": 0.3854, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"grad_norm": 0.06864674994414362, |
|
"learning_rate": 0.00018383570529462273, |
|
"loss": 0.4186, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"grad_norm": 0.06636160864684977, |
|
"learning_rate": 0.00018364701380102266, |
|
"loss": 0.3948, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"grad_norm": 0.06783696558409527, |
|
"learning_rate": 0.00018345732537213027, |
|
"loss": 0.4157, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"grad_norm": 0.06273783195633519, |
|
"learning_rate": 0.00018326664226872065, |
|
"loss": 0.42, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"grad_norm": 0.0635460399988344, |
|
"learning_rate": 0.00018307496676342385, |
|
"loss": 0.3863, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"grad_norm": 0.06892459709466887, |
|
"learning_rate": 0.00018288230114069765, |
|
"loss": 0.3972, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"grad_norm": 0.06554573002953047, |
|
"learning_rate": 0.00018268864769680054, |
|
"loss": 0.4235, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"grad_norm": 0.07841135830884972, |
|
"learning_rate": 0.0001824940087397641, |
|
"loss": 0.4345, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"grad_norm": 0.07099852734871953, |
|
"learning_rate": 0.00018229838658936564, |
|
"loss": 0.4582, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"grad_norm": 0.07334719442008945, |
|
"learning_rate": 0.00018210178357710058, |
|
"loss": 0.4247, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"grad_norm": 0.06401877277457285, |
|
"learning_rate": 0.0001819042020461545, |
|
"loss": 0.4233, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"grad_norm": 0.05794553222990041, |
|
"learning_rate": 0.0001817056443513754, |
|
"loss": 0.3993, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"grad_norm": 0.06734254228013473, |
|
"learning_rate": 0.00018150611285924556, |
|
"loss": 0.4501, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"grad_norm": 0.061167398242663894, |
|
"learning_rate": 0.00018130560994785325, |
|
"loss": 0.4791, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"grad_norm": 0.06368439834981215, |
|
"learning_rate": 0.00018110413800686456, |
|
"loss": 0.4031, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"grad_norm": 0.06396454527108245, |
|
"learning_rate": 0.00018090169943749476, |
|
"loss": 0.4333, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"grad_norm": 0.07036915456962346, |
|
"learning_rate": 0.00018069829665247976, |
|
"loss": 0.4089, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"grad_norm": 0.07566355163015755, |
|
"learning_rate": 0.00018049393207604733, |
|
"loss": 0.4125, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"grad_norm": 0.07110213568246515, |
|
"learning_rate": 0.00018028860814388827, |
|
"loss": 0.4423, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"grad_norm": 0.06609288074156261, |
|
"learning_rate": 0.00018008232730312723, |
|
"loss": 0.3893, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"grad_norm": 0.06540430345204035, |
|
"learning_rate": 0.00017987509201229378, |
|
"loss": 0.3645, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"grad_norm": 0.06607405584993503, |
|
"learning_rate": 0.00017966690474129285, |
|
"loss": 0.4117, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"grad_norm": 0.0662155287622059, |
|
"learning_rate": 0.00017945776797137543, |
|
"loss": 0.4269, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"grad_norm": 0.0617368116387909, |
|
"learning_rate": 0.00017924768419510904, |
|
"loss": 0.3799, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"grad_norm": 0.0627987397717418, |
|
"learning_rate": 0.00017903665591634794, |
|
"loss": 0.371, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"grad_norm": 0.06749499008783807, |
|
"learning_rate": 0.00017882468565020326, |
|
"loss": 0.3992, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"grad_norm": 0.06631945098396477, |
|
"learning_rate": 0.00017861177592301317, |
|
"loss": 0.4095, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"grad_norm": 0.06685762710051685, |
|
"learning_rate": 0.00017839792927231254, |
|
"loss": 0.4349, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"grad_norm": 0.07502266024951904, |
|
"learning_rate": 0.000178183148246803, |
|
"loss": 0.4197, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"grad_norm": 0.08306628299652864, |
|
"learning_rate": 0.00017796743540632223, |
|
"loss": 0.4286, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"grad_norm": 0.06166158787464328, |
|
"learning_rate": 0.0001777507933218138, |
|
"loss": 0.3879, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"grad_norm": 0.06410797862871172, |
|
"learning_rate": 0.00017753322457529614, |
|
"loss": 0.4082, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"grad_norm": 0.07279590060361223, |
|
"learning_rate": 0.00017731473175983212, |
|
"loss": 0.4491, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"grad_norm": 0.06330655145519701, |
|
"learning_rate": 0.00017709531747949796, |
|
"loss": 0.3696, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"grad_norm": 0.06265173315476923, |
|
"learning_rate": 0.00017687498434935223, |
|
"loss": 0.4107, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 0.06448654912056977, |
|
"learning_rate": 0.00017665373499540463, |
|
"loss": 0.4038, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 0.08291630469395751, |
|
"learning_rate": 0.00017643157205458483, |
|
"loss": 0.422, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 0.06632609649023061, |
|
"learning_rate": 0.00017620849817471092, |
|
"loss": 0.4271, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"grad_norm": 0.07513194076660681, |
|
"learning_rate": 0.0001759845160144579, |
|
"loss": 0.4529, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"grad_norm": 0.0687911159938889, |
|
"learning_rate": 0.00017575962824332596, |
|
"loss": 0.3814, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"grad_norm": 0.06806851223680013, |
|
"learning_rate": 0.00017553383754160865, |
|
"loss": 0.4038, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"grad_norm": 0.060354615878050986, |
|
"learning_rate": 0.00017530714660036112, |
|
"loss": 0.4139, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"grad_norm": 0.06771319705049085, |
|
"learning_rate": 0.00017507955812136775, |
|
"loss": 0.4178, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"grad_norm": 0.06554465726049169, |
|
"learning_rate": 0.00017485107481711012, |
|
"loss": 0.4008, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"grad_norm": 0.06628707050461834, |
|
"learning_rate": 0.00017462169941073475, |
|
"loss": 0.3704, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"grad_norm": 0.0704660527100064, |
|
"learning_rate": 0.0001743914346360205, |
|
"loss": 0.4096, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"grad_norm": 0.06714268412305492, |
|
"learning_rate": 0.00017416028323734598, |
|
"loss": 0.4047, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"grad_norm": 0.06708061829484278, |
|
"learning_rate": 0.00017392824796965702, |
|
"loss": 0.3995, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"eval_loss": 0.42397916316986084, |
|
"eval_runtime": 31.4123, |
|
"eval_samples_per_second": 50.936, |
|
"eval_steps_per_second": 0.796, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"grad_norm": 0.0692311421790837, |
|
"learning_rate": 0.00017369533159843369, |
|
"loss": 0.4493, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"grad_norm": 0.07010639301506344, |
|
"learning_rate": 0.00017346153689965727, |
|
"loss": 0.4134, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"grad_norm": 0.0657681493331333, |
|
"learning_rate": 0.00017322686665977737, |
|
"loss": 0.3958, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"grad_norm": 0.06417143561447558, |
|
"learning_rate": 0.00017299132367567857, |
|
"loss": 0.4104, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"grad_norm": 0.08433041973271442, |
|
"learning_rate": 0.00017275491075464716, |
|
"loss": 0.4417, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"grad_norm": 0.06309959644633113, |
|
"learning_rate": 0.00017251763071433765, |
|
"loss": 0.4027, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"grad_norm": 0.06840734501694612, |
|
"learning_rate": 0.00017227948638273916, |
|
"loss": 0.3961, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"grad_norm": 0.06502571584749511, |
|
"learning_rate": 0.00017204048059814175, |
|
"loss": 0.3961, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"grad_norm": 0.06645154810491374, |
|
"learning_rate": 0.00017180061620910263, |
|
"loss": 0.4034, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"grad_norm": 0.07169067898808924, |
|
"learning_rate": 0.00017155989607441213, |
|
"loss": 0.4578, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"grad_norm": 0.06951259218049313, |
|
"learning_rate": 0.00017131832306305965, |
|
"loss": 0.4222, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"grad_norm": 0.06936292391783441, |
|
"learning_rate": 0.0001710759000541995, |
|
"loss": 0.4412, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 0.0641932957849969, |
|
"learning_rate": 0.00017083262993711662, |
|
"loss": 0.4122, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 0.06519962544514646, |
|
"learning_rate": 0.00017058851561119198, |
|
"loss": 0.3949, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 0.06139777990217894, |
|
"learning_rate": 0.00017034355998586827, |
|
"loss": 0.4533, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"grad_norm": 0.06382389409436602, |
|
"learning_rate": 0.00017009776598061495, |
|
"loss": 0.4225, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"grad_norm": 0.0745112215377997, |
|
"learning_rate": 0.00016985113652489374, |
|
"loss": 0.4172, |
|
"step": 251 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"grad_norm": 0.06202295907342136, |
|
"learning_rate": 0.00016960367455812336, |
|
"loss": 0.4156, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"grad_norm": 0.0601321734977661, |
|
"learning_rate": 0.00016935538302964494, |
|
"loss": 0.4094, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"grad_norm": 0.07237506968634055, |
|
"learning_rate": 0.00016910626489868649, |
|
"loss": 0.4254, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"grad_norm": 0.06626388773117148, |
|
"learning_rate": 0.0001688563231343277, |
|
"loss": 0.4277, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"grad_norm": 0.06262183979430723, |
|
"learning_rate": 0.0001686055607154648, |
|
"loss": 0.4143, |
|
"step": 256 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"grad_norm": 0.07788401630185697, |
|
"learning_rate": 0.00016835398063077474, |
|
"loss": 0.4328, |
|
"step": 257 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"grad_norm": 0.06846682414537354, |
|
"learning_rate": 0.00016810158587867973, |
|
"loss": 0.418, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"grad_norm": 0.0689288634381005, |
|
"learning_rate": 0.00016784837946731148, |
|
"loss": 0.4209, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"grad_norm": 0.06820042044945746, |
|
"learning_rate": 0.00016759436441447545, |
|
"loss": 0.3902, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"grad_norm": 0.07119552472188734, |
|
"learning_rate": 0.0001673395437476146, |
|
"loss": 0.4306, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"grad_norm": 0.06555865871255981, |
|
"learning_rate": 0.00016708392050377363, |
|
"loss": 0.3975, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"grad_norm": 0.0712809166108178, |
|
"learning_rate": 0.0001668274977295626, |
|
"loss": 0.3934, |
|
"step": 263 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"grad_norm": 0.07147432449436239, |
|
"learning_rate": 0.00016657027848112062, |
|
"loss": 0.3742, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"grad_norm": 0.06455739935278262, |
|
"learning_rate": 0.00016631226582407952, |
|
"loss": 0.4168, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"grad_norm": 0.06793873318686483, |
|
"learning_rate": 0.00016605346283352727, |
|
"loss": 0.4116, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"grad_norm": 0.058261601463606476, |
|
"learning_rate": 0.00016579387259397127, |
|
"loss": 0.3845, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"grad_norm": 0.062017087628894275, |
|
"learning_rate": 0.00016553349819930165, |
|
"loss": 0.3933, |
|
"step": 268 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"grad_norm": 0.059279362535743535, |
|
"learning_rate": 0.00016527234275275445, |
|
"loss": 0.3607, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"grad_norm": 0.0615298158124674, |
|
"learning_rate": 0.00016501040936687443, |
|
"loss": 0.3978, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"grad_norm": 0.05968002667313829, |
|
"learning_rate": 0.00016474770116347824, |
|
"loss": 0.4241, |
|
"step": 271 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"grad_norm": 0.06836235857310884, |
|
"learning_rate": 0.00016448422127361706, |
|
"loss": 0.4213, |
|
"step": 272 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"grad_norm": 0.06328807134715098, |
|
"learning_rate": 0.00016421997283753927, |
|
"loss": 0.3835, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"grad_norm": 0.07185050583270135, |
|
"learning_rate": 0.00016395495900465304, |
|
"loss": 0.4397, |
|
"step": 274 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"grad_norm": 0.06731895516948408, |
|
"learning_rate": 0.00016368918293348892, |
|
"loss": 0.4266, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"grad_norm": 0.06154277474915469, |
|
"learning_rate": 0.000163422647791662, |
|
"loss": 0.405, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"grad_norm": 0.060450974009086104, |
|
"learning_rate": 0.00016315535675583424, |
|
"loss": 0.4149, |
|
"step": 277 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"grad_norm": 0.0607120928467259, |
|
"learning_rate": 0.00016288731301167668, |
|
"loss": 0.42, |
|
"step": 278 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"grad_norm": 0.06605542298126882, |
|
"learning_rate": 0.00016261851975383137, |
|
"loss": 0.4092, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"grad_norm": 0.06717022241182037, |
|
"learning_rate": 0.00016234898018587337, |
|
"loss": 0.4373, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"grad_norm": 0.06027581892234991, |
|
"learning_rate": 0.00016207869752027246, |
|
"loss": 0.3889, |
|
"step": 281 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"grad_norm": 0.06756360223208042, |
|
"learning_rate": 0.00016180767497835503, |
|
"loss": 0.411, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"grad_norm": 0.062275969305366226, |
|
"learning_rate": 0.00016153591579026546, |
|
"loss": 0.4146, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"grad_norm": 0.06405007529726114, |
|
"learning_rate": 0.00016126342319492784, |
|
"loss": 0.3982, |
|
"step": 284 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"grad_norm": 0.06859548867939594, |
|
"learning_rate": 0.00016099020044000727, |
|
"loss": 0.3841, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"grad_norm": 0.06040247687681776, |
|
"learning_rate": 0.00016071625078187114, |
|
"loss": 0.4261, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"grad_norm": 0.0705993351573918, |
|
"learning_rate": 0.00016044157748555026, |
|
"loss": 0.4264, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"grad_norm": 0.0728951497630979, |
|
"learning_rate": 0.00016016618382470012, |
|
"loss": 0.4225, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"grad_norm": 0.058326948226576664, |
|
"learning_rate": 0.00015989007308156173, |
|
"loss": 0.3817, |
|
"step": 289 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"grad_norm": 0.06467973723572644, |
|
"learning_rate": 0.00015961324854692254, |
|
"loss": 0.4132, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"grad_norm": 0.0650952066772671, |
|
"learning_rate": 0.0001593357135200773, |
|
"loss": 0.4185, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"grad_norm": 0.06768263235822244, |
|
"learning_rate": 0.0001590574713087885, |
|
"loss": 0.4362, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"grad_norm": 0.05967945902895899, |
|
"learning_rate": 0.00015877852522924732, |
|
"loss": 0.3907, |
|
"step": 293 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"grad_norm": 0.05879444877054761, |
|
"learning_rate": 0.00015849887860603374, |
|
"loss": 0.398, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"grad_norm": 0.06052224942840143, |
|
"learning_rate": 0.00015821853477207708, |
|
"loss": 0.3788, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"grad_norm": 0.058292067258336945, |
|
"learning_rate": 0.00015793749706861636, |
|
"loss": 0.3658, |
|
"step": 296 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 0.06616138091865216, |
|
"learning_rate": 0.00015765576884516031, |
|
"loss": 0.4136, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 0.06466816301584516, |
|
"learning_rate": 0.00015737335345944757, |
|
"loss": 0.3972, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 0.06889146736527235, |
|
"learning_rate": 0.00015709025427740661, |
|
"loss": 0.4038, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"grad_norm": 0.07028354273665399, |
|
"learning_rate": 0.00015680647467311557, |
|
"loss": 0.4375, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"grad_norm": 0.06913117274097347, |
|
"learning_rate": 0.00015652201802876227, |
|
"loss": 0.4067, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"grad_norm": 0.06933114545676364, |
|
"learning_rate": 0.00015623688773460357, |
|
"loss": 0.3561, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"grad_norm": 0.0732735312469693, |
|
"learning_rate": 0.0001559510871889252, |
|
"loss": 0.4192, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"grad_norm": 0.06095351289998777, |
|
"learning_rate": 0.00015566461979800122, |
|
"loss": 0.4002, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"grad_norm": 0.06279122778820988, |
|
"learning_rate": 0.0001553774889760533, |
|
"loss": 0.4052, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"grad_norm": 0.06346871620982147, |
|
"learning_rate": 0.00015508969814521025, |
|
"loss": 0.4115, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"grad_norm": 0.06265655296923098, |
|
"learning_rate": 0.00015480125073546704, |
|
"loss": 0.4103, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"grad_norm": 0.06650336261140556, |
|
"learning_rate": 0.00015451215018464387, |
|
"loss": 0.4208, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"grad_norm": 0.062225515787237784, |
|
"learning_rate": 0.00015422239993834552, |
|
"loss": 0.3965, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"grad_norm": 0.0705406540443855, |
|
"learning_rate": 0.00015393200344991995, |
|
"loss": 0.4105, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"grad_norm": 0.06075229905592138, |
|
"learning_rate": 0.00015364096418041726, |
|
"loss": 0.3781, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"grad_norm": 0.06703452615801342, |
|
"learning_rate": 0.0001533492855985485, |
|
"loss": 0.3828, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"eval_loss": 0.42175912857055664, |
|
"eval_runtime": 31.52, |
|
"eval_samples_per_second": 50.761, |
|
"eval_steps_per_second": 0.793, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"grad_norm": 0.07504940119607073, |
|
"learning_rate": 0.00015305697118064428, |
|
"loss": 0.4538, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"grad_norm": 0.06730008921576576, |
|
"learning_rate": 0.0001527640244106133, |
|
"loss": 0.4308, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"grad_norm": 0.06514147722515332, |
|
"learning_rate": 0.0001524704487799008, |
|
"loss": 0.4237, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"grad_norm": 0.07257799027926257, |
|
"learning_rate": 0.00015217624778744718, |
|
"loss": 0.4121, |
|
"step": 316 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"grad_norm": 0.061674598401760324, |
|
"learning_rate": 0.00015188142493964595, |
|
"loss": 0.4026, |
|
"step": 317 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"grad_norm": 0.06932398196022131, |
|
"learning_rate": 0.00015158598375030217, |
|
"loss": 0.3375, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"grad_norm": 0.06215611676822286, |
|
"learning_rate": 0.00015128992774059063, |
|
"loss": 0.3474, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"grad_norm": 0.07308539456115362, |
|
"learning_rate": 0.0001509932604390136, |
|
"loss": 0.3743, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"grad_norm": 0.08101673846459073, |
|
"learning_rate": 0.00015069598538135906, |
|
"loss": 0.3923, |
|
"step": 321 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"grad_norm": 0.0781854723300375, |
|
"learning_rate": 0.0001503981061106584, |
|
"loss": 0.3548, |
|
"step": 322 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"grad_norm": 0.07970059476577994, |
|
"learning_rate": 0.00015009962617714424, |
|
"loss": 0.3765, |
|
"step": 323 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"grad_norm": 0.07371660759085928, |
|
"learning_rate": 0.00014980054913820814, |
|
"loss": 0.3726, |
|
"step": 324 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"grad_norm": 0.0785483838771728, |
|
"learning_rate": 0.00014950087855835815, |
|
"loss": 0.3604, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"grad_norm": 0.07692841651155574, |
|
"learning_rate": 0.00014920061800917638, |
|
"loss": 0.343, |
|
"step": 326 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"grad_norm": 0.08648668987773721, |
|
"learning_rate": 0.0001488997710692764, |
|
"loss": 0.3902, |
|
"step": 327 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"grad_norm": 0.0865161229608944, |
|
"learning_rate": 0.0001485983413242606, |
|
"loss": 0.3469, |
|
"step": 328 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"grad_norm": 0.08794754622273447, |
|
"learning_rate": 0.00014829633236667747, |
|
"loss": 0.3763, |
|
"step": 329 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"grad_norm": 0.08251409399865628, |
|
"learning_rate": 0.00014799374779597867, |
|
"loss": 0.3817, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"grad_norm": 0.08397228131714292, |
|
"learning_rate": 0.0001476905912184763, |
|
"loss": 0.4025, |
|
"step": 331 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"grad_norm": 0.08505107767597189, |
|
"learning_rate": 0.00014738686624729986, |
|
"loss": 0.4011, |
|
"step": 332 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"grad_norm": 0.07659854660730629, |
|
"learning_rate": 0.0001470825765023532, |
|
"loss": 0.3865, |
|
"step": 333 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"grad_norm": 0.07772072859171653, |
|
"learning_rate": 0.0001467777256102712, |
|
"loss": 0.3605, |
|
"step": 334 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"grad_norm": 0.0836194254090897, |
|
"learning_rate": 0.00014647231720437686, |
|
"loss": 0.3526, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"grad_norm": 0.08020420151550436, |
|
"learning_rate": 0.00014616635492463776, |
|
"loss": 0.3848, |
|
"step": 336 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"grad_norm": 0.0823616317274472, |
|
"learning_rate": 0.00014585984241762267, |
|
"loss": 0.3737, |
|
"step": 337 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"grad_norm": 0.08804564315641605, |
|
"learning_rate": 0.00014555278333645833, |
|
"loss": 0.3738, |
|
"step": 338 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"grad_norm": 0.07907938057510025, |
|
"learning_rate": 0.00014524518134078563, |
|
"loss": 0.3472, |
|
"step": 339 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"grad_norm": 0.07856882508739876, |
|
"learning_rate": 0.00014493704009671613, |
|
"loss": 0.3782, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"grad_norm": 0.07868957075703749, |
|
"learning_rate": 0.0001446283632767884, |
|
"loss": 0.3575, |
|
"step": 341 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"grad_norm": 0.08071752593754973, |
|
"learning_rate": 0.00014431915455992414, |
|
"loss": 0.3503, |
|
"step": 342 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"grad_norm": 0.07598399871189387, |
|
"learning_rate": 0.0001440094176313844, |
|
"loss": 0.3281, |
|
"step": 343 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"grad_norm": 0.07681293813372528, |
|
"learning_rate": 0.00014369915618272567, |
|
"loss": 0.4022, |
|
"step": 344 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"grad_norm": 0.08145783028585485, |
|
"learning_rate": 0.00014338837391175582, |
|
"loss": 0.391, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"grad_norm": 0.07557613065220387, |
|
"learning_rate": 0.00014307707452249012, |
|
"loss": 0.3687, |
|
"step": 346 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"grad_norm": 0.08185926073157018, |
|
"learning_rate": 0.000142765261725107, |
|
"loss": 0.3988, |
|
"step": 347 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"grad_norm": 0.07738407882567049, |
|
"learning_rate": 0.0001424529392359039, |
|
"loss": 0.3866, |
|
"step": 348 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"grad_norm": 0.08703938835229595, |
|
"learning_rate": 0.00014214011077725292, |
|
"loss": 0.3447, |
|
"step": 349 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"grad_norm": 0.07135637580394055, |
|
"learning_rate": 0.0001418267800775565, |
|
"loss": 0.3279, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"grad_norm": 0.08051955565921015, |
|
"learning_rate": 0.00014151295087120306, |
|
"loss": 0.3599, |
|
"step": 351 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"grad_norm": 0.07193112928698735, |
|
"learning_rate": 0.00014119862689852223, |
|
"loss": 0.3481, |
|
"step": 352 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"grad_norm": 0.07479779956270906, |
|
"learning_rate": 0.00014088381190574051, |
|
"loss": 0.3745, |
|
"step": 353 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"grad_norm": 0.0751825668630308, |
|
"learning_rate": 0.0001405685096449367, |
|
"loss": 0.3685, |
|
"step": 354 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"grad_norm": 0.07642737965285674, |
|
"learning_rate": 0.00014025272387399674, |
|
"loss": 0.376, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"grad_norm": 0.0798422899504642, |
|
"learning_rate": 0.00013993645835656953, |
|
"loss": 0.3523, |
|
"step": 356 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"grad_norm": 0.08064560321068621, |
|
"learning_rate": 0.00013961971686202163, |
|
"loss": 0.4053, |
|
"step": 357 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"grad_norm": 0.07569549836084653, |
|
"learning_rate": 0.00013930250316539238, |
|
"loss": 0.3612, |
|
"step": 358 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"grad_norm": 0.07994677074783292, |
|
"learning_rate": 0.0001389848210473491, |
|
"loss": 0.3482, |
|
"step": 359 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"grad_norm": 0.08019343947198912, |
|
"learning_rate": 0.0001386666742941419, |
|
"loss": 0.3372, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"grad_norm": 0.087138980594316, |
|
"learning_rate": 0.0001383480666975586, |
|
"loss": 0.3655, |
|
"step": 361 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"grad_norm": 0.07628509775359035, |
|
"learning_rate": 0.00013802900205487948, |
|
"loss": 0.3474, |
|
"step": 362 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"grad_norm": 0.08306725000867372, |
|
"learning_rate": 0.00013770948416883205, |
|
"loss": 0.3585, |
|
"step": 363 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"grad_norm": 0.07457563276380513, |
|
"learning_rate": 0.00013738951684754585, |
|
"loss": 0.3193, |
|
"step": 364 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"grad_norm": 0.08622596022590924, |
|
"learning_rate": 0.00013706910390450677, |
|
"loss": 0.3805, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"grad_norm": 0.07962595816519315, |
|
"learning_rate": 0.00013674824915851192, |
|
"loss": 0.3612, |
|
"step": 366 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"grad_norm": 0.07753518500830336, |
|
"learning_rate": 0.000136426956433624, |
|
"loss": 0.4072, |
|
"step": 367 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"grad_norm": 0.07575904715111116, |
|
"learning_rate": 0.0001361052295591255, |
|
"loss": 0.3726, |
|
"step": 368 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"grad_norm": 0.07232655392409623, |
|
"learning_rate": 0.00013578307236947348, |
|
"loss": 0.3487, |
|
"step": 369 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"grad_norm": 0.08315953805458862, |
|
"learning_rate": 0.00013546048870425356, |
|
"loss": 0.4122, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"grad_norm": 0.07725321318543174, |
|
"learning_rate": 0.0001351374824081343, |
|
"loss": 0.3994, |
|
"step": 371 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"grad_norm": 0.06801941671303215, |
|
"learning_rate": 0.00013481405733082116, |
|
"loss": 0.3572, |
|
"step": 372 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"grad_norm": 0.07601041676987139, |
|
"learning_rate": 0.00013449021732701106, |
|
"loss": 0.3623, |
|
"step": 373 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"grad_norm": 0.08156069545740995, |
|
"learning_rate": 0.00013416596625634593, |
|
"loss": 0.3668, |
|
"step": 374 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"grad_norm": 0.0811687981854912, |
|
"learning_rate": 0.00013384130798336705, |
|
"loss": 0.3733, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"grad_norm": 0.07837688253315354, |
|
"learning_rate": 0.00013351624637746886, |
|
"loss": 0.3444, |
|
"step": 376 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"grad_norm": 0.07841335962415118, |
|
"learning_rate": 0.00013319078531285285, |
|
"loss": 0.3553, |
|
"step": 377 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"grad_norm": 0.0885143980175204, |
|
"learning_rate": 0.00013286492866848142, |
|
"loss": 0.3542, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"grad_norm": 0.08695366578649416, |
|
"learning_rate": 0.00013253868032803173, |
|
"loss": 0.3571, |
|
"step": 379 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"grad_norm": 0.09224273069746307, |
|
"learning_rate": 0.00013221204417984908, |
|
"loss": 0.3701, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"grad_norm": 0.08465395748798342, |
|
"learning_rate": 0.000131885024116901, |
|
"loss": 0.3649, |
|
"step": 381 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"grad_norm": 0.08136521455604329, |
|
"learning_rate": 0.00013155762403673063, |
|
"loss": 0.3635, |
|
"step": 382 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"grad_norm": 0.08232751491278589, |
|
"learning_rate": 0.0001312298478414102, |
|
"loss": 0.383, |
|
"step": 383 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"grad_norm": 0.08042003166038371, |
|
"learning_rate": 0.00013090169943749476, |
|
"loss": 0.3854, |
|
"step": 384 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"grad_norm": 0.07517595475460019, |
|
"learning_rate": 0.0001305731827359753, |
|
"loss": 0.3753, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"grad_norm": 0.08259426772986962, |
|
"learning_rate": 0.00013024430165223244, |
|
"loss": 0.3743, |
|
"step": 386 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"grad_norm": 0.08249155111922718, |
|
"learning_rate": 0.00012991506010598964, |
|
"loss": 0.4007, |
|
"step": 387 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"grad_norm": 0.07736305672570372, |
|
"learning_rate": 0.0001295854620212664, |
|
"loss": 0.363, |
|
"step": 388 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"grad_norm": 0.07326027328989387, |
|
"learning_rate": 0.0001292555113263316, |
|
"loss": 0.3605, |
|
"step": 389 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"grad_norm": 0.08267552681029215, |
|
"learning_rate": 0.00012892521195365678, |
|
"loss": 0.3811, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"eval_loss": 0.4272485077381134, |
|
"eval_runtime": 31.4867, |
|
"eval_samples_per_second": 50.815, |
|
"eval_steps_per_second": 0.794, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"grad_norm": 0.07836996060236551, |
|
"learning_rate": 0.00012859456783986893, |
|
"loss": 0.3375, |
|
"step": 391 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"grad_norm": 0.08132634828350899, |
|
"learning_rate": 0.00012826358292570398, |
|
"loss": 0.3803, |
|
"step": 392 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"grad_norm": 0.07855371030733681, |
|
"learning_rate": 0.00012793226115595952, |
|
"loss": 0.3333, |
|
"step": 393 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"grad_norm": 0.07761778090681633, |
|
"learning_rate": 0.00012760060647944795, |
|
"loss": 0.3249, |
|
"step": 394 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"grad_norm": 0.08653543003834345, |
|
"learning_rate": 0.00012726862284894938, |
|
"loss": 0.3754, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"grad_norm": 0.08368500314474855, |
|
"learning_rate": 0.00012693631422116454, |
|
"loss": 0.3497, |
|
"step": 396 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"grad_norm": 0.08863281952751606, |
|
"learning_rate": 0.00012660368455666752, |
|
"loss": 0.3951, |
|
"step": 397 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"grad_norm": 0.08372219881292209, |
|
"learning_rate": 0.0001262707378198587, |
|
"loss": 0.3793, |
|
"step": 398 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"grad_norm": 0.08088256185598346, |
|
"learning_rate": 0.00012593747797891742, |
|
"loss": 0.3541, |
|
"step": 399 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"grad_norm": 0.0809646416504841, |
|
"learning_rate": 0.0001256039090057547, |
|
"loss": 0.3351, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"grad_norm": 0.0834243840140192, |
|
"learning_rate": 0.000125270034875966, |
|
"loss": 0.3653, |
|
"step": 401 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"grad_norm": 0.08569597310919161, |
|
"learning_rate": 0.00012493585956878354, |
|
"loss": 0.3869, |
|
"step": 402 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"grad_norm": 0.07886614027929317, |
|
"learning_rate": 0.0001246013870670293, |
|
"loss": 0.3551, |
|
"step": 403 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"grad_norm": 0.08109270204021289, |
|
"learning_rate": 0.0001242666213570672, |
|
"loss": 0.3693, |
|
"step": 404 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"grad_norm": 0.07898699446219974, |
|
"learning_rate": 0.0001239315664287558, |
|
"loss": 0.3469, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"grad_norm": 0.07880897037364112, |
|
"learning_rate": 0.00012359622627540058, |
|
"loss": 0.3739, |
|
"step": 406 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"grad_norm": 0.08289417173418122, |
|
"learning_rate": 0.00012326060489370653, |
|
"loss": 0.3727, |
|
"step": 407 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"grad_norm": 0.0842462438087756, |
|
"learning_rate": 0.00012292470628373037, |
|
"loss": 0.3647, |
|
"step": 408 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"grad_norm": 0.08537904855482682, |
|
"learning_rate": 0.00012258853444883295, |
|
"loss": 0.3593, |
|
"step": 409 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"grad_norm": 0.07031356294146404, |
|
"learning_rate": 0.00012225209339563145, |
|
"loss": 0.3109, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"grad_norm": 0.08271226158300135, |
|
"learning_rate": 0.00012191538713395178, |
|
"loss": 0.341, |
|
"step": 411 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"grad_norm": 0.081638753906054, |
|
"learning_rate": 0.00012157841967678063, |
|
"loss": 0.3721, |
|
"step": 412 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"grad_norm": 0.08282882522768446, |
|
"learning_rate": 0.00012124119504021775, |
|
"loss": 0.3868, |
|
"step": 413 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"grad_norm": 0.07632165634443522, |
|
"learning_rate": 0.00012090371724342804, |
|
"loss": 0.3596, |
|
"step": 414 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"grad_norm": 0.07723277347269442, |
|
"learning_rate": 0.00012056599030859366, |
|
"loss": 0.3623, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"grad_norm": 0.07600198653763515, |
|
"learning_rate": 0.00012022801826086609, |
|
"loss": 0.3838, |
|
"step": 416 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"grad_norm": 0.0737660398002958, |
|
"learning_rate": 0.00011988980512831809, |
|
"loss": 0.3406, |
|
"step": 417 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"grad_norm": 0.0827463463847524, |
|
"learning_rate": 0.00011955135494189588, |
|
"loss": 0.3797, |
|
"step": 418 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"grad_norm": 0.07835261149802049, |
|
"learning_rate": 0.00011921267173537086, |
|
"loss": 0.3556, |
|
"step": 419 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"grad_norm": 0.07869323076948366, |
|
"learning_rate": 0.00011887375954529168, |
|
"loss": 0.3732, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"grad_norm": 0.07528530037269936, |
|
"learning_rate": 0.00011853462241093613, |
|
"loss": 0.3586, |
|
"step": 421 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"grad_norm": 0.08540035485538978, |
|
"learning_rate": 0.00011819526437426298, |
|
"loss": 0.3744, |
|
"step": 422 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"grad_norm": 0.08162324916698332, |
|
"learning_rate": 0.00011785568947986367, |
|
"loss": 0.3453, |
|
"step": 423 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"grad_norm": 0.08520031508547123, |
|
"learning_rate": 0.0001175159017749144, |
|
"loss": 0.3661, |
|
"step": 424 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"grad_norm": 0.08622581171267268, |
|
"learning_rate": 0.00011717590530912763, |
|
"loss": 0.34, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"grad_norm": 0.08293740708713171, |
|
"learning_rate": 0.00011683570413470383, |
|
"loss": 0.3525, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"grad_norm": 0.08630674052013089, |
|
"learning_rate": 0.0001164953023062835, |
|
"loss": 0.3663, |
|
"step": 427 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"grad_norm": 0.08157627202282051, |
|
"learning_rate": 0.00011615470388089835, |
|
"loss": 0.3496, |
|
"step": 428 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"grad_norm": 0.08155874853440571, |
|
"learning_rate": 0.00011581391291792336, |
|
"loss": 0.367, |
|
"step": 429 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"grad_norm": 0.08112096087877045, |
|
"learning_rate": 0.00011547293347902812, |
|
"loss": 0.3969, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 1.37, |
|
"grad_norm": 0.0788722068697115, |
|
"learning_rate": 0.0001151317696281287, |
|
"loss": 0.3669, |
|
"step": 431 |
|
}, |
|
{ |
|
"epoch": 1.37, |
|
"grad_norm": 0.08202544827607615, |
|
"learning_rate": 0.00011479042543133895, |
|
"loss": 0.3604, |
|
"step": 432 |
|
}, |
|
{ |
|
"epoch": 1.37, |
|
"grad_norm": 0.07688818566027143, |
|
"learning_rate": 0.00011444890495692213, |
|
"loss": 0.3553, |
|
"step": 433 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"grad_norm": 0.08043229761191359, |
|
"learning_rate": 0.00011410721227524255, |
|
"loss": 0.3829, |
|
"step": 434 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"grad_norm": 0.07714097071379519, |
|
"learning_rate": 0.00011376535145871684, |
|
"loss": 0.3578, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"grad_norm": 0.0745231009772769, |
|
"learning_rate": 0.00011342332658176555, |
|
"loss": 0.3325, |
|
"step": 436 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"grad_norm": 0.07732479544740335, |
|
"learning_rate": 0.00011308114172076462, |
|
"loss": 0.3674, |
|
"step": 437 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"grad_norm": 0.08520859762973104, |
|
"learning_rate": 0.00011273880095399667, |
|
"loss": 0.4001, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"grad_norm": 0.0765417910322464, |
|
"learning_rate": 0.00011239630836160246, |
|
"loss": 0.3821, |
|
"step": 439 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"grad_norm": 0.07773772452915924, |
|
"learning_rate": 0.0001120536680255323, |
|
"loss": 0.3667, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"grad_norm": 0.07826316953532447, |
|
"learning_rate": 0.00011171088402949739, |
|
"loss": 0.3785, |
|
"step": 441 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"grad_norm": 0.0807755587572588, |
|
"learning_rate": 0.00011136796045892102, |
|
"loss": 0.3379, |
|
"step": 442 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"grad_norm": 0.0769264038040213, |
|
"learning_rate": 0.00011102490140089009, |
|
"loss": 0.3849, |
|
"step": 443 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"grad_norm": 0.07698654136683346, |
|
"learning_rate": 0.00011068171094410618, |
|
"loss": 0.3566, |
|
"step": 444 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"grad_norm": 0.0767333052243508, |
|
"learning_rate": 0.00011033839317883701, |
|
"loss": 0.3344, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"grad_norm": 0.08336825371769054, |
|
"learning_rate": 0.00010999495219686762, |
|
"loss": 0.3912, |
|
"step": 446 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"grad_norm": 0.08777594502071236, |
|
"learning_rate": 0.00010965139209145152, |
|
"loss": 0.3919, |
|
"step": 447 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"grad_norm": 0.08075368961243723, |
|
"learning_rate": 0.00010930771695726201, |
|
"loss": 0.3355, |
|
"step": 448 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"grad_norm": 0.0817581524046764, |
|
"learning_rate": 0.00010896393089034336, |
|
"loss": 0.3837, |
|
"step": 449 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"grad_norm": 0.07809571969176528, |
|
"learning_rate": 0.00010862003798806196, |
|
"loss": 0.3461, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"grad_norm": 0.08275587894806521, |
|
"learning_rate": 0.00010827604234905748, |
|
"loss": 0.3226, |
|
"step": 451 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"grad_norm": 0.08006879996457565, |
|
"learning_rate": 0.00010793194807319408, |
|
"loss": 0.3817, |
|
"step": 452 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"grad_norm": 0.0740637352494351, |
|
"learning_rate": 0.00010758775926151154, |
|
"loss": 0.3574, |
|
"step": 453 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"grad_norm": 0.07910943875130351, |
|
"learning_rate": 0.00010724348001617625, |
|
"loss": 0.3581, |
|
"step": 454 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"grad_norm": 0.0753786712518676, |
|
"learning_rate": 0.00010689911444043248, |
|
"loss": 0.3655, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"grad_norm": 0.08280644113490662, |
|
"learning_rate": 0.00010655466663855349, |
|
"loss": 0.3774, |
|
"step": 456 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"grad_norm": 0.08547898182915604, |
|
"learning_rate": 0.0001062101407157924, |
|
"loss": 0.4035, |
|
"step": 457 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"grad_norm": 0.08698240550610552, |
|
"learning_rate": 0.00010586554077833347, |
|
"loss": 0.3688, |
|
"step": 458 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"grad_norm": 0.08242367645301783, |
|
"learning_rate": 0.00010552087093324315, |
|
"loss": 0.3754, |
|
"step": 459 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"grad_norm": 0.07376544324977327, |
|
"learning_rate": 0.00010517613528842097, |
|
"loss": 0.3587, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"grad_norm": 0.08100703861143939, |
|
"learning_rate": 0.00010483133795255071, |
|
"loss": 0.3812, |
|
"step": 461 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"grad_norm": 0.08170556800441375, |
|
"learning_rate": 0.00010448648303505151, |
|
"loss": 0.3667, |
|
"step": 462 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"grad_norm": 0.08085852463592974, |
|
"learning_rate": 0.00010414157464602866, |
|
"loss": 0.3514, |
|
"step": 463 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"grad_norm": 0.0789315049905107, |
|
"learning_rate": 0.00010379661689622477, |
|
"loss": 0.3991, |
|
"step": 464 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"grad_norm": 0.07803841799948108, |
|
"learning_rate": 0.00010345161389697082, |
|
"loss": 0.3181, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"grad_norm": 0.08070026028876534, |
|
"learning_rate": 0.00010310656976013705, |
|
"loss": 0.3837, |
|
"step": 466 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"grad_norm": 0.08631416699901101, |
|
"learning_rate": 0.0001027614885980839, |
|
"loss": 0.3562, |
|
"step": 467 |
|
}, |
|
{ |
|
"epoch": 1.49, |
|
"grad_norm": 0.07989902063759162, |
|
"learning_rate": 0.00010241637452361323, |
|
"loss": 0.3738, |
|
"step": 468 |
|
}, |
|
{ |
|
"epoch": 1.49, |
|
"eval_loss": 0.4268099069595337, |
|
"eval_runtime": 31.5034, |
|
"eval_samples_per_second": 50.788, |
|
"eval_steps_per_second": 0.794, |
|
"step": 468 |
|
}, |
|
{ |
|
"epoch": 1.49, |
|
"grad_norm": 0.08278261941170967, |
|
"learning_rate": 0.00010207123164991911, |
|
"loss": 0.3798, |
|
"step": 469 |
|
}, |
|
{ |
|
"epoch": 1.49, |
|
"grad_norm": 0.07848169734840553, |
|
"learning_rate": 0.00010172606409053886, |
|
"loss": 0.3807, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"grad_norm": 0.07862977038589984, |
|
"learning_rate": 0.00010138087595930395, |
|
"loss": 0.3701, |
|
"step": 471 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"grad_norm": 0.08217076517383108, |
|
"learning_rate": 0.0001010356713702911, |
|
"loss": 0.3903, |
|
"step": 472 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"grad_norm": 0.0802021316689387, |
|
"learning_rate": 0.00010069045443777318, |
|
"loss": 0.3634, |
|
"step": 473 |
|
}, |
|
{ |
|
"epoch": 1.51, |
|
"grad_norm": 0.07338173288420062, |
|
"learning_rate": 0.00010034522927617014, |
|
"loss": 0.3811, |
|
"step": 474 |
|
}, |
|
{ |
|
"epoch": 1.51, |
|
"grad_norm": 0.08200044564472758, |
|
"learning_rate": 0.0001, |
|
"loss": 0.3468, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 1.51, |
|
"grad_norm": 0.07839544989553099, |
|
"learning_rate": 9.96547707238299e-05, |
|
"loss": 0.3511, |
|
"step": 476 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"grad_norm": 0.07952288871273722, |
|
"learning_rate": 9.930954556222683e-05, |
|
"loss": 0.3552, |
|
"step": 477 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"grad_norm": 0.08699081546573348, |
|
"learning_rate": 9.896432862970892e-05, |
|
"loss": 0.3526, |
|
"step": 478 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"grad_norm": 0.08423355750874421, |
|
"learning_rate": 9.861912404069608e-05, |
|
"loss": 0.3765, |
|
"step": 479 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"grad_norm": 0.07759825635438429, |
|
"learning_rate": 9.827393590946116e-05, |
|
"loss": 0.3575, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"grad_norm": 0.08333101182425925, |
|
"learning_rate": 9.792876835008091e-05, |
|
"loss": 0.3393, |
|
"step": 481 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"grad_norm": 0.07612722261829721, |
|
"learning_rate": 9.75836254763868e-05, |
|
"loss": 0.3471, |
|
"step": 482 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"grad_norm": 0.07873796857875963, |
|
"learning_rate": 9.723851140191613e-05, |
|
"loss": 0.3481, |
|
"step": 483 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"grad_norm": 0.07711176362729558, |
|
"learning_rate": 9.689343023986302e-05, |
|
"loss": 0.3371, |
|
"step": 484 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"grad_norm": 0.08146330945626319, |
|
"learning_rate": 9.654838610302923e-05, |
|
"loss": 0.3685, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"grad_norm": 0.07904971154072554, |
|
"learning_rate": 9.620338310377525e-05, |
|
"loss": 0.3851, |
|
"step": 486 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"grad_norm": 0.07790237835005973, |
|
"learning_rate": 9.58584253539714e-05, |
|
"loss": 0.3499, |
|
"step": 487 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"grad_norm": 0.07832489963425983, |
|
"learning_rate": 9.551351696494854e-05, |
|
"loss": 0.3727, |
|
"step": 488 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"grad_norm": 0.08248518618426699, |
|
"learning_rate": 9.516866204744931e-05, |
|
"loss": 0.3784, |
|
"step": 489 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"grad_norm": 0.08050275079657898, |
|
"learning_rate": 9.482386471157904e-05, |
|
"loss": 0.3362, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"grad_norm": 0.07770689006350201, |
|
"learning_rate": 9.447912906675686e-05, |
|
"loss": 0.3609, |
|
"step": 491 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"grad_norm": 0.07175481533546442, |
|
"learning_rate": 9.413445922166653e-05, |
|
"loss": 0.3544, |
|
"step": 492 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"grad_norm": 0.08064147746188607, |
|
"learning_rate": 9.378985928420762e-05, |
|
"loss": 0.3466, |
|
"step": 493 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"grad_norm": 0.07743882202608632, |
|
"learning_rate": 9.344533336144652e-05, |
|
"loss": 0.3763, |
|
"step": 494 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"grad_norm": 0.07329593275985641, |
|
"learning_rate": 9.31008855595675e-05, |
|
"loss": 0.3503, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 1.58, |
|
"grad_norm": 0.0813108612869619, |
|
"learning_rate": 9.275651998382377e-05, |
|
"loss": 0.3582, |
|
"step": 496 |
|
}, |
|
{ |
|
"epoch": 1.58, |
|
"grad_norm": 0.07766612713514506, |
|
"learning_rate": 9.241224073848848e-05, |
|
"loss": 0.3736, |
|
"step": 497 |
|
}, |
|
{ |
|
"epoch": 1.58, |
|
"grad_norm": 0.08022030269140305, |
|
"learning_rate": 9.206805192680593e-05, |
|
"loss": 0.3651, |
|
"step": 498 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"grad_norm": 0.0810514185142845, |
|
"learning_rate": 9.172395765094254e-05, |
|
"loss": 0.3734, |
|
"step": 499 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"grad_norm": 0.08336908596716675, |
|
"learning_rate": 9.137996201193805e-05, |
|
"loss": 0.3764, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"grad_norm": 0.07708910580019225, |
|
"learning_rate": 9.103606910965666e-05, |
|
"loss": 0.3432, |
|
"step": 501 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"grad_norm": 0.07690985523300399, |
|
"learning_rate": 9.069228304273802e-05, |
|
"loss": 0.3366, |
|
"step": 502 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"grad_norm": 0.07814120482229833, |
|
"learning_rate": 9.034860790854849e-05, |
|
"loss": 0.38, |
|
"step": 503 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"grad_norm": 0.07754525008604259, |
|
"learning_rate": 9.00050478031324e-05, |
|
"loss": 0.3746, |
|
"step": 504 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"grad_norm": 0.08174093565738355, |
|
"learning_rate": 8.9661606821163e-05, |
|
"loss": 0.3521, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"grad_norm": 0.0874470830671821, |
|
"learning_rate": 8.931828905589385e-05, |
|
"loss": 0.3576, |
|
"step": 506 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"grad_norm": 0.0791505967737016, |
|
"learning_rate": 8.897509859910995e-05, |
|
"loss": 0.3623, |
|
"step": 507 |
|
}, |
|
{ |
|
"epoch": 1.62, |
|
"grad_norm": 0.08010612556499642, |
|
"learning_rate": 8.863203954107902e-05, |
|
"loss": 0.3596, |
|
"step": 508 |
|
}, |
|
{ |
|
"epoch": 1.62, |
|
"grad_norm": 0.07639082594002607, |
|
"learning_rate": 8.828911597050263e-05, |
|
"loss": 0.3754, |
|
"step": 509 |
|
}, |
|
{ |
|
"epoch": 1.62, |
|
"grad_norm": 0.0812907343779127, |
|
"learning_rate": 8.79463319744677e-05, |
|
"loss": 0.3569, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"grad_norm": 0.07981116292396602, |
|
"learning_rate": 8.760369163839758e-05, |
|
"loss": 0.369, |
|
"step": 511 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"grad_norm": 0.08007339719446442, |
|
"learning_rate": 8.726119904600336e-05, |
|
"loss": 0.3619, |
|
"step": 512 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"grad_norm": 0.08188074360240942, |
|
"learning_rate": 8.691885827923541e-05, |
|
"loss": 0.3588, |
|
"step": 513 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"grad_norm": 0.07330400401389027, |
|
"learning_rate": 8.657667341823448e-05, |
|
"loss": 0.361, |
|
"step": 514 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"grad_norm": 0.07580473341808247, |
|
"learning_rate": 8.62346485412832e-05, |
|
"loss": 0.3332, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"grad_norm": 0.0751836352756251, |
|
"learning_rate": 8.589278772475749e-05, |
|
"loss": 0.3275, |
|
"step": 516 |
|
}, |
|
{ |
|
"epoch": 1.65, |
|
"grad_norm": 0.076324517800282, |
|
"learning_rate": 8.55510950430779e-05, |
|
"loss": 0.3692, |
|
"step": 517 |
|
}, |
|
{ |
|
"epoch": 1.65, |
|
"grad_norm": 0.08062731434583217, |
|
"learning_rate": 8.520957456866107e-05, |
|
"loss": 0.3388, |
|
"step": 518 |
|
}, |
|
{ |
|
"epoch": 1.65, |
|
"grad_norm": 0.07385163740127851, |
|
"learning_rate": 8.486823037187129e-05, |
|
"loss": 0.3478, |
|
"step": 519 |
|
}, |
|
{ |
|
"epoch": 1.65, |
|
"grad_norm": 0.07490293522468204, |
|
"learning_rate": 8.452706652097186e-05, |
|
"loss": 0.3636, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"grad_norm": 0.0809647940628489, |
|
"learning_rate": 8.418608708207667e-05, |
|
"loss": 0.3675, |
|
"step": 521 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"grad_norm": 0.07923274582937138, |
|
"learning_rate": 8.384529611910163e-05, |
|
"loss": 0.3552, |
|
"step": 522 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"grad_norm": 0.08216616911262736, |
|
"learning_rate": 8.35046976937165e-05, |
|
"loss": 0.3275, |
|
"step": 523 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"grad_norm": 0.07936137998207138, |
|
"learning_rate": 8.316429586529615e-05, |
|
"loss": 0.3582, |
|
"step": 524 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"grad_norm": 0.08058250439811132, |
|
"learning_rate": 8.282409469087239e-05, |
|
"loss": 0.3676, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"grad_norm": 0.0792633274598118, |
|
"learning_rate": 8.248409822508561e-05, |
|
"loss": 0.3606, |
|
"step": 526 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"grad_norm": 0.08160855744923941, |
|
"learning_rate": 8.214431052013634e-05, |
|
"loss": 0.378, |
|
"step": 527 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"grad_norm": 0.08244144254881117, |
|
"learning_rate": 8.180473562573705e-05, |
|
"loss": 0.3602, |
|
"step": 528 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"grad_norm": 0.08332491219956155, |
|
"learning_rate": 8.146537758906388e-05, |
|
"loss": 0.3853, |
|
"step": 529 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"grad_norm": 0.0787381785277075, |
|
"learning_rate": 8.112624045470835e-05, |
|
"loss": 0.3578, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"grad_norm": 0.07465375920723784, |
|
"learning_rate": 8.078732826462915e-05, |
|
"loss": 0.3553, |
|
"step": 531 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"grad_norm": 0.07914126541842607, |
|
"learning_rate": 8.044864505810414e-05, |
|
"loss": 0.38, |
|
"step": 532 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"grad_norm": 0.0762325619765211, |
|
"learning_rate": 8.011019487168192e-05, |
|
"loss": 0.3522, |
|
"step": 533 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"grad_norm": 0.08205023330200409, |
|
"learning_rate": 7.977198173913394e-05, |
|
"loss": 0.3578, |
|
"step": 534 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"grad_norm": 0.07440912776013751, |
|
"learning_rate": 7.943400969140635e-05, |
|
"loss": 0.3349, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 1.71, |
|
"grad_norm": 0.08653541120426957, |
|
"learning_rate": 7.909628275657198e-05, |
|
"loss": 0.3878, |
|
"step": 536 |
|
}, |
|
{ |
|
"epoch": 1.71, |
|
"grad_norm": 0.08271269508372392, |
|
"learning_rate": 7.875880495978227e-05, |
|
"loss": 0.3687, |
|
"step": 537 |
|
}, |
|
{ |
|
"epoch": 1.71, |
|
"grad_norm": 0.07990749427850796, |
|
"learning_rate": 7.84215803232194e-05, |
|
"loss": 0.3428, |
|
"step": 538 |
|
}, |
|
{ |
|
"epoch": 1.72, |
|
"grad_norm": 0.08010343988871826, |
|
"learning_rate": 7.808461286604827e-05, |
|
"loss": 0.3363, |
|
"step": 539 |
|
}, |
|
{ |
|
"epoch": 1.72, |
|
"grad_norm": 0.08385707389143623, |
|
"learning_rate": 7.774790660436858e-05, |
|
"loss": 0.3795, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 1.72, |
|
"grad_norm": 0.08101037261788462, |
|
"learning_rate": 7.741146555116708e-05, |
|
"loss": 0.3781, |
|
"step": 541 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"grad_norm": 0.08278683251141064, |
|
"learning_rate": 7.707529371626965e-05, |
|
"loss": 0.3406, |
|
"step": 542 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"grad_norm": 0.08593501282474013, |
|
"learning_rate": 7.673939510629349e-05, |
|
"loss": 0.3664, |
|
"step": 543 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"grad_norm": 0.07732960398229517, |
|
"learning_rate": 7.640377372459945e-05, |
|
"loss": 0.359, |
|
"step": 544 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"grad_norm": 0.07698197755802726, |
|
"learning_rate": 7.606843357124426e-05, |
|
"loss": 0.3525, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"grad_norm": 0.07748553112157543, |
|
"learning_rate": 7.573337864293283e-05, |
|
"loss": 0.3538, |
|
"step": 546 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"eval_loss": 0.42420506477355957, |
|
"eval_runtime": 31.5034, |
|
"eval_samples_per_second": 50.788, |
|
"eval_steps_per_second": 0.794, |
|
"step": 546 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"grad_norm": 0.0773849964940893, |
|
"learning_rate": 7.539861293297072e-05, |
|
"loss": 0.3442, |
|
"step": 547 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"grad_norm": 0.08408291713234628, |
|
"learning_rate": 7.506414043121647e-05, |
|
"loss": 0.3502, |
|
"step": 548 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"grad_norm": 0.08137948671656789, |
|
"learning_rate": 7.472996512403403e-05, |
|
"loss": 0.3834, |
|
"step": 549 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"grad_norm": 0.07884987774361263, |
|
"learning_rate": 7.43960909942453e-05, |
|
"loss": 0.3535, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"grad_norm": 0.08601619357770882, |
|
"learning_rate": 7.406252202108258e-05, |
|
"loss": 0.3354, |
|
"step": 551 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"grad_norm": 0.08277228544080684, |
|
"learning_rate": 7.372926218014131e-05, |
|
"loss": 0.3658, |
|
"step": 552 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"grad_norm": 0.08055402198167343, |
|
"learning_rate": 7.339631544333249e-05, |
|
"loss": 0.3933, |
|
"step": 553 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"grad_norm": 0.08231940351547128, |
|
"learning_rate": 7.306368577883547e-05, |
|
"loss": 0.389, |
|
"step": 554 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"grad_norm": 0.07982419038477599, |
|
"learning_rate": 7.273137715105063e-05, |
|
"loss": 0.3373, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"grad_norm": 0.07946502223724, |
|
"learning_rate": 7.239939352055208e-05, |
|
"loss": 0.355, |
|
"step": 556 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"grad_norm": 0.07851925552097333, |
|
"learning_rate": 7.206773884404051e-05, |
|
"loss": 0.3424, |
|
"step": 557 |
|
}, |
|
{ |
|
"epoch": 1.78, |
|
"grad_norm": 0.0803764068033967, |
|
"learning_rate": 7.173641707429606e-05, |
|
"loss": 0.3563, |
|
"step": 558 |
|
}, |
|
{ |
|
"epoch": 1.78, |
|
"grad_norm": 0.0761192213153037, |
|
"learning_rate": 7.14054321601311e-05, |
|
"loss": 0.3636, |
|
"step": 559 |
|
}, |
|
{ |
|
"epoch": 1.78, |
|
"grad_norm": 0.08367999776936366, |
|
"learning_rate": 7.107478804634325e-05, |
|
"loss": 0.3653, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"grad_norm": 0.08261688121892191, |
|
"learning_rate": 7.07444886736684e-05, |
|
"loss": 0.3486, |
|
"step": 561 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"grad_norm": 0.08105782422350354, |
|
"learning_rate": 7.041453797873363e-05, |
|
"loss": 0.3748, |
|
"step": 562 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"grad_norm": 0.07917828136130849, |
|
"learning_rate": 7.008493989401039e-05, |
|
"loss": 0.3621, |
|
"step": 563 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"grad_norm": 0.07203487013224297, |
|
"learning_rate": 6.975569834776758e-05, |
|
"loss": 0.3233, |
|
"step": 564 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"grad_norm": 0.08093269486229393, |
|
"learning_rate": 6.942681726402473e-05, |
|
"loss": 0.3582, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"grad_norm": 0.08678311759977655, |
|
"learning_rate": 6.909830056250527e-05, |
|
"loss": 0.3794, |
|
"step": 566 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"grad_norm": 0.08084081388372036, |
|
"learning_rate": 6.877015215858981e-05, |
|
"loss": 0.3422, |
|
"step": 567 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"grad_norm": 0.08553392571374521, |
|
"learning_rate": 6.844237596326941e-05, |
|
"loss": 0.3761, |
|
"step": 568 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"grad_norm": 0.07912394124644004, |
|
"learning_rate": 6.811497588309901e-05, |
|
"loss": 0.355, |
|
"step": 569 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"grad_norm": 0.07620057152346986, |
|
"learning_rate": 6.778795582015097e-05, |
|
"loss": 0.3413, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"grad_norm": 0.07513161847440154, |
|
"learning_rate": 6.746131967196834e-05, |
|
"loss": 0.3568, |
|
"step": 571 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"grad_norm": 0.07561185266854122, |
|
"learning_rate": 6.713507133151857e-05, |
|
"loss": 0.3081, |
|
"step": 572 |
|
}, |
|
{ |
|
"epoch": 1.83, |
|
"grad_norm": 0.07460533026271897, |
|
"learning_rate": 6.680921468714719e-05, |
|
"loss": 0.359, |
|
"step": 573 |
|
}, |
|
{ |
|
"epoch": 1.83, |
|
"grad_norm": 0.07903492734652774, |
|
"learning_rate": 6.648375362253118e-05, |
|
"loss": 0.3524, |
|
"step": 574 |
|
}, |
|
{ |
|
"epoch": 1.83, |
|
"grad_norm": 0.07549694179386848, |
|
"learning_rate": 6.615869201663296e-05, |
|
"loss": 0.341, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"grad_norm": 0.07701423597853588, |
|
"learning_rate": 6.583403374365405e-05, |
|
"loss": 0.3441, |
|
"step": 576 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"grad_norm": 0.07155223176721602, |
|
"learning_rate": 6.550978267298893e-05, |
|
"loss": 0.3269, |
|
"step": 577 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"grad_norm": 0.08474429506026998, |
|
"learning_rate": 6.518594266917882e-05, |
|
"loss": 0.3767, |
|
"step": 578 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"grad_norm": 0.07508251474073095, |
|
"learning_rate": 6.486251759186572e-05, |
|
"loss": 0.3851, |
|
"step": 579 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"grad_norm": 0.08827225430407397, |
|
"learning_rate": 6.453951129574644e-05, |
|
"loss": 0.3738, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"grad_norm": 0.08202839176557213, |
|
"learning_rate": 6.421692763052653e-05, |
|
"loss": 0.3499, |
|
"step": 581 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"grad_norm": 0.07833005102594903, |
|
"learning_rate": 6.389477044087452e-05, |
|
"loss": 0.3662, |
|
"step": 582 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"grad_norm": 0.08063203854850547, |
|
"learning_rate": 6.357304356637606e-05, |
|
"loss": 0.3849, |
|
"step": 583 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"grad_norm": 0.08474453958289388, |
|
"learning_rate": 6.325175084148809e-05, |
|
"loss": 0.3579, |
|
"step": 584 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"grad_norm": 0.08269426648339273, |
|
"learning_rate": 6.293089609549325e-05, |
|
"loss": 0.382, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"grad_norm": 0.07526222880479648, |
|
"learning_rate": 6.261048315245419e-05, |
|
"loss": 0.3112, |
|
"step": 586 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"grad_norm": 0.07725562423865082, |
|
"learning_rate": 6.229051583116796e-05, |
|
"loss": 0.3248, |
|
"step": 587 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"grad_norm": 0.07265450260153018, |
|
"learning_rate": 6.197099794512056e-05, |
|
"loss": 0.3304, |
|
"step": 588 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"grad_norm": 0.08008761756737857, |
|
"learning_rate": 6.165193330244143e-05, |
|
"loss": 0.3519, |
|
"step": 589 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"grad_norm": 0.0837137135807681, |
|
"learning_rate": 6.133332570585812e-05, |
|
"loss": 0.3338, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"grad_norm": 0.08294791697322053, |
|
"learning_rate": 6.101517895265094e-05, |
|
"loss": 0.3514, |
|
"step": 591 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"grad_norm": 0.08535305333963247, |
|
"learning_rate": 6.069749683460765e-05, |
|
"loss": 0.3722, |
|
"step": 592 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"grad_norm": 0.0842929704042906, |
|
"learning_rate": 6.0380283137978396e-05, |
|
"loss": 0.383, |
|
"step": 593 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"grad_norm": 0.0815197289066111, |
|
"learning_rate": 6.006354164343046e-05, |
|
"loss": 0.3879, |
|
"step": 594 |
|
}, |
|
{ |
|
"epoch": 1.9, |
|
"grad_norm": 0.07818811966248758, |
|
"learning_rate": 5.9747276126003257e-05, |
|
"loss": 0.3975, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 1.9, |
|
"grad_norm": 0.07781593462629392, |
|
"learning_rate": 5.943149035506337e-05, |
|
"loss": 0.3611, |
|
"step": 596 |
|
}, |
|
{ |
|
"epoch": 1.9, |
|
"grad_norm": 0.07759999060222111, |
|
"learning_rate": 5.9116188094259516e-05, |
|
"loss": 0.3494, |
|
"step": 597 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"grad_norm": 0.0786183900535505, |
|
"learning_rate": 5.880137310147782e-05, |
|
"loss": 0.3407, |
|
"step": 598 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"grad_norm": 0.08562034093492009, |
|
"learning_rate": 5.848704912879699e-05, |
|
"loss": 0.3474, |
|
"step": 599 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"grad_norm": 0.08260942393320048, |
|
"learning_rate": 5.817321992244351e-05, |
|
"loss": 0.3329, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"grad_norm": 0.07904114888657499, |
|
"learning_rate": 5.785988922274711e-05, |
|
"loss": 0.3524, |
|
"step": 601 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"grad_norm": 0.07582527865163859, |
|
"learning_rate": 5.754706076409613e-05, |
|
"loss": 0.3433, |
|
"step": 602 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"grad_norm": 0.0869013852078456, |
|
"learning_rate": 5.723473827489301e-05, |
|
"loss": 0.3676, |
|
"step": 603 |
|
}, |
|
{ |
|
"epoch": 1.93, |
|
"grad_norm": 0.07866051540263401, |
|
"learning_rate": 5.692292547750988e-05, |
|
"loss": 0.3548, |
|
"step": 604 |
|
}, |
|
{ |
|
"epoch": 1.93, |
|
"grad_norm": 0.08298313952251557, |
|
"learning_rate": 5.6611626088244194e-05, |
|
"loss": 0.3624, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 1.93, |
|
"grad_norm": 0.08120082412434228, |
|
"learning_rate": 5.630084381727434e-05, |
|
"loss": 0.35, |
|
"step": 606 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"grad_norm": 0.08364501577678882, |
|
"learning_rate": 5.599058236861558e-05, |
|
"loss": 0.3143, |
|
"step": 607 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"grad_norm": 0.07755177727522106, |
|
"learning_rate": 5.568084544007588e-05, |
|
"loss": 0.326, |
|
"step": 608 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"grad_norm": 0.0839037119714208, |
|
"learning_rate": 5.5371636723211606e-05, |
|
"loss": 0.3656, |
|
"step": 609 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"grad_norm": 0.08240199041279521, |
|
"learning_rate": 5.506295990328385e-05, |
|
"loss": 0.378, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"grad_norm": 0.07648943764975623, |
|
"learning_rate": 5.4754818659214405e-05, |
|
"loss": 0.3411, |
|
"step": 611 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"grad_norm": 0.0794911884788703, |
|
"learning_rate": 5.444721666354169e-05, |
|
"loss": 0.3334, |
|
"step": 612 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"grad_norm": 0.07612986812407667, |
|
"learning_rate": 5.4140157582377336e-05, |
|
"loss": 0.3375, |
|
"step": 613 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"grad_norm": 0.07371427661365673, |
|
"learning_rate": 5.383364507536229e-05, |
|
"loss": 0.3637, |
|
"step": 614 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"grad_norm": 0.07428710700805227, |
|
"learning_rate": 5.3527682795623146e-05, |
|
"loss": 0.3338, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"grad_norm": 0.07983783106037953, |
|
"learning_rate": 5.32222743897288e-05, |
|
"loss": 0.3398, |
|
"step": 616 |
|
}, |
|
{ |
|
"epoch": 1.97, |
|
"grad_norm": 0.07908192143487973, |
|
"learning_rate": 5.291742349764683e-05, |
|
"loss": 0.3582, |
|
"step": 617 |
|
}, |
|
{ |
|
"epoch": 1.97, |
|
"grad_norm": 0.08383309245612144, |
|
"learning_rate": 5.261313375270014e-05, |
|
"loss": 0.3816, |
|
"step": 618 |
|
}, |
|
{ |
|
"epoch": 1.97, |
|
"grad_norm": 0.07254861503772095, |
|
"learning_rate": 5.23094087815237e-05, |
|
"loss": 0.3414, |
|
"step": 619 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"grad_norm": 0.06986123884274745, |
|
"learning_rate": 5.200625220402139e-05, |
|
"loss": 0.329, |
|
"step": 620 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 930, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 3, |
|
"save_steps": 310, |
|
"total_flos": 3.3981380955000013e+18, |
|
"train_batch_size": 8, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
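The object above is a raw training-state log: each entry in log_history records either a training step (epoch, grad_norm, learning_rate, loss, step) or a periodic evaluation pass (eval_loss, eval_runtime, eval_samples_per_second, eval_steps_per_second, step), followed by run-level settings such as max_steps, save_steps, and train_batch_size. Below is a minimal Python sketch for summarizing such a log; the file name trainer_state.json is an assumption (the layout appears to match what Hugging Face's Trainer writes), and the printed summary is purely illustrative.

import json

# Load the training state (file name assumed; adjust to the actual path).
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry a "loss" key; evaluation entries carry "eval_loss".
train_rows = [r for r in state["log_history"] if "loss" in r]
eval_rows = [r for r in state["log_history"] if "eval_loss" in r]

# Last logged training step, its loss, and the learning rate at that point.
last = train_rows[-1]
print(f"step {last['step']} of {state['max_steps']} (epoch {last['epoch']}): "
      f"loss={last['loss']:.4f}, lr={last['learning_rate']:.2e}")

# Lowest evaluation loss seen so far and the step at which it occurred.
if eval_rows:
    best = min(eval_rows, key=lambda r: r["eval_loss"])
    print(f"best eval_loss {best['eval_loss']:.4f} at step {best['step']}")

Run against the log above, this would report the final logged step (620 of 930) and the lowest eval_loss among the recorded evaluation passes; only the key names actually present in the log are used.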
|
|