{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 9.620551090700344,
  "eval_steps": 500,
  "global_step": 344,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 1.9999830768577445e-05,
      "loss": 0.7381,
      "step": 1
    },
    {
      "epoch": 0.04,
      "learning_rate": 1.9999323080037623e-05,
      "loss": 0.7407,
      "step": 2
    },
    {
      "epoch": 0.06,
      "learning_rate": 1.9998476951563914e-05,
      "loss": 0.7359,
      "step": 3
    },
    {
      "epoch": 0.07,
      "learning_rate": 1.999729241179462e-05,
      "loss": 0.7298,
      "step": 4
    },
    {
      "epoch": 0.09,
      "learning_rate": 1.9995769500822007e-05,
      "loss": 0.7305,
      "step": 5
    },
    {
      "epoch": 0.11,
      "learning_rate": 1.999390827019096e-05,
      "loss": 0.7124,
      "step": 6
    },
    {
      "epoch": 0.13,
      "learning_rate": 1.9991708782897214e-05,
      "loss": 0.7076,
      "step": 7
    },
    {
      "epoch": 0.15,
      "learning_rate": 1.998917111338525e-05,
      "loss": 0.6927,
      "step": 8
    },
    {
      "epoch": 0.17,
      "learning_rate": 1.9986295347545738e-05,
      "loss": 0.6858,
      "step": 9
    },
    {
      "epoch": 0.18,
      "learning_rate": 1.9983081582712684e-05,
      "loss": 0.6812,
      "step": 10
    },
    {
      "epoch": 0.2,
      "learning_rate": 1.9979529927660076e-05,
      "loss": 0.6788,
      "step": 11
    },
    {
      "epoch": 0.22,
      "learning_rate": 1.9975640502598243e-05,
      "loss": 0.6633,
      "step": 12
    },
    {
      "epoch": 0.24,
      "learning_rate": 1.9971413439169777e-05,
      "loss": 0.6589,
      "step": 13
    },
    {
      "epoch": 0.26,
      "learning_rate": 1.996684888044506e-05,
      "loss": 0.6471,
      "step": 14
    },
    {
      "epoch": 0.28,
      "learning_rate": 1.9961946980917457e-05,
      "loss": 0.6474,
      "step": 15
    },
    {
      "epoch": 0.29,
      "learning_rate": 1.9956707906498046e-05,
      "loss": 0.6419,
      "step": 16
    },
    {
      "epoch": 0.31,
      "learning_rate": 1.9951131834510034e-05,
      "loss": 0.6345,
      "step": 17
    },
    {
      "epoch": 0.33,
      "learning_rate": 1.9945218953682736e-05,
      "loss": 0.635,
      "step": 18
    },
    {
      "epoch": 0.35,
      "learning_rate": 1.99389694641452e-05,
      "loss": 0.6309,
      "step": 19
    },
    {
      "epoch": 0.37,
      "learning_rate": 1.9932383577419432e-05,
      "loss": 0.619,
      "step": 20
    },
    {
      "epoch": 0.39,
      "learning_rate": 1.9925461516413224e-05,
      "loss": 0.6184,
      "step": 21
    },
    {
      "epoch": 0.4,
      "learning_rate": 1.9918203515412616e-05,
      "loss": 0.6127,
      "step": 22
    },
    {
      "epoch": 0.42,
      "learning_rate": 1.9910609820073986e-05,
      "loss": 0.6108,
      "step": 23
    },
    {
      "epoch": 0.44,
      "learning_rate": 1.9902680687415704e-05,
      "loss": 0.6138,
      "step": 24
    },
    {
      "epoch": 0.46,
      "learning_rate": 1.9894416385809444e-05,
      "loss": 0.6039,
      "step": 25
    },
    {
      "epoch": 0.48,
      "learning_rate": 1.9885817194971116e-05,
      "loss": 0.6123,
      "step": 26
    },
    {
      "epoch": 0.5,
      "learning_rate": 1.9876883405951378e-05,
      "loss": 0.5973,
      "step": 27
    },
    {
      "epoch": 0.51,
      "learning_rate": 1.9867615321125796e-05,
      "loss": 0.6002,
      "step": 28
    },
    {
      "epoch": 0.53,
      "learning_rate": 1.9858013254184597e-05,
      "loss": 0.5908,
      "step": 29
    },
    {
      "epoch": 0.55,
      "learning_rate": 1.9848077530122083e-05,
      "loss": 0.596,
      "step": 30
    },
    {
      "epoch": 0.57,
      "learning_rate": 1.983780848522559e-05,
      "loss": 0.5886,
      "step": 31
    },
    {
      "epoch": 0.59,
      "learning_rate": 1.9827206467064133e-05,
      "loss": 0.5908,
      "step": 32
    },
    {
      "epoch": 0.61,
      "learning_rate": 1.9816271834476642e-05,
      "loss": 0.5821,
      "step": 33
    },
    {
      "epoch": 0.62,
      "learning_rate": 1.9805004957559795e-05,
      "loss": 0.5796,
      "step": 34
    },
    {
      "epoch": 0.62,
      "eval_loss": 0.5779749751091003,
      "eval_runtime": 24.4017,
      "eval_samples_per_second": 8.196,
      "eval_steps_per_second": 0.533,
      "step": 34
    },
    {
      "epoch": 1.01,
      "learning_rate": 1.9793406217655516e-05,
      "loss": 0.5719,
      "step": 35
    },
    {
      "epoch": 1.03,
      "learning_rate": 1.9781476007338058e-05,
      "loss": 0.5771,
      "step": 36
    },
    {
      "epoch": 1.05,
      "learning_rate": 1.976921473040071e-05,
      "loss": 0.5709,
      "step": 37
    },
    {
      "epoch": 1.06,
      "learning_rate": 1.9756622801842144e-05,
      "loss": 0.567,
      "step": 38
    },
    {
      "epoch": 1.08,
      "learning_rate": 1.9743700647852356e-05,
      "loss": 0.5746,
      "step": 39
    },
    {
      "epoch": 1.1,
      "learning_rate": 1.973044870579824e-05,
      "loss": 0.5651,
      "step": 40
    },
    {
      "epoch": 1.12,
      "learning_rate": 1.9716867424208805e-05,
      "loss": 0.5621,
      "step": 41
    },
    {
      "epoch": 1.14,
      "learning_rate": 1.9702957262759964e-05,
      "loss": 0.5581,
      "step": 42
    },
    {
      "epoch": 1.16,
      "learning_rate": 1.9688718692259007e-05,
      "loss": 0.5555,
      "step": 43
    },
    {
      "epoch": 1.18,
      "learning_rate": 1.967415219462864e-05,
      "loss": 0.5512,
      "step": 44
    },
    {
      "epoch": 1.19,
      "learning_rate": 1.9659258262890683e-05,
      "loss": 0.554,
      "step": 45
    },
    {
      "epoch": 1.21,
      "learning_rate": 1.964403740114939e-05,
      "loss": 0.5507,
      "step": 46
    },
    {
      "epoch": 1.23,
      "learning_rate": 1.962849012457438e-05,
      "loss": 0.5482,
      "step": 47
    },
    {
      "epoch": 1.25,
      "learning_rate": 1.961261695938319e-05,
      "loss": 0.5439,
      "step": 48
    },
    {
      "epoch": 1.27,
      "learning_rate": 1.9596418442823495e-05,
      "loss": 0.5387,
      "step": 49
    },
    {
      "epoch": 1.29,
      "learning_rate": 1.957989512315489e-05,
      "loss": 0.5434,
      "step": 50
    },
    {
      "epoch": 1.3,
      "learning_rate": 1.9563047559630356e-05,
      "loss": 0.5388,
      "step": 51
    },
    {
      "epoch": 1.32,
      "learning_rate": 1.954587632247732e-05,
      "loss": 0.5385,
      "step": 52
    },
    {
      "epoch": 1.34,
      "learning_rate": 1.9528381992878362e-05,
      "loss": 0.5364,
      "step": 53
    },
    {
      "epoch": 1.36,
      "learning_rate": 1.9510565162951538e-05,
      "loss": 0.535,
      "step": 54
    },
    {
      "epoch": 1.38,
      "learning_rate": 1.949242643573034e-05,
      "loss": 0.5253,
      "step": 55
    },
    {
      "epoch": 1.4,
      "learning_rate": 1.9473966425143292e-05,
      "loss": 0.5278,
      "step": 56
    },
    {
      "epoch": 1.41,
      "learning_rate": 1.945518575599317e-05,
      "loss": 0.5316,
      "step": 57
    },
    {
      "epoch": 1.43,
      "learning_rate": 1.9436085063935837e-05,
      "loss": 0.5267,
      "step": 58
    },
    {
      "epoch": 1.45,
      "learning_rate": 1.9416664995458756e-05,
      "loss": 0.5265,
      "step": 59
    },
    {
      "epoch": 1.47,
      "learning_rate": 1.9396926207859085e-05,
      "loss": 0.5235,
      "step": 60
    },
    {
      "epoch": 1.49,
      "learning_rate": 1.937686936922145e-05,
      "loss": 0.5217,
      "step": 61
    },
    {
      "epoch": 1.51,
      "learning_rate": 1.9356495158395317e-05,
      "loss": 0.5243,
      "step": 62
    },
    {
      "epoch": 1.52,
      "learning_rate": 1.9335804264972018e-05,
      "loss": 0.5189,
      "step": 63
    },
    {
      "epoch": 1.54,
      "learning_rate": 1.9314797389261426e-05,
      "loss": 0.5193,
      "step": 64
    },
    {
      "epoch": 1.56,
      "learning_rate": 1.9293475242268224e-05,
      "loss": 0.5182,
      "step": 65
    },
    {
      "epoch": 1.58,
      "learning_rate": 1.9271838545667876e-05,
      "loss": 0.5222,
      "step": 66
    },
    {
      "epoch": 1.6,
      "learning_rate": 1.924988803178216e-05,
      "loss": 0.516,
      "step": 67
    },
    {
      "epoch": 1.62,
      "learning_rate": 1.9227624443554425e-05,
      "loss": 0.5119,
      "step": 68
    },
    {
      "epoch": 1.62,
      "eval_loss": 0.5121633410453796,
      "eval_runtime": 23.8105,
      "eval_samples_per_second": 8.4,
      "eval_steps_per_second": 0.546,
      "step": 68
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.9205048534524405e-05,
      "loss": 0.5095,
      "step": 69
    },
    {
      "epoch": 2.02,
      "learning_rate": 1.9182161068802742e-05,
      "loss": 0.5091,
      "step": 70
    },
    {
      "epoch": 2.04,
      "learning_rate": 1.9158962821045113e-05,
      "loss": 0.5086,
      "step": 71
    },
    {
      "epoch": 2.06,
      "learning_rate": 1.913545457642601e-05,
      "loss": 0.507,
      "step": 72
    },
    {
      "epoch": 2.07,
      "learning_rate": 1.9111637130612172e-05,
      "loss": 0.5096,
      "step": 73
    },
    {
      "epoch": 2.09,
      "learning_rate": 1.9087511289735646e-05,
      "loss": 0.5124,
      "step": 74
    },
    {
      "epoch": 2.11,
      "learning_rate": 1.9063077870366504e-05,
      "loss": 0.5013,
      "step": 75
    },
    {
      "epoch": 2.13,
      "learning_rate": 1.9038337699485207e-05,
      "loss": 0.5048,
      "step": 76
    },
    {
      "epoch": 2.15,
      "learning_rate": 1.9013291614454622e-05,
      "loss": 0.4988,
      "step": 77
    },
    {
      "epoch": 2.17,
      "learning_rate": 1.8987940462991673e-05,
      "loss": 0.502,
      "step": 78
    },
    {
      "epoch": 2.18,
      "learning_rate": 1.8962285103138637e-05,
      "loss": 0.4969,
      "step": 79
    },
    {
      "epoch": 2.2,
      "learning_rate": 1.8936326403234125e-05,
      "loss": 0.503,
      "step": 80
    },
    {
      "epoch": 2.22,
      "learning_rate": 1.891006524188368e-05,
      "loss": 0.4978,
      "step": 81
    },
    {
      "epoch": 2.24,
      "learning_rate": 1.8883502507930044e-05,
      "loss": 0.4954,
      "step": 82
    },
    {
      "epoch": 2.26,
      "learning_rate": 1.8856639100423062e-05,
      "loss": 0.4939,
      "step": 83
    },
    {
      "epoch": 2.28,
      "learning_rate": 1.8829475928589272e-05,
      "loss": 0.4942,
      "step": 84
    },
    {
      "epoch": 2.3,
      "learning_rate": 1.880201391180111e-05,
      "loss": 0.4933,
      "step": 85
    },
    {
      "epoch": 2.31,
      "learning_rate": 1.877425397954582e-05,
      "loss": 0.4946,
      "step": 86
    },
    {
      "epoch": 2.33,
      "learning_rate": 1.874619707139396e-05,
      "loss": 0.4939,
      "step": 87
    },
    {
      "epoch": 2.35,
      "learning_rate": 1.8717844136967626e-05,
      "loss": 0.4922,
      "step": 88
    },
    {
      "epoch": 2.37,
      "learning_rate": 1.8689196135908303e-05,
      "loss": 0.4925,
      "step": 89
    },
    {
      "epoch": 2.39,
      "learning_rate": 1.866025403784439e-05,
      "loss": 0.4902,
      "step": 90
    },
    {
      "epoch": 2.41,
      "learning_rate": 1.8631018822358363e-05,
      "loss": 0.4866,
      "step": 91
    },
    {
      "epoch": 2.42,
      "learning_rate": 1.860149147895366e-05,
      "loss": 0.4937,
      "step": 92
    },
    {
      "epoch": 2.44,
      "learning_rate": 1.8571673007021124e-05,
      "loss": 0.4897,
      "step": 93
    },
    {
      "epoch": 2.46,
      "learning_rate": 1.854156441580526e-05,
      "loss": 0.4879,
      "step": 94
    },
    {
      "epoch": 2.48,
      "learning_rate": 1.8511166724369997e-05,
      "loss": 0.4888,
      "step": 95
    },
    {
      "epoch": 2.5,
      "learning_rate": 1.848048096156426e-05,
      "loss": 0.4908,
      "step": 96
    },
    {
      "epoch": 2.52,
      "learning_rate": 1.8449508165987106e-05,
      "loss": 0.4856,
      "step": 97
    },
    {
      "epoch": 2.53,
      "learning_rate": 1.8418249385952575e-05,
      "loss": 0.4842,
      "step": 98
    },
    {
      "epoch": 2.55,
      "learning_rate": 1.8386705679454243e-05,
      "loss": 0.4877,
      "step": 99
    },
    {
      "epoch": 2.57,
      "learning_rate": 1.8354878114129368e-05,
      "loss": 0.491,
      "step": 100
    },
    {
      "epoch": 2.59,
      "learning_rate": 1.832276776722278e-05,
      "loss": 0.4877,
      "step": 101
    },
    {
      "epoch": 2.61,
      "learning_rate": 1.8290375725550417e-05,
      "loss": 0.4863,
      "step": 102
    },
    {
      "epoch": 2.63,
      "learning_rate": 1.8257703085462542e-05,
      "loss": 0.4799,
      "step": 103
    },
    {
      "epoch": 2.63,
      "eval_loss": 0.4830611050128937,
      "eval_runtime": 23.8066,
      "eval_samples_per_second": 8.401,
      "eval_steps_per_second": 0.546,
      "step": 103
    },
    {
      "epoch": 3.01,
      "learning_rate": 1.8224750952806626e-05,
      "loss": 0.4797,
      "step": 104
    },
    {
      "epoch": 3.03,
      "learning_rate": 1.819152044288992e-05,
      "loss": 0.4833,
      "step": 105
    },
    {
      "epoch": 3.05,
      "learning_rate": 1.8158012680441723e-05,
      "loss": 0.4833,
      "step": 106
    },
    {
      "epoch": 3.07,
      "learning_rate": 1.8124228799575295e-05,
      "loss": 0.4818,
      "step": 107
    },
    {
      "epoch": 3.08,
      "learning_rate": 1.8090169943749477e-05,
      "loss": 0.4885,
      "step": 108
    },
    {
      "epoch": 3.1,
      "learning_rate": 1.8055837265729996e-05,
      "loss": 0.4785,
      "step": 109
    },
    {
      "epoch": 3.12,
      "learning_rate": 1.802123192755044e-05,
      "loss": 0.4789,
      "step": 110
    },
    {
      "epoch": 3.14,
      "learning_rate": 1.798635510047293e-05,
      "loss": 0.4795,
      "step": 111
    },
    {
      "epoch": 3.16,
      "learning_rate": 1.795120796494848e-05,
      "loss": 0.478,
      "step": 112
    },
    {
      "epoch": 3.18,
      "learning_rate": 1.7915791710577035e-05,
      "loss": 0.477,
      "step": 113
    },
    {
      "epoch": 3.19,
      "learning_rate": 1.788010753606722e-05,
      "loss": 0.4746,
      "step": 114
    },
    {
      "epoch": 3.21,
      "learning_rate": 1.784415664919576e-05,
      "loss": 0.4811,
      "step": 115
    },
    {
      "epoch": 3.23,
      "learning_rate": 1.7807940266766595e-05,
      "loss": 0.477,
      "step": 116
    },
    {
      "epoch": 3.25,
      "learning_rate": 1.777145961456971e-05,
      "loss": 0.4744,
      "step": 117
    },
    {
      "epoch": 3.27,
      "learning_rate": 1.7734715927339642e-05,
      "loss": 0.4753,
      "step": 118
    },
    {
      "epoch": 3.29,
      "learning_rate": 1.769771044871368e-05,
      "loss": 0.4754,
      "step": 119
    },
    {
      "epoch": 3.3,
      "learning_rate": 1.766044443118978e-05,
      "loss": 0.4773,
      "step": 120
    },
    {
      "epoch": 3.32,
      "learning_rate": 1.7622919136084183e-05,
      "loss": 0.4743,
      "step": 121
    },
    {
      "epoch": 3.34,
      "learning_rate": 1.7585135833488692e-05,
      "loss": 0.479,
      "step": 122
    },
    {
      "epoch": 3.36,
      "learning_rate": 1.7547095802227723e-05,
      "loss": 0.4747,
      "step": 123
    },
    {
      "epoch": 3.38,
      "learning_rate": 1.7508800329814993e-05,
      "loss": 0.4763,
      "step": 124
    },
    {
      "epoch": 3.4,
      "learning_rate": 1.7470250712409963e-05,
      "loss": 0.47,
      "step": 125
    },
    {
      "epoch": 3.42,
      "learning_rate": 1.7431448254773943e-05,
      "loss": 0.4741,
      "step": 126
    },
    {
      "epoch": 3.43,
      "learning_rate": 1.739239427022596e-05,
      "loss": 0.4753,
      "step": 127
    },
    {
      "epoch": 3.45,
      "learning_rate": 1.735309008059829e-05,
      "loss": 0.4738,
      "step": 128
    },
    {
      "epoch": 3.47,
      "learning_rate": 1.7313537016191706e-05,
      "loss": 0.4769,
      "step": 129
    },
    {
      "epoch": 3.49,
      "learning_rate": 1.7273736415730488e-05,
      "loss": 0.4749,
      "step": 130
    },
    {
      "epoch": 3.51,
      "learning_rate": 1.723368962631708e-05,
      "loss": 0.4703,
      "step": 131
    },
    {
      "epoch": 3.53,
      "learning_rate": 1.7193398003386514e-05,
      "loss": 0.4722,
      "step": 132
    },
    {
      "epoch": 3.54,
      "learning_rate": 1.7152862910660516e-05,
      "loss": 0.474,
      "step": 133
    },
    {
      "epoch": 3.56,
      "learning_rate": 1.711208572010137e-05,
      "loss": 0.4721,
      "step": 134
    },
    {
      "epoch": 3.58,
      "learning_rate": 1.7071067811865477e-05,
      "loss": 0.4755,
      "step": 135
    },
    {
      "epoch": 3.6,
      "learning_rate": 1.702981057425662e-05,
      "loss": 0.4717,
      "step": 136
    },
    {
      "epoch": 3.62,
      "learning_rate": 1.6988315403679e-05,
      "loss": 0.4712,
      "step": 137
    },
    {
      "epoch": 3.62,
      "eval_loss": 0.470627099275589,
      "eval_runtime": 23.8372,
      "eval_samples_per_second": 8.39,
      "eval_steps_per_second": 0.545,
      "step": 137
    },
    {
      "epoch": 4.0,
      "learning_rate": 1.6946583704589973e-05,
      "loss": 0.4679,
      "step": 138
    },
    {
      "epoch": 4.02,
      "learning_rate": 1.6904616889452497e-05,
      "loss": 0.4707,
      "step": 139
    },
    {
      "epoch": 4.04,
      "learning_rate": 1.686241637868734e-05,
      "loss": 0.4683,
      "step": 140
    },
    {
      "epoch": 4.06,
      "learning_rate": 1.6819983600624986e-05,
      "loss": 0.4715,
      "step": 141
    },
    {
      "epoch": 4.08,
      "learning_rate": 1.6777319991457325e-05,
      "loss": 0.4721,
      "step": 142
    },
    {
      "epoch": 4.09,
      "learning_rate": 1.6734426995189003e-05,
      "loss": 0.4719,
      "step": 143
    },
    {
      "epoch": 4.11,
      "learning_rate": 1.6691306063588583e-05,
      "loss": 0.4662,
      "step": 144
    },
    {
      "epoch": 4.13,
      "learning_rate": 1.6647958656139377e-05,
      "loss": 0.4684,
      "step": 145
    },
    {
      "epoch": 4.15,
      "learning_rate": 1.6604386239990077e-05,
      "loss": 0.4671,
      "step": 146
    },
    {
      "epoch": 4.17,
      "learning_rate": 1.6560590289905074e-05,
      "loss": 0.4671,
      "step": 147
    },
    {
      "epoch": 4.19,
      "learning_rate": 1.6516572288214555e-05,
      "loss": 0.4642,
      "step": 148
    },
    {
      "epoch": 4.2,
      "learning_rate": 1.6472333724764326e-05,
      "loss": 0.468,
      "step": 149
    },
    {
      "epoch": 4.22,
      "learning_rate": 1.6427876096865394e-05,
      "loss": 0.4699,
      "step": 150
    },
    {
      "epoch": 4.24,
      "learning_rate": 1.6383200909243285e-05,
      "loss": 0.4646,
      "step": 151
    },
    {
      "epoch": 4.26,
      "learning_rate": 1.63383096739871e-05,
      "loss": 0.4626,
      "step": 152
    },
    {
      "epoch": 4.28,
      "learning_rate": 1.6293203910498375e-05,
      "loss": 0.4665,
      "step": 153
    },
    {
      "epoch": 4.3,
      "learning_rate": 1.6247885145439602e-05,
      "loss": 0.4654,
      "step": 154
    },
    {
      "epoch": 4.31,
      "learning_rate": 1.6202354912682602e-05,
      "loss": 0.4695,
      "step": 155
    },
    {
      "epoch": 4.33,
      "learning_rate": 1.6156614753256583e-05,
      "loss": 0.4646,
      "step": 156
    },
    {
      "epoch": 4.35,
      "learning_rate": 1.6110666215296e-05,
      "loss": 0.4686,
      "step": 157
    },
    {
      "epoch": 4.37,
      "learning_rate": 1.6064510853988137e-05,
      "loss": 0.4635,
      "step": 158
    },
    {
      "epoch": 4.39,
      "learning_rate": 1.6018150231520486e-05,
      "loss": 0.4638,
      "step": 159
    },
    {
      "epoch": 4.41,
      "learning_rate": 1.5971585917027864e-05,
      "loss": 0.4666,
      "step": 160
    },
    {
      "epoch": 4.42,
      "learning_rate": 1.592481948653931e-05,
      "loss": 0.4668,
      "step": 161
    },
    {
      "epoch": 4.44,
      "learning_rate": 1.5877852522924733e-05,
      "loss": 0.4656,
      "step": 162
    },
    {
      "epoch": 4.46,
      "learning_rate": 1.5830686615841348e-05,
      "loss": 0.4652,
      "step": 163
    },
    {
      "epoch": 4.48,
      "learning_rate": 1.5783323361679865e-05,
      "loss": 0.4651,
      "step": 164
    },
    {
      "epoch": 4.5,
      "learning_rate": 1.573576436351046e-05,
      "loss": 0.4657,
      "step": 165
    },
    {
      "epoch": 4.52,
      "learning_rate": 1.568801123102852e-05,
      "loss": 0.4654,
      "step": 166
    },
    {
      "epoch": 4.54,
      "learning_rate": 1.5640065580500146e-05,
      "loss": 0.4633,
      "step": 167
    },
    {
      "epoch": 4.55,
      "learning_rate": 1.5591929034707468e-05,
      "loss": 0.4628,
      "step": 168
    },
    {
      "epoch": 4.57,
      "learning_rate": 1.5543603222893718e-05,
      "loss": 0.4691,
      "step": 169
    },
    {
      "epoch": 4.59,
      "learning_rate": 1.5495089780708062e-05,
      "loss": 0.4644,
      "step": 170
    },
    {
      "epoch": 4.61,
      "learning_rate": 1.5446390350150272e-05,
      "loss": 0.4633,
      "step": 171
    },
    {
      "epoch": 4.63,
      "learning_rate": 1.539750657951513e-05,
      "loss": 0.4615,
      "step": 172
    },
    {
      "epoch": 4.63,
      "eval_loss": 0.4630681276321411,
      "eval_runtime": 23.8164,
      "eval_samples_per_second": 8.398,
      "eval_steps_per_second": 0.546,
      "step": 172
    },
    {
      "epoch": 5.01,
      "learning_rate": 1.5348440123336647e-05,
      "loss": 0.4604,
      "step": 173
    },
    {
      "epoch": 5.03,
      "learning_rate": 1.529919264233205e-05,
      "loss": 0.4624,
      "step": 174
    },
    {
      "epoch": 5.05,
      "learning_rate": 1.5249765803345602e-05,
      "loss": 0.4629,
      "step": 175
    },
    {
      "epoch": 5.07,
      "learning_rate": 1.5200161279292154e-05,
      "loss": 0.4616,
      "step": 176
    },
    {
      "epoch": 5.09,
      "learning_rate": 1.5150380749100545e-05,
      "loss": 0.4681,
      "step": 177
    },
    {
      "epoch": 5.1,
      "learning_rate": 1.5100425897656754e-05,
      "loss": 0.4653,
      "step": 178
    },
    {
      "epoch": 5.12,
      "learning_rate": 1.5050298415746903e-05,
      "loss": 0.459,
      "step": 179
    },
    {
      "epoch": 5.14,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 0.4589,
      "step": 180
    },
    {
      "epoch": 5.16,
      "learning_rate": 1.4949532352830543e-05,
      "loss": 0.4627,
      "step": 181
    },
    {
      "epoch": 5.18,
      "learning_rate": 1.4898897182380872e-05,
      "loss": 0.4579,
      "step": 182
    },
    {
      "epoch": 5.2,
      "learning_rate": 1.4848096202463373e-05,
      "loss": 0.4612,
      "step": 183
    },
    {
      "epoch": 5.21,
      "learning_rate": 1.4797131132502464e-05,
      "loss": 0.4598,
      "step": 184
    },
    {
      "epoch": 5.23,
      "learning_rate": 1.4746003697476406e-05,
      "loss": 0.4615,
      "step": 185
    },
    {
      "epoch": 5.25,
      "learning_rate": 1.469471562785891e-05,
      "loss": 0.4571,
      "step": 186
    },
    {
      "epoch": 5.27,
      "learning_rate": 1.4643268659560571e-05,
      "loss": 0.4589,
      "step": 187
    },
    {
      "epoch": 5.29,
      "learning_rate": 1.4591664533870118e-05,
      "loss": 0.4608,
      "step": 188
    },
    {
      "epoch": 5.31,
      "learning_rate": 1.4539904997395468e-05,
      "loss": 0.4606,
      "step": 189
    },
    {
      "epoch": 5.32,
      "learning_rate": 1.4487991802004625e-05,
      "loss": 0.4607,
      "step": 190
    },
    {
      "epoch": 5.34,
      "learning_rate": 1.4435926704766364e-05,
      "loss": 0.4605,
      "step": 191
    },
    {
      "epoch": 5.36,
      "learning_rate": 1.4383711467890776e-05,
      "loss": 0.4598,
      "step": 192
    },
    {
      "epoch": 5.38,
      "learning_rate": 1.4331347858669631e-05,
      "loss": 0.4588,
      "step": 193
    },
    {
      "epoch": 5.4,
      "learning_rate": 1.4278837649416543e-05,
      "loss": 0.4575,
      "step": 194
    },
    {
      "epoch": 5.42,
      "learning_rate": 1.4226182617406996e-05,
      "loss": 0.46,
      "step": 195
    },
    {
      "epoch": 5.43,
      "learning_rate": 1.417338454481818e-05,
      "loss": 0.4605,
      "step": 196
    },
    {
      "epoch": 5.45,
      "learning_rate": 1.4120445218668687e-05,
      "loss": 0.4606,
      "step": 197
    },
    {
      "epoch": 5.47,
      "learning_rate": 1.4067366430758004e-05,
      "loss": 0.4584,
      "step": 198
    },
    {
      "epoch": 5.49,
      "learning_rate": 1.4014149977605893e-05,
      "loss": 0.4623,
      "step": 199
    },
    {
      "epoch": 5.51,
      "learning_rate": 1.396079766039157e-05,
      "loss": 0.4586,
      "step": 200
    },
    {
      "epoch": 5.53,
      "learning_rate": 1.3907311284892737e-05,
      "loss": 0.457,
      "step": 201
    },
    {
      "epoch": 5.54,
      "learning_rate": 1.3853692661424485e-05,
      "loss": 0.4577,
      "step": 202
    },
    {
      "epoch": 5.56,
      "learning_rate": 1.3799943604777993e-05,
      "loss": 0.4604,
      "step": 203
    },
    {
      "epoch": 5.58,
      "learning_rate": 1.3746065934159123e-05,
      "loss": 0.4603,
      "step": 204
    },
    {
      "epoch": 5.6,
      "learning_rate": 1.3692061473126845e-05,
      "loss": 0.46,
      "step": 205
    },
    {
      "epoch": 5.62,
      "learning_rate": 1.3637932049531517e-05,
      "loss": 0.4584,
      "step": 206
    },
    {
      "epoch": 5.62,
      "eval_loss": 0.4574292004108429,
      "eval_runtime": 23.8087,
      "eval_samples_per_second": 8.4,
      "eval_steps_per_second": 0.546,
      "step": 206
    },
    {
      "epoch": 6.0,
      "learning_rate": 1.3583679495453e-05,
      "loss": 0.4533,
      "step": 207
    },
    {
      "epoch": 6.02,
      "learning_rate": 1.3529305647138689e-05,
      "loss": 0.456,
      "step": 208
    },
    {
      "epoch": 6.04,
      "learning_rate": 1.3474812344941315e-05,
      "loss": 0.4592,
      "step": 209
    },
    {
      "epoch": 6.06,
      "learning_rate": 1.342020143325669e-05,
      "loss": 0.4559,
      "step": 210
    },
    {
      "epoch": 6.08,
      "learning_rate": 1.3365474760461265e-05,
      "loss": 0.4606,
      "step": 211
    },
    {
      "epoch": 6.1,
      "learning_rate": 1.3310634178849583e-05,
      "loss": 0.4607,
      "step": 212
    },
    {
      "epoch": 6.11,
      "learning_rate": 1.3255681544571568e-05,
      "loss": 0.4557,
      "step": 213
    },
    {
      "epoch": 6.13,
      "learning_rate": 1.3200618717569716e-05,
      "loss": 0.4549,
      "step": 214
    },
    {
      "epoch": 6.15,
      "learning_rate": 1.3145447561516138e-05,
      "loss": 0.4555,
      "step": 215
    },
    {
      "epoch": 6.17,
      "learning_rate": 1.3090169943749475e-05,
      "loss": 0.4557,
      "step": 216
    },
    {
      "epoch": 6.19,
      "learning_rate": 1.3034787735211708e-05,
      "loss": 0.4536,
      "step": 217
    },
    {
      "epoch": 6.21,
      "learning_rate": 1.297930281038482e-05,
      "loss": 0.459,
      "step": 218
    },
    {
      "epoch": 6.22,
      "learning_rate": 1.2923717047227368e-05,
      "loss": 0.4543,
      "step": 219
    },
    {
      "epoch": 6.24,
      "learning_rate": 1.2868032327110904e-05,
      "loss": 0.4528,
      "step": 220
    },
    {
      "epoch": 6.26,
      "learning_rate": 1.2812250534756307e-05,
      "loss": 0.4574,
      "step": 221
    },
    {
      "epoch": 6.28,
      "learning_rate": 1.2756373558169992e-05,
      "loss": 0.4533,
      "step": 222
    },
    {
      "epoch": 6.3,
      "learning_rate": 1.270040328858001e-05,
      "loss": 0.4558,
      "step": 223
    },
    {
      "epoch": 6.32,
      "learning_rate": 1.2644341620372025e-05,
      "loss": 0.4584,
      "step": 224
    },
    {
      "epoch": 6.33,
      "learning_rate": 1.2588190451025209e-05,
      "loss": 0.4547,
      "step": 225
    },
    {
      "epoch": 6.35,
      "learning_rate": 1.253195168104802e-05,
      "loss": 0.4568,
      "step": 226
    },
    {
      "epoch": 6.37,
      "learning_rate": 1.2475627213913861e-05,
      "loss": 0.4547,
      "step": 227
    },
    {
      "epoch": 6.39,
      "learning_rate": 1.2419218955996677e-05,
      "loss": 0.4523,
      "step": 228
    },
    {
      "epoch": 6.41,
      "learning_rate": 1.2362728816506418e-05,
      "loss": 0.4556,
      "step": 229
    },
    {
      "epoch": 6.43,
      "learning_rate": 1.2306158707424402e-05,
      "loss": 0.4561,
      "step": 230
    },
    {
      "epoch": 6.44,
      "learning_rate": 1.2249510543438652e-05,
      "loss": 0.4548,
      "step": 231
    },
    {
      "epoch": 6.46,
      "learning_rate": 1.2192786241879033e-05,
      "loss": 0.4569,
      "step": 232
    },
    {
      "epoch": 6.48,
      "learning_rate": 1.2135987722652403e-05,
      "loss": 0.4561,
      "step": 233
    },
    {
      "epoch": 6.5,
      "learning_rate": 1.2079116908177592e-05,
      "loss": 0.4535,
      "step": 234
    },
    {
      "epoch": 6.52,
      "learning_rate": 1.2022175723320382e-05,
      "loss": 0.4538,
      "step": 235
    },
    {
      "epoch": 6.54,
      "learning_rate": 1.1965166095328302e-05,
      "loss": 0.454,
      "step": 236
    },
    {
      "epoch": 6.55,
      "learning_rate": 1.190808995376545e-05,
      "loss": 0.4555,
      "step": 237
    },
    {
      "epoch": 6.57,
      "learning_rate": 1.1850949230447146e-05,
      "loss": 0.4567,
      "step": 238
    },
    {
      "epoch": 6.59,
      "learning_rate": 1.1793745859374575e-05,
      "loss": 0.4571,
      "step": 239
    },
    {
      "epoch": 6.61,
      "learning_rate": 1.1736481776669307e-05,
      "loss": 0.4535,
      "step": 240
    },
    {
      "epoch": 6.63,
      "learning_rate": 1.1679158920507773e-05,
      "loss": 0.4524,
      "step": 241
    },
    {
      "epoch": 6.63,
      "eval_loss": 0.45291438698768616,
      "eval_runtime": 23.8049,
      "eval_samples_per_second": 8.402,
      "eval_steps_per_second": 0.546,
      "step": 241
    },
    {
      "epoch": 7.01,
      "learning_rate": 1.1621779231055677e-05,
      "loss": 0.4501,
      "step": 242
    },
    {
      "epoch": 7.03,
      "learning_rate": 1.156434465040231e-05,
      "loss": 0.4529,
      "step": 243
    },
    {
      "epoch": 7.05,
      "learning_rate": 1.1506857122494832e-05,
      "loss": 0.4559,
      "step": 244
    },
    {
      "epoch": 7.07,
      "learning_rate": 1.1449318593072468e-05,
      "loss": 0.4523,
      "step": 245
    },
    {
      "epoch": 7.09,
      "learning_rate": 1.1391731009600655e-05,
      "loss": 0.4572,
      "step": 246
    },
    {
      "epoch": 7.11,
      "learning_rate": 1.1334096321205129e-05,
      "loss": 0.4543,
      "step": 247
    },
    {
      "epoch": 7.12,
      "learning_rate": 1.127641647860595e-05,
      "loss": 0.452,
      "step": 248
    },
    {
      "epoch": 7.14,
      "learning_rate": 1.1218693434051475e-05,
      "loss": 0.4529,
      "step": 249
    },
    {
      "epoch": 7.16,
      "learning_rate": 1.1160929141252303e-05,
      "loss": 0.4528,
      "step": 250
    },
    {
      "epoch": 7.18,
      "learning_rate": 1.110312555531512e-05,
      "loss": 0.4505,
      "step": 251
    },
    {
      "epoch": 7.2,
      "learning_rate": 1.1045284632676535e-05,
      "loss": 0.4491,
      "step": 252
    },
    {
      "epoch": 7.22,
      "learning_rate": 1.0987408331036879e-05,
      "loss": 0.4544,
      "step": 253
    },
    {
      "epoch": 7.23,
      "learning_rate": 1.0929498609293925e-05,
      "loss": 0.4509,
      "step": 254
    },
    {
      "epoch": 7.25,
      "learning_rate": 1.0871557427476585e-05,
      "loss": 0.4517,
      "step": 255
    },
    {
      "epoch": 7.27,
      "learning_rate": 1.0813586746678584e-05,
      "loss": 0.4505,
      "step": 256
    },
    {
      "epoch": 7.29,
      "learning_rate": 1.0755588528992082e-05,
      "loss": 0.4508,
      "step": 257
    },
    {
      "epoch": 7.31,
      "learning_rate": 1.0697564737441254e-05,
      "loss": 0.4537,
      "step": 258
    },
    {
      "epoch": 7.33,
      "learning_rate": 1.0639517335915857e-05,
      "loss": 0.4543,
      "step": 259
    },
    {
      "epoch": 7.34,
      "learning_rate": 1.0581448289104759e-05,
      "loss": 0.4532,
      "step": 260
    },
    {
      "epoch": 7.36,
      "learning_rate": 1.0523359562429441e-05,
      "loss": 0.4532,
      "step": 261
    },
    {
      "epoch": 7.38,
      "learning_rate": 1.046525312197747e-05,
      "loss": 0.4507,
      "step": 262
    },
    {
      "epoch": 7.4,
      "learning_rate": 1.040713093443596e-05,
      "loss": 0.4494,
      "step": 263
    },
    {
      "epoch": 7.42,
      "learning_rate": 1.0348994967025012e-05,
      "loss": 0.4531,
      "step": 264
    },
    {
      "epoch": 7.44,
      "learning_rate": 1.0290847187431115e-05,
      "loss": 0.4525,
      "step": 265
    },
    {
      "epoch": 7.45,
      "learning_rate": 1.0232689563740563e-05,
      "loss": 0.452,
      "step": 266
    },
    {
      "epoch": 7.47,
      "learning_rate": 1.0174524064372837e-05,
      "loss": 0.4527,
      "step": 267
    },
    {
      "epoch": 7.49,
      "learning_rate": 1.0116352658013973e-05,
      "loss": 0.4532,
      "step": 268
    },
    {
      "epoch": 7.51,
      "learning_rate": 1.005817731354994e-05,
      "loss": 0.4507,
      "step": 269
    },
    {
      "epoch": 7.53,
      "learning_rate": 1e-05,
      "loss": 0.4504,
      "step": 270
    },
    {
      "epoch": 7.55,
      "learning_rate": 9.941822686450061e-06,
      "loss": 0.4536,
      "step": 271
    },
    {
      "epoch": 7.56,
      "learning_rate": 9.883647341986032e-06,
      "loss": 0.4515,
      "step": 272
    },
    {
      "epoch": 7.58,
      "learning_rate": 9.825475935627165e-06,
      "loss": 0.4537,
      "step": 273
    },
    {
      "epoch": 7.6,
      "learning_rate": 9.767310436259438e-06,
      "loss": 0.4527,
      "step": 274
    },
    {
      "epoch": 7.62,
      "learning_rate": 9.709152812568886e-06,
      "loss": 0.4507,
      "step": 275
    },
    {
      "epoch": 7.62,
      "eval_loss": 0.4502379596233368,
      "eval_runtime": 23.8128,
      "eval_samples_per_second": 8.399,
      "eval_steps_per_second": 0.546,
      "step": 275
    },
    {
      "epoch": 8.0,
      "learning_rate": 9.651005032974994e-06,
      "loss": 0.4474,
      "step": 276
    },
    {
      "epoch": 8.02,
      "learning_rate": 9.592869065564043e-06,
      "loss": 0.4489,
      "step": 277
    },
    {
      "epoch": 8.04,
      "learning_rate": 9.534746878022533e-06,
      "loss": 0.4527,
      "step": 278
    },
    {
      "epoch": 8.06,
      "learning_rate": 9.476640437570562e-06,
      "loss": 0.4495,
      "step": 279
    },
    {
      "epoch": 8.08,
      "learning_rate": 9.418551710895243e-06,
      "loss": 0.4567,
      "step": 280
    },
    {
      "epoch": 8.1,
      "learning_rate": 9.360482664084144e-06,
      "loss": 0.4523,
      "step": 281
    },
    {
      "epoch": 8.11,
      "learning_rate": 9.302435262558748e-06,
      "loss": 0.4485,
      "step": 282
    },
    {
      "epoch": 8.13,
      "learning_rate": 9.244411471007923e-06,
      "loss": 0.4492,
      "step": 283
    },
    {
      "epoch": 8.15,
      "learning_rate": 9.18641325332142e-06,
      "loss": 0.4508,
      "step": 284
    },
    {
      "epoch": 8.17,
      "learning_rate": 9.128442572523418e-06,
      "loss": 0.4486,
      "step": 285
    },
    {
      "epoch": 8.19,
      "learning_rate": 9.07050139070608e-06,
      "loss": 0.4495,
      "step": 286
    },
    {
      "epoch": 8.21,
      "learning_rate": 9.012591668963123e-06,
      "loss": 0.4521,
      "step": 287
    },
    {
      "epoch": 8.23,
      "learning_rate": 8.954715367323468e-06,
      "loss": 0.4472,
      "step": 288
    },
    {
      "epoch": 8.24,
      "learning_rate": 8.896874444684882e-06,
      "loss": 0.4494,
      "step": 289
    },
    {
      "epoch": 8.26,
      "learning_rate": 8.839070858747697e-06,
      "loss": 0.4482,
      "step": 290
    },
    {
      "epoch": 8.28,
      "learning_rate": 8.781306565948528e-06,
      "loss": 0.4488,
      "step": 291
    },
    {
      "epoch": 8.3,
      "learning_rate": 8.723583521394054e-06,
      "loss": 0.4527,
      "step": 292
    },
    {
      "epoch": 8.32,
      "learning_rate": 8.665903678794873e-06,
      "loss": 0.4499,
      "step": 293
    },
    {
      "epoch": 8.34,
      "learning_rate": 8.60826899039935e-06,
      "loss": 0.4511,
      "step": 294
    },
    {
      "epoch": 8.35,
      "learning_rate": 8.550681406927534e-06,
      "loss": 0.4528,
      "step": 295
    },
    {
      "epoch": 8.37,
      "learning_rate": 8.49314287750517e-06,
      "loss": 0.4465,
      "step": 296
    },
    {
      "epoch": 8.39,
      "learning_rate": 8.43565534959769e-06,
      "loss": 0.4467,
      "step": 297
    },
    {
      "epoch": 8.41,
      "learning_rate": 8.378220768944328e-06,
      "loss": 0.4519,
      "step": 298
    },
    {
      "epoch": 8.43,
      "learning_rate": 8.32084107949223e-06,
      "loss": 0.4517,
      "step": 299
    },
    {
      "epoch": 8.45,
      "learning_rate": 8.263518223330698e-06,
      "loss": 0.4487,
      "step": 300
    },
    {
      "epoch": 8.46,
      "learning_rate": 8.206254140625425e-06,
      "loss": 0.4514,
      "step": 301
    },
    {
      "epoch": 8.48,
      "learning_rate": 8.149050769552856e-06,
      "loss": 0.4524,
      "step": 302
    },
    {
      "epoch": 8.5,
      "learning_rate": 8.091910046234552e-06,
      "loss": 0.4493,
      "step": 303
    },
    {
      "epoch": 8.52,
      "learning_rate": 8.034833904671698e-06,
      "loss": 0.4495,
      "step": 304
    },
    {
      "epoch": 8.54,
      "learning_rate": 7.977824276679623e-06,
      "loss": 0.4501,
      "step": 305
    },
    {
      "epoch": 8.56,
      "learning_rate": 7.92088309182241e-06,
      "loss": 0.4482,
      "step": 306
    },
    {
      "epoch": 8.57,
      "learning_rate": 7.864012277347602e-06,
      "loss": 0.4539,
      "step": 307
    },
    {
      "epoch": 8.59,
      "learning_rate": 7.807213758120965e-06,
      "loss": 0.449,
      "step": 308
    },
    {
      "epoch": 8.61,
      "learning_rate": 7.750489456561351e-06,
      "loss": 0.4486,
      "step": 309
    },
    {
      "epoch": 8.63,
      "learning_rate": 7.6938412925756e-06,
      "loss": 0.4478,
      "step": 310
    },
    {
      "epoch": 8.63,
      "eval_loss": 0.4479629397392273,
      "eval_runtime": 23.8304,
      "eval_samples_per_second": 8.393,
      "eval_steps_per_second": 0.546,
      "step": 310
    },
    {
      "epoch": 9.01,
      "learning_rate": 7.637271183493587e-06,
      "loss": 0.4482,
      "step": 311
    },
    {
      "epoch": 9.03,
      "learning_rate": 7.580781044003324e-06,
      "loss": 0.4462,
      "step": 312
    },
    {
      "epoch": 9.05,
      "learning_rate": 7.524372786086143e-06,
      "loss": 0.4488,
      "step": 313
    },
    {
      "epoch": 9.07,
      "learning_rate": 7.468048318951983e-06,
      "loss": 0.4521,
      "step": 314
    },
    {
      "epoch": 9.09,
      "learning_rate": 7.411809548974792e-06,
      "loss": 0.4528,
      "step": 315
    },
    {
      "epoch": 9.11,
      "learning_rate": 7.355658379627981e-06,
      "loss": 0.4497,
      "step": 316
    },
    {
      "epoch": 9.12,
      "learning_rate": 7.299596711419994e-06,
      "loss": 0.447,
      "step": 317
    },
    {
      "epoch": 9.14,
      "learning_rate": 7.243626441830009e-06,
      "loss": 0.4488,
      "step": 318
    },
    {
      "epoch": 9.16,
      "learning_rate": 7.187749465243694e-06,
      "loss": 0.4473,
      "step": 319
    },
    {
      "epoch": 9.18,
      "learning_rate": 7.131967672889101e-06,
      "loss": 0.4494,
      "step": 320
    },
    {
      "epoch": 9.2,
      "learning_rate": 7.076282952772634e-06,
      "loss": 0.4461,
      "step": 321
    },
    {
      "epoch": 9.22,
      "learning_rate": 7.02069718961518e-06,
      "loss": 0.4505,
      "step": 322
    },
    {
      "epoch": 9.23,
      "learning_rate": 6.9652122647882966e-06,
      "loss": 0.4476,
      "step": 323
    },
    {
      "epoch": 9.25,
      "learning_rate": 6.909830056250527e-06,
      "loss": 0.445,
      "step": 324
    },
    {
      "epoch": 9.27,
      "learning_rate": 6.854552438483866e-06,
      "loss": 0.4494,
      "step": 325
    },
    {
      "epoch": 9.29,
      "learning_rate": 6.799381282430284e-06,
      "loss": 0.4464,
      "step": 326
    },
    {
      "epoch": 9.31,
      "learning_rate": 6.744318455428436e-06,
      "loss": 0.4503,
      "step": 327
    },
    {
      "epoch": 9.33,
      "learning_rate": 6.689365821150421e-06,
      "loss": 0.4503,
      "step": 328
    },
    {
      "epoch": 9.35,
      "learning_rate": 6.634525239538736e-06,
      "loss": 0.4485,
      "step": 329
    },
    {
      "epoch": 9.36,
      "learning_rate": 6.579798566743314e-06,
      "loss": 0.4492,
      "step": 330
    },
    {
      "epoch": 9.38,
      "learning_rate": 6.525187655058687e-06,
      "loss": 0.4462,
      "step": 331
    },
    {
      "epoch": 9.4,
      "learning_rate": 6.4706943528613135e-06,
      "loss": 0.4499,
      "step": 332
    },
    {
      "epoch": 9.42,
      "learning_rate": 6.4163205045469975e-06,
      "loss": 0.4478,
      "step": 333
    },
    {
      "epoch": 9.44,
      "learning_rate": 6.362067950468489e-06,
      "loss": 0.4491,
      "step": 334
    },
    {
      "epoch": 9.46,
      "learning_rate": 6.3079385268731575e-06,
      "loss": 0.4481,
      "step": 335
    },
    {
      "epoch": 9.47,
      "learning_rate": 6.25393406584088e-06,
      "loss": 0.4509,
      "step": 336
    },
    {
      "epoch": 9.49,
      "learning_rate": 6.200056395222012e-06,
      "loss": 0.4489,
      "step": 337
    },
    {
      "epoch": 9.51,
      "learning_rate": 6.146307338575519e-06,
      "loss": 0.4469,
      "step": 338
    },
    {
      "epoch": 9.53,
      "learning_rate": 6.092688715107265e-06,
      "loss": 0.4472,
      "step": 339
    },
    {
      "epoch": 9.55,
      "learning_rate": 6.039202339608432e-06,
      "loss": 0.4499,
      "step": 340
    },
    {
      "epoch": 9.57,
      "learning_rate": 5.9858500223941066e-06,
      "loss": 0.4483,
      "step": 341
    },
    {
      "epoch": 9.58,
      "learning_rate": 5.932633569242e-06,
      "loss": 0.4494,
      "step": 342
    },
    {
      "epoch": 9.6,
      "learning_rate": 5.879554781331317e-06,
      "loss": 0.4528,
      "step": 343
    },
    {
      "epoch": 9.62,
      "learning_rate": 5.8266154551818225e-06,
      "loss": 0.4467,
      "step": 344
    },
    {
      "epoch": 9.62,
      "eval_loss": 0.4474964439868927,
      "eval_runtime": 23.8091,
      "eval_samples_per_second": 8.4,
      "eval_steps_per_second": 0.546,
      "step": 344
    },
    {
      "epoch": 9.62,
      "step": 344,
      "total_flos": 6.414434999835034e+16,
      "train_loss": 0.4900048969443454,
      "train_runtime": 108147.3514,
      "train_samples_per_second": 2.577,
      "train_steps_per_second": 0.005
    }
  ],
  "logging_steps": 1,
  "max_steps": 540,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 6.414434999835034e+16,
  "trial_name": null,
  "trial_params": null
}