{ |
|
"best_metric": 1.8478573560714722, |
|
"best_model_checkpoint": "./llmTechChat-lora/checkpoint-320", |
|
"epoch": 2.964925954793453, |
|
"eval_steps": 40, |
|
"global_step": 480, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 2.9999999999999997e-05, |
|
"loss": 4.3577, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"eval_loss": 4.326064109802246, |
|
"eval_runtime": 288.7431, |
|
"eval_samples_per_second": 206.796, |
|
"eval_steps_per_second": 206.796, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 5.9999999999999995e-05, |
|
"loss": 4.2951, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 8.999999999999999e-05, |
|
"loss": 3.9156, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00011999999999999999, |
|
"loss": 3.4836, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00015, |
|
"loss": 3.1743, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00017999999999999998, |
|
"loss": 2.8242, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00020999999999999998, |
|
"loss": 2.7478, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00023999999999999998, |
|
"loss": 2.7198, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00027, |
|
"loss": 2.6025, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0003, |
|
"loss": 2.5337, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00029999813499925374, |
|
"loss": 2.5019, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0002999925400433914, |
|
"loss": 2.3393, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00029998321527154097, |
|
"loss": 2.3796, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0002999701609155785, |
|
"loss": 2.3054, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0002999533773001224, |
|
"loss": 2.2898, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00029993286484252544, |
|
"loss": 2.2254, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00029990862405286433, |
|
"loss": 2.2197, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0002998806555339269, |
|
"loss": 2.1797, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0002998489599811972, |
|
"loss": 2.1854, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0002998135381828383, |
|
"loss": 2.2105, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00029977439101967274, |
|
"loss": 2.1866, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00029973151946516025, |
|
"loss": 2.1718, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.0002996849245853739, |
|
"loss": 2.1158, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.0002996346075389736, |
|
"loss": 2.1495, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00029958056957717696, |
|
"loss": 2.1326, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00029952281204372863, |
|
"loss": 2.1391, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0002994613363748664, |
|
"loss": 2.1039, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00029939614409928584, |
|
"loss": 2.132, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00029932723683810225, |
|
"loss": 2.1278, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.0002992546163048102, |
|
"loss": 2.0698, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00029917828430524096, |
|
"loss": 2.0757, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.0002990982427375177, |
|
"loss": 2.0689, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0002990144935920083, |
|
"loss": 2.0986, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0002989270389512756, |
|
"loss": 2.058, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0002988358809900258, |
|
"loss": 2.0451, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.00029874102197505447, |
|
"loss": 2.0613, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0002986424642651902, |
|
"loss": 2.0796, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0002985402103112355, |
|
"loss": 2.086, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.00029843426265590656, |
|
"loss": 2.0275, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0002983246239337692, |
|
"loss": 2.0615, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"eval_loss": 2.0476396083831787, |
|
"eval_runtime": 289.9408, |
|
"eval_samples_per_second": 205.942, |
|
"eval_steps_per_second": 205.942, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0002982112968711744, |
|
"loss": 2.1012, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0002980942842861893, |
|
"loss": 2.0537, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00029797358908852816, |
|
"loss": 2.0595, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00029784921427947946, |
|
"loss": 2.0409, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0002977211629518312, |
|
"loss": 2.0045, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.00029758943828979444, |
|
"loss": 2.0176, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0002974540435689237, |
|
"loss": 2.0189, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0002973149821560358, |
|
"loss": 2.0169, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00029717225750912585, |
|
"loss": 2.0553, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00029702587317728153, |
|
"loss": 2.0569, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0002968758328005947, |
|
"loss": 2.0522, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0002967221401100708, |
|
"loss": 2.0285, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.00029656479892753635, |
|
"loss": 2.0266, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0002964038131655436, |
|
"loss": 2.0161, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0002962391868272735, |
|
"loss": 2.0122, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00029607092400643593, |
|
"loss": 1.9926, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.000295899028887168, |
|
"loss": 2.0123, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0002957235057439301, |
|
"loss": 2.0121, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0002955443589413994, |
|
"loss": 2.0245, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.00029536159293436166, |
|
"loss": 2.0127, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0002951752122676, |
|
"loss": 2.0057, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.000294985221575782, |
|
"loss": 2.0226, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0002947916255833451, |
|
"loss": 2.0032, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00029459442910437797, |
|
"loss": 2.045, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.00029439363704250176, |
|
"loss": 1.9794, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0002941892543907478, |
|
"loss": 2.0009, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0002939812862314333, |
|
"loss": 1.9508, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.00029376973773603533, |
|
"loss": 1.9913, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0002935546141650618, |
|
"loss": 1.9762, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.00029333592086792107, |
|
"loss": 2.0, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0002931136632827886, |
|
"loss": 1.9629, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0002928878469364719, |
|
"loss": 2.0009, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.00029265847744427303, |
|
"loss": 1.9714, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0002924255605098489, |
|
"loss": 1.9474, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0002921891019250697, |
|
"loss": 1.9959, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0002919491075698746, |
|
"loss": 1.9846, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.00029170558341212554, |
|
"loss": 1.9978, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00029145853550745904, |
|
"loss": 1.9527, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00029120796999913546, |
|
"loss": 1.9585, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0002909538931178862, |
|
"loss": 1.9905, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"eval_loss": 1.96906578540802, |
|
"eval_runtime": 291.8459, |
|
"eval_samples_per_second": 204.598, |
|
"eval_steps_per_second": 204.598, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.00029069631118175903, |
|
"loss": 1.9926, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.00029043523059596053, |
|
"loss": 1.9916, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0002901706578526973, |
|
"loss": 1.9545, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0002899025995310141, |
|
"loss": 1.9399, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.00028963106229663063, |
|
"loss": 1.9515, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.00028935605290177535, |
|
"loss": 1.9855, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0002890775781850181, |
|
"loss": 2.0159, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.00028879564507109946, |
|
"loss": 1.9885, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.00028851026057075916, |
|
"loss": 1.9625, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.00028822143178056114, |
|
"loss": 1.9161, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0002879291658827176, |
|
"loss": 1.9141, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.00028763347014491, |
|
"loss": 1.9867, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.00028733435192010887, |
|
"loss": 1.9325, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0002870318186463901, |
|
"loss": 1.9517, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.00028672587784675096, |
|
"loss": 1.9435, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0002864165371289223, |
|
"loss": 1.9428, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0002861038041851797, |
|
"loss": 1.9182, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0002857876867921522, |
|
"loss": 1.9344, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0002854681928106287, |
|
"loss": 1.9652, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0002851453301853628, |
|
"loss": 1.9332, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.000284819106944875, |
|
"loss": 1.9042, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0002844895312012531, |
|
"loss": 1.9571, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0002841566111499505, |
|
"loss": 1.9129, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0002838203550695825, |
|
"loss": 1.9347, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.00028348077132172027, |
|
"loss": 1.9461, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0002831378683506831, |
|
"loss": 1.9188, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.00028279165468332823, |
|
"loss": 1.9491, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.000282442138928839, |
|
"loss": 1.961, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.00028208932977851067, |
|
"loss": 1.9048, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0002817332360055343, |
|
"loss": 1.9493, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0002813738664647784, |
|
"loss": 1.9685, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.00028101123009256947, |
|
"loss": 1.9054, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0002806453359064686, |
|
"loss": 1.9317, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.00028027619300504834, |
|
"loss": 1.9701, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0002799038105676658, |
|
"loss": 1.9426, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0002795281978542346, |
|
"loss": 1.957, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0002791493642049947, |
|
"loss": 1.9535, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0002787673190402799, |
|
"loss": 1.9045, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.00027838207186028376, |
|
"loss": 1.9575, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0002779936322448233, |
|
"loss": 1.8699, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"eval_loss": 1.9343771934509277, |
|
"eval_runtime": 292.8194, |
|
"eval_samples_per_second": 203.917, |
|
"eval_steps_per_second": 203.917, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0002776020098531009, |
|
"loss": 1.956, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.00027720721442346387, |
|
"loss": 1.8958, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0002768092557731625, |
|
"loss": 1.9157, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.00027640814379810587, |
|
"loss": 1.9118, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0002760038884726157, |
|
"loss": 1.9707, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0002755964998491785, |
|
"loss": 1.9563, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0002751859880581954, |
|
"loss": 1.9825, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0002747723633077303, |
|
"loss": 1.9687, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0002743556358832562, |
|
"loss": 1.9378, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.00027393581614739923, |
|
"loss": 1.9307, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00027351291453968086, |
|
"loss": 1.9333, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.0002730869415762587, |
|
"loss": 1.9229, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.0002726579078496647, |
|
"loss": 1.911, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.00027222582402854176, |
|
"loss": 1.9556, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.0002717907008573785, |
|
"loss": 1.9008, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.0002713525491562421, |
|
"loss": 1.9651, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.0002709113798205093, |
|
"loss": 1.9337, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.00027046720382059526, |
|
"loss": 1.9485, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.00027002003220168093, |
|
"loss": 1.8647, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.0002695698760834384, |
|
"loss": 1.9288, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00026911674665975417, |
|
"loss": 1.9535, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.0002686606551984512, |
|
"loss": 1.932, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00026820161304100823, |
|
"loss": 1.9516, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.0002677396316022783, |
|
"loss": 1.9347, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.00026727472237020447, |
|
"loss": 1.9473, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.0002668068969055341, |
|
"loss": 1.9428, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.0002663361668415318, |
|
"loss": 1.9204, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.0002658625438836899, |
|
"loss": 1.9039, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.0002653860398094373, |
|
"loss": 1.9166, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00026490666646784665, |
|
"loss": 1.9072, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00026442443577933994, |
|
"loss": 1.9014, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 0.0002639393597353917, |
|
"loss": 1.9272, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 0.00026345145039823097, |
|
"loss": 1.9274, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 0.00026296071990054165, |
|
"loss": 1.9548, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 0.0002624671804451601, |
|
"loss": 1.928, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 0.0002619708443047725, |
|
"loss": 1.9072, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 0.00026147172382160914, |
|
"loss": 1.9116, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 0.0002609698314071376, |
|
"loss": 1.915, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 0.0002604651795417543, |
|
"loss": 1.915, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 0.0002599577807744739, |
|
"loss": 1.9604, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"eval_loss": 1.911091685295105, |
|
"eval_runtime": 289.5613, |
|
"eval_samples_per_second": 206.212, |
|
"eval_steps_per_second": 206.212, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 0.0002594476477226176, |
|
"loss": 1.9335, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 0.00025893479307149893, |
|
"loss": 1.9033, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 0.0002584192295741087, |
|
"loss": 1.874, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 0.00025790097005079764, |
|
"loss": 1.7896, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 0.0002573800273889577, |
|
"loss": 1.7936, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 0.00025685641454270173, |
|
"loss": 1.8321, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 0.00025633014453254086, |
|
"loss": 1.8105, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 0.0002558012304450613, |
|
"loss": 1.8068, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 0.0002552696854325987, |
|
"loss": 1.7953, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 0.0002547355227129109, |
|
"loss": 1.8073, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 0.0002541987555688496, |
|
"loss": 1.8095, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 0.0002536593973480297, |
|
"loss": 1.7939, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 0.0002531174614624977, |
|
"loss": 1.8226, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 0.000252572961388398, |
|
"loss": 1.7815, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 0.00025202591066563786, |
|
"loss": 1.7929, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 0.00025147632289755075, |
|
"loss": 1.8027, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 0.0002509242117505579, |
|
"loss": 1.8349, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 0.0002503695909538287, |
|
"loss": 1.7828, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 0.0002498124742989391, |
|
"loss": 1.814, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 0.0002492528756395289, |
|
"loss": 1.821, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 0.0002486908088909569, |
|
"loss": 1.8023, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 0.0002481262880299552, |
|
"loss": 1.7953, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 0.0002475593270942814, |
|
"loss": 1.8497, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 0.0002469899401823699, |
|
"loss": 1.7957, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 0.0002464181414529809, |
|
"loss": 1.7958, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 0.0002458439451248484, |
|
"loss": 1.8024, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 0.0002452673654763268, |
|
"loss": 1.8306, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 0.0002446884168450358, |
|
"loss": 1.8174, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 0.00024410711362750386, |
|
"loss": 1.8032, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 0.00024352347027881003, |
|
"loss": 1.8087, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 0.0002429375013122247, |
|
"loss": 1.7648, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 0.0002423492212988487, |
|
"loss": 1.8544, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 0.00024175864486725092, |
|
"loss": 1.7978, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 0.0002411657867031045, |
|
"loss": 1.7866, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 0.0002405706615488216, |
|
"loss": 1.8074, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"learning_rate": 0.00023997328420318704, |
|
"loss": 1.7842, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"learning_rate": 0.00023937366952099005, |
|
"loss": 1.781, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 0.00023877183241265514, |
|
"loss": 1.786, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"learning_rate": 0.00023816778784387094, |
|
"loss": 1.8091, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"learning_rate": 0.00023756155083521846, |
|
"loss": 1.7684, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"eval_loss": 1.897764801979065, |
|
"eval_runtime": 292.524, |
|
"eval_samples_per_second": 204.123, |
|
"eval_steps_per_second": 204.123, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 0.00023695313646179735, |
|
"loss": 1.7693, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 0.00023634255985285102, |
|
"loss": 1.7598, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 0.00023572983619139058, |
|
"loss": 1.8441, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 0.00023511498071381726, |
|
"loss": 1.7768, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 0.00023449800870954326, |
|
"loss": 1.784, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 0.00023387893552061199, |
|
"loss": 1.7905, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 0.00023325777654131623, |
|
"loss": 1.7996, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 0.00023263454721781537, |
|
"loss": 1.7546, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 0.0002320092630477515, |
|
"loss": 1.8043, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 0.0002313819395798639, |
|
"loss": 1.7817, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 0.00023075259241360233, |
|
"loss": 1.8117, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 0.00023012123719873926, |
|
"loss": 1.7798, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 0.0002294878896349807, |
|
"loss": 1.763, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"learning_rate": 0.00022885256547157566, |
|
"loss": 1.8169, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 0.0002282152805069247, |
|
"loss": 1.797, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 0.00022757605058818688, |
|
"loss": 1.7768, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 0.0002269348916108859, |
|
"loss": 1.8005, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"learning_rate": 0.00022629181951851473, |
|
"loss": 1.7734, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"learning_rate": 0.0002256468503021391, |
|
"loss": 1.774, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 0.000225, |
|
"loss": 1.7865, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 0.00022435128469711465, |
|
"loss": 1.7613, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 1.37, |
|
"learning_rate": 0.00022370072052487668, |
|
"loss": 1.8013, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 0.00022304832366065505, |
|
"loss": 1.7617, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 0.00022239411032739162, |
|
"loss": 1.7323, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"learning_rate": 0.00022173809679319772, |
|
"loss": 1.7838, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 0.0002210802993709498, |
|
"loss": 1.8099, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 0.00022042073441788358, |
|
"loss": 1.82, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"learning_rate": 0.00021975941833518757, |
|
"loss": 1.772, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"learning_rate": 0.00021909636756759483, |
|
"loss": 1.8044, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"learning_rate": 0.00021843159860297442, |
|
"loss": 1.7679, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"learning_rate": 0.00021776512797192123, |
|
"loss": 1.7786, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"learning_rate": 0.00021709697224734487, |
|
"loss": 1.8305, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"learning_rate": 0.00021642714804405772, |
|
"loss": 1.8044, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"learning_rate": 0.0002157556720183616, |
|
"loss": 1.7838, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"learning_rate": 0.00021508256086763368, |
|
"loss": 1.7639, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"learning_rate": 0.00021440783132991136, |
|
"loss": 1.8067, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"learning_rate": 0.00021373150018347576, |
|
"loss": 1.772, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"learning_rate": 0.0002130535842464348, |
|
"loss": 1.7985, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"learning_rate": 0.00021237410037630493, |
|
"loss": 1.7746, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"learning_rate": 0.00021169306546959174, |
|
"loss": 1.7673, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"eval_loss": 1.880852222442627, |
|
"eval_runtime": 291.0717, |
|
"eval_samples_per_second": 205.142, |
|
"eval_steps_per_second": 205.142, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 1.49, |
|
"learning_rate": 0.00021101049646137003, |
|
"loss": 1.7555, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 1.49, |
|
"learning_rate": 0.0002103264103248626, |
|
"loss": 1.7722, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 0.00020964082407101824, |
|
"loss": 1.7686, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 1.51, |
|
"learning_rate": 0.00020895375474808852, |
|
"loss": 1.7625, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 1.51, |
|
"learning_rate": 0.0002082652194412042, |
|
"loss": 1.7524, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"learning_rate": 0.00020757523527195005, |
|
"loss": 1.7785, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"learning_rate": 0.00020688381939793928, |
|
"loss": 1.7901, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"learning_rate": 0.0002061909890123868, |
|
"loss": 1.7941, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"learning_rate": 0.00020549676134368184, |
|
"loss": 1.8157, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"learning_rate": 0.00020480115365495926, |
|
"loss": 1.8354, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"learning_rate": 0.00020410418324367055, |
|
"loss": 1.7764, |
|
"step": 251 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"learning_rate": 0.0002034058674411535, |
|
"loss": 1.7677, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"learning_rate": 0.0002027062236122014, |
|
"loss": 1.7739, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"learning_rate": 0.00020200526915463107, |
|
"loss": 1.7746, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 1.58, |
|
"learning_rate": 0.00020130302149885031, |
|
"loss": 1.7675, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 1.58, |
|
"learning_rate": 0.0002005994981074245, |
|
"loss": 1.737, |
|
"step": 256 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"learning_rate": 0.0001998947164746423, |
|
"loss": 1.8318, |
|
"step": 257 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"learning_rate": 0.00019918869412608066, |
|
"loss": 1.7959, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"learning_rate": 0.00019848144861816898, |
|
"loss": 1.7567, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"learning_rate": 0.00019777299753775265, |
|
"loss": 1.7896, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"learning_rate": 0.0001970633585016556, |
|
"loss": 1.7706, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 1.62, |
|
"learning_rate": 0.0001963525491562421, |
|
"loss": 1.7561, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"learning_rate": 0.00019564058717697847, |
|
"loss": 1.8023, |
|
"step": 263 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"learning_rate": 0.00019492749026799288, |
|
"loss": 1.8121, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"learning_rate": 0.00019421327616163563, |
|
"loss": 1.791, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"learning_rate": 0.00019349796261803793, |
|
"loss": 1.7758, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 1.65, |
|
"learning_rate": 0.00019278156742467032, |
|
"loss": 1.755, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"learning_rate": 0.0001920641083959004, |
|
"loss": 1.6986, |
|
"step": 268 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"learning_rate": 0.00019134560337254986, |
|
"loss": 1.7569, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"learning_rate": 0.00019062607022145078, |
|
"loss": 1.7849, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"learning_rate": 0.00018990552683500125, |
|
"loss": 1.7833, |
|
"step": 271 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"learning_rate": 0.00018918399113072076, |
|
"loss": 1.7739, |
|
"step": 272 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"learning_rate": 0.00018846148105080424, |
|
"loss": 1.7765, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"learning_rate": 0.00018773801456167628, |
|
"loss": 1.6825, |
|
"step": 274 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 0.00018701360965354402, |
|
"loss": 1.758, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 1.71, |
|
"learning_rate": 0.00018628828433995013, |
|
"loss": 1.7477, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 1.71, |
|
"learning_rate": 0.00018556205665732462, |
|
"loss": 1.7757, |
|
"step": 277 |
|
}, |
|
{ |
|
"epoch": 1.72, |
|
"learning_rate": 0.00018483494466453636, |
|
"loss": 1.7946, |
|
"step": 278 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"learning_rate": 0.0001841069664424442, |
|
"loss": 1.7968, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"learning_rate": 0.00018337814009344714, |
|
"loss": 1.7296, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"eval_loss": 1.8629968166351318, |
|
"eval_runtime": 293.6177, |
|
"eval_samples_per_second": 203.363, |
|
"eval_steps_per_second": 203.363, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"learning_rate": 0.00018264848374103433, |
|
"loss": 1.7516, |
|
"step": 281 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"learning_rate": 0.00018191801552933432, |
|
"loss": 1.7466, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"learning_rate": 0.00018118675362266385, |
|
"loss": 1.7304, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"learning_rate": 0.0001804547162050764, |
|
"loss": 1.7519, |
|
"step": 284 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"learning_rate": 0.0001797219214799096, |
|
"loss": 1.7721, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"learning_rate": 0.00017898838766933298, |
|
"loss": 1.7455, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 1.78, |
|
"learning_rate": 0.00017825413301389453, |
|
"loss": 1.7674, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 1.78, |
|
"learning_rate": 0.00017751917577206734, |
|
"loss": 1.7581, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"learning_rate": 0.00017678353421979548, |
|
"loss": 1.7714, |
|
"step": 289 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"learning_rate": 0.00017604722665003956, |
|
"loss": 1.7696, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"learning_rate": 0.0001753102713723217, |
|
"loss": 1.7903, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"learning_rate": 0.00017457268671227063, |
|
"loss": 1.7563, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"learning_rate": 0.00017383449101116547, |
|
"loss": 1.7828, |
|
"step": 293 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 0.00017309570262548, |
|
"loss": 1.7417, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 1.83, |
|
"learning_rate": 0.00017235633992642615, |
|
"loss": 1.7491, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 1.83, |
|
"learning_rate": 0.000171616421299497, |
|
"loss": 1.7316, |
|
"step": 296 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"learning_rate": 0.0001708759651440098, |
|
"loss": 1.785, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"learning_rate": 0.00017013498987264832, |
|
"loss": 1.7413, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"learning_rate": 0.00016939351391100497, |
|
"loss": 1.7874, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"learning_rate": 0.00016865155569712278, |
|
"loss": 1.7377, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"learning_rate": 0.0001679091336810366, |
|
"loss": 1.7814, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"learning_rate": 0.00016716626632431477, |
|
"loss": 1.7343, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"learning_rate": 0.00016642297209959955, |
|
"loss": 1.7549, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"learning_rate": 0.000165679269490148, |
|
"loss": 1.8005, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"learning_rate": 0.0001649351769893725, |
|
"loss": 1.8071, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"learning_rate": 0.00016419071310038057, |
|
"loss": 1.777, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 1.9, |
|
"learning_rate": 0.00016344589633551502, |
|
"loss": 1.7851, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"learning_rate": 0.00016270074521589347, |
|
"loss": 1.7768, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"learning_rate": 0.00016195527827094787, |
|
"loss": 1.7751, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"learning_rate": 0.00016120951403796364, |
|
"loss": 1.7342, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 1.93, |
|
"learning_rate": 0.00016046347106161876, |
|
"loss": 1.7465, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 1.93, |
|
"learning_rate": 0.00015971716789352274, |
|
"loss": 1.7957, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"learning_rate": 0.00015897062309175512, |
|
"loss": 1.785, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"learning_rate": 0.000158223855220404, |
|
"loss": 1.7575, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"learning_rate": 0.00015747688284910457, |
|
"loss": 1.7998, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"learning_rate": 0.00015672972455257723, |
|
"loss": 1.7767, |
|
"step": 316 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"learning_rate": 0.00015598239891016574, |
|
"loss": 1.7811, |
|
"step": 317 |
|
}, |
|
{ |
|
"epoch": 1.97, |
|
"learning_rate": 0.00015523492450537517, |
|
"loss": 1.7671, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"learning_rate": 0.00015448731992540976, |
|
"loss": 1.7707, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"learning_rate": 0.00015373960376071093, |
|
"loss": 1.7737, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"eval_loss": 1.8478573560714722, |
|
"eval_runtime": 291.1645, |
|
"eval_samples_per_second": 205.077, |
|
"eval_steps_per_second": 205.077, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 1.99, |
|
"learning_rate": 0.0001529917946044947, |
|
"loss": 1.7395, |
|
"step": 321 |
|
}, |
|
{ |
|
"epoch": 1.99, |
|
"learning_rate": 0.00015224391105228953, |
|
"loss": 1.7596, |
|
"step": 322 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 0.00015149597170147387, |
|
"loss": 1.7475, |
|
"step": 323 |
|
}, |
|
{ |
|
"epoch": 2.01, |
|
"learning_rate": 0.0001507479951508137, |
|
"loss": 1.7828, |
|
"step": 324 |
|
}, |
|
{ |
|
"epoch": 2.01, |
|
"learning_rate": 0.00015, |
|
"loss": 1.7394, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 0.0001492520048491863, |
|
"loss": 1.6713, |
|
"step": 326 |
|
}, |
|
{ |
|
"epoch": 2.01, |
|
"learning_rate": 0.0001485040282985261, |
|
"loss": 1.5788, |
|
"step": 327 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 0.00014775608894771047, |
|
"loss": 1.6083, |
|
"step": 328 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 0.0001470082053955053, |
|
"loss": 1.5962, |
|
"step": 329 |
|
}, |
|
{ |
|
"epoch": 2.03, |
|
"learning_rate": 0.00014626039623928907, |
|
"loss": 1.564, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 0.00014551268007459024, |
|
"loss": 1.5529, |
|
"step": 331 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 0.0001447650754946249, |
|
"loss": 1.5699, |
|
"step": 332 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 0.0001440176010898343, |
|
"loss": 1.5667, |
|
"step": 333 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 0.0001432702754474228, |
|
"loss": 1.5275, |
|
"step": 334 |
|
}, |
|
{ |
|
"epoch": 2.06, |
|
"learning_rate": 0.0001425231171508954, |
|
"loss": 1.5585, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"learning_rate": 0.00014177614477959595, |
|
"loss": 1.5753, |
|
"step": 336 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"learning_rate": 0.00014102937690824486, |
|
"loss": 1.5479, |
|
"step": 337 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 0.00014028283210647718, |
|
"loss": 1.5721, |
|
"step": 338 |
|
}, |
|
{ |
|
"epoch": 2.09, |
|
"learning_rate": 0.00013953652893838119, |
|
"loss": 1.5526, |
|
"step": 339 |
|
}, |
|
{ |
|
"epoch": 2.09, |
|
"learning_rate": 0.00013879048596203636, |
|
"loss": 1.5777, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 0.00013804472172905213, |
|
"loss": 1.5216, |
|
"step": 341 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 0.0001372992547841065, |
|
"loss": 1.5776, |
|
"step": 342 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"learning_rate": 0.00013655410366448498, |
|
"loss": 1.5717, |
|
"step": 343 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 0.00013580928689961943, |
|
"loss": 1.5628, |
|
"step": 344 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 0.0001350648230106275, |
|
"loss": 1.5333, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"learning_rate": 0.000134320730509852, |
|
"loss": 1.5271, |
|
"step": 346 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"learning_rate": 0.00013357702790040048, |
|
"loss": 1.5672, |
|
"step": 347 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"learning_rate": 0.00013283373367568523, |
|
"loss": 1.5619, |
|
"step": 348 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"learning_rate": 0.00013209086631896336, |
|
"loss": 1.5461, |
|
"step": 349 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"learning_rate": 0.00013134844430287725, |
|
"loss": 1.5724, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 2.16, |
|
"learning_rate": 0.00013060648608899503, |
|
"loss": 1.5292, |
|
"step": 351 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"learning_rate": 0.0001298650101273517, |
|
"loss": 1.5703, |
|
"step": 352 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"learning_rate": 0.0001291240348559902, |
|
"loss": 1.5444, |
|
"step": 353 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 0.000128383578700503, |
|
"loss": 1.5848, |
|
"step": 354 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"learning_rate": 0.0001276436600735738, |
|
"loss": 1.603, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"learning_rate": 0.00012690429737451992, |
|
"loss": 1.5961, |
|
"step": 356 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 0.0001261655089888345, |
|
"loss": 1.5635, |
|
"step": 357 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 0.00012542731328772934, |
|
"loss": 1.5743, |
|
"step": 358 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"learning_rate": 0.00012468972862767825, |
|
"loss": 1.5291, |
|
"step": 359 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"learning_rate": 0.00012395277334996044, |
|
"loss": 1.5871, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"eval_loss": 1.8882598876953125, |
|
"eval_runtime": 292.5066, |
|
"eval_samples_per_second": 204.136, |
|
"eval_steps_per_second": 204.136, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"learning_rate": 0.0001232164657802045, |
|
"loss": 1.5044, |
|
"step": 361 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"learning_rate": 0.00012248082422793266, |
|
"loss": 1.5682, |
|
"step": 362 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 0.00012174586698610547, |
|
"loss": 1.564, |
|
"step": 363 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 0.00012101161233066703, |
|
"loss": 1.5595, |
|
"step": 364 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 0.00012027807852009038, |
|
"loss": 1.5228, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 0.00011954528379492359, |
|
"loss": 1.5444, |
|
"step": 366 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"learning_rate": 0.00011881324637733611, |
|
"loss": 1.5731, |
|
"step": 367 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 0.0001180819844706657, |
|
"loss": 1.5853, |
|
"step": 368 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 0.00011735151625896565, |
|
"loss": 1.583, |
|
"step": 369 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 0.00011662185990655284, |
|
"loss": 1.5859, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 2.29, |
|
"learning_rate": 0.00011589303355755579, |
|
"loss": 1.5597, |
|
"step": 371 |
|
}, |
|
{ |
|
"epoch": 2.29, |
|
"learning_rate": 0.00011516505533546363, |
|
"loss": 1.5692, |
|
"step": 372 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 0.00011443794334267538, |
|
"loss": 1.5628, |
|
"step": 373 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 0.00011371171566004985, |
|
"loss": 1.5875, |
|
"step": 374 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"learning_rate": 0.00011298639034645593, |
|
"loss": 1.545, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"learning_rate": 0.00011226198543832372, |
|
"loss": 1.5687, |
|
"step": 376 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"learning_rate": 0.00011153851894919574, |
|
"loss": 1.5379, |
|
"step": 377 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 0.00011081600886927924, |
|
"loss": 1.5287, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 0.00011009447316499873, |
|
"loss": 1.5763, |
|
"step": 379 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 0.00010937392977854923, |
|
"loss": 1.5675, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 2.35, |
|
"learning_rate": 0.00010865439662745013, |
|
"loss": 1.5487, |
|
"step": 381 |
|
}, |
|
{ |
|
"epoch": 2.35, |
|
"learning_rate": 0.0001079358916040996, |
|
"loss": 1.5471, |
|
"step": 382 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"learning_rate": 0.00010721843257532968, |
|
"loss": 1.5631, |
|
"step": 383 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"learning_rate": 0.00010650203738196206, |
|
"loss": 1.5721, |
|
"step": 384 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"learning_rate": 0.00010578672383836435, |
|
"loss": 1.5811, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 2.38, |
|
"learning_rate": 0.0001050725097320071, |
|
"loss": 1.5824, |
|
"step": 386 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"learning_rate": 0.00010435941282302154, |
|
"loss": 1.5706, |
|
"step": 387 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"learning_rate": 0.0001036474508437579, |
|
"loss": 1.5383, |
|
"step": 388 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 0.00010293664149834444, |
|
"loss": 1.5719, |
|
"step": 389 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 0.00010222700246224735, |
|
"loss": 1.5167, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 2.41, |
|
"learning_rate": 0.00010151855138183102, |
|
"loss": 1.5552, |
|
"step": 391 |
|
}, |
|
{ |
|
"epoch": 2.42, |
|
"learning_rate": 0.00010081130587391934, |
|
"loss": 1.5735, |
|
"step": 392 |
|
}, |
|
{ |
|
"epoch": 2.42, |
|
"learning_rate": 0.00010010528352535771, |
|
"loss": 1.5604, |
|
"step": 393 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 9.94005018925755e-05, |
|
"loss": 1.5332, |
|
"step": 394 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 9.869697850114969e-05, |
|
"loss": 1.5617, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"learning_rate": 9.799473084536891e-05, |
|
"loss": 1.5573, |
|
"step": 396 |
|
}, |
|
{ |
|
"epoch": 2.45, |
|
"learning_rate": 9.729377638779857e-05, |
|
"loss": 1.5541, |
|
"step": 397 |
|
}, |
|
{ |
|
"epoch": 2.45, |
|
"learning_rate": 9.659413255884647e-05, |
|
"loss": 1.5915, |
|
"step": 398 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"learning_rate": 9.589581675632944e-05, |
|
"loss": 1.5829, |
|
"step": 399 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 9.519884634504074e-05, |
|
"loss": 1.5339, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"eval_loss": 1.8761346340179443, |
|
"eval_runtime": 290.7874, |
|
"eval_samples_per_second": 205.342, |
|
"eval_steps_per_second": 205.342, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 9.450323865631816e-05, |
|
"loss": 1.5532, |
|
"step": 401 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"learning_rate": 9.380901098761319e-05, |
|
"loss": 1.5511, |
|
"step": 402 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"learning_rate": 9.311618060206074e-05, |
|
"loss": 1.5456, |
|
"step": 403 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"learning_rate": 9.242476472804995e-05, |
|
"loss": 1.5488, |
|
"step": 404 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 9.17347805587958e-05, |
|
"loss": 1.5898, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 9.104624525191145e-05, |
|
"loss": 1.5482, |
|
"step": 406 |
|
}, |
|
{ |
|
"epoch": 2.51, |
|
"learning_rate": 9.035917592898177e-05, |
|
"loss": 1.5348, |
|
"step": 407 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 8.967358967513738e-05, |
|
"loss": 1.5588, |
|
"step": 408 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 8.898950353862998e-05, |
|
"loss": 1.5553, |
|
"step": 409 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 8.830693453040829e-05, |
|
"loss": 1.5642, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 8.762589962369511e-05, |
|
"loss": 1.5557, |
|
"step": 411 |
|
}, |
|
{ |
|
"epoch": 2.54, |
|
"learning_rate": 8.694641575356519e-05, |
|
"loss": 1.5578, |
|
"step": 412 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 8.626849981652424e-05, |
|
"loss": 1.574, |
|
"step": 413 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 8.55921686700886e-05, |
|
"loss": 1.5666, |
|
"step": 414 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 8.491743913236628e-05, |
|
"loss": 1.5562, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"learning_rate": 8.424432798163836e-05, |
|
"loss": 1.5529, |
|
"step": 416 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"learning_rate": 8.357285195594228e-05, |
|
"loss": 1.538, |
|
"step": 417 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"learning_rate": 8.290302775265509e-05, |
|
"loss": 1.5782, |
|
"step": 418 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"learning_rate": 8.223487202807877e-05, |
|
"loss": 1.55, |
|
"step": 419 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 8.156840139702554e-05, |
|
"loss": 1.5439, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 8.090363243240517e-05, |
|
"loss": 1.5627, |
|
"step": 421 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 8.024058166481243e-05, |
|
"loss": 1.5693, |
|
"step": 422 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"learning_rate": 7.957926558211642e-05, |
|
"loss": 1.5816, |
|
"step": 423 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 7.89197006290502e-05, |
|
"loss": 1.5464, |
|
"step": 424 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 7.82619032068023e-05, |
|
"loss": 1.5494, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 2.63, |
|
"learning_rate": 7.760588967260838e-05, |
|
"loss": 1.5711, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 2.63, |
|
"learning_rate": 7.69516763393449e-05, |
|
"loss": 1.5801, |
|
"step": 427 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 7.629927947512331e-05, |
|
"loss": 1.5628, |
|
"step": 428 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 7.564871530288536e-05, |
|
"loss": 1.5601, |
|
"step": 429 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 7.500000000000002e-05, |
|
"loss": 1.5453, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"learning_rate": 7.435314969786088e-05, |
|
"loss": 1.5684, |
|
"step": 431 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 7.370818048148527e-05, |
|
"loss": 1.5656, |
|
"step": 432 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 7.30651083891141e-05, |
|
"loss": 1.5735, |
|
"step": 433 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"learning_rate": 7.242394941181308e-05, |
|
"loss": 1.5587, |
|
"step": 434 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"learning_rate": 7.17847194930753e-05, |
|
"loss": 1.5735, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 7.114743452842427e-05, |
|
"loss": 1.5656, |
|
"step": 436 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 7.051211036501928e-05, |
|
"loss": 1.5228, |
|
"step": 437 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 6.987876280126068e-05, |
|
"loss": 1.5452, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 2.71, |
|
"learning_rate": 6.924740758639768e-05, |
|
"loss": 1.5777, |
|
"step": 439 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 6.86180604201361e-05, |
|
"loss": 1.5589, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"eval_loss": 1.8656864166259766, |
|
"eval_runtime": 292.9263, |
|
"eval_samples_per_second": 203.843, |
|
"eval_steps_per_second": 203.843, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 6.799073695224846e-05, |
|
"loss": 1.562, |
|
"step": 441 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"learning_rate": 6.736545278218463e-05, |
|
"loss": 1.5788, |
|
"step": 442 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"learning_rate": 6.674222345868376e-05, |
|
"loss": 1.5532, |
|
"step": 443 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"learning_rate": 6.612106447938799e-05, |
|
"loss": 1.5986, |
|
"step": 444 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 6.550199129045668e-05, |
|
"loss": 1.573, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 6.488501928618274e-05, |
|
"loss": 1.5544, |
|
"step": 446 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"learning_rate": 6.427016380860937e-05, |
|
"loss": 1.5651, |
|
"step": 447 |
|
}, |
|
{ |
|
"epoch": 2.77, |
|
"learning_rate": 6.365744014714898e-05, |
|
"loss": 1.5261, |
|
"step": 448 |
|
}, |
|
{ |
|
"epoch": 2.77, |
|
"learning_rate": 6.304686353820266e-05, |
|
"loss": 1.5325, |
|
"step": 449 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 6.243844916478155e-05, |
|
"loss": 1.5435, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 6.183221215612904e-05, |
|
"loss": 1.546, |
|
"step": 451 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"learning_rate": 6.122816758734487e-05, |
|
"loss": 1.5508, |
|
"step": 452 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 6.0626330479009845e-05, |
|
"loss": 1.5678, |
|
"step": 453 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 6.002671579681294e-05, |
|
"loss": 1.523, |
|
"step": 454 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"learning_rate": 5.9429338451178355e-05, |
|
"loss": 1.5271, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 5.88342132968955e-05, |
|
"loss": 1.5285, |
|
"step": 456 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 5.824135513274902e-05, |
|
"loss": 1.5476, |
|
"step": 457 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 5.765077870115125e-05, |
|
"loss": 1.5216, |
|
"step": 458 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 5.706249868777526e-05, |
|
"loss": 1.547, |
|
"step": 459 |
|
}, |
|
{ |
|
"epoch": 2.84, |
|
"learning_rate": 5.6476529721189974e-05, |
|
"loss": 1.5398, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 5.589288637249612e-05, |
|
"loss": 1.579, |
|
"step": 461 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 5.531158315496417e-05, |
|
"loss": 1.4801, |
|
"step": 462 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"learning_rate": 5.473263452367318e-05, |
|
"loss": 1.5465, |
|
"step": 463 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"learning_rate": 5.415605487515164e-05, |
|
"loss": 1.5303, |
|
"step": 464 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"learning_rate": 5.358185854701909e-05, |
|
"loss": 1.5519, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 5.3010059817630066e-05, |
|
"loss": 1.5818, |
|
"step": 466 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 5.244067290571856e-05, |
|
"loss": 1.5413, |
|
"step": 467 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"learning_rate": 5.187371197004485e-05, |
|
"loss": 1.5372, |
|
"step": 468 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 5.130919110904311e-05, |
|
"loss": 1.5459, |
|
"step": 469 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 5.074712436047112e-05, |
|
"loss": 1.5488, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 5.018752570106086e-05, |
|
"loss": 1.5332, |
|
"step": 471 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 4.963040904617131e-05, |
|
"loss": 1.5684, |
|
"step": 472 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 4.9075788249442024e-05, |
|
"loss": 1.5405, |
|
"step": 473 |
|
}, |
|
{ |
|
"epoch": 2.93, |
|
"learning_rate": 4.852367710244921e-05, |
|
"loss": 1.5361, |
|
"step": 474 |
|
}, |
|
{ |
|
"epoch": 2.93, |
|
"learning_rate": 4.7974089334362057e-05, |
|
"loss": 1.5219, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 2.94, |
|
"learning_rate": 4.742703861160198e-05, |
|
"loss": 1.5518, |
|
"step": 476 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 4.688253853750227e-05, |
|
"loss": 1.5699, |
|
"step": 477 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 4.63406026519703e-05, |
|
"loss": 1.5368, |
|
"step": 478 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"learning_rate": 4.5801244431150394e-05, |
|
"loss": 1.5286, |
|
"step": 479 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"learning_rate": 4.526447728708908e-05, |
|
"loss": 1.5651, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"eval_loss": 1.8590147495269775, |
|
"eval_runtime": 291.2621, |
|
"eval_samples_per_second": 205.008, |
|
"eval_steps_per_second": 205.008, |
|
"step": 480 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 640, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 4, |
|
"save_steps": 160, |
|
"total_flos": 1.405424023508091e+18, |
|
"train_batch_size": 1, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |