{ |
|
"best_metric": 1.60513174533844, |
|
"best_model_checkpoint": "./lora-out/checkpoint-250", |
|
"epoch": 0.7776049766718507, |
|
"eval_steps": 50, |
|
"global_step": 250, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 2e-05, |
|
"loss": 1.7924, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 4e-05, |
|
"loss": 1.8083, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 6e-05, |
|
"loss": 1.8177, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 8e-05, |
|
"loss": 1.7595, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0001, |
|
"loss": 1.6598, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00012, |
|
"loss": 1.6919, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00014, |
|
"loss": 1.6706, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00016, |
|
"loss": 1.6879, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00018, |
|
"loss": 1.7051, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0002, |
|
"loss": 1.7022, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.000199999456645141, |
|
"loss": 1.6809, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019999782658646859, |
|
"loss": 1.6098, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0001999951098416968, |
|
"loss": 1.7014, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019999130644034888, |
|
"loss": 1.5885, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00019998641642375657, |
|
"loss": 1.6243, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00019998043984506027, |
|
"loss": 1.6484, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00019997337676920803, |
|
"loss": 1.6093, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00019996522727295496, |
|
"loss": 1.6173, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00019995599144486247, |
|
"loss": 1.646, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00019994566938529712, |
|
"loss": 1.6469, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00019993426120642983, |
|
"loss": 1.6564, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00019992176703223432, |
|
"loss": 1.5901, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.000199908186998486, |
|
"loss": 1.664, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00019989352125276047, |
|
"loss": 1.6275, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00019987776995443178, |
|
"loss": 1.5839, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00019986093327467076, |
|
"loss": 1.5611, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00019984301139644334, |
|
"loss": 1.669, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001998240045145083, |
|
"loss": 1.5641, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00019980391283541522, |
|
"loss": 1.6023, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00019978273657750238, |
|
"loss": 1.6309, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0001997604759708942, |
|
"loss": 1.6353, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00019973713125749884, |
|
"loss": 1.6328, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00019971270269100564, |
|
"loss": 1.5683, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00019968719053688213, |
|
"loss": 1.6217, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0001996605950723714, |
|
"loss": 1.5734, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00019963291658648896, |
|
"loss": 1.6162, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00019960415538001957, |
|
"loss": 1.5922, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0001995743117655141, |
|
"loss": 1.5806, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.000199543386067286, |
|
"loss": 1.5938, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00019951137862140778, |
|
"loss": 1.6386, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00019947828977570756, |
|
"loss": 1.6476, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00019944411988976496, |
|
"loss": 1.6557, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00019940886933490749, |
|
"loss": 1.5836, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00019937253849420635, |
|
"loss": 1.6421, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.0001993351277624723, |
|
"loss": 1.629, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00019929663754625145, |
|
"loss": 1.6392, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.00019925706826382064, |
|
"loss": 1.5677, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.00019921642034518317, |
|
"loss": 1.6144, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.00019917469423206389, |
|
"loss": 1.6068, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00019913189037790456, |
|
"loss": 1.6421, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"eval_loss": 1.621693730354309, |
|
"eval_runtime": 233.7603, |
|
"eval_samples_per_second": 16.354, |
|
"eval_steps_per_second": 4.09, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0001990880092478588, |
|
"loss": 1.6172, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0001990430513187871, |
|
"loss": 1.6095, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00019899701707925166, |
|
"loss": 1.5967, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00019894990702951106, |
|
"loss": 1.617, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00019890172168151473, |
|
"loss": 1.5932, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0001988524615588976, |
|
"loss": 1.6548, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00019880212719697413, |
|
"loss": 1.6033, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00019875071914273278, |
|
"loss": 1.6063, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00019869823795482986, |
|
"loss": 1.6107, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00019864468420358354, |
|
"loss": 1.5758, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00019859005847096763, |
|
"loss": 1.5723, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00019853436135060527, |
|
"loss": 1.542, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00019847759344776252, |
|
"loss": 1.5611, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00019841975537934162, |
|
"loss": 1.6157, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00019836084777387458, |
|
"loss": 1.5589, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.00019830087127151598, |
|
"loss": 1.6077, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.00019823982652403634, |
|
"loss": 1.5473, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.00019817771419481487, |
|
"loss": 1.6265, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0001981145349588323, |
|
"loss": 1.6074, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.00019805028950266348, |
|
"loss": 1.6195, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.00019798497852447006, |
|
"loss": 1.5876, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0001979186027339928, |
|
"loss": 1.5978, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.00019785116285254381, |
|
"loss": 1.533, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.00019778265961299888, |
|
"loss": 1.5888, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0001977130937597894, |
|
"loss": 1.6211, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.00019764246604889415, |
|
"loss": 1.6091, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.00019757077724783147, |
|
"loss": 1.6012, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0001974980281356504, |
|
"loss": 1.6401, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0001974242195029227, |
|
"loss": 1.6111, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.00019734935215173392, |
|
"loss": 1.6208, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.00019727342689567482, |
|
"loss": 1.6038, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.00019719644455983256, |
|
"loss": 1.5915, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0001971184059807817, |
|
"loss": 1.5872, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.000197039312006575, |
|
"loss": 1.5984, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0001969591634967344, |
|
"loss": 1.5996, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00019687796132224152, |
|
"loss": 1.6056, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0001967957063655283, |
|
"loss": 1.6099, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0001967123995204674, |
|
"loss": 1.6295, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.00019662804169236225, |
|
"loss": 1.5482, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.00019654263379793773, |
|
"loss": 1.5781, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.00019645617676532963, |
|
"loss": 1.5954, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.000196368671534075, |
|
"loss": 1.619, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0001962801190551016, |
|
"loss": 1.6153, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0001961905202907179, |
|
"loss": 1.6008, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00019609987621460232, |
|
"loss": 1.5891, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0001960081878117929, |
|
"loss": 1.6438, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0001959154560786764, |
|
"loss": 1.5576, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00019582168202297758, |
|
"loss": 1.646, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00019572686666374822, |
|
"loss": 1.6269, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00019563101103135602, |
|
"loss": 1.6288, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"eval_loss": 1.6143836975097656, |
|
"eval_runtime": 233.6412, |
|
"eval_samples_per_second": 16.363, |
|
"eval_steps_per_second": 4.092, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00019553411616747348, |
|
"loss": 1.5667, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.00019543618312506647, |
|
"loss": 1.6221, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0001953372129683829, |
|
"loss": 1.5992, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0001952372067729411, |
|
"loss": 1.6138, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.00019513616562551807, |
|
"loss": 1.51, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.00019503409062413782, |
|
"loss": 1.6227, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.00019493098287805927, |
|
"loss": 1.6014, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.00019482684350776434, |
|
"loss": 1.625, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0001947216736449457, |
|
"loss": 1.6109, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0001946154744324945, |
|
"loss": 1.62, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00019450824702448778, |
|
"loss": 1.5878, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0001943999925861763, |
|
"loss": 1.6264, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00019429071229397157, |
|
"loss": 1.6186, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0001941804073354331, |
|
"loss": 1.6363, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00019406907890925562, |
|
"loss": 1.5341, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00019395672822525593, |
|
"loss": 1.5986, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00019384335650435985, |
|
"loss": 1.6181, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0001937289649785889, |
|
"loss": 1.6118, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0001936135548910469, |
|
"loss": 1.6404, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.00019349712749590649, |
|
"loss": 1.583, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00019337968405839547, |
|
"loss": 1.5827, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00019326122585478308, |
|
"loss": 1.6392, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00019314175417236616, |
|
"loss": 1.5861, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.00019302127030945508, |
|
"loss": 1.5738, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0001928997755753597, |
|
"loss": 1.5915, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.00019277727129037508, |
|
"loss": 1.617, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0001926537587857672, |
|
"loss": 1.5582, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00019252923940375844, |
|
"loss": 1.6294, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00019240371449751306, |
|
"loss": 1.6087, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00019227718543112236, |
|
"loss": 1.5749, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.00019214965357959005, |
|
"loss": 1.6041, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.00019202112032881715, |
|
"loss": 1.6106, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.00019189158707558695, |
|
"loss": 1.5553, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.00019176105522754995, |
|
"loss": 1.5638, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0001916295262032084, |
|
"loss": 1.5921, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.00019149700143190096, |
|
"loss": 1.5837, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.00019136348235378726, |
|
"loss": 1.6341, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.00019122897041983205, |
|
"loss": 1.5678, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.00019109346709178963, |
|
"loss": 1.6137, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0001909569738421878, |
|
"loss": 1.6324, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.00019081949215431194, |
|
"loss": 1.612, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.00019068102352218897, |
|
"loss": 1.5908, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.00019054156945057097, |
|
"loss": 1.6087, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.00019040113145491887, |
|
"loss": 1.5613, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.000190259711061386, |
|
"loss": 1.6072, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.00019011730980680156, |
|
"loss": 1.5722, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0001899739292386538, |
|
"loss": 1.5961, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.00018982957091507325, |
|
"loss": 1.5409, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0001896842364048159, |
|
"loss": 1.6557, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.000189537927287246, |
|
"loss": 1.5725, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"eval_loss": 1.6101970672607422, |
|
"eval_runtime": 233.5313, |
|
"eval_samples_per_second": 16.37, |
|
"eval_steps_per_second": 4.094, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.00018939064515231888, |
|
"loss": 1.5949, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0001892423916005639, |
|
"loss": 1.6191, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.00018909316824306674, |
|
"loss": 1.5487, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.00018894297670145216, |
|
"loss": 1.5104, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.00018879181860786623, |
|
"loss": 1.6392, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00018863969560495866, |
|
"loss": 1.5932, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00018848660934586491, |
|
"loss": 1.6213, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0001883325614941882, |
|
"loss": 1.5515, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00018817755372398155, |
|
"loss": 1.6166, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.00018802158771972943, |
|
"loss": 1.6552, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.00018786466517632956, |
|
"loss": 1.6378, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.00018770678779907448, |
|
"loss": 1.5176, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.00018754795730363302, |
|
"loss": 1.5793, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.00018738817541603156, |
|
"loss": 1.6616, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.00018722744387263544, |
|
"loss": 1.6055, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.00018706576442012994, |
|
"loss": 1.6204, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.00018690313881550137, |
|
"loss": 1.5952, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.00018673956882601803, |
|
"loss": 1.6271, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.00018657505622921082, |
|
"loss": 1.538, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.00018640960281285417, |
|
"loss": 1.5874, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0001862432103749464, |
|
"loss": 1.5694, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.00018607588072369033, |
|
"loss": 1.583, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.00018590761567747354, |
|
"loss": 1.5961, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.00018573841706484866, |
|
"loss": 1.582, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0001855682867245134, |
|
"loss": 1.6427, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.00018539722650529075, |
|
"loss": 1.604, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.00018522523826610868, |
|
"loss": 1.577, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.00018505232387598018, |
|
"loss": 1.6339, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.00018487848521398265, |
|
"loss": 1.5993, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0001847037241692378, |
|
"loss": 1.6286, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.00018452804264089084, |
|
"loss": 1.5963, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.00018435144253809, |
|
"loss": 1.5856, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.00018417392577996578, |
|
"loss": 1.5787, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.00018399549429561006, |
|
"loss": 1.5876, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.00018381615002405509, |
|
"loss": 1.5565, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.00018363589491425248, |
|
"loss": 1.5897, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0001834547309250521, |
|
"loss": 1.5951, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.00018327266002518056, |
|
"loss": 1.5447, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.00018308968419322003, |
|
"loss": 1.6087, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.00018290580541758668, |
|
"loss": 1.5946, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.00018272102569650905, |
|
"loss": 1.6148, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.00018253534703800627, |
|
"loss": 1.649, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0001823487714598664, |
|
"loss": 1.6312, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0001821613009896244, |
|
"loss": 1.5858, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.00018197293766454003, |
|
"loss": 1.5925, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0001817836835315759, |
|
"loss": 1.5604, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.00018159354064737506, |
|
"loss": 1.6125, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0001814025110782387, |
|
"loss": 1.5954, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.00018121059690010368, |
|
"loss": 1.5937, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.00018101780019852008, |
|
"loss": 1.5582, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"eval_loss": 1.6065257787704468, |
|
"eval_runtime": 233.7919, |
|
"eval_samples_per_second": 16.352, |
|
"eval_steps_per_second": 4.089, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.00018082412306862837, |
|
"loss": 1.5628, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.00018062956761513675, |
|
"loss": 1.5735, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.00018043413595229818, |
|
"loss": 1.6011, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.00018023783020388763, |
|
"loss": 1.5434, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.00018004065250317868, |
|
"loss": 1.5533, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.00017984260499292058, |
|
"loss": 1.6074, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.00017964368982531487, |
|
"loss": 1.5286, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.00017944390916199203, |
|
"loss": 1.5161, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.00017924326517398793, |
|
"loss": 1.6024, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.00017904176004172027, |
|
"loss": 1.5727, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0001788393959549649, |
|
"loss": 1.5752, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.00017863617511283203, |
|
"loss": 1.5845, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.00017843209972374233, |
|
"loss": 1.6082, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.00017822717200540283, |
|
"loss": 1.5895, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.00017802139418478298, |
|
"loss": 1.5836, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.00017781476849809038, |
|
"loss": 1.5996, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.00017760729719074644, |
|
"loss": 1.6256, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.000177398982517362, |
|
"loss": 1.628, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.00017718982674171284, |
|
"loss": 1.5543, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.00017697983213671515, |
|
"loss": 1.5732, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0001767690009844007, |
|
"loss": 1.5892, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0001765573355758921, |
|
"loss": 1.6524, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.00017634483821137787, |
|
"loss": 1.5694, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0001761315112000876, |
|
"loss": 1.6006, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.00017591735686026661, |
|
"loss": 1.6161, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.00017570237751915092, |
|
"loss": 1.595, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.00017548657551294192, |
|
"loss": 1.6072, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.000175269953186781, |
|
"loss": 1.5855, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.00017505251289472406, |
|
"loss": 1.597, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0001748342569997158, |
|
"loss": 1.5837, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.00017461518787356432, |
|
"loss": 1.5422, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.00017439530789691506, |
|
"loss": 1.5837, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0001741746194592251, |
|
"loss": 1.6038, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.00017395312495873717, |
|
"loss": 1.5882, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.00017373082680245347, |
|
"loss": 1.5763, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.00017350772740610976, |
|
"loss": 1.6046, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.00017328382919414877, |
|
"loss": 1.594, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.00017305913459969414, |
|
"loss": 1.5903, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.00017283364606452396, |
|
"loss": 1.5704, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0001726073660390439, |
|
"loss": 1.588, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00017238029698226113, |
|
"loss": 1.6273, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00017215244136175705, |
|
"loss": 1.5166, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.00017192380165366092, |
|
"loss": 1.5813, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0001716943803426226, |
|
"loss": 1.5654, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0001714641799217858, |
|
"loss": 1.5548, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.00017123320289276085, |
|
"loss": 1.5491, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0001710014517655976, |
|
"loss": 1.5903, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.00017076892905875806, |
|
"loss": 1.5687, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.00017053563729908905, |
|
"loss": 1.5975, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.00017030157902179485, |
|
"loss": 1.6055, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"eval_loss": 1.60513174533844, |
|
"eval_runtime": 233.7813, |
|
"eval_samples_per_second": 16.353, |
|
"eval_steps_per_second": 4.089, |
|
"step": 250 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 963, |
|
"num_train_epochs": 3, |
|
"save_steps": 50, |
|
"total_flos": 7.01067914379264e+17, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |