{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.1347517730496455,
  "eval_steps": 500,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 0.00019981060606060605,
      "loss": 2.9206,
      "step": 1
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.00019962121212121212,
      "loss": 2.7609,
      "step": 2
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.0001994318181818182,
      "loss": 2.6878,
      "step": 3
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.00019924242424242426,
      "loss": 2.6697,
      "step": 4
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.0001990530303030303,
      "loss": 2.5818,
      "step": 5
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00019886363636363637,
      "loss": 2.5396,
      "step": 6
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00019867424242424244,
      "loss": 2.5265,
      "step": 7
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0001984848484848485,
      "loss": 2.5475,
      "step": 8
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00019829545454545455,
      "loss": 2.4835,
      "step": 9
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0001981060606060606,
      "loss": 2.4559,
      "step": 10
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0001979166666666667,
      "loss": 2.4511,
      "step": 11
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00019772727272727273,
      "loss": 2.4592,
      "step": 12
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0001975378787878788,
      "loss": 2.4495,
      "step": 13
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019734848484848484,
      "loss": 2.4714,
      "step": 14
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019715909090909094,
      "loss": 2.4302,
      "step": 15
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019696969696969698,
      "loss": 2.4097,
      "step": 16
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019678030303030305,
      "loss": 2.4523,
      "step": 17
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0001965909090909091,
      "loss": 2.4325,
      "step": 18
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019640151515151516,
      "loss": 2.4125,
      "step": 19
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019621212121212123,
      "loss": 2.4329,
      "step": 20
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019602272727272727,
      "loss": 2.3471,
      "step": 21
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019583333333333334,
      "loss": 2.3012,
      "step": 22
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0001956439393939394,
      "loss": 2.3869,
      "step": 23
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00019545454545454548,
      "loss": 2.3822,
      "step": 24
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00019526515151515152,
      "loss": 2.3427,
      "step": 25
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0001950757575757576,
      "loss": 2.3659,
      "step": 26
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00019488636363636366,
      "loss": 2.3826,
      "step": 27
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0001946969696969697,
      "loss": 2.3532,
      "step": 28
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00019450757575757577,
      "loss": 2.3828,
      "step": 29
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0001943181818181818,
      "loss": 2.3133,
      "step": 30
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0001941287878787879,
      "loss": 2.3613,
      "step": 31
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00019393939393939395,
      "loss": 2.3867,
      "step": 32
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00019375000000000002,
      "loss": 2.2966,
      "step": 33
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00019356060606060606,
      "loss": 2.3436,
      "step": 34
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00019337121212121213,
      "loss": 2.3425,
      "step": 35
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0001931818181818182,
      "loss": 2.307,
      "step": 36
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00019299242424242424,
      "loss": 2.3521,
      "step": 37
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0001928030303030303,
      "loss": 2.3302,
      "step": 38
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00019261363636363635,
      "loss": 2.312,
      "step": 39
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00019242424242424245,
      "loss": 2.3655,
      "step": 40
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001922348484848485,
      "loss": 2.344,
      "step": 41
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00019204545454545456,
      "loss": 2.3373,
      "step": 42
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001918560606060606,
      "loss": 2.3331,
      "step": 43
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00019166666666666667,
      "loss": 2.3376,
      "step": 44
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00019147727272727274,
      "loss": 2.3369,
      "step": 45
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00019128787878787878,
      "loss": 2.3413,
      "step": 46
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00019109848484848485,
      "loss": 2.3212,
      "step": 47
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00019090909090909092,
      "loss": 2.307,
      "step": 48
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.000190719696969697,
      "loss": 2.2929,
      "step": 49
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00019053030303030303,
      "loss": 2.2873,
      "step": 50
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0001903409090909091,
      "loss": 2.3098,
      "step": 51
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00019015151515151517,
      "loss": 2.3129,
      "step": 52
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0001899621212121212,
      "loss": 2.3038,
      "step": 53
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00018977272727272728,
      "loss": 2.286,
      "step": 54
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00018958333333333332,
      "loss": 2.3388,
      "step": 55
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00018939393939393942,
      "loss": 2.3193,
      "step": 56
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00018920454545454546,
      "loss": 2.3136,
      "step": 57
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00018901515151515153,
      "loss": 2.3141,
      "step": 58
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00018882575757575757,
      "loss": 2.3646,
      "step": 59
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00018863636363636364,
      "loss": 2.3318,
      "step": 60
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.0001884469696969697,
      "loss": 2.2977,
      "step": 61
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00018825757575757575,
      "loss": 2.2764,
      "step": 62
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00018806818181818182,
      "loss": 2.3095,
      "step": 63
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0001878787878787879,
      "loss": 2.252,
      "step": 64
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00018768939393939396,
      "loss": 2.2786,
      "step": 65
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0001875,
      "loss": 2.2789,
      "step": 66
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00018731060606060607,
      "loss": 2.2841,
      "step": 67
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00018712121212121212,
      "loss": 2.3436,
      "step": 68
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00018693181818181818,
      "loss": 2.2956,
      "step": 69
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00018674242424242425,
      "loss": 2.2353,
      "step": 70
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0001865530303030303,
      "loss": 2.2772,
      "step": 71
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00018636363636363636,
      "loss": 2.2496,
      "step": 72
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00018617424242424243,
      "loss": 2.2477,
      "step": 73
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0001859848484848485,
      "loss": 2.2791,
      "step": 74
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00018579545454545454,
      "loss": 2.2799,
      "step": 75
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00018560606060606061,
      "loss": 2.3132,
      "step": 76
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00018541666666666668,
      "loss": 2.2542,
      "step": 77
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00018522727272727273,
      "loss": 2.2609,
      "step": 78
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0001850378787878788,
      "loss": 2.2819,
      "step": 79
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00018484848484848484,
      "loss": 2.2844,
      "step": 80
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00018465909090909093,
      "loss": 2.2542,
      "step": 81
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00018446969696969697,
      "loss": 2.2603,
      "step": 82
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00018428030303030304,
      "loss": 2.2832,
      "step": 83
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00018409090909090909,
      "loss": 2.2869,
      "step": 84
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00018390151515151518,
      "loss": 2.2646,
      "step": 85
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00018371212121212122,
      "loss": 2.2698,
      "step": 86
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00018352272727272727,
      "loss": 2.2757,
      "step": 87
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00018333333333333334,
      "loss": 2.2544,
      "step": 88
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0001831439393939394,
      "loss": 2.2678,
      "step": 89
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00018295454545454547,
      "loss": 2.2778,
      "step": 90
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00018276515151515152,
      "loss": 2.2027,
      "step": 91
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00018257575757575758,
      "loss": 2.2167,
      "step": 92
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00018238636363636365,
      "loss": 2.2602,
      "step": 93
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00018219696969696972,
      "loss": 2.2736,
      "step": 94
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00018200757575757577,
      "loss": 2.2443,
      "step": 95
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00018181818181818183,
      "loss": 2.2299,
      "step": 96
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0001816287878787879,
      "loss": 2.2644,
      "step": 97
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00018143939393939395,
      "loss": 2.259,
      "step": 98
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00018125000000000001,
      "loss": 2.2567,
      "step": 99
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00018106060606060606,
      "loss": 2.2599,
      "step": 100
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00018087121212121213,
      "loss": 2.2091,
      "step": 101
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0001806818181818182,
      "loss": 2.2312,
      "step": 102
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00018049242424242426,
      "loss": 2.1869,
      "step": 103
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0001803030303030303,
      "loss": 2.2023,
      "step": 104
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00018011363636363638,
      "loss": 2.2132,
      "step": 105
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00017992424242424244,
      "loss": 2.2612,
      "step": 106
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0001797348484848485,
      "loss": 2.2109,
      "step": 107
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00017954545454545456,
      "loss": 2.215,
      "step": 108
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0001793560606060606,
      "loss": 2.2114,
      "step": 109
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0001791666666666667,
      "loss": 2.2203,
      "step": 110
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00017897727272727274,
      "loss": 2.2594,
      "step": 111
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.0001787878787878788,
      "loss": 2.2001,
      "step": 112
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00017859848484848485,
      "loss": 2.2046,
      "step": 113
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00017840909090909092,
      "loss": 2.1907,
      "step": 114
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00017821969696969699,
      "loss": 2.2539,
      "step": 115
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00017803030303030303,
      "loss": 2.2335,
      "step": 116
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.0001778409090909091,
      "loss": 2.2171,
      "step": 117
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00017765151515151517,
      "loss": 2.2278,
      "step": 118
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00017746212121212123,
      "loss": 2.231,
      "step": 119
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00017727272727272728,
      "loss": 2.2141,
      "step": 120
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00017708333333333335,
      "loss": 2.2432,
      "step": 121
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00017689393939393942,
      "loss": 2.2266,
      "step": 122
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00017670454545454546,
      "loss": 2.1929,
      "step": 123
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00017651515151515153,
      "loss": 2.2077,
      "step": 124
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00017632575757575757,
      "loss": 2.2133,
      "step": 125
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00017613636363636366,
      "loss": 2.2251,
      "step": 126
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0001759469696969697,
      "loss": 2.2265,
      "step": 127
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00017575757575757578,
      "loss": 2.2186,
      "step": 128
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00017556818181818182,
      "loss": 2.1925,
      "step": 129
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.0001753787878787879,
      "loss": 2.1956,
      "step": 130
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00017518939393939396,
      "loss": 2.2459,
      "step": 131
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.000175,
      "loss": 2.22,
      "step": 132
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00017481060606060607,
      "loss": 2.2143,
      "step": 133
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0001746212121212121,
      "loss": 2.2359,
      "step": 134
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0001744318181818182,
      "loss": 2.2058,
      "step": 135
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00017424242424242425,
      "loss": 2.2307,
      "step": 136
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00017405303030303032,
      "loss": 2.2062,
      "step": 137
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00017386363636363636,
      "loss": 2.1796,
      "step": 138
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00017367424242424243,
      "loss": 2.2054,
      "step": 139
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.0001734848484848485,
      "loss": 2.1651,
      "step": 140
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00017329545454545454,
      "loss": 2.2159,
      "step": 141
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.0001731060606060606,
      "loss": 2.1988,
      "step": 142
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00017291666666666668,
      "loss": 2.1676,
      "step": 143
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00017272727272727275,
      "loss": 2.1725,
      "step": 144
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.0001725378787878788,
      "loss": 2.2205,
      "step": 145
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00017234848484848486,
      "loss": 2.1486,
      "step": 146
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00017215909090909093,
      "loss": 2.147,
      "step": 147
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00017196969696969697,
      "loss": 2.1651,
      "step": 148
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00017178030303030304,
      "loss": 2.1983,
      "step": 149
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00017159090909090908,
      "loss": 2.1778,
      "step": 150
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00017140151515151518,
      "loss": 2.1631,
      "step": 151
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00017121212121212122,
      "loss": 2.1442,
      "step": 152
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.0001710227272727273,
      "loss": 2.1397,
      "step": 153
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00017083333333333333,
      "loss": 2.1697,
      "step": 154
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0001706439393939394,
      "loss": 2.1451,
      "step": 155
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00017045454545454547,
      "loss": 2.1789,
      "step": 156
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.0001702651515151515,
      "loss": 2.1037,
      "step": 157
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00017007575757575758,
      "loss": 2.1698,
      "step": 158
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00016988636363636365,
      "loss": 2.1538,
      "step": 159
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00016969696969696972,
      "loss": 2.2015,
      "step": 160
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00016950757575757576,
      "loss": 2.179,
      "step": 161
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00016931818181818183,
      "loss": 2.1766,
      "step": 162
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.0001691287878787879,
      "loss": 2.1646,
      "step": 163
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00016893939393939394,
      "loss": 2.1694,
      "step": 164
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00016875,
      "loss": 2.1562,
      "step": 165
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00016856060606060605,
      "loss": 2.1551,
      "step": 166
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00016837121212121212,
      "loss": 2.1652,
      "step": 167
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.0001681818181818182,
      "loss": 2.1594,
      "step": 168
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00016799242424242426,
      "loss": 2.1674,
      "step": 169
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.0001678030303030303,
      "loss": 2.1378,
      "step": 170
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00016761363636363637,
      "loss": 2.1447,
      "step": 171
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00016742424242424244,
      "loss": 2.1451,
      "step": 172
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00016723484848484848,
      "loss": 2.1336,
      "step": 173
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00016704545454545455,
      "loss": 2.1231,
      "step": 174
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.0001668560606060606,
      "loss": 2.1143,
      "step": 175
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.0001666666666666667,
      "loss": 2.1316,
      "step": 176
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00016647727272727273,
      "loss": 2.1281,
      "step": 177
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.0001662878787878788,
      "loss": 2.136,
      "step": 178
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00016609848484848484,
      "loss": 2.1279,
      "step": 179
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00016590909090909094,
      "loss": 2.1421,
      "step": 180
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00016571969696969698,
      "loss": 2.1541,
      "step": 181
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00016553030303030305,
      "loss": 2.1293,
      "step": 182
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.0001653409090909091,
      "loss": 2.1294,
      "step": 183
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00016515151515151516,
      "loss": 2.1459,
      "step": 184
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00016496212121212123,
      "loss": 2.1113,
      "step": 185
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00016477272727272727,
      "loss": 2.1394,
      "step": 186
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00016458333333333334,
      "loss": 2.1321,
      "step": 187
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.0001643939393939394,
      "loss": 2.148,
      "step": 188
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00016420454545454548,
      "loss": 2.1631,
      "step": 189
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00016401515151515152,
      "loss": 2.1276,
      "step": 190
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.0001638257575757576,
      "loss": 2.0706,
      "step": 191
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00016363636363636366,
      "loss": 2.127,
      "step": 192
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.0001634469696969697,
      "loss": 2.1449,
      "step": 193
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00016325757575757577,
      "loss": 2.1204,
      "step": 194
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.0001630681818181818,
      "loss": 2.0904,
      "step": 195
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.0001628787878787879,
      "loss": 2.1129,
      "step": 196
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00016268939393939395,
      "loss": 2.1036,
      "step": 197
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00016250000000000002,
      "loss": 2.1509,
      "step": 198
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00016231060606060606,
      "loss": 2.1239,
      "step": 199
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00016212121212121213,
      "loss": 2.145,
      "step": 200
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.0001619318181818182,
      "loss": 2.1221,
      "step": 201
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00016174242424242424,
      "loss": 2.1181,
      "step": 202
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0001615530303030303,
      "loss": 2.1306,
      "step": 203
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00016136363636363635,
      "loss": 2.0199,
      "step": 204
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00016117424242424245,
      "loss": 2.1178,
      "step": 205
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0001609848484848485,
      "loss": 2.1584,
      "step": 206
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.00016079545454545456,
      "loss": 2.0872,
      "step": 207
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.0001606060606060606,
      "loss": 2.1033,
      "step": 208
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.00016041666666666667,
      "loss": 2.1381,
      "step": 209
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00016022727272727274,
      "loss": 2.1127,
      "step": 210
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00016003787878787878,
      "loss": 2.1077,
      "step": 211
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00015984848484848485,
      "loss": 2.0984,
      "step": 212
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00015965909090909092,
      "loss": 2.0994,
      "step": 213
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.000159469696969697,
      "loss": 2.096,
      "step": 214
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00015928030303030303,
      "loss": 2.0909,
      "step": 215
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.0001590909090909091,
      "loss": 2.118,
      "step": 216
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.00015890151515151517,
      "loss": 2.0783,
      "step": 217
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.0001587121212121212,
      "loss": 2.0876,
      "step": 218
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.00015852272727272728,
      "loss": 2.0581,
      "step": 219
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.00015833333333333332,
      "loss": 2.0548,
      "step": 220
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00015814393939393942,
      "loss": 2.0595,
      "step": 221
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00015795454545454546,
      "loss": 2.0719,
      "step": 222
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00015776515151515153,
      "loss": 2.0903,
      "step": 223
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.00015757575757575757,
      "loss": 2.0941,
      "step": 224
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.00015738636363636364,
      "loss": 2.0926,
      "step": 225
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.0001571969696969697,
      "loss": 2.0816,
      "step": 226
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.00015700757575757575,
      "loss": 2.0894,
      "step": 227
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00015681818181818182,
      "loss": 2.0798,
      "step": 228
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.0001566287878787879,
      "loss": 2.0672,
      "step": 229
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00015643939393939396,
      "loss": 2.0787,
      "step": 230
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00015625,
      "loss": 2.0611,
      "step": 231
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00015606060606060607,
      "loss": 2.0805,
      "step": 232
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00015587121212121211,
      "loss": 2.053,
      "step": 233
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00015568181818181818,
      "loss": 2.0575,
      "step": 234
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00015549242424242425,
      "loss": 2.0459,
      "step": 235
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.0001553030303030303,
      "loss": 2.0635,
      "step": 236
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00015511363636363636,
      "loss": 2.0335,
      "step": 237
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.00015492424242424243,
      "loss": 2.0681,
      "step": 238
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.0001547348484848485,
      "loss": 2.0748,
      "step": 239
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.00015454545454545454,
      "loss": 2.1091,
      "step": 240
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.0001543560606060606,
      "loss": 2.0732,
      "step": 241
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.00015416666666666668,
      "loss": 2.0746,
      "step": 242
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.00015397727272727272,
      "loss": 2.0306,
      "step": 243
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.0001537878787878788,
      "loss": 2.0864,
      "step": 244
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.00015359848484848484,
      "loss": 2.0664,
      "step": 245
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.00015340909090909093,
      "loss": 2.0801,
      "step": 246
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.00015321969696969697,
      "loss": 2.0799,
      "step": 247
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.00015303030303030304,
      "loss": 2.0621,
      "step": 248
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.00015284090909090909,
      "loss": 2.0687,
      "step": 249
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.00015265151515151515,
      "loss": 2.018,
      "step": 250
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.00015246212121212122,
      "loss": 2.0256,
      "step": 251
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.00015227272727272727,
      "loss": 2.0736,
      "step": 252
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.00015208333333333333,
      "loss": 2.0609,
      "step": 253
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.0001518939393939394,
      "loss": 2.0539,
      "step": 254
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.00015170454545454547,
      "loss": 2.0282,
      "step": 255
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00015151515151515152,
      "loss": 2.0417,
      "step": 256
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00015132575757575758,
      "loss": 2.0333,
      "step": 257
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00015113636363636365,
      "loss": 2.0428,
      "step": 258
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00015094696969696972,
      "loss": 2.045,
      "step": 259
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.00015075757575757576,
      "loss": 2.0463,
      "step": 260
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.0001505681818181818,
      "loss": 2.0539,
      "step": 261
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.0001503787878787879,
      "loss": 2.0184,
      "step": 262
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.00015018939393939394,
      "loss": 2.0858,
      "step": 263
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.00015000000000000001,
      "loss": 2.0239,
      "step": 264
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.00014981060606060606,
      "loss": 2.0425,
      "step": 265
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.00014962121212121213,
      "loss": 2.0263,
      "step": 266
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.0001494318181818182,
      "loss": 2.042,
      "step": 267
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.00014924242424242426,
      "loss": 2.026,
      "step": 268
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.0001490530303030303,
      "loss": 2.0411,
      "step": 269
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.00014886363636363635,
      "loss": 2.028,
      "step": 270
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.00014867424242424244,
      "loss": 2.0172,
      "step": 271
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.00014848484848484849,
      "loss": 2.0196,
      "step": 272
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.00014829545454545455,
      "loss": 2.0142,
      "step": 273
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.0001481060606060606,
      "loss": 2.0265,
      "step": 274
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.0001479166666666667,
      "loss": 2.0353,
      "step": 275
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.00014772727272727274,
      "loss": 2.0327,
      "step": 276
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.0001475378787878788,
      "loss": 2.0188,
      "step": 277
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.00014734848484848485,
      "loss": 1.9987,
      "step": 278
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.00014715909090909092,
      "loss": 2.0141,
      "step": 279
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.00014696969696969698,
      "loss": 2.0403,
      "step": 280
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.00014678030303030303,
      "loss": 1.9977,
      "step": 281
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0001465909090909091,
      "loss": 1.9674,
      "step": 282
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.00014640151515151517,
      "loss": 1.9984,
      "step": 283
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.00014621212121212123,
      "loss": 1.9796,
      "step": 284
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.00014602272727272728,
      "loss": 2.0139,
      "step": 285
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.00014583333333333335,
      "loss": 1.9866,
      "step": 286
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.00014564393939393941,
      "loss": 2.0208,
      "step": 287
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.00014545454545454546,
      "loss": 1.9844,
      "step": 288
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.00014526515151515153,
      "loss": 2.0082,
      "step": 289
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.00014507575757575757,
      "loss": 1.984,
      "step": 290
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.00014488636363636366,
      "loss": 2.0015,
      "step": 291
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.0001446969696969697,
      "loss": 2.0209,
      "step": 292
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.00014450757575757578,
      "loss": 1.9728,
      "step": 293
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.00014431818181818182,
      "loss": 2.0032,
      "step": 294
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.00014412878787878789,
      "loss": 1.9641,
      "step": 295
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.00014393939393939396,
      "loss": 1.9945,
      "step": 296
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.00014375,
      "loss": 1.9658,
      "step": 297
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.00014356060606060607,
      "loss": 1.9907,
      "step": 298
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.0001433712121212121,
      "loss": 1.9935,
      "step": 299
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.0001431818181818182,
      "loss": 1.9897,
      "step": 300
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.00014299242424242425,
      "loss": 1.984,
      "step": 301
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.00014280303030303032,
      "loss": 1.9581,
      "step": 302
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.00014261363636363636,
      "loss": 1.9893,
      "step": 303
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.00014242424242424243,
      "loss": 1.9568,
      "step": 304
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0001422348484848485,
      "loss": 1.98,
      "step": 305
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.00014204545454545454,
      "loss": 1.9519,
      "step": 306
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0001418560606060606,
      "loss": 1.9693,
      "step": 307
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.00014166666666666668,
      "loss": 1.9866,
      "step": 308
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.00014147727272727275,
      "loss": 1.9508,
      "step": 309
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.0001412878787878788,
      "loss": 1.9653,
      "step": 310
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.00014109848484848486,
      "loss": 1.9991,
      "step": 311
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.00014090909090909093,
      "loss": 1.9442,
      "step": 312
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.00014071969696969697,
      "loss": 1.9807,
      "step": 313
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.00014053030303030304,
      "loss": 1.9958,
      "step": 314
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.00014034090909090908,
      "loss": 1.9459,
      "step": 315
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.00014015151515151518,
      "loss": 1.9508,
      "step": 316
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.00013996212121212122,
      "loss": 1.9933,
      "step": 317
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.0001397727272727273,
      "loss": 1.9703,
      "step": 318
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.00013958333333333333,
      "loss": 1.965,
      "step": 319
    },
    {
      "epoch": 0.91,
      "learning_rate": 0.0001393939393939394,
      "loss": 1.9264,
      "step": 320
    },
    {
      "epoch": 0.91,
      "learning_rate": 0.00013920454545454547,
      "loss": 1.9688,
      "step": 321
    },
    {
      "epoch": 0.91,
      "learning_rate": 0.0001390151515151515,
      "loss": 1.9901,
      "step": 322
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00013882575757575758,
      "loss": 1.9363,
      "step": 323
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00013863636363636365,
      "loss": 1.9269,
      "step": 324
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00013844696969696972,
      "loss": 1.9688,
      "step": 325
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00013825757575757576,
      "loss": 1.9758,
      "step": 326
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.00013806818181818183,
      "loss": 1.9414,
      "step": 327
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.0001378787878787879,
      "loss": 1.9397,
      "step": 328
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.00013768939393939394,
      "loss": 1.9032,
      "step": 329
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.0001375,
      "loss": 1.9777,
      "step": 330
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.00013731060606060605,
      "loss": 1.9173,
      "step": 331
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.00013712121212121212,
      "loss": 1.9307,
      "step": 332
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.0001369318181818182,
      "loss": 1.9611,
      "step": 333
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.00013674242424242426,
      "loss": 1.9698,
      "step": 334
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.0001365530303030303,
      "loss": 1.9619,
      "step": 335
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.00013636363636363637,
      "loss": 1.9322,
      "step": 336
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.00013617424242424244,
      "loss": 1.9441,
      "step": 337
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.00013598484848484848,
      "loss": 1.9563,
      "step": 338
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.00013579545454545455,
      "loss": 1.9283,
      "step": 339
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.0001356060606060606,
      "loss": 1.9508,
      "step": 340
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.0001354166666666667,
      "loss": 1.9285,
      "step": 341
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.00013522727272727273,
      "loss": 1.9295,
      "step": 342
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.0001350378787878788,
      "loss": 1.9272,
      "step": 343
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.00013484848484848484,
      "loss": 1.905,
      "step": 344
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.00013465909090909094,
      "loss": 1.9409,
      "step": 345
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.00013446969696969698,
      "loss": 1.9674,
      "step": 346
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.00013428030303030302,
      "loss": 1.9278,
      "step": 347
    },
    {
      "epoch": 0.99,
      "learning_rate": 0.0001340909090909091,
      "loss": 1.9136,
      "step": 348
    },
    {
      "epoch": 0.99,
      "learning_rate": 0.00013390151515151516,
      "loss": 1.9143,
      "step": 349
    },
    {
      "epoch": 0.99,
      "learning_rate": 0.00013371212121212123,
      "loss": 1.9381,
      "step": 350
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.00013352272727272727,
      "loss": 1.9136,
      "step": 351
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.00013333333333333334,
      "loss": 1.9103,
      "step": 352
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.0001331439393939394,
      "loss": 1.9027,
      "step": 353
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.00013295454545454548,
      "loss": 1.8674,
      "step": 354
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.00013276515151515152,
      "loss": 1.886,
      "step": 355
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.00013257575757575756,
      "loss": 1.887,
      "step": 356
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.00013238636363636366,
      "loss": 1.87,
      "step": 357
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.0001321969696969697,
      "loss": 1.8715,
      "step": 358
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.00013200757575757577,
      "loss": 1.8993,
      "step": 359
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.0001318181818181818,
      "loss": 1.8844,
      "step": 360
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.0001316287878787879,
      "loss": 1.8965,
      "step": 361
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.00013143939393939395,
      "loss": 1.8956,
      "step": 362
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.00013125000000000002,
      "loss": 1.869,
      "step": 363
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.00013106060606060606,
      "loss": 1.8702,
      "step": 364
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.00013087121212121213,
      "loss": 1.8962,
      "step": 365
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.0001306818181818182,
      "loss": 1.8613,
      "step": 366
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.00013049242424242424,
      "loss": 1.8845,
      "step": 367
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.0001303030303030303,
      "loss": 1.8689,
      "step": 368
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.00013011363636363635,
      "loss": 1.9059,
      "step": 369
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.00012992424242424245,
      "loss": 1.9082,
      "step": 370
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.0001297348484848485,
      "loss": 1.8918,
      "step": 371
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.00012954545454545456,
      "loss": 1.8657,
      "step": 372
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.0001293560606060606,
      "loss": 1.8909,
      "step": 373
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.00012916666666666667,
      "loss": 1.8649,
      "step": 374
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.00012897727272727274,
      "loss": 1.833,
      "step": 375
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.00012878787878787878,
      "loss": 1.8815,
      "step": 376
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.00012859848484848485,
      "loss": 1.8646,
      "step": 377
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.00012840909090909092,
      "loss": 1.846,
      "step": 378
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.000128219696969697,
      "loss": 1.8631,
      "step": 379
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.00012803030303030303,
      "loss": 1.917,
      "step": 380
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.0001278409090909091,
      "loss": 1.9068,
      "step": 381
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.00012765151515151517,
      "loss": 1.8772,
      "step": 382
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.0001274621212121212,
      "loss": 1.8414,
      "step": 383
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.00012727272727272728,
      "loss": 1.9003,
      "step": 384
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.00012708333333333332,
      "loss": 1.8415,
      "step": 385
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.00012689393939393942,
      "loss": 1.8491,
      "step": 386
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.00012670454545454546,
      "loss": 1.8875,
      "step": 387
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.00012651515151515153,
      "loss": 1.8629,
      "step": 388
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.00012632575757575757,
      "loss": 1.8378,
      "step": 389
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.00012613636363636364,
      "loss": 1.8442,
      "step": 390
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.0001259469696969697,
      "loss": 1.8587,
      "step": 391
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.00012575757575757575,
      "loss": 1.8659,
      "step": 392
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.00012556818181818182,
      "loss": 1.8271,
      "step": 393
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.0001253787878787879,
      "loss": 1.8692,
      "step": 394
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.00012518939393939396,
      "loss": 1.8071,
      "step": 395
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.000125,
      "loss": 1.8564,
      "step": 396
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.00012481060606060607,
      "loss": 1.8891,
      "step": 397
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.00012462121212121211,
      "loss": 1.8173,
      "step": 398
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.00012443181818181818,
      "loss": 1.8653,
      "step": 399
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.00012424242424242425,
      "loss": 1.8843,
      "step": 400
    }
  ],
  "logging_steps": 1,
  "max_steps": 1056,
  "num_train_epochs": 3,
  "save_steps": 100,
  "total_flos": 1.0133699319771955e+18,
  "trial_name": null,
  "trial_params": null
}