{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.6859592711682744,
  "eval_steps": 500,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 0.00019931271477663232,
      "loss": 2.5587,
      "step": 1
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.0001986254295532646,
      "loss": 2.3914,
      "step": 2
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.00019793814432989693,
      "loss": 2.4218,
      "step": 3
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.00019725085910652924,
      "loss": 2.3414,
      "step": 4
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0001965635738831615,
      "loss": 2.2469,
      "step": 5
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00019587628865979381,
      "loss": 2.3241,
      "step": 6
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00019518900343642613,
      "loss": 2.3266,
      "step": 7
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00019450171821305842,
      "loss": 2.1856,
      "step": 8
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00019381443298969073,
      "loss": 2.3247,
      "step": 9
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00019312714776632305,
      "loss": 2.3245,
      "step": 10
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019243986254295533,
      "loss": 2.2591,
      "step": 11
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019175257731958765,
      "loss": 2.1767,
      "step": 12
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019106529209621996,
      "loss": 2.3478,
      "step": 13
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019037800687285222,
      "loss": 2.3339,
      "step": 14
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00018969072164948454,
      "loss": 2.234,
      "step": 15
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00018900343642611685,
      "loss": 2.2651,
      "step": 16
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00018831615120274914,
      "loss": 2.1831,
      "step": 17
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00018762886597938145,
      "loss": 2.216,
      "step": 18
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00018694158075601377,
      "loss": 2.1359,
      "step": 19
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00018625429553264605,
      "loss": 2.1215,
      "step": 20
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00018556701030927837,
      "loss": 2.2179,
      "step": 21
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00018487972508591068,
      "loss": 2.2598,
      "step": 22
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00018419243986254294,
      "loss": 2.1813,
      "step": 23
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00018350515463917526,
      "loss": 2.2006,
      "step": 24
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00018281786941580757,
      "loss": 2.1564,
      "step": 25
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00018213058419243986,
      "loss": 2.2537,
      "step": 26
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00018144329896907217,
      "loss": 2.1975,
      "step": 27
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0001807560137457045,
      "loss": 2.2566,
      "step": 28
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00018006872852233677,
      "loss": 2.1464,
      "step": 29
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0001793814432989691,
      "loss": 2.1421,
      "step": 30
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0001786941580756014,
      "loss": 2.1276,
      "step": 31
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00017800687285223366,
      "loss": 2.0649,
      "step": 32
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00017731958762886598,
      "loss": 2.1835,
      "step": 33
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001766323024054983,
      "loss": 2.1711,
      "step": 34
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00017594501718213058,
      "loss": 2.2591,
      "step": 35
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001752577319587629,
      "loss": 2.1471,
      "step": 36
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0001745704467353952,
      "loss": 2.0861,
      "step": 37
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0001738831615120275,
      "loss": 2.0702,
      "step": 38
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0001731958762886598,
      "loss": 2.1096,
      "step": 39
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00017250859106529212,
      "loss": 2.1062,
      "step": 40
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00017182130584192438,
      "loss": 2.2545,
      "step": 41
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0001711340206185567,
      "loss": 2.1572,
      "step": 42
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.000170446735395189,
      "loss": 2.0749,
      "step": 43
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0001697594501718213,
      "loss": 2.1922,
      "step": 44
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00016907216494845361,
      "loss": 2.1915,
      "step": 45
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00016838487972508593,
      "loss": 2.1594,
      "step": 46
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00016769759450171822,
      "loss": 2.176,
      "step": 47
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00016701030927835053,
      "loss": 2.1223,
      "step": 48
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00016632302405498285,
      "loss": 2.1263,
      "step": 49
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00016563573883161513,
      "loss": 2.0481,
      "step": 50
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00016494845360824742,
      "loss": 2.1043,
      "step": 51
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00016426116838487973,
      "loss": 2.1678,
      "step": 52
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00016357388316151202,
      "loss": 2.1602,
      "step": 53
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00016288659793814434,
      "loss": 2.1448,
      "step": 54
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00016219931271477665,
      "loss": 2.1536,
      "step": 55
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00016151202749140894,
      "loss": 2.0339,
      "step": 56
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00016082474226804125,
      "loss": 2.023,
      "step": 57
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00016013745704467357,
      "loss": 2.1407,
      "step": 58
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00015945017182130585,
      "loss": 2.1134,
      "step": 59
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00015876288659793814,
      "loss": 2.1652,
      "step": 60
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00015807560137457046,
      "loss": 2.0051,
      "step": 61
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00015738831615120274,
      "loss": 2.0604,
      "step": 62
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00015670103092783506,
      "loss": 2.1708,
      "step": 63
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00015601374570446737,
      "loss": 2.1106,
      "step": 64
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00015532646048109966,
      "loss": 2.1445,
      "step": 65
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00015463917525773197,
      "loss": 2.0879,
      "step": 66
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.0001539518900343643,
      "loss": 2.1498,
      "step": 67
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00015326460481099657,
      "loss": 2.0719,
      "step": 68
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00015257731958762886,
      "loss": 2.2167,
      "step": 69
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00015189003436426118,
      "loss": 2.0811,
      "step": 70
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00015120274914089346,
      "loss": 2.1058,
      "step": 71
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00015051546391752578,
      "loss": 2.0392,
      "step": 72
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0001498281786941581,
      "loss": 2.0957,
      "step": 73
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00014914089347079038,
      "loss": 1.9994,
      "step": 74
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0001484536082474227,
      "loss": 2.0464,
      "step": 75
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.000147766323024055,
      "loss": 2.0417,
      "step": 76
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0001470790378006873,
      "loss": 2.105,
      "step": 77
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00014639175257731958,
      "loss": 2.1147,
      "step": 78
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0001457044673539519,
      "loss": 1.9964,
      "step": 79
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00014501718213058418,
      "loss": 1.9723,
      "step": 80
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0001443298969072165,
      "loss": 2.0621,
      "step": 81
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00014364261168384881,
      "loss": 2.2703,
      "step": 82
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0001429553264604811,
      "loss": 2.0815,
      "step": 83
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00014226804123711342,
      "loss": 2.0774,
      "step": 84
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00014158075601374573,
      "loss": 2.066,
      "step": 85
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00014089347079037802,
      "loss": 2.03,
      "step": 86
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0001402061855670103,
      "loss": 2.1433,
      "step": 87
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00013951890034364262,
      "loss": 2.0811,
      "step": 88
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0001388316151202749,
      "loss": 1.9791,
      "step": 89
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00013814432989690722,
      "loss": 2.0876,
      "step": 90
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00013745704467353953,
      "loss": 2.0314,
      "step": 91
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00013676975945017182,
      "loss": 1.9485,
      "step": 92
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00013608247422680414,
      "loss": 2.078,
      "step": 93
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00013539518900343645,
      "loss": 2.1251,
      "step": 94
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00013470790378006874,
      "loss": 1.9736,
      "step": 95
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00013402061855670103,
      "loss": 2.0189,
      "step": 96
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00013333333333333334,
      "loss": 2.0061,
      "step": 97
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00013264604810996563,
      "loss": 1.9595,
      "step": 98
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00013195876288659794,
      "loss": 1.9702,
      "step": 99
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00013127147766323026,
      "loss": 2.0322,
      "step": 100
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00013058419243986254,
      "loss": 2.0128,
      "step": 101
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00012989690721649486,
      "loss": 2.1276,
      "step": 102
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00012920962199312717,
      "loss": 2.0177,
      "step": 103
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00012852233676975946,
      "loss": 1.9366,
      "step": 104
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00012783505154639175,
      "loss": 2.0341,
      "step": 105
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00012714776632302406,
      "loss": 2.0251,
      "step": 106
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00012646048109965635,
      "loss": 1.9588,
      "step": 107
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00012577319587628866,
      "loss": 2.0445,
      "step": 108
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00012508591065292098,
      "loss": 1.8917,
      "step": 109
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00012439862542955326,
      "loss": 2.0385,
      "step": 110
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00012371134020618558,
      "loss": 2.0435,
      "step": 111
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0001230240549828179,
      "loss": 2.0666,
      "step": 112
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00012233676975945018,
      "loss": 1.9854,
      "step": 113
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00012164948453608247,
      "loss": 1.9233,
      "step": 114
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00012096219931271477,
      "loss": 1.985,
      "step": 115
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00012027491408934708,
      "loss": 2.0679,
      "step": 116
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00011958762886597938,
      "loss": 1.9717,
      "step": 117
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00011890034364261168,
      "loss": 1.9388,
      "step": 118
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.000118213058419244,
      "loss": 1.9607,
      "step": 119
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.0001175257731958763,
      "loss": 1.9543,
      "step": 120
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.0001168384879725086,
      "loss": 1.9925,
      "step": 121
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00011615120274914091,
      "loss": 1.9913,
      "step": 122
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00011546391752577319,
      "loss": 1.9857,
      "step": 123
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00011477663230240549,
      "loss": 2.0056,
      "step": 124
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.0001140893470790378,
      "loss": 2.0133,
      "step": 125
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.0001134020618556701,
      "loss": 1.8104,
      "step": 126
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0001127147766323024,
      "loss": 1.7946,
      "step": 127
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00011202749140893472,
      "loss": 1.8347,
      "step": 128
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00011134020618556702,
      "loss": 2.0342,
      "step": 129
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00011065292096219932,
      "loss": 1.9425,
      "step": 130
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00010996563573883164,
      "loss": 1.9546,
      "step": 131
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00010927835051546391,
      "loss": 1.9694,
      "step": 132
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00010859106529209621,
      "loss": 1.936,
      "step": 133
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00010790378006872852,
      "loss": 2.0004,
      "step": 134
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00010721649484536083,
      "loss": 2.0462,
      "step": 135
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00010652920962199313,
      "loss": 1.9783,
      "step": 136
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00010584192439862544,
      "loss": 1.8628,
      "step": 137
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00010515463917525774,
      "loss": 1.9134,
      "step": 138
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00010446735395189004,
      "loss": 1.8846,
      "step": 139
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00010378006872852236,
      "loss": 1.8119,
      "step": 140
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00010309278350515463,
      "loss": 1.9242,
      "step": 141
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00010240549828178693,
      "loss": 1.9292,
      "step": 142
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00010171821305841925,
      "loss": 1.9763,
      "step": 143
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00010103092783505155,
      "loss": 1.9023,
      "step": 144
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00010034364261168385,
      "loss": 1.8064,
      "step": 145
    },
    {
      "epoch": 0.5,
      "learning_rate": 9.965635738831616e-05,
      "loss": 1.9646,
      "step": 146
    },
    {
      "epoch": 0.5,
      "learning_rate": 9.896907216494846e-05,
      "loss": 1.8234,
      "step": 147
    },
    {
      "epoch": 0.51,
      "learning_rate": 9.828178694158075e-05,
      "loss": 2.0289,
      "step": 148
    },
    {
      "epoch": 0.51,
      "learning_rate": 9.759450171821306e-05,
      "loss": 1.7698,
      "step": 149
    },
    {
      "epoch": 0.51,
      "learning_rate": 9.690721649484537e-05,
      "loss": 1.9497,
      "step": 150
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.621993127147767e-05,
      "loss": 1.9135,
      "step": 151
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.553264604810998e-05,
      "loss": 1.7955,
      "step": 152
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.484536082474227e-05,
      "loss": 1.9452,
      "step": 153
    },
    {
      "epoch": 0.53,
      "learning_rate": 9.415807560137457e-05,
      "loss": 1.9715,
      "step": 154
    },
    {
      "epoch": 0.53,
      "learning_rate": 9.347079037800688e-05,
      "loss": 1.8546,
      "step": 155
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.278350515463918e-05,
      "loss": 1.9756,
      "step": 156
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.209621993127147e-05,
      "loss": 2.0691,
      "step": 157
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.140893470790379e-05,
      "loss": 1.8373,
      "step": 158
    },
    {
      "epoch": 0.55,
      "learning_rate": 9.072164948453609e-05,
      "loss": 1.7061,
      "step": 159
    },
    {
      "epoch": 0.55,
      "learning_rate": 9.003436426116839e-05,
      "loss": 1.9069,
      "step": 160
    },
    {
      "epoch": 0.55,
      "learning_rate": 8.93470790378007e-05,
      "loss": 1.8055,
      "step": 161
    },
    {
      "epoch": 0.56,
      "learning_rate": 8.865979381443299e-05,
      "loss": 1.8873,
      "step": 162
    },
    {
      "epoch": 0.56,
      "learning_rate": 8.797250859106529e-05,
      "loss": 1.8406,
      "step": 163
    },
    {
      "epoch": 0.56,
      "learning_rate": 8.72852233676976e-05,
      "loss": 1.9428,
      "step": 164
    },
    {
      "epoch": 0.57,
      "learning_rate": 8.65979381443299e-05,
      "loss": 1.831,
      "step": 165
    },
    {
      "epoch": 0.57,
      "learning_rate": 8.591065292096219e-05,
      "loss": 1.8802,
      "step": 166
    },
    {
      "epoch": 0.57,
      "learning_rate": 8.52233676975945e-05,
      "loss": 1.8436,
      "step": 167
    },
    {
      "epoch": 0.58,
      "learning_rate": 8.453608247422681e-05,
      "loss": 1.848,
      "step": 168
    },
    {
      "epoch": 0.58,
      "learning_rate": 8.384879725085911e-05,
      "loss": 1.9022,
      "step": 169
    },
    {
      "epoch": 0.58,
      "learning_rate": 8.316151202749142e-05,
      "loss": 1.8015,
      "step": 170
    },
    {
      "epoch": 0.59,
      "learning_rate": 8.247422680412371e-05,
      "loss": 1.8204,
      "step": 171
    },
    {
      "epoch": 0.59,
      "learning_rate": 8.178694158075601e-05,
      "loss": 1.798,
      "step": 172
    },
    {
      "epoch": 0.59,
      "learning_rate": 8.109965635738833e-05,
      "loss": 1.8832,
      "step": 173
    },
    {
      "epoch": 0.6,
      "learning_rate": 8.041237113402063e-05,
      "loss": 1.8176,
      "step": 174
    },
    {
      "epoch": 0.6,
      "learning_rate": 7.972508591065293e-05,
      "loss": 1.9251,
      "step": 175
    },
    {
      "epoch": 0.6,
      "learning_rate": 7.903780068728523e-05,
      "loss": 1.7559,
      "step": 176
    },
    {
      "epoch": 0.61,
      "learning_rate": 7.835051546391753e-05,
      "loss": 1.805,
      "step": 177
    },
    {
      "epoch": 0.61,
      "learning_rate": 7.766323024054983e-05,
      "loss": 1.7995,
      "step": 178
    },
    {
      "epoch": 0.61,
      "learning_rate": 7.697594501718214e-05,
      "loss": 1.9055,
      "step": 179
    },
    {
      "epoch": 0.62,
      "learning_rate": 7.628865979381443e-05,
      "loss": 1.8654,
      "step": 180
    },
    {
      "epoch": 0.62,
      "learning_rate": 7.560137457044673e-05,
      "loss": 1.852,
      "step": 181
    },
    {
      "epoch": 0.62,
      "learning_rate": 7.491408934707905e-05,
      "loss": 1.9377,
      "step": 182
    },
    {
      "epoch": 0.63,
      "learning_rate": 7.422680412371135e-05,
      "loss": 1.8024,
      "step": 183
    },
    {
      "epoch": 0.63,
      "learning_rate": 7.353951890034365e-05,
      "loss": 1.7329,
      "step": 184
    },
    {
      "epoch": 0.63,
      "learning_rate": 7.285223367697595e-05,
      "loss": 1.8203,
      "step": 185
    },
    {
      "epoch": 0.64,
      "learning_rate": 7.216494845360825e-05,
      "loss": 1.8223,
      "step": 186
    },
    {
      "epoch": 0.64,
      "learning_rate": 7.147766323024055e-05,
      "loss": 1.8506,
      "step": 187
    },
    {
      "epoch": 0.64,
      "learning_rate": 7.079037800687286e-05,
      "loss": 1.8169,
      "step": 188
    },
    {
      "epoch": 0.65,
      "learning_rate": 7.010309278350515e-05,
      "loss": 1.796,
      "step": 189
    },
    {
      "epoch": 0.65,
      "learning_rate": 6.941580756013745e-05,
      "loss": 1.7975,
      "step": 190
    },
    {
      "epoch": 0.66,
      "learning_rate": 6.872852233676977e-05,
      "loss": 1.8774,
      "step": 191
    },
    {
      "epoch": 0.66,
      "learning_rate": 6.804123711340207e-05,
      "loss": 1.7822,
      "step": 192
    },
    {
      "epoch": 0.66,
      "learning_rate": 6.735395189003437e-05,
      "loss": 1.8542,
      "step": 193
    },
    {
      "epoch": 0.67,
      "learning_rate": 6.666666666666667e-05,
      "loss": 1.8148,
      "step": 194
    },
    {
      "epoch": 0.67,
      "learning_rate": 6.597938144329897e-05,
      "loss": 1.7446,
      "step": 195
    },
    {
      "epoch": 0.67,
      "learning_rate": 6.529209621993127e-05,
      "loss": 1.7477,
      "step": 196
    },
    {
      "epoch": 0.68,
      "learning_rate": 6.460481099656359e-05,
      "loss": 1.8121,
      "step": 197
    },
    {
      "epoch": 0.68,
      "learning_rate": 6.391752577319587e-05,
      "loss": 1.7289,
      "step": 198
    },
    {
      "epoch": 0.68,
      "learning_rate": 6.323024054982817e-05,
      "loss": 1.8006,
      "step": 199
    },
    {
      "epoch": 0.69,
      "learning_rate": 6.254295532646049e-05,
      "loss": 1.7348,
      "step": 200
    }
  ],
  "logging_steps": 1,
  "max_steps": 291,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 1.2148491200262144e+17,
  "trial_name": null,
  "trial_params": null
}