|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.5673758865248227,
  "eval_steps": 500,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 0.0001994318181818182,
      "loss": 2.7526,
      "step": 1
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.00019886363636363637,
      "loss": 2.6231,
      "step": 2
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.00019829545454545455,
      "loss": 2.5691,
      "step": 3
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.00019772727272727273,
      "loss": 2.5755,
      "step": 4
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.00019715909090909094,
      "loss": 2.5077,
      "step": 5
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0001965909090909091,
      "loss": 2.4698,
      "step": 6
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00019602272727272727,
      "loss": 2.4541,
      "step": 7
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00019545454545454548,
      "loss": 2.4764,
      "step": 8
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00019488636363636366,
      "loss": 2.4176,
      "step": 9
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0001943181818181818,
      "loss": 2.396,
      "step": 10
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00019375000000000002,
      "loss": 2.3929,
      "step": 11
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0001931818181818182,
      "loss": 2.405,
      "step": 12
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019261363636363635,
      "loss": 2.3947,
      "step": 13
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019204545454545456,
      "loss": 2.4164,
      "step": 14
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019147727272727274,
      "loss": 2.373,
      "step": 15
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019090909090909092,
      "loss": 2.3552,
      "step": 16
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0001903409090909091,
      "loss": 2.3988,
      "step": 17
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00018977272727272728,
      "loss": 2.3826,
      "step": 18
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00018920454545454546,
      "loss": 2.3672,
      "step": 19
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00018863636363636364,
      "loss": 2.3885,
      "step": 20
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00018806818181818182,
      "loss": 2.3043,
      "step": 21
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.0001875,
      "loss": 2.2597,
      "step": 22
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00018693181818181818,
      "loss": 2.3457,
      "step": 23
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00018636363636363636,
      "loss": 2.3409,
      "step": 24
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00018579545454545454,
      "loss": 2.303,
      "step": 25
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00018522727272727273,
      "loss": 2.3253,
      "step": 26
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00018465909090909093,
      "loss": 2.3453,
      "step": 27
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00018409090909090909,
      "loss": 2.3151,
      "step": 28
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00018352272727272727,
      "loss": 2.3457,
      "step": 29
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00018295454545454547,
      "loss": 2.2792,
      "step": 30
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00018238636363636365,
      "loss": 2.3257,
      "step": 31
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00018181818181818183,
      "loss": 2.353,
      "step": 32
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00018125000000000001,
      "loss": 2.2633,
      "step": 33
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0001806818181818182,
      "loss": 2.3089,
      "step": 34
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00018011363636363638,
      "loss": 2.3085,
      "step": 35
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00017954545454545456,
      "loss": 2.2746,
      "step": 36
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00017897727272727274,
      "loss": 2.3212,
      "step": 37
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00017840909090909092,
      "loss": 2.2991,
      "step": 38
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0001778409090909091,
      "loss": 2.2807,
      "step": 39
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00017727272727272728,
      "loss": 2.3342,
      "step": 40
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00017670454545454546,
      "loss": 2.3144,
      "step": 41
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00017613636363636366,
      "loss": 2.3084,
      "step": 42
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00017556818181818182,
      "loss": 2.3032,
      "step": 43
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.000175,
      "loss": 2.3105,
      "step": 44
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0001744318181818182,
      "loss": 2.3101,
      "step": 45
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00017386363636363636,
      "loss": 2.313,
      "step": 46
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00017329545454545454,
      "loss": 2.293,
      "step": 47
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00017272727272727275,
      "loss": 2.2807,
      "step": 48
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00017215909090909093,
      "loss": 2.2665,
      "step": 49
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00017159090909090908,
      "loss": 2.2626,
      "step": 50
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0001710227272727273,
      "loss": 2.2839,
      "step": 51
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00017045454545454547,
      "loss": 2.2865,
      "step": 52
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00016988636363636365,
      "loss": 2.2788,
      "step": 53
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00016931818181818183,
      "loss": 2.2622,
      "step": 54
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00016875,
      "loss": 2.3121,
      "step": 55
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0001681818181818182,
      "loss": 2.2958,
      "step": 56
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00016761363636363637,
      "loss": 2.2898,
      "step": 57
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00016704545454545455,
      "loss": 2.2903,
      "step": 58
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00016647727272727273,
      "loss": 2.3393,
      "step": 59
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00016590909090909094,
      "loss": 2.3086,
      "step": 60
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.0001653409090909091,
      "loss": 2.2739,
      "step": 61
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00016477272727272727,
      "loss": 2.252,
      "step": 62
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00016420454545454548,
      "loss": 2.2868,
      "step": 63
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00016363636363636366,
      "loss": 2.2301,
      "step": 64
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0001630681818181818,
      "loss": 2.2563,
      "step": 65
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00016250000000000002,
      "loss": 2.2552,
      "step": 66
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0001619318181818182,
      "loss": 2.2615,
      "step": 67
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00016136363636363635,
      "loss": 2.3237,
      "step": 68
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00016079545454545456,
      "loss": 2.2741,
      "step": 69
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00016022727272727274,
      "loss": 2.2152,
      "step": 70
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00015965909090909092,
      "loss": 2.2558,
      "step": 71
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0001590909090909091,
      "loss": 2.2293,
      "step": 72
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00015852272727272728,
      "loss": 2.2278,
      "step": 73
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00015795454545454546,
      "loss": 2.2583,
      "step": 74
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00015738636363636364,
      "loss": 2.2609,
      "step": 75
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00015681818181818182,
      "loss": 2.2955,
      "step": 76
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00015625,
      "loss": 2.2359,
      "step": 77
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00015568181818181818,
      "loss": 2.2427,
      "step": 78
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00015511363636363636,
      "loss": 2.2632,
      "step": 79
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00015454545454545454,
      "loss": 2.2649,
      "step": 80
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00015397727272727272,
      "loss": 2.2347,
      "step": 81
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00015340909090909093,
      "loss": 2.2439,
      "step": 82
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00015284090909090909,
      "loss": 2.267,
      "step": 83
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00015227272727272727,
      "loss": 2.2682,
      "step": 84
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00015170454545454547,
      "loss": 2.2477,
      "step": 85
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00015113636363636365,
      "loss": 2.2534,
      "step": 86
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0001505681818181818,
      "loss": 2.2609,
      "step": 87
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00015000000000000001,
      "loss": 2.2381,
      "step": 88
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0001494318181818182,
      "loss": 2.2505,
      "step": 89
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00014886363636363635,
      "loss": 2.2628,
      "step": 90
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00014829545454545455,
      "loss": 2.1874,
      "step": 91
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00014772727272727274,
      "loss": 2.2024,
      "step": 92
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00014715909090909092,
      "loss": 2.2453,
      "step": 93
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0001465909090909091,
      "loss": 2.259,
      "step": 94
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00014602272727272728,
      "loss": 2.2304,
      "step": 95
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00014545454545454546,
      "loss": 2.2179,
      "step": 96
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00014488636363636366,
      "loss": 2.2497,
      "step": 97
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00014431818181818182,
      "loss": 2.2458,
      "step": 98
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00014375,
      "loss": 2.2438,
      "step": 99
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0001431818181818182,
      "loss": 2.2461,
      "step": 100
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00014261363636363636,
      "loss": 2.1966,
      "step": 101
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00014204545454545454,
      "loss": 2.2191,
      "step": 102
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00014147727272727275,
      "loss": 2.1765,
      "step": 103
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00014090909090909093,
      "loss": 2.1917,
      "step": 104
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00014034090909090908,
      "loss": 2.2025,
      "step": 105
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0001397727272727273,
      "loss": 2.2496,
      "step": 106
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00013920454545454547,
      "loss": 2.2012,
      "step": 107
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00013863636363636365,
      "loss": 2.2051,
      "step": 108
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00013806818181818183,
      "loss": 2.2027,
      "step": 109
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0001375,
      "loss": 2.21,
      "step": 110
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0001369318181818182,
      "loss": 2.2501,
      "step": 111
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00013636363636363637,
      "loss": 2.1916,
      "step": 112
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00013579545454545455,
      "loss": 2.1972,
      "step": 113
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00013522727272727273,
      "loss": 2.1821,
      "step": 114
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00013465909090909094,
      "loss": 2.2448,
      "step": 115
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.0001340909090909091,
      "loss": 2.2253,
      "step": 116
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00013352272727272727,
      "loss": 2.2088,
      "step": 117
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00013295454545454548,
      "loss": 2.2203,
      "step": 118
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00013238636363636366,
      "loss": 2.2236,
      "step": 119
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.0001318181818181818,
      "loss": 2.2074,
      "step": 120
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00013125000000000002,
      "loss": 2.2355,
      "step": 121
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.0001306818181818182,
      "loss": 2.2215,
      "step": 122
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00013011363636363635,
      "loss": 2.1854,
      "step": 123
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00012954545454545456,
      "loss": 2.2046,
      "step": 124
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00012897727272727274,
      "loss": 2.2079,
      "step": 125
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00012840909090909092,
      "loss": 2.2204,
      "step": 126
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0001278409090909091,
      "loss": 2.2223,
      "step": 127
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00012727272727272728,
      "loss": 2.2149,
      "step": 128
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00012670454545454546,
      "loss": 2.1887,
      "step": 129
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00012613636363636364,
      "loss": 2.1908,
      "step": 130
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00012556818181818182,
      "loss": 2.2434,
      "step": 131
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.000125,
      "loss": 2.2169,
      "step": 132
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00012443181818181818,
      "loss": 2.2113,
      "step": 133
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00012386363636363636,
      "loss": 2.2334,
      "step": 134
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00012329545454545454,
      "loss": 2.2034,
      "step": 135
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00012272727272727272,
      "loss": 2.2297,
      "step": 136
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00012215909090909093,
      "loss": 2.2049,
      "step": 137
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00012159090909090908,
      "loss": 2.1773,
      "step": 138
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00012102272727272728,
      "loss": 2.2057,
      "step": 139
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00012045454545454546,
      "loss": 2.1652,
      "step": 140
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00011988636363636365,
      "loss": 2.2157,
      "step": 141
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00011931818181818182,
      "loss": 2.1987,
      "step": 142
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00011875,
      "loss": 2.1666,
      "step": 143
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.0001181818181818182,
      "loss": 2.1734,
      "step": 144
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00011761363636363636,
      "loss": 2.2221,
      "step": 145
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00011704545454545454,
      "loss": 2.1488,
      "step": 146
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00011647727272727273,
      "loss": 2.1487,
      "step": 147
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00011590909090909093,
      "loss": 2.1678,
      "step": 148
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00011534090909090908,
      "loss": 2.1988,
      "step": 149
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00011477272727272728,
      "loss": 2.1803,
      "step": 150
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00011420454545454547,
      "loss": 2.1671,
      "step": 151
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00011363636363636365,
      "loss": 2.1472,
      "step": 152
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00011306818181818182,
      "loss": 2.1456,
      "step": 153
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00011250000000000001,
      "loss": 2.1769,
      "step": 154
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00011193181818181819,
      "loss": 2.1484,
      "step": 155
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00011136363636363636,
      "loss": 2.1862,
      "step": 156
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00011079545454545455,
      "loss": 2.1088,
      "step": 157
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00011022727272727273,
      "loss": 2.1746,
      "step": 158
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00010965909090909093,
      "loss": 2.1609,
      "step": 159
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00010909090909090909,
      "loss": 2.2089,
      "step": 160
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00010852272727272727,
      "loss": 2.1862,
      "step": 161
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00010795454545454547,
      "loss": 2.1844,
      "step": 162
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00010738636363636365,
      "loss": 2.1717,
      "step": 163
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00010681818181818181,
      "loss": 2.1764,
      "step": 164
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00010625000000000001,
      "loss": 2.1642,
      "step": 165
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00010568181818181819,
      "loss": 2.164,
      "step": 166
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00010511363636363635,
      "loss": 2.1743,
      "step": 167
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00010454545454545455,
      "loss": 2.1688,
      "step": 168
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00010397727272727273,
      "loss": 2.1756,
      "step": 169
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00010340909090909092,
      "loss": 2.1488,
      "step": 170
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00010284090909090909,
      "loss": 2.1558,
      "step": 171
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00010227272727272727,
      "loss": 2.1551,
      "step": 172
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00010170454545454546,
      "loss": 2.1462,
      "step": 173
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00010113636363636366,
      "loss": 2.134,
      "step": 174
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00010056818181818181,
      "loss": 2.1278,
      "step": 175
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.0001,
      "loss": 2.1436,
      "step": 176
    },
    {
      "epoch": 0.5,
      "learning_rate": 9.943181818181819e-05,
      "loss": 2.142,
      "step": 177
    },
    {
      "epoch": 0.5,
      "learning_rate": 9.886363636363637e-05,
      "loss": 2.1481,
      "step": 178
    },
    {
      "epoch": 0.51,
      "learning_rate": 9.829545454545455e-05,
      "loss": 2.1406,
      "step": 179
    },
    {
      "epoch": 0.51,
      "learning_rate": 9.772727272727274e-05,
      "loss": 2.1555,
      "step": 180
    },
    {
      "epoch": 0.51,
      "learning_rate": 9.71590909090909e-05,
      "loss": 2.1667,
      "step": 181
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.65909090909091e-05,
      "loss": 2.1455,
      "step": 182
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.602272727272728e-05,
      "loss": 2.1447,
      "step": 183
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.545454545454546e-05,
      "loss": 2.1642,
      "step": 184
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.488636363636364e-05,
      "loss": 2.1278,
      "step": 185
    },
    {
      "epoch": 0.53,
      "learning_rate": 9.431818181818182e-05,
      "loss": 2.1548,
      "step": 186
    },
    {
      "epoch": 0.53,
      "learning_rate": 9.375e-05,
      "loss": 2.1506,
      "step": 187
    },
    {
      "epoch": 0.53,
      "learning_rate": 9.318181818181818e-05,
      "loss": 2.1649,
      "step": 188
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.261363636363636e-05,
      "loss": 2.1824,
      "step": 189
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.204545454545454e-05,
      "loss": 2.1451,
      "step": 190
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.147727272727274e-05,
      "loss": 2.089,
      "step": 191
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.090909090909092e-05,
      "loss": 2.1457,
      "step": 192
    },
    {
      "epoch": 0.55,
      "learning_rate": 9.03409090909091e-05,
      "loss": 2.1647,
      "step": 193
    },
    {
      "epoch": 0.55,
      "learning_rate": 8.977272727272728e-05,
      "loss": 2.1464,
      "step": 194
    },
    {
      "epoch": 0.55,
      "learning_rate": 8.920454545454546e-05,
      "loss": 2.116,
      "step": 195
    },
    {
      "epoch": 0.56,
      "learning_rate": 8.863636363636364e-05,
      "loss": 2.1326,
      "step": 196
    },
    {
      "epoch": 0.56,
      "learning_rate": 8.806818181818183e-05,
      "loss": 2.1248,
      "step": 197
    },
    {
      "epoch": 0.56,
      "learning_rate": 8.75e-05,
      "loss": 2.172,
      "step": 198
    },
    {
      "epoch": 0.56,
      "learning_rate": 8.693181818181818e-05,
      "loss": 2.1478,
      "step": 199
    },
    {
      "epoch": 0.57,
      "learning_rate": 8.636363636363637e-05,
      "loss": 2.1691,
      "step": 200
    }
  ],
  "logging_steps": 1,
  "max_steps": 352,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 5.0683926177970176e+17,
  "trial_name": null,
  "trial_params": null
}
|
|