|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 493,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 0.0001,
      "loss": 2.3484,
      "step": 1
    },
    {
      "epoch": 0.0,
      "learning_rate": 0.0001,
      "loss": 2.5463,
      "step": 2
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.0002,
      "loss": 2.6055,
      "step": 3
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.0001995983935742972,
      "loss": 2.3676,
      "step": 4
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.0001991967871485944,
      "loss": 1.8278,
      "step": 5
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.0001991967871485944,
      "loss": 1.6644,
      "step": 6
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.00019879518072289158,
      "loss": 1.5494,
      "step": 7
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00019839357429718877,
      "loss": 1.2689,
      "step": 8
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00019799196787148596,
      "loss": 1.0911,
      "step": 9
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00019759036144578314,
      "loss": 0.8925,
      "step": 10
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00019718875502008033,
      "loss": 0.8251,
      "step": 11
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00019678714859437752,
      "loss": 0.8871,
      "step": 12
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0001963855421686747,
      "loss": 0.6684,
      "step": 13
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0001959839357429719,
      "loss": 0.7377,
      "step": 14
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00019558232931726906,
      "loss": 0.7108,
      "step": 15
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00019518072289156628,
      "loss": 0.5971,
      "step": 16
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00019477911646586347,
      "loss": 0.6384,
      "step": 17
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019437751004016066,
      "loss": 0.6022,
      "step": 18
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019397590361445782,
      "loss": 0.6303,
      "step": 19
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019357429718875504,
      "loss": 0.6137,
      "step": 20
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019317269076305223,
      "loss": 0.7437,
      "step": 21
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019277108433734942,
      "loss": 0.7448,
      "step": 22
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019236947791164658,
      "loss": 0.7302,
      "step": 23
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019196787148594377,
      "loss": 0.6401,
      "step": 24
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019156626506024098,
      "loss": 0.4414,
      "step": 25
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019116465863453817,
      "loss": 0.5489,
      "step": 26
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019076305220883533,
      "loss": 0.579,
      "step": 27
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019036144578313252,
      "loss": 0.575,
      "step": 28
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00018995983935742974,
      "loss": 0.7082,
      "step": 29
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00018955823293172693,
      "loss": 0.5186,
      "step": 30
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.0001891566265060241,
      "loss": 0.5562,
      "step": 31
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00018875502008032128,
      "loss": 0.644,
      "step": 32
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0001883534136546185,
      "loss": 0.5768,
      "step": 33
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00018795180722891569,
      "loss": 0.5801,
      "step": 34
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00018755020080321285,
      "loss": 0.609,
      "step": 35
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00018714859437751004,
      "loss": 0.5588,
      "step": 36
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00018674698795180723,
      "loss": 0.6599,
      "step": 37
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00018634538152610444,
      "loss": 0.6691,
      "step": 38
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0001859437751004016,
      "loss": 0.5768,
      "step": 39
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0001855421686746988,
      "loss": 0.5547,
      "step": 40
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00018514056224899598,
      "loss": 0.5434,
      "step": 41
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0001847389558232932,
      "loss": 0.5808,
      "step": 42
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00018433734939759036,
      "loss": 0.6528,
      "step": 43
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00018393574297188755,
      "loss": 0.6594,
      "step": 44
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00018353413654618474,
      "loss": 0.5752,
      "step": 45
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00018313253012048193,
      "loss": 0.7312,
      "step": 46
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00018273092369477912,
      "loss": 0.5985,
      "step": 47
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0001823293172690763,
      "loss": 0.528,
      "step": 48
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0001819277108433735,
      "loss": 0.5257,
      "step": 49
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0001815261044176707,
      "loss": 0.5592,
      "step": 50
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0001811244979919679,
      "loss": 0.5552,
      "step": 51
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00018072289156626507,
      "loss": 0.6648,
      "step": 52
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00018032128514056225,
      "loss": 0.7509,
      "step": 53
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00017991967871485944,
      "loss": 0.4606,
      "step": 54
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00017951807228915663,
      "loss": 0.5747,
      "step": 55
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00017911646586345382,
      "loss": 0.5779,
      "step": 56
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.000178714859437751,
      "loss": 0.4876,
      "step": 57
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001783132530120482,
      "loss": 0.58,
      "step": 58
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001779116465863454,
      "loss": 0.5961,
      "step": 59
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00017751004016064258,
      "loss": 0.4444,
      "step": 60
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00017710843373493977,
      "loss": 0.6475,
      "step": 61
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00017670682730923696,
      "loss": 0.5423,
      "step": 62
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00017630522088353415,
      "loss": 0.5865,
      "step": 63
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00017590361445783134,
      "loss": 0.5563,
      "step": 64
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00017550200803212853,
      "loss": 0.4826,
      "step": 65
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00017510040160642571,
      "loss": 0.5253,
      "step": 66
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0001746987951807229,
      "loss": 0.6209,
      "step": 67
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0001742971887550201,
      "loss": 0.4979,
      "step": 68
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00017389558232931728,
      "loss": 0.6658,
      "step": 69
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00017349397590361447,
      "loss": 0.4918,
      "step": 70
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00017309236947791166,
      "loss": 0.6668,
      "step": 71
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00017269076305220885,
      "loss": 0.5992,
      "step": 72
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00017228915662650604,
      "loss": 0.6433,
      "step": 73
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00017188755020080323,
      "loss": 0.647,
      "step": 74
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00017148594377510042,
      "loss": 0.5385,
      "step": 75
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0001710843373493976,
      "loss": 0.5451,
      "step": 76
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00017068273092369477,
      "loss": 0.4437,
      "step": 77
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00017028112449799199,
      "loss": 0.4511,
      "step": 78
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00016987951807228917,
      "loss": 0.5662,
      "step": 79
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00016947791164658636,
      "loss": 0.6221,
      "step": 80
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00016907630522088353,
      "loss": 0.567,
      "step": 81
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00016867469879518074,
      "loss": 0.5783,
      "step": 82
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00016827309236947793,
      "loss": 0.5302,
      "step": 83
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00016787148594377512,
      "loss": 0.6094,
      "step": 84
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00016746987951807228,
      "loss": 0.588,
      "step": 85
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00016706827309236947,
      "loss": 0.5507,
      "step": 86
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0001666666666666667,
      "loss": 0.4017,
      "step": 87
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00016626506024096388,
      "loss": 0.5631,
      "step": 88
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00016586345381526104,
      "loss": 0.5022,
      "step": 89
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00016546184738955823,
      "loss": 0.4663,
      "step": 90
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00016506024096385545,
      "loss": 0.4863,
      "step": 91
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00016465863453815263,
      "loss": 0.5595,
      "step": 92
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0001642570281124498,
      "loss": 0.5928,
      "step": 93
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00016385542168674699,
      "loss": 0.5443,
      "step": 94
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00016345381526104417,
      "loss": 0.6472,
      "step": 95
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0001630522088353414,
      "loss": 0.5789,
      "step": 96
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00016265060240963855,
      "loss": 0.4732,
      "step": 97
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00016224899598393574,
      "loss": 0.4843,
      "step": 98
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00016184738955823293,
      "loss": 0.6245,
      "step": 99
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00016144578313253015,
      "loss": 0.6005,
      "step": 100
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0001610441767068273,
      "loss": 0.7296,
      "step": 101
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0001606425702811245,
      "loss": 0.5073,
      "step": 102
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0001602409638554217,
      "loss": 0.4158,
      "step": 103
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00015983935742971888,
      "loss": 0.6197,
      "step": 104
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00015943775100401607,
      "loss": 0.6124,
      "step": 105
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00015903614457831326,
      "loss": 0.3915,
      "step": 106
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00015863453815261045,
      "loss": 0.5916,
      "step": 107
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00015823293172690763,
      "loss": 0.4174,
      "step": 108
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00015783132530120482,
      "loss": 0.4335,
      "step": 109
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.000157429718875502,
      "loss": 0.5405,
      "step": 110
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.0001570281124497992,
      "loss": 0.5486,
      "step": 111
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.0001566265060240964,
      "loss": 0.5357,
      "step": 112
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00015622489959839358,
      "loss": 0.4446,
      "step": 113
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00015582329317269077,
      "loss": 0.4985,
      "step": 114
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00015542168674698796,
      "loss": 0.5429,
      "step": 115
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00015502008032128515,
      "loss": 0.5076,
      "step": 116
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00015461847389558234,
      "loss": 0.5397,
      "step": 117
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00015421686746987953,
      "loss": 0.5309,
      "step": 118
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00015381526104417672,
      "loss": 0.5043,
      "step": 119
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.0001534136546184739,
      "loss": 0.5628,
      "step": 120
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0001530120481927711,
      "loss": 0.5627,
      "step": 121
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00015261044176706828,
      "loss": 0.4973,
      "step": 122
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00015220883534136547,
      "loss": 0.6026,
      "step": 123
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00015180722891566266,
      "loss": 0.5615,
      "step": 124
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00015140562248995985,
      "loss": 0.5713,
      "step": 125
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00015100401606425701,
      "loss": 0.4732,
      "step": 126
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00015060240963855423,
      "loss": 0.6171,
      "step": 127
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00015020080321285142,
      "loss": 0.4272,
      "step": 128
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0001497991967871486,
      "loss": 0.5087,
      "step": 129
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00014939759036144577,
      "loss": 0.4022,
      "step": 130
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.000148995983935743,
      "loss": 0.6336,
      "step": 131
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00014859437751004018,
      "loss": 0.4404,
      "step": 132
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00014819277108433737,
      "loss": 0.5488,
      "step": 133
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00014779116465863453,
      "loss": 0.5313,
      "step": 134
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00014738955823293172,
      "loss": 0.4526,
      "step": 135
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00014698795180722893,
      "loss": 0.434,
      "step": 136
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00014658634538152612,
      "loss": 0.5141,
      "step": 137
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00014618473895582328,
      "loss": 0.4783,
      "step": 138
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00014578313253012047,
      "loss": 0.5319,
      "step": 139
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0001453815261044177,
      "loss": 0.509,
      "step": 140
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00014497991967871488,
      "loss": 0.6049,
      "step": 141
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00014457831325301204,
      "loss": 0.6876,
      "step": 142
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00014417670682730923,
      "loss": 0.5849,
      "step": 143
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00014377510040160642,
      "loss": 0.5415,
      "step": 144
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00014337349397590364,
      "loss": 0.5939,
      "step": 145
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0001429718875502008,
      "loss": 0.4664,
      "step": 146
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.000142570281124498,
      "loss": 0.527,
      "step": 147
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00014216867469879518,
      "loss": 0.4095,
      "step": 148
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0001417670682730924,
      "loss": 0.5033,
      "step": 149
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00014136546184738956,
      "loss": 0.6287,
      "step": 150
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00014096385542168674,
      "loss": 0.5513,
      "step": 151
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00014056224899598393,
      "loss": 0.4836,
      "step": 152
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00014016064257028115,
      "loss": 0.4919,
      "step": 153
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00013975903614457834,
      "loss": 0.4779,
      "step": 154
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0001393574297188755,
      "loss": 0.5458,
      "step": 155
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.0001389558232931727,
      "loss": 0.5449,
      "step": 156
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00013855421686746988,
      "loss": 0.5182,
      "step": 157
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.0001381526104417671,
      "loss": 0.6282,
      "step": 158
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00013775100401606426,
      "loss": 0.365,
      "step": 159
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00013734939759036145,
      "loss": 0.5731,
      "step": 160
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00013694779116465864,
      "loss": 0.4345,
      "step": 161
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00013654618473895585,
      "loss": 0.5463,
      "step": 162
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00013614457831325302,
      "loss": 0.4878,
      "step": 163
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.0001357429718875502,
      "loss": 0.5038,
      "step": 164
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.0001353413654618474,
      "loss": 0.4764,
      "step": 165
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00013493975903614458,
      "loss": 0.5113,
      "step": 166
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00013453815261044177,
      "loss": 0.4931,
      "step": 167
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00013413654618473896,
      "loss": 0.5509,
      "step": 168
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00013373493975903615,
      "loss": 0.5908,
      "step": 169
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00013333333333333334,
      "loss": 0.4747,
      "step": 170
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00013293172690763053,
      "loss": 0.4458,
      "step": 171
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00013253012048192772,
      "loss": 0.449,
      "step": 172
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.0001321285140562249,
      "loss": 0.4831,
      "step": 173
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.0001317269076305221,
      "loss": 0.3903,
      "step": 174
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00013132530120481929,
      "loss": 0.363,
      "step": 175
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00013092369477911648,
      "loss": 0.5235,
      "step": 176
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00013052208835341366,
      "loss": 0.4631,
      "step": 177
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00013012048192771085,
      "loss": 0.5049,
      "step": 178
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00012971887550200804,
      "loss": 0.531,
      "step": 179
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00012931726907630523,
      "loss": 0.4807,
      "step": 180
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00012891566265060242,
      "loss": 0.5766,
      "step": 181
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.0001285140562248996,
      "loss": 0.5113,
      "step": 182
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.0001281124497991968,
      "loss": 0.5385,
      "step": 183
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00012771084337349396,
      "loss": 0.4967,
      "step": 184
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00012730923694779118,
      "loss": 0.4895,
      "step": 185
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00012690763052208837,
      "loss": 0.3827,
      "step": 186
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00012650602409638556,
      "loss": 0.4897,
      "step": 187
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00012610441767068272,
      "loss": 0.5642,
      "step": 188
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00012570281124497994,
      "loss": 0.4989,
      "step": 189
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00012530120481927712,
      "loss": 0.5212,
      "step": 190
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.0001248995983935743,
      "loss": 0.6401,
      "step": 191
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00012449799196787148,
      "loss": 0.4974,
      "step": 192
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.0001240963855421687,
      "loss": 0.4409,
      "step": 193
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00012369477911646588,
      "loss": 0.5508,
      "step": 194
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00012329317269076307,
      "loss": 0.738,
      "step": 195
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00012289156626506023,
      "loss": 0.4174,
      "step": 196
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00012248995983935742,
      "loss": 0.4254,
      "step": 197
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00012208835341365464,
      "loss": 0.6255,
      "step": 198
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00012168674698795181,
      "loss": 0.5013,
      "step": 199
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.000121285140562249,
      "loss": 0.4475,
      "step": 200
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00012088353413654618,
      "loss": 0.463,
      "step": 201
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.0001204819277108434,
      "loss": 0.475,
      "step": 202
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00012008032128514057,
      "loss": 0.4786,
      "step": 203
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00011967871485943776,
      "loss": 0.5241,
      "step": 204
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00011927710843373494,
      "loss": 0.529,
      "step": 205
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00011887550200803212,
      "loss": 0.5723,
      "step": 206
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00011847389558232933,
      "loss": 0.4576,
      "step": 207
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00011807228915662652,
      "loss": 0.542,
      "step": 208
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00011767068273092369,
      "loss": 0.4835,
      "step": 209
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00011726907630522088,
      "loss": 0.581,
      "step": 210
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00011686746987951808,
      "loss": 0.5406,
      "step": 211
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00011646586345381527,
      "loss": 0.4382,
      "step": 212
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00011606425702811245,
      "loss": 0.5514,
      "step": 213
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00011566265060240964,
      "loss": 0.5362,
      "step": 214
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00011526104417670683,
      "loss": 0.53,
      "step": 215
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00011485943775100403,
      "loss": 0.6346,
      "step": 216
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0001144578313253012,
      "loss": 0.517,
      "step": 217
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0001140562248995984,
      "loss": 0.6743,
      "step": 218
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00011365461847389558,
      "loss": 0.5228,
      "step": 219
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00011325301204819279,
      "loss": 0.4498,
      "step": 220
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00011285140562248996,
      "loss": 0.3413,
      "step": 221
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00011244979919678715,
      "loss": 0.5713,
      "step": 222
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00011204819277108434,
      "loss": 0.3474,
      "step": 223
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00011164658634538152,
      "loss": 0.4452,
      "step": 224
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00011124497991967872,
      "loss": 0.5386,
      "step": 225
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00011084337349397591,
      "loss": 0.5245,
      "step": 226
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.0001104417670682731,
      "loss": 0.5415,
      "step": 227
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00011004016064257027,
      "loss": 0.6118,
      "step": 228
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00010963855421686749,
      "loss": 0.5373,
      "step": 229
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00010923694779116467,
      "loss": 0.524,
      "step": 230
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00010883534136546186,
      "loss": 0.402,
      "step": 231
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00010843373493975903,
      "loss": 0.4581,
      "step": 232
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00010803212851405625,
      "loss": 0.585,
      "step": 233
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00010763052208835342,
      "loss": 0.4727,
      "step": 234
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00010722891566265061,
      "loss": 0.5338,
      "step": 235
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00010682730923694779,
      "loss": 0.4985,
      "step": 236
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00010642570281124498,
      "loss": 0.4611,
      "step": 237
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00010602409638554218,
      "loss": 0.5,
      "step": 238
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00010562248995983937,
      "loss": 0.4257,
      "step": 239
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00010522088353413654,
      "loss": 0.5235,
      "step": 240
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00010481927710843373,
      "loss": 0.4891,
      "step": 241
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00010441767068273094,
      "loss": 0.589,
      "step": 242
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00010401606425702813,
      "loss": 0.6157,
      "step": 243
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.0001036144578313253,
      "loss": 0.4803,
      "step": 244
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00010321285140562249,
      "loss": 0.5102,
      "step": 245
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00010281124497991968,
      "loss": 0.5567,
      "step": 246
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00010240963855421688,
      "loss": 0.4538,
      "step": 247
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00010200803212851406,
      "loss": 0.4877,
      "step": 248
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00010160642570281125,
      "loss": 0.5516,
      "step": 249
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00010120481927710844,
      "loss": 0.4773,
      "step": 250
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00010080321285140564,
      "loss": 0.476,
      "step": 251
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00010040160642570282,
      "loss": 0.5584,
      "step": 252
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0001,
      "loss": 0.4742,
      "step": 253
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.95983935742972e-05,
      "loss": 0.4615,
      "step": 254
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.919678714859438e-05,
      "loss": 0.4471,
      "step": 255
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.879518072289157e-05,
      "loss": 0.4661,
      "step": 256
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.839357429718876e-05,
      "loss": 0.3569,
      "step": 257
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.799196787148595e-05,
      "loss": 0.4938,
      "step": 258
    },
    {
      "epoch": 0.53,
      "learning_rate": 9.759036144578314e-05,
      "loss": 0.5879,
      "step": 259
    },
    {
      "epoch": 0.53,
      "learning_rate": 9.718875502008033e-05,
      "loss": 0.493,
      "step": 260
    },
    {
      "epoch": 0.53,
      "learning_rate": 9.678714859437752e-05,
      "loss": 0.385,
      "step": 261
    },
    {
      "epoch": 0.53,
      "learning_rate": 9.638554216867471e-05,
      "loss": 0.485,
      "step": 262
    },
    {
      "epoch": 0.53,
      "learning_rate": 9.598393574297188e-05,
      "loss": 0.4833,
      "step": 263
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.558232931726909e-05,
      "loss": 0.5572,
      "step": 264
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.518072289156626e-05,
      "loss": 0.5039,
      "step": 265
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.477911646586346e-05,
      "loss": 0.5308,
      "step": 266
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.437751004016064e-05,
      "loss": 0.3708,
      "step": 267
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.397590361445784e-05,
      "loss": 0.4779,
      "step": 268
    },
    {
      "epoch": 0.55,
      "learning_rate": 9.357429718875502e-05,
      "loss": 0.4807,
      "step": 269
    },
    {
      "epoch": 0.55,
      "learning_rate": 9.317269076305222e-05,
      "loss": 0.5036,
      "step": 270
    },
    {
      "epoch": 0.55,
      "learning_rate": 9.27710843373494e-05,
      "loss": 0.5056,
      "step": 271
    },
    {
      "epoch": 0.55,
      "learning_rate": 9.23694779116466e-05,
      "loss": 0.5143,
      "step": 272
    },
    {
      "epoch": 0.55,
      "learning_rate": 9.196787148594378e-05,
      "loss": 0.5403,
      "step": 273
    },
    {
      "epoch": 0.56,
      "learning_rate": 9.156626506024096e-05,
      "loss": 0.4212,
      "step": 274
    },
    {
      "epoch": 0.56,
      "learning_rate": 9.116465863453815e-05,
      "loss": 0.4461,
      "step": 275
    },
    {
      "epoch": 0.56,
      "learning_rate": 9.076305220883534e-05,
      "loss": 0.4122,
      "step": 276
    },
    {
      "epoch": 0.56,
      "learning_rate": 9.036144578313253e-05,
      "loss": 0.4249,
      "step": 277
    },
    {
      "epoch": 0.56,
      "learning_rate": 8.995983935742972e-05,
      "loss": 0.4376,
      "step": 278
    },
    {
      "epoch": 0.57,
      "learning_rate": 8.955823293172691e-05,
      "loss": 0.5155,
      "step": 279
    },
    {
      "epoch": 0.57,
      "learning_rate": 8.91566265060241e-05,
      "loss": 0.4269,
      "step": 280
    },
    {
      "epoch": 0.57,
      "learning_rate": 8.875502008032129e-05,
      "loss": 0.5717,
      "step": 281
    },
    {
      "epoch": 0.57,
      "learning_rate": 8.835341365461848e-05,
      "loss": 0.5976,
      "step": 282
    },
    {
      "epoch": 0.57,
      "learning_rate": 8.795180722891567e-05,
      "loss": 0.3902,
      "step": 283
    },
    {
      "epoch": 0.58,
      "learning_rate": 8.755020080321286e-05,
      "loss": 0.543,
      "step": 284
    },
    {
      "epoch": 0.58,
      "learning_rate": 8.714859437751005e-05,
      "loss": 0.4176,
      "step": 285
    },
    {
      "epoch": 0.58,
      "learning_rate": 8.674698795180724e-05,
      "loss": 0.5327,
      "step": 286
    },
    {
      "epoch": 0.58,
      "learning_rate": 8.634538152610442e-05,
      "loss": 0.4426,
      "step": 287
    },
    {
      "epoch": 0.58,
      "learning_rate": 8.594377510040161e-05,
      "loss": 0.5193,
      "step": 288
    },
    {
      "epoch": 0.59,
      "learning_rate": 8.55421686746988e-05,
      "loss": 0.4712,
      "step": 289
    },
    {
      "epoch": 0.59,
      "learning_rate": 8.514056224899599e-05,
      "loss": 0.4665,
      "step": 290
    },
    {
      "epoch": 0.59,
      "learning_rate": 8.473895582329318e-05,
      "loss": 0.3824,
      "step": 291
    },
    {
      "epoch": 0.59,
      "learning_rate": 8.433734939759037e-05,
      "loss": 0.5133,
      "step": 292
    },
    {
      "epoch": 0.59,
      "learning_rate": 8.393574297188756e-05,
      "loss": 0.516,
      "step": 293
    },
    {
      "epoch": 0.6,
      "learning_rate": 8.353413654618474e-05,
      "loss": 0.4913,
      "step": 294
    },
    {
      "epoch": 0.6,
      "learning_rate": 8.313253012048194e-05,
      "loss": 0.486,
      "step": 295
    },
    {
      "epoch": 0.6,
      "learning_rate": 8.273092369477911e-05,
      "loss": 0.4943,
      "step": 296
    },
    {
      "epoch": 0.6,
      "learning_rate": 8.232931726907632e-05,
      "loss": 0.5291,
      "step": 297
    },
    {
      "epoch": 0.6,
      "learning_rate": 8.192771084337349e-05,
      "loss": 0.4005,
      "step": 298
    },
    {
      "epoch": 0.61,
      "learning_rate": 8.15261044176707e-05,
      "loss": 0.4921,
      "step": 299
    },
    {
      "epoch": 0.61,
      "learning_rate": 8.112449799196787e-05,
      "loss": 0.5235,
      "step": 300
    },
    {
      "epoch": 0.61,
      "learning_rate": 8.072289156626507e-05,
      "loss": 0.5263,
      "step": 301
    },
    {
      "epoch": 0.61,
      "learning_rate": 8.032128514056225e-05,
      "loss": 0.3585,
      "step": 302
    },
    {
      "epoch": 0.61,
      "learning_rate": 7.991967871485944e-05,
      "loss": 0.4826,
      "step": 303
    },
    {
      "epoch": 0.62,
      "learning_rate": 7.951807228915663e-05,
      "loss": 0.4486,
      "step": 304
    },
    {
      "epoch": 0.62,
      "learning_rate": 7.911646586345382e-05,
      "loss": 0.4879,
      "step": 305
    },
    {
      "epoch": 0.62,
      "learning_rate": 7.8714859437751e-05,
      "loss": 0.5017,
      "step": 306
    },
    {
      "epoch": 0.62,
      "learning_rate": 7.83132530120482e-05,
      "loss": 0.5536,
      "step": 307
    },
    {
      "epoch": 0.62,
      "learning_rate": 7.791164658634539e-05,
      "loss": 0.5105,
      "step": 308
    },
    {
      "epoch": 0.63,
      "learning_rate": 7.751004016064257e-05,
      "loss": 0.4379,
      "step": 309
    },
    {
      "epoch": 0.63,
      "learning_rate": 7.710843373493976e-05,
      "loss": 0.5182,
      "step": 310
    },
    {
      "epoch": 0.63,
      "learning_rate": 7.670682730923695e-05,
      "loss": 0.5573,
      "step": 311
    },
    {
      "epoch": 0.63,
      "learning_rate": 7.630522088353414e-05,
      "loss": 0.5426,
      "step": 312
    },
    {
      "epoch": 0.63,
      "learning_rate": 7.590361445783133e-05,
      "loss": 0.4325,
      "step": 313
    },
    {
      "epoch": 0.64,
      "learning_rate": 7.550200803212851e-05,
      "loss": 0.4559,
      "step": 314
    },
    {
      "epoch": 0.64,
      "learning_rate": 7.510040160642571e-05,
      "loss": 0.3663,
      "step": 315
    },
    {
      "epoch": 0.64,
      "learning_rate": 7.469879518072289e-05,
      "loss": 0.4276,
      "step": 316
    },
    {
      "epoch": 0.64,
      "learning_rate": 7.429718875502009e-05,
      "loss": 0.4663,
      "step": 317
    },
    {
      "epoch": 0.65,
      "learning_rate": 7.389558232931726e-05,
      "loss": 0.5068,
      "step": 318
    },
    {
      "epoch": 0.65,
      "learning_rate": 7.349397590361447e-05,
      "loss": 0.5,
      "step": 319
    },
    {
      "epoch": 0.65,
      "learning_rate": 7.309236947791164e-05,
      "loss": 0.431,
      "step": 320
    },
    {
      "epoch": 0.65,
      "learning_rate": 7.269076305220885e-05,
      "loss": 0.4133,
      "step": 321
    },
    {
      "epoch": 0.65,
      "learning_rate": 7.228915662650602e-05,
      "loss": 0.3705,
      "step": 322
    },
    {
      "epoch": 0.66,
      "learning_rate": 7.188755020080321e-05,
      "loss": 0.419,
      "step": 323
    },
    {
      "epoch": 0.66,
      "learning_rate": 7.14859437751004e-05,
      "loss": 0.5409,
      "step": 324
    },
    {
      "epoch": 0.66,
      "learning_rate": 7.108433734939759e-05,
      "loss": 0.5186,
      "step": 325
    },
    {
      "epoch": 0.66,
      "learning_rate": 7.068273092369478e-05,
      "loss": 0.438,
      "step": 326
    },
    {
      "epoch": 0.66,
      "learning_rate": 7.028112449799197e-05,
      "loss": 0.4709,
      "step": 327
    },
    {
      "epoch": 0.67,
      "learning_rate": 6.987951807228917e-05,
      "loss": 0.3494,
      "step": 328
    },
    {
      "epoch": 0.67,
      "learning_rate": 6.947791164658635e-05,
      "loss": 0.4782,
      "step": 329
    },
    {
      "epoch": 0.67,
      "learning_rate": 6.907630522088355e-05,
      "loss": 0.4365,
      "step": 330
    },
    {
      "epoch": 0.67,
      "learning_rate": 6.867469879518072e-05,
      "loss": 0.4108,
      "step": 331
    },
    {
      "epoch": 0.67,
      "learning_rate": 6.827309236947793e-05,
      "loss": 0.429,
      "step": 332
    },
    {
      "epoch": 0.68,
      "learning_rate": 6.78714859437751e-05,
      "loss": 0.3968,
      "step": 333
    },
    {
      "epoch": 0.68,
      "learning_rate": 6.746987951807229e-05,
      "loss": 0.5652,
      "step": 334
    },
    {
      "epoch": 0.68,
      "learning_rate": 6.706827309236948e-05,
      "loss": 0.435,
      "step": 335
    },
    {
      "epoch": 0.68,
      "learning_rate": 6.666666666666667e-05,
      "loss": 0.3639,
      "step": 336
    },
    {
      "epoch": 0.68,
      "learning_rate": 6.626506024096386e-05,
      "loss": 0.4134,
      "step": 337
    },
    {
      "epoch": 0.69,
      "learning_rate": 6.586345381526105e-05,
      "loss": 0.5979,
      "step": 338
    },
    {
      "epoch": 0.69,
      "learning_rate": 6.546184738955824e-05,
      "loss": 0.4856,
      "step": 339
    },
    {
      "epoch": 0.69,
      "learning_rate": 6.506024096385543e-05,
      "loss": 0.4834,
      "step": 340
    },
    {
      "epoch": 0.69,
      "learning_rate": 6.465863453815262e-05,
      "loss": 0.5589,
      "step": 341
    },
    {
      "epoch": 0.69,
      "learning_rate": 6.42570281124498e-05,
      "loss": 0.4846,
      "step": 342
    },
    {
      "epoch": 0.7,
      "learning_rate": 6.385542168674698e-05,
      "loss": 0.3595,
      "step": 343
    },
    {
      "epoch": 0.7,
      "learning_rate": 6.345381526104418e-05,
      "loss": 0.4102,
      "step": 344
    },
    {
      "epoch": 0.7,
      "learning_rate": 6.305220883534136e-05,
      "loss": 0.3999,
      "step": 345
    },
    {
      "epoch": 0.7,
      "learning_rate": 6.265060240963856e-05,
      "loss": 0.3772,
      "step": 346
    },
    {
      "epoch": 0.7,
      "learning_rate": 6.224899598393574e-05,
      "loss": 0.4575,
      "step": 347
    },
    {
      "epoch": 0.71,
      "learning_rate": 6.184738955823294e-05,
      "loss": 0.4095,
      "step": 348
    },
    {
      "epoch": 0.71,
      "learning_rate": 6.144578313253012e-05,
      "loss": 0.4449,
      "step": 349
    },
    {
      "epoch": 0.71,
      "learning_rate": 6.104417670682732e-05,
      "loss": 0.5009,
      "step": 350
    },
    {
      "epoch": 0.71,
      "learning_rate": 6.06425702811245e-05,
      "loss": 0.3336,
      "step": 351
    },
    {
      "epoch": 0.71,
      "learning_rate": 6.02409638554217e-05,
      "loss": 0.4671,
      "step": 352
    },
    {
      "epoch": 0.72,
      "learning_rate": 5.983935742971888e-05,
      "loss": 0.4137,
      "step": 353
    },
    {
      "epoch": 0.72,
      "learning_rate": 5.943775100401606e-05,
      "loss": 0.5014,
      "step": 354
    },
    {
      "epoch": 0.72,
      "learning_rate": 5.903614457831326e-05,
      "loss": 0.557,
      "step": 355
    },
    {
      "epoch": 0.72,
      "learning_rate": 5.863453815261044e-05,
      "loss": 0.5651,
      "step": 356
    },
    {
      "epoch": 0.72,
      "learning_rate": 5.823293172690764e-05,
      "loss": 0.5723,
      "step": 357
    },
    {
      "epoch": 0.73,
      "learning_rate": 5.783132530120482e-05,
      "loss": 0.559,
      "step": 358
    },
    {
      "epoch": 0.73,
      "learning_rate": 5.7429718875502015e-05,
      "loss": 0.4025,
      "step": 359
    },
    {
      "epoch": 0.73,
      "learning_rate": 5.70281124497992e-05,
      "loss": 0.4925,
      "step": 360
    },
    {
      "epoch": 0.73,
      "learning_rate": 5.6626506024096394e-05,
      "loss": 0.377,
      "step": 361
    },
    {
      "epoch": 0.73,
      "learning_rate": 5.6224899598393576e-05,
      "loss": 0.2866,
      "step": 362
    },
    {
      "epoch": 0.74,
      "learning_rate": 5.582329317269076e-05,
      "loss": 0.5416,
      "step": 363
    },
    {
      "epoch": 0.74,
      "learning_rate": 5.5421686746987955e-05,
      "loss": 0.487,
      "step": 364
    },
    {
      "epoch": 0.74,
      "learning_rate": 5.502008032128514e-05,
      "loss": 0.417,
      "step": 365
    },
    {
      "epoch": 0.74,
      "learning_rate": 5.461847389558233e-05,
      "loss": 0.4206,
      "step": 366
    },
    {
      "epoch": 0.74,
      "learning_rate": 5.4216867469879516e-05,
      "loss": 0.4299,
      "step": 367
    },
    {
      "epoch": 0.75,
      "learning_rate": 5.381526104417671e-05,
      "loss": 0.5043,
      "step": 368
    },
    {
      "epoch": 0.75,
      "learning_rate": 5.3413654618473894e-05,
      "loss": 0.3499,
      "step": 369
    },
    {
      "epoch": 0.75,
      "learning_rate": 5.301204819277109e-05,
      "loss": 0.3456,
      "step": 370
    },
    {
      "epoch": 0.75,
      "learning_rate": 5.261044176706827e-05,
      "loss": 0.3828,
      "step": 371
    },
    {
      "epoch": 0.75,
      "learning_rate": 5.220883534136547e-05,
      "loss": 0.4296,
      "step": 372
    },
    {
      "epoch": 0.76,
      "learning_rate": 5.180722891566265e-05,
      "loss": 0.3614,
      "step": 373
    },
    {
      "epoch": 0.76,
      "learning_rate": 5.140562248995984e-05,
      "loss": 0.5085,
      "step": 374
    },
    {
      "epoch": 0.76,
      "learning_rate": 5.100401606425703e-05,
      "loss": 0.4497,
      "step": 375
    },
    {
      "epoch": 0.76,
      "learning_rate": 5.060240963855422e-05,
      "loss": 0.4421,
      "step": 376
    },
    {
      "epoch": 0.76,
      "learning_rate": 5.020080321285141e-05,
      "loss": 0.411,
      "step": 377
    },
    {
      "epoch": 0.77,
      "learning_rate": 4.97991967871486e-05,
      "loss": 0.4727,
      "step": 378
    },
    {
      "epoch": 0.77,
      "learning_rate": 4.9397590361445786e-05,
      "loss": 0.4739,
      "step": 379
    },
    {
      "epoch": 0.77,
      "learning_rate": 4.8995983935742975e-05,
      "loss": 0.5005,
      "step": 380
    },
    {
      "epoch": 0.77,
      "learning_rate": 4.8594377510040165e-05,
      "loss": 0.4776,
      "step": 381
    },
    {
      "epoch": 0.77,
      "learning_rate": 4.8192771084337354e-05,
      "loss": 0.4337,
      "step": 382
    },
    {
      "epoch": 0.78,
      "learning_rate": 4.779116465863454e-05,
      "loss": 0.3575,
      "step": 383
    },
    {
      "epoch": 0.78,
      "learning_rate": 4.738955823293173e-05,
      "loss": 0.5325,
      "step": 384
    },
    {
      "epoch": 0.78,
      "learning_rate": 4.698795180722892e-05,
      "loss": 0.4765,
      "step": 385
    },
    {
      "epoch": 0.78,
      "learning_rate": 4.658634538152611e-05,
      "loss": 0.4794,
      "step": 386
    },
    {
      "epoch": 0.78,
      "learning_rate": 4.61847389558233e-05,
      "loss": 0.5106,
      "step": 387
    },
    {
      "epoch": 0.79,
      "learning_rate": 4.578313253012048e-05,
      "loss": 0.3883,
      "step": 388
    },
    {
      "epoch": 0.79,
      "learning_rate": 4.538152610441767e-05,
      "loss": 0.4342,
      "step": 389
    },
    {
      "epoch": 0.79,
      "learning_rate": 4.497991967871486e-05,
      "loss": 0.4893,
      "step": 390
    },
    {
      "epoch": 0.79,
      "learning_rate": 4.457831325301205e-05,
      "loss": 0.4752,
      "step": 391
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.417670682730924e-05,
      "loss": 0.4855,
      "step": 392
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.377510040160643e-05,
      "loss": 0.4693,
      "step": 393
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.337349397590362e-05,
      "loss": 0.3851,
      "step": 394
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.297188755020081e-05,
      "loss": 0.5259,
      "step": 395
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.2570281124497996e-05,
      "loss": 0.476,
      "step": 396
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.2168674698795186e-05,
      "loss": 0.4004,
      "step": 397
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.176706827309237e-05,
      "loss": 0.4326,
      "step": 398
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.136546184738956e-05,
      "loss": 0.423,
      "step": 399
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.0963855421686746e-05,
      "loss": 0.4768,
      "step": 400
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.0562248995983936e-05,
      "loss": 0.5186,
      "step": 401
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.0160642570281125e-05,
      "loss": 0.495,
      "step": 402
    },
    {
      "epoch": 0.82,
      "learning_rate": 3.9759036144578314e-05,
      "loss": 0.3834,
      "step": 403
    },
    {
      "epoch": 0.82,
      "learning_rate": 3.93574297188755e-05,
      "loss": 0.3187,
      "step": 404
    },
    {
      "epoch": 0.82,
      "learning_rate": 3.895582329317269e-05,
      "loss": 0.4442,
      "step": 405
    },
    {
      "epoch": 0.82,
      "learning_rate": 3.855421686746988e-05,
      "loss": 0.4514,
      "step": 406
    },
    {
      "epoch": 0.83,
      "learning_rate": 3.815261044176707e-05,
      "loss": 0.4302,
      "step": 407
    },
    {
      "epoch": 0.83,
      "learning_rate": 3.7751004016064253e-05,
      "loss": 0.4411,
      "step": 408
    },
    {
      "epoch": 0.83,
      "learning_rate": 3.734939759036144e-05,
      "loss": 0.5643,
      "step": 409
    },
    {
      "epoch": 0.83,
      "learning_rate": 3.694779116465863e-05,
      "loss": 0.4168,
      "step": 410
    },
    {
      "epoch": 0.83,
      "learning_rate": 3.654618473895582e-05,
      "loss": 0.5073,
      "step": 411
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.614457831325301e-05,
      "loss": 0.5055,
      "step": 412
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.57429718875502e-05,
      "loss": 0.451,
      "step": 413
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.534136546184739e-05,
      "loss": 0.529,
      "step": 414
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.4939759036144585e-05,
      "loss": 0.4821,
      "step": 415
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.4538152610441774e-05,
      "loss": 0.4862,
      "step": 416
    },
    {
      "epoch": 0.85,
      "learning_rate": 3.413654618473896e-05,
      "loss": 0.4247,
      "step": 417
    },
    {
      "epoch": 0.85,
      "learning_rate": 3.3734939759036146e-05,
      "loss": 0.4721,
      "step": 418
    },
    {
      "epoch": 0.85,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.4703,
      "step": 419
    },
    {
      "epoch": 0.85,
      "learning_rate": 3.2931726907630524e-05,
      "loss": 0.3788,
      "step": 420
    },
    {
      "epoch": 0.85,
      "learning_rate": 3.253012048192771e-05,
      "loss": 0.4845,
      "step": 421
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.21285140562249e-05,
      "loss": 0.3343,
      "step": 422
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.172690763052209e-05,
      "loss": 0.498,
      "step": 423
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.132530120481928e-05,
      "loss": 0.5233,
      "step": 424
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.092369477911647e-05,
      "loss": 0.4114,
      "step": 425
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.052208835341366e-05,
      "loss": 0.5315,
      "step": 426
    },
    {
      "epoch": 0.87,
      "learning_rate": 3.012048192771085e-05,
      "loss": 0.4946,
      "step": 427
    },
    {
      "epoch": 0.87,
      "learning_rate": 2.971887550200803e-05,
      "loss": 0.4793,
      "step": 428
    },
    {
      "epoch": 0.87,
      "learning_rate": 2.931726907630522e-05,
      "loss": 0.4833,
      "step": 429
    },
    {
      "epoch": 0.87,
      "learning_rate": 2.891566265060241e-05,
      "loss": 0.5622,
      "step": 430
    },
    {
      "epoch": 0.87,
      "learning_rate": 2.85140562248996e-05,
      "loss": 0.609,
      "step": 431
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.8112449799196788e-05,
      "loss": 0.4482,
      "step": 432
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.7710843373493977e-05,
      "loss": 0.4253,
      "step": 433
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.7309236947791167e-05,
      "loss": 0.3964,
      "step": 434
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.6907630522088356e-05,
      "loss": 0.4911,
      "step": 435
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.6506024096385545e-05,
      "loss": 0.5473,
      "step": 436
    },
    {
      "epoch": 0.89,
      "learning_rate": 2.6104417670682734e-05,
      "loss": 0.4132,
      "step": 437
    },
    {
      "epoch": 0.89,
      "learning_rate": 2.570281124497992e-05,
      "loss": 0.4096,
      "step": 438
    },
    {
      "epoch": 0.89,
      "learning_rate": 2.530120481927711e-05,
      "loss": 0.3632,
      "step": 439
    },
    {
      "epoch": 0.89,
      "learning_rate": 2.48995983935743e-05,
      "loss": 0.4207,
      "step": 440
    },
    {
      "epoch": 0.89,
      "learning_rate": 2.4497991967871488e-05,
      "loss": 0.4925,
      "step": 441
    },
    {
      "epoch": 0.9,
      "learning_rate": 2.4096385542168677e-05,
      "loss": 0.3735,
      "step": 442
    },
    {
      "epoch": 0.9,
      "learning_rate": 2.3694779116465866e-05,
      "loss": 0.5045,
      "step": 443
    },
    {
      "epoch": 0.9,
      "learning_rate": 2.3293172690763055e-05,
      "loss": 0.4654,
      "step": 444
    },
    {
      "epoch": 0.9,
      "learning_rate": 2.289156626506024e-05,
      "loss": 0.4929,
      "step": 445
    },
    {
      "epoch": 0.9,
      "learning_rate": 2.248995983935743e-05,
      "loss": 0.4846,
      "step": 446
    },
    {
      "epoch": 0.91,
      "learning_rate": 2.208835341365462e-05,
      "loss": 0.4246,
      "step": 447
    },
    {
      "epoch": 0.91,
      "learning_rate": 2.168674698795181e-05,
      "loss": 0.5278,
      "step": 448
    },
    {
      "epoch": 0.91,
      "learning_rate": 2.1285140562248998e-05,
      "loss": 0.4004,
      "step": 449
    },
    {
      "epoch": 0.91,
      "learning_rate": 2.0883534136546184e-05,
      "loss": 0.4441,
      "step": 450
    },
    {
      "epoch": 0.91,
      "learning_rate": 2.0481927710843373e-05,
      "loss": 0.4555,
      "step": 451
    },
    {
      "epoch": 0.92,
      "learning_rate": 2.0080321285140562e-05,
      "loss": 0.3512,
      "step": 452
    },
    {
      "epoch": 0.92,
      "learning_rate": 1.967871485943775e-05,
      "loss": 0.4559,
      "step": 453
    },
    {
      "epoch": 0.92,
      "learning_rate": 1.927710843373494e-05,
      "loss": 0.4225,
      "step": 454
    },
    {
      "epoch": 0.92,
      "learning_rate": 1.8875502008032127e-05,
      "loss": 0.483,
      "step": 455
    },
    {
      "epoch": 0.92,
      "learning_rate": 1.8473895582329316e-05,
      "loss": 0.4765,
      "step": 456
    },
    {
      "epoch": 0.93,
      "learning_rate": 1.8072289156626505e-05,
      "loss": 0.604,
      "step": 457
    },
    {
      "epoch": 0.93,
      "learning_rate": 1.7670682730923694e-05,
      "loss": 0.4636,
      "step": 458
    },
    {
      "epoch": 0.93,
      "learning_rate": 1.7269076305220887e-05,
      "loss": 0.4905,
      "step": 459
    },
    {
      "epoch": 0.93,
      "learning_rate": 1.6867469879518073e-05,
      "loss": 0.4039,
      "step": 460
    },
    {
      "epoch": 0.94,
      "learning_rate": 1.6465863453815262e-05,
      "loss": 0.3915,
      "step": 461
    },
    {
      "epoch": 0.94,
      "learning_rate": 1.606425702811245e-05,
      "loss": 0.3922,
      "step": 462
    },
    {
      "epoch": 0.94,
      "learning_rate": 1.566265060240964e-05,
      "loss": 0.579,
      "step": 463
    },
    {
      "epoch": 0.94,
      "learning_rate": 1.526104417670683e-05,
      "loss": 0.3441,
      "step": 464
    },
    {
      "epoch": 0.94,
      "learning_rate": 1.4859437751004016e-05,
      "loss": 0.4649,
      "step": 465
    },
    {
      "epoch": 0.95,
      "learning_rate": 1.4457831325301205e-05,
      "loss": 0.4807,
      "step": 466
    },
    {
      "epoch": 0.95,
      "learning_rate": 1.4056224899598394e-05,
      "loss": 0.4659,
      "step": 467
    },
    {
      "epoch": 0.95,
      "learning_rate": 1.3654618473895583e-05,
      "loss": 0.2945,
      "step": 468
    },
    {
      "epoch": 0.95,
      "learning_rate": 1.3253012048192772e-05,
      "loss": 0.4214,
      "step": 469
    },
    {
      "epoch": 0.95,
      "learning_rate": 1.285140562248996e-05,
      "loss": 0.4886,
      "step": 470
    },
    {
      "epoch": 0.96,
      "learning_rate": 1.244979919678715e-05,
      "loss": 0.5413,
      "step": 471
    },
    {
      "epoch": 0.96,
      "learning_rate": 1.2048192771084338e-05,
      "loss": 0.4392,
      "step": 472
    },
    {
      "epoch": 0.96,
      "learning_rate": 1.1646586345381528e-05,
      "loss": 0.5799,
      "step": 473
    },
    {
      "epoch": 0.96,
      "learning_rate": 1.1244979919678715e-05,
      "loss": 0.4651,
      "step": 474
    },
    {
      "epoch": 0.96,
      "learning_rate": 1.0843373493975904e-05,
      "loss": 0.4336,
      "step": 475
    },
    {
      "epoch": 0.97,
      "learning_rate": 1.0441767068273092e-05,
      "loss": 0.6531,
      "step": 476
    },
    {
      "epoch": 0.97,
      "learning_rate": 1.0040160642570281e-05,
      "loss": 0.4463,
      "step": 477
    },
    {
      "epoch": 0.97,
      "learning_rate": 9.63855421686747e-06,
      "loss": 0.3375,
      "step": 478
    },
    {
      "epoch": 0.97,
      "learning_rate": 9.236947791164658e-06,
      "loss": 0.3812,
      "step": 479
    },
    {
      "epoch": 0.97,
      "learning_rate": 8.835341365461847e-06,
      "loss": 0.3803,
      "step": 480
    },
    {
      "epoch": 0.98,
      "learning_rate": 8.433734939759036e-06,
      "loss": 0.5742,
      "step": 481
    },
    {
      "epoch": 0.98,
      "learning_rate": 8.032128514056226e-06,
      "loss": 0.4986,
      "step": 482
    },
    {
      "epoch": 0.98,
      "learning_rate": 7.630522088353415e-06,
      "loss": 0.3414,
      "step": 483
    },
    {
      "epoch": 0.98,
      "learning_rate": 7.228915662650602e-06,
      "loss": 0.3588,
      "step": 484
    },
    {
      "epoch": 0.98,
      "learning_rate": 6.827309236947792e-06,
      "loss": 0.4386,
      "step": 485
    },
    {
      "epoch": 0.99,
      "learning_rate": 6.42570281124498e-06,
      "loss": 0.35,
      "step": 486
    },
    {
      "epoch": 0.99,
      "learning_rate": 6.024096385542169e-06,
      "loss": 0.3784,
      "step": 487
    },
    {
      "epoch": 0.99,
      "learning_rate": 5.622489959839358e-06,
      "loss": 0.4395,
      "step": 488
    },
    {
      "epoch": 0.99,
      "learning_rate": 5.220883534136546e-06,
      "loss": 0.4529,
      "step": 489
    },
    {
      "epoch": 0.99,
      "learning_rate": 4.819277108433735e-06,
      "loss": 0.3706,
      "step": 490
    },
    {
      "epoch": 1.0,
      "learning_rate": 4.417670682730924e-06,
      "loss": 0.4192,
      "step": 491
    },
    {
      "epoch": 1.0,
      "learning_rate": 4.016064257028113e-06,
      "loss": 0.4437,
      "step": 492
    },
    {
      "epoch": 1.0,
      "learning_rate": 3.614457831325301e-06,
      "loss": 0.4391,
      "step": 493
    }
  ],
  "logging_steps": 1,
  "max_steps": 500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "total_flos": 418467312304128.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
|
|