Llama2_Instruction_Finetuning_Experiments/llama2_7b_AdamW_Cosine/checkpoint-400/trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9891808346213292,
  "eval_steps": 500,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 2.9999999999999997e-06,
      "loss": 1.8153,
      "step": 1
    },
    {
      "epoch": 0.0,
      "learning_rate": 5.999999999999999e-06,
      "loss": 1.7198,
      "step": 2
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.999999999999999e-06,
      "loss": 1.8138,
      "step": 3
    },
    {
      "epoch": 0.01,
      "learning_rate": 1.1999999999999999e-05,
      "loss": 1.91,
      "step": 4
    },
    {
      "epoch": 0.01,
      "learning_rate": 1.4999999999999999e-05,
      "loss": 1.8071,
      "step": 5
    },
    {
      "epoch": 0.01,
      "learning_rate": 1.7999999999999997e-05,
      "loss": 1.8472,
      "step": 6
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.1e-05,
      "loss": 1.8289,
      "step": 7
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.3999999999999997e-05,
      "loss": 1.9348,
      "step": 8
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.6999999999999996e-05,
      "loss": 1.9788,
      "step": 9
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 1.8609,
      "step": 10
    },
    {
      "epoch": 0.03,
      "learning_rate": 3.2999999999999996e-05,
      "loss": 1.8541,
      "step": 11
    },
    {
      "epoch": 0.03,
      "learning_rate": 3.5999999999999994e-05,
      "loss": 1.759,
      "step": 12
    },
    {
      "epoch": 0.03,
      "learning_rate": 3.9e-05,
      "loss": 1.7788,
      "step": 13
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.2e-05,
      "loss": 1.8238,
      "step": 14
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.4999999999999996e-05,
      "loss": 1.851,
      "step": 15
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.7999999999999994e-05,
      "loss": 1.6828,
      "step": 16
    },
    {
      "epoch": 0.04,
      "learning_rate": 5.1e-05,
      "loss": 1.5966,
      "step": 17
    },
    {
      "epoch": 0.04,
      "learning_rate": 5.399999999999999e-05,
      "loss": 1.77,
      "step": 18
    },
    {
      "epoch": 0.05,
      "learning_rate": 5.6999999999999996e-05,
      "loss": 1.798,
      "step": 19
    },
    {
      "epoch": 0.05,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 1.7845,
      "step": 20
    },
    {
      "epoch": 0.05,
      "learning_rate": 6.299999999999999e-05,
      "loss": 1.625,
      "step": 21
    },
    {
      "epoch": 0.05,
      "learning_rate": 6.599999999999999e-05,
      "loss": 1.6687,
      "step": 22
    },
    {
      "epoch": 0.06,
      "learning_rate": 6.9e-05,
      "loss": 1.6592,
      "step": 23
    },
    {
      "epoch": 0.06,
      "learning_rate": 7.199999999999999e-05,
      "loss": 1.6339,
      "step": 24
    },
    {
      "epoch": 0.06,
      "learning_rate": 7.5e-05,
      "loss": 1.5621,
      "step": 25
    },
    {
      "epoch": 0.06,
      "learning_rate": 7.8e-05,
      "loss": 1.51,
      "step": 26
    },
    {
      "epoch": 0.07,
      "learning_rate": 8.1e-05,
      "loss": 1.5743,
      "step": 27
    },
    {
      "epoch": 0.07,
      "learning_rate": 8.4e-05,
      "loss": 1.4974,
      "step": 28
    },
    {
      "epoch": 0.07,
      "learning_rate": 8.699999999999999e-05,
      "loss": 1.513,
      "step": 29
    },
    {
      "epoch": 0.07,
      "learning_rate": 8.999999999999999e-05,
      "loss": 1.4505,
      "step": 30
    },
    {
      "epoch": 0.08,
      "learning_rate": 9.3e-05,
      "loss": 1.4374,
      "step": 31
    },
    {
      "epoch": 0.08,
      "learning_rate": 9.599999999999999e-05,
      "loss": 1.4201,
      "step": 32
    },
    {
      "epoch": 0.08,
      "learning_rate": 9.9e-05,
      "loss": 1.3513,
      "step": 33
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.000102,
      "loss": 1.3564,
      "step": 34
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00010499999999999999,
      "loss": 1.2858,
      "step": 35
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00010799999999999998,
      "loss": 1.2458,
      "step": 36
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00011099999999999999,
      "loss": 1.2151,
      "step": 37
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00011399999999999999,
      "loss": 1.1821,
      "step": 38
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.000117,
      "loss": 1.164,
      "step": 39
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00011999999999999999,
      "loss": 1.1299,
      "step": 40
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00012299999999999998,
      "loss": 1.1514,
      "step": 41
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00012599999999999997,
      "loss": 1.0846,
      "step": 42
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.000129,
      "loss": 1.0845,
      "step": 43
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00013199999999999998,
      "loss": 1.0378,
      "step": 44
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.000135,
      "loss": 1.0215,
      "step": 45
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.000138,
      "loss": 1.0468,
      "step": 46
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00014099999999999998,
      "loss": 1.0085,
      "step": 47
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00014399999999999998,
      "loss": 1.0042,
      "step": 48
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.000147,
      "loss": 0.9914,
      "step": 49
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00015,
      "loss": 1.0245,
      "step": 50
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00015299999999999998,
      "loss": 0.9608,
      "step": 51
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.000156,
      "loss": 0.9765,
      "step": 52
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.000159,
      "loss": 0.9647,
      "step": 53
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.000162,
      "loss": 0.9637,
      "step": 54
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.000165,
      "loss": 0.9745,
      "step": 55
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.000168,
      "loss": 0.968,
      "step": 56
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00017099999999999998,
      "loss": 1.0064,
      "step": 57
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00017399999999999997,
      "loss": 0.9273,
      "step": 58
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00017699999999999997,
      "loss": 0.9038,
      "step": 59
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00017999999999999998,
      "loss": 0.8971,
      "step": 60
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00018299999999999998,
      "loss": 0.8887,
      "step": 61
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.000186,
      "loss": 0.8992,
      "step": 62
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00018899999999999999,
      "loss": 0.8799,
      "step": 63
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00019199999999999998,
      "loss": 0.8914,
      "step": 64
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.000195,
      "loss": 0.8953,
      "step": 65
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.000198,
      "loss": 0.8819,
      "step": 66
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.000201,
      "loss": 0.8911,
      "step": 67
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.000204,
      "loss": 0.9396,
      "step": 68
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00020699999999999996,
      "loss": 0.8563,
      "step": 69
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00020999999999999998,
      "loss": 0.8911,
      "step": 70
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00021299999999999997,
      "loss": 0.89,
      "step": 71
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00021599999999999996,
      "loss": 0.8253,
      "step": 72
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00021899999999999998,
      "loss": 0.8842,
      "step": 73
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00022199999999999998,
      "loss": 0.8779,
      "step": 74
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.000225,
      "loss": 0.8851,
      "step": 75
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00022799999999999999,
      "loss": 0.8743,
      "step": 76
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00023099999999999998,
      "loss": 0.8802,
      "step": 77
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.000234,
      "loss": 0.8775,
      "step": 78
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.000237,
      "loss": 0.865,
      "step": 79
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00023999999999999998,
      "loss": 0.8602,
      "step": 80
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.000243,
      "loss": 0.8278,
      "step": 81
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00024599999999999996,
      "loss": 0.8699,
      "step": 82
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.000249,
      "loss": 0.871,
      "step": 83
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00025199999999999995,
      "loss": 0.8603,
      "step": 84
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00025499999999999996,
      "loss": 0.8488,
      "step": 85
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.000258,
      "loss": 0.885,
      "step": 86
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.000261,
      "loss": 0.8583,
      "step": 87
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00026399999999999997,
      "loss": 0.8771,
      "step": 88
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.000267,
      "loss": 0.8723,
      "step": 89
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00027,
      "loss": 0.8741,
      "step": 90
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00027299999999999997,
      "loss": 0.8475,
      "step": 91
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.000276,
      "loss": 0.8693,
      "step": 92
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.000279,
      "loss": 0.891,
      "step": 93
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00028199999999999997,
      "loss": 0.8563,
      "step": 94
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.000285,
      "loss": 0.844,
      "step": 95
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00028799999999999995,
      "loss": 0.8566,
      "step": 96
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00029099999999999997,
      "loss": 0.8621,
      "step": 97
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.000294,
      "loss": 0.8403,
      "step": 98
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00029699999999999996,
      "loss": 0.8866,
      "step": 99
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0003,
      "loss": 0.8684,
      "step": 100
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00029999199041570257,
      "loss": 0.8769,
      "step": 101
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00029996796251818966,
      "loss": 0.8124,
      "step": 102
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00029992791887350736,
      "loss": 0.841,
      "step": 103
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0002998718637580951,
      "loss": 0.8653,
      "step": 104
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0002997998031583285,
      "loss": 0.8434,
      "step": 105
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0002997117447698802,
      "loss": 0.8748,
      "step": 106
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00029960769799689793,
      "loss": 0.8647,
      "step": 107
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00029948767395100045,
      "loss": 0.8731,
      "step": 108
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0002993516854500905,
      "loss": 0.8323,
      "step": 109
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00029919974701698635,
      "loss": 0.8492,
      "step": 110
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00029903187487787046,
      "loss": 0.8608,
      "step": 111
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0002988480869605567,
      "loss": 0.8776,
      "step": 112
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0002986484028925761,
      "loss": 0.8521,
      "step": 113
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0002984328439990804,
      "loss": 0.822,
      "step": 114
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0002982014333005645,
      "loss": 0.7943,
      "step": 115
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00029795419551040833,
      "loss": 0.8497,
      "step": 116
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00029769115703223763,
      "loss": 0.8067,
      "step": 117
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0002974123459571039,
      "loss": 0.8534,
      "step": 118
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00029711779206048454,
      "loss": 0.8412,
      "step": 119
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00029680752679910315,
      "loss": 0.8596,
      "step": 120
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00029648158330756986,
      "loss": 0.8492,
      "step": 121
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0002961399963948431,
      "loss": 0.8469,
      "step": 122
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0002957828025405117,
      "loss": 0.8642,
      "step": 123
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0002954100398908995,
      "loss": 0.8423,
      "step": 124
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00029502174825499146,
      "loss": 0.8727,
      "step": 125
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.000294617969100182,
      "loss": 0.8712,
      "step": 126
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00029419874554784695,
      "loss": 0.8377,
      "step": 127
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.0002937641223687379,
      "loss": 0.8405,
      "step": 128
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00029331414597820145,
      "loss": 0.8378,
      "step": 129
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00029284886443122214,
      "loss": 0.8302,
      "step": 130
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00029236832741729016,
      "loss": 0.9031,
      "step": 131
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00029187258625509513,
      "loss": 0.8793,
      "step": 132
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.0002913616938870455,
      "loss": 0.7985,
      "step": 133
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.0002908357048736144,
      "loss": 0.8195,
      "step": 134
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00029029467538751303,
      "loss": 0.859,
      "step": 135
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00028973866320769183,
      "loss": 0.8469,
      "step": 136
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00028916772771316973,
      "loss": 0.813,
      "step": 137
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.000288581929876693,
      "loss": 0.8844,
      "step": 138
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.0002879813322582237,
      "loss": 0.8435,
      "step": 139
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00028736599899825856,
      "loss": 0.8526,
      "step": 140
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.0002867359958109792,
      "loss": 0.8495,
      "step": 141
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00028609138997723397,
      "loss": 0.8697,
      "step": 142
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00028543225033735313,
      "loss": 0.8201,
      "step": 143
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0002847586472837968,
      "loss": 0.8113,
      "step": 144
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00028407065275363753,
      "loss": 0.8414,
      "step": 145
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0002833683402208777,
      "loss": 0.8668,
      "step": 146
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0002826517846886033,
      "loss": 0.8236,
      "step": 147
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00028192106268097334,
      "loss": 0.8743,
      "step": 148
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.0002811762522350481,
      "loss": 0.8138,
      "step": 149
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.000280417432892455,
      "loss": 0.8432,
      "step": 150
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.0002796446856908939,
      "loss": 0.826,
      "step": 151
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0002788580931554828,
      "loss": 0.8556,
      "step": 152
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0002780577392899446,
      "loss": 0.8358,
      "step": 153
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00027724370956763603,
      "loss": 0.8656,
      "step": 154
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0002764160909224196,
      "loss": 0.8393,
      "step": 155
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00027557497173937923,
      "loss": 0.8464,
      "step": 156
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.0002747204418453818,
      "loss": 0.8046,
      "step": 157
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00027385259249948333,
      "loss": 0.8221,
      "step": 158
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.000272971516383184,
      "loss": 0.8424,
      "step": 159
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00027207730759052924,
      "loss": 0.8172,
      "step": 160
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.0002711700616180619,
      "loss": 0.8373,
      "step": 161
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.0002702498753546232,
      "loss": 0.8903,
      "step": 162
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00026931684707100586,
      "loss": 0.8203,
      "step": 163
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00026837107640945905,
      "loss": 0.8212,
      "step": 164
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00026741266437304716,
      "loss": 0.8103,
      "step": 165
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.0002664417133148636,
      "loss": 0.8475,
      "step": 166
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00026545832692709964,
      "loss": 0.8714,
      "step": 167
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00026446261022997097,
      "loss": 0.871,
      "step": 168
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00026345466956050176,
      "loss": 0.8585,
      "step": 169
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.0002624346125611689,
      "loss": 0.8292,
      "step": 170
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.000261402548168406,
      "loss": 0.8579,
      "step": 171
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.0002603585866009697,
      "loss": 0.8106,
      "step": 172
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.0002593028393481692,
      "loss": 0.8582,
      "step": 173
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.0002582354191579593,
      "loss": 0.8508,
      "step": 174
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00025715644002489996,
      "loss": 0.8388,
      "step": 175
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00025606601717798207,
      "loss": 0.8449,
      "step": 176
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00025496426706832193,
      "loss": 0.8649,
      "step": 177
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0002538513073567244,
      "loss": 0.8672,
      "step": 178
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00025272725690111806,
      "loss": 0.8367,
      "step": 179
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00025159223574386114,
      "loss": 0.8433,
      "step": 180
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00025044636509892227,
      "loss": 0.7995,
      "step": 181
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00024928976733893494,
      "loss": 0.8302,
      "step": 182
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.0002481225659821294,
      "loss": 0.8565,
      "step": 183
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00024694488567914106,
      "loss": 0.8723,
      "step": 184
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.0002457568521996988,
      "loss": 0.8731,
      "step": 185
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00024455859241919326,
      "loss": 0.856,
      "step": 186
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.0002433502343051274,
      "loss": 0.8385,
      "step": 187
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00024213190690345018,
      "loss": 0.8431,
      "step": 188
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00024090374032477533,
      "loss": 0.865,
      "step": 189
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.0002396658657304861,
      "loss": 0.8676,
      "step": 190
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00023841841531872798,
      "loss": 0.8143,
      "step": 191
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00023716152231029072,
      "loss": 0.8507,
      "step": 192
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.000235895320934381,
      "loss": 0.8582,
      "step": 193
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00023461994641428766,
      "loss": 0.8621,
      "step": 194
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.0002333355349529403,
      "loss": 0.8503,
      "step": 195
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00023204222371836405,
      "loss": 0.8339,
      "step": 196
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00023074015082903015,
      "loss": 0.86,
      "step": 197
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.0002294294553391063,
      "loss": 0.798,
      "step": 198
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00022811027722360598,
      "loss": 0.8391,
      "step": 199
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00022678275736344014,
      "loss": 0.7992,
      "step": 200
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00022544703753037178,
      "loss": 0.8329,
      "step": 201
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00022410326037187558,
      "loss": 0.8191,
      "step": 202
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00022275156939590392,
      "loss": 0.8405,
      "step": 203
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00022139210895556104,
      "loss": 0.8587,
      "step": 204
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00022002502423368678,
      "loss": 0.8691,
      "step": 205
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0002186504612273522,
      "loss": 0.8381,
      "step": 206
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0002172685667322676,
      "loss": 0.8781,
      "step": 207
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00021587948832710554,
      "loss": 0.8304,
      "step": 208
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.0002144833743577405,
      "loss": 0.824,
      "step": 209
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.0002130803739214061,
      "loss": 0.8309,
      "step": 210
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00021167063685077262,
      "loss": 0.7997,
      "step": 211
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.0002102543136979454,
      "loss": 0.8495,
      "step": 212
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00020883155571838692,
      "loss": 0.834,
      "step": 213
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00020740251485476345,
      "loss": 0.897,
      "step": 214
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00020596734372071852,
      "loss": 0.8345,
      "step": 215
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00020452619558457446,
      "loss": 0.8458,
      "step": 216
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00020307922435296443,
      "loss": 0.8221,
      "step": 217
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.0002016265845543958,
      "loss": 0.8335,
      "step": 218
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00020016843132274746,
      "loss": 0.8065,
      "step": 219
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00019870492038070252,
      "loss": 0.8264,
      "step": 220
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00019723620802311774,
      "loss": 0.8727,
      "step": 221
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00019576245110033231,
      "loss": 0.843,
      "step": 222
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00019428380700141698,
      "loss": 0.8805,
      "step": 223
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00019280043363736579,
      "loss": 0.8282,
      "step": 224
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.0001913124894242322,
      "loss": 0.8405,
      "step": 225
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00018982013326621083,
      "loss": 0.8305,
      "step": 226
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00018832352453866777,
      "loss": 0.8385,
      "step": 227
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00018682282307111987,
      "loss": 0.8179,
      "step": 228
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00018531818913016584,
      "loss": 0.8599,
      "step": 229
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00018380978340237092,
      "loss": 0.8344,
      "step": 230
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00018229776697710617,
      "loss": 0.8516,
      "step": 231
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00018078230132934512,
      "loss": 0.8463,
      "step": 232
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00017926354830241924,
      "loss": 0.8366,
      "step": 233
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00017774167009073377,
      "loss": 0.833,
      "step": 234
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00017621682922244633,
      "loss": 0.8045,
      "step": 235
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00017468918854211007,
      "loss": 0.8242,
      "step": 236
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.0001731589111932823,
      "loss": 0.8476,
      "step": 237
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.000171626160601102,
      "loss": 0.8633,
      "step": 238
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.0001700911004548369,
      "loss": 0.8443,
      "step": 239
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.00016855389469040217,
      "loss": 0.8813,
      "step": 240
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00016701470747285317,
      "loss": 0.8224,
      "step": 241
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00016547370317885354,
      "loss": 0.8262,
      "step": 242
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.0001639310463791205,
      "loss": 0.836,
      "step": 243
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00016238690182084986,
      "loss": 0.8262,
      "step": 244
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00016084143441012156,
      "loss": 0.8434,
      "step": 245
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.0001592948091942892,
      "loss": 0.8431,
      "step": 246
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.0001577471913443532,
      "loss": 0.8592,
      "step": 247
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00015619874613732196,
      "loss": 0.8309,
      "step": 248
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.0001546496389385611,
      "loss": 0.8255,
      "step": 249
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.00015310003518413315,
      "loss": 0.7773,
      "step": 250
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.00015155010036313008,
      "loss": 0.8441,
      "step": 251
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.00015,
      "loss": 0.8472,
      "step": 252
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00014844989963686992,
      "loss": 0.8443,
      "step": 253
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00014689996481586688,
      "loss": 0.841,
      "step": 254
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00014535036106143892,
      "loss": 0.835,
      "step": 255
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.000143801253862678,
      "loss": 0.8462,
      "step": 256
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.0001422528086556468,
      "loss": 0.8092,
      "step": 257
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.0001407051908057108,
      "loss": 0.8208,
      "step": 258
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.0001391585655898784,
      "loss": 0.8446,
      "step": 259
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.00013761309817915014,
      "loss": 0.7963,
      "step": 260
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00013606895362087949,
      "loss": 0.8162,
      "step": 261
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00013452629682114646,
      "loss": 0.8829,
      "step": 262
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00013298529252714684,
      "loss": 0.8289,
      "step": 263
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00013144610530959784,
      "loss": 0.848,
      "step": 264
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.0001299088995451631,
      "loss": 0.7897,
      "step": 265
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00012837383939889798,
      "loss": 0.8196,
      "step": 266
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00012684108880671772,
      "loss": 0.7939,
      "step": 267
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00012531081145788987,
      "loss": 0.8232,
      "step": 268
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00012378317077755362,
      "loss": 0.8411,
      "step": 269
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00012225832990926623,
      "loss": 0.8726,
      "step": 270
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00012073645169758076,
      "loss": 0.7977,
      "step": 271
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00011921769867065485,
      "loss": 0.8042,
      "step": 272
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.00011770223302289385,
      "loss": 0.7933,
      "step": 273
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.0001161902165976291,
      "loss": 0.8396,
      "step": 274
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.00011468181086983412,
      "loss": 0.8328,
      "step": 275
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.00011317717692888012,
      "loss": 0.8353,
      "step": 276
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.0001116764754613322,
      "loss": 0.801,
      "step": 277
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.00011017986673378918,
      "loss": 0.842,
      "step": 278
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.00010868751057576782,
      "loss": 0.8578,
      "step": 279
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.00010719956636263423,
      "loss": 0.8259,
      "step": 280
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.00010571619299858303,
      "loss": 0.8238,
      "step": 281
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.00010423754889966769,
      "loss": 0.8322,
      "step": 282
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.00010276379197688222,
      "loss": 0.8194,
      "step": 283
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.00010129507961929748,
      "loss": 0.8031,
      "step": 284
    },
    {
      "epoch": 0.7,
      "learning_rate": 9.983156867725255e-05,
      "loss": 0.8259,
      "step": 285
    },
    {
      "epoch": 0.71,
      "learning_rate": 9.837341544560423e-05,
      "loss": 0.8215,
      "step": 286
    },
    {
      "epoch": 0.71,
      "learning_rate": 9.692077564703555e-05,
      "loss": 0.8282,
      "step": 287
    },
    {
      "epoch": 0.71,
      "learning_rate": 9.547380441542549e-05,
      "loss": 0.85,
      "step": 288
    },
    {
      "epoch": 0.71,
      "learning_rate": 9.403265627928147e-05,
      "loss": 0.8034,
      "step": 289
    },
    {
      "epoch": 0.72,
      "learning_rate": 9.259748514523653e-05,
      "loss": 0.8197,
      "step": 290
    },
    {
      "epoch": 0.72,
      "learning_rate": 9.116844428161309e-05,
      "loss": 0.776,
      "step": 291
    },
    {
      "epoch": 0.72,
      "learning_rate": 8.97456863020546e-05,
      "loss": 0.8112,
      "step": 292
    },
    {
      "epoch": 0.72,
      "learning_rate": 8.83293631492274e-05,
      "loss": 0.827,
      "step": 293
    },
    {
      "epoch": 0.73,
      "learning_rate": 8.691962607859386e-05,
      "loss": 0.8157,
      "step": 294
    },
    {
      "epoch": 0.73,
      "learning_rate": 8.55166256422595e-05,
      "loss": 0.8386,
      "step": 295
    },
    {
      "epoch": 0.73,
      "learning_rate": 8.412051167289446e-05,
      "loss": 0.8088,
      "step": 296
    },
    {
      "epoch": 0.73,
      "learning_rate": 8.27314332677324e-05,
      "loss": 0.8409,
      "step": 297
    },
    {
      "epoch": 0.74,
      "learning_rate": 8.134953877264778e-05,
      "loss": 0.81,
      "step": 298
    },
    {
      "epoch": 0.74,
      "learning_rate": 7.997497576631323e-05,
      "loss": 0.8396,
      "step": 299
    },
    {
      "epoch": 0.74,
      "learning_rate": 7.860789104443896e-05,
      "loss": 0.8428,
      "step": 300
    },
    {
      "epoch": 0.74,
      "learning_rate": 7.724843060409606e-05,
      "loss": 0.8172,
      "step": 301
    },
    {
      "epoch": 0.75,
      "learning_rate": 7.589673962812442e-05,
      "loss": 0.8108,
      "step": 302
    },
    {
      "epoch": 0.75,
      "learning_rate": 7.455296246962823e-05,
      "loss": 0.8459,
      "step": 303
    },
    {
      "epoch": 0.75,
      "learning_rate": 7.321724263655988e-05,
      "loss": 0.8168,
      "step": 304
    },
    {
      "epoch": 0.75,
      "learning_rate": 7.188972277639405e-05,
      "loss": 0.817,
      "step": 305
    },
    {
      "epoch": 0.76,
      "learning_rate": 7.057054466089371e-05,
      "loss": 0.8435,
      "step": 306
    },
    {
      "epoch": 0.76,
      "learning_rate": 6.925984917096985e-05,
      "loss": 0.8272,
      "step": 307
    },
    {
      "epoch": 0.76,
      "learning_rate": 6.795777628163599e-05,
      "loss": 0.8508,
      "step": 308
    },
    {
      "epoch": 0.76,
      "learning_rate": 6.66644650470597e-05,
      "loss": 0.8529,
      "step": 309
    },
    {
      "epoch": 0.77,
      "learning_rate": 6.538005358571234e-05,
      "loss": 0.8434,
      "step": 310
    },
    {
      "epoch": 0.77,
      "learning_rate": 6.410467906561896e-05,
      "loss": 0.8311,
      "step": 311
    },
    {
      "epoch": 0.77,
      "learning_rate": 6.283847768970926e-05,
      "loss": 0.789,
      "step": 312
    },
    {
      "epoch": 0.77,
      "learning_rate": 6.158158468127196e-05,
      "loss": 0.824,
      "step": 313
    },
    {
      "epoch": 0.78,
      "learning_rate": 6.0334134269513865e-05,
      "loss": 0.8425,
      "step": 314
    },
    {
      "epoch": 0.78,
      "learning_rate": 5.9096259675224647e-05,
      "loss": 0.8228,
      "step": 315
    },
    {
      "epoch": 0.78,
      "learning_rate": 5.786809309654982e-05,
      "loss": 0.8333,
      "step": 316
    },
    {
      "epoch": 0.78,
      "learning_rate": 5.664976569487263e-05,
      "loss": 0.8656,
      "step": 317
    },
    {
      "epoch": 0.79,
      "learning_rate": 5.5441407580806745e-05,
      "loss": 0.8228,
      "step": 318
    },
    {
      "epoch": 0.79,
      "learning_rate": 5.4243147800301134e-05,
      "loss": 0.7834,
      "step": 319
    },
    {
      "epoch": 0.79,
      "learning_rate": 5.305511432085884e-05,
      "loss": 0.7926,
      "step": 320
    },
    {
      "epoch": 0.79,
      "learning_rate": 5.187743401787054e-05,
      "loss": 0.8453,
      "step": 321
    },
    {
      "epoch": 0.8,
      "learning_rate": 5.071023266106502e-05,
      "loss": 0.848,
      "step": 322
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.955363490107777e-05,
      "loss": 0.8579,
      "step": 323
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.840776425613886e-05,
      "loss": 0.826,
      "step": 324
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.727274309888191e-05,
      "loss": 0.8296,
      "step": 325
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.614869264327553e-05,
      "loss": 0.8263,
      "step": 326
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.503573293167805e-05,
      "loss": 0.8345,
      "step": 327
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.3933982822017876e-05,
      "loss": 0.8175,
      "step": 328
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.284355997510003e-05,
      "loss": 0.8081,
      "step": 329
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.17645808420407e-05,
      "loss": 0.8121,
      "step": 330
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.0697160651830814e-05,
      "loss": 0.8072,
      "step": 331
    },
    {
      "epoch": 0.82,
      "learning_rate": 3.964141339903026e-05,
      "loss": 0.8569,
      "step": 332
    },
    {
      "epoch": 0.82,
      "learning_rate": 3.8597451831594014e-05,
      "loss": 0.8123,
      "step": 333
    },
    {
      "epoch": 0.83,
      "learning_rate": 3.756538743883111e-05,
      "loss": 0.8379,
      "step": 334
    },
    {
      "epoch": 0.83,
      "learning_rate": 3.654533043949823e-05,
      "loss": 0.805,
      "step": 335
    },
    {
      "epoch": 0.83,
      "learning_rate": 3.5537389770029046e-05,
      "loss": 0.8434,
      "step": 336
    },
    {
      "epoch": 0.83,
      "learning_rate": 3.454167307290036e-05,
      "loss": 0.8017,
      "step": 337
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.3558286685136384e-05,
      "loss": 0.8328,
      "step": 338
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.258733562695283e-05,
      "loss": 0.8238,
      "step": 339
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.162892359054098e-05,
      "loss": 0.8477,
      "step": 340
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.0683152928994105e-05,
      "loss": 0.8162,
      "step": 341
    },
    {
      "epoch": 0.85,
      "learning_rate": 2.9750124645376755e-05,
      "loss": 0.8293,
      "step": 342
    },
    {
      "epoch": 0.85,
      "learning_rate": 2.8829938381938117e-05,
      "loss": 0.8393,
      "step": 343
    },
    {
      "epoch": 0.85,
      "learning_rate": 2.792269240947076e-05,
      "loss": 0.8461,
      "step": 344
    },
    {
      "epoch": 0.85,
      "learning_rate": 2.702848361681605e-05,
      "loss": 0.8294,
      "step": 345
    },
    {
      "epoch": 0.86,
      "learning_rate": 2.6147407500516643e-05,
      "loss": 0.8482,
      "step": 346
    },
    {
      "epoch": 0.86,
      "learning_rate": 2.5279558154618197e-05,
      "loss": 0.8296,
      "step": 347
    },
    {
      "epoch": 0.86,
      "learning_rate": 2.4425028260620715e-05,
      "loss": 0.8322,
      "step": 348
    },
    {
      "epoch": 0.86,
      "learning_rate": 2.35839090775804e-05,
      "loss": 0.8226,
      "step": 349
    },
    {
      "epoch": 0.87,
      "learning_rate": 2.2756290432363957e-05,
      "loss": 0.8192,
      "step": 350
    },
    {
      "epoch": 0.87,
      "learning_rate": 2.1942260710055386e-05,
      "loss": 0.8226,
      "step": 351
    },
    {
      "epoch": 0.87,
      "learning_rate": 2.1141906844517203e-05,
      "loss": 0.8049,
      "step": 352
    },
    {
      "epoch": 0.87,
      "learning_rate": 2.0355314309106097e-05,
      "loss": 0.8261,
      "step": 353
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.9582567107544962e-05,
      "loss": 0.8317,
      "step": 354
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.882374776495187e-05,
      "loss": 0.8421,
      "step": 355
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.8078937319026654e-05,
      "loss": 0.834,
      "step": 356
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.734821531139667e-05,
      "loss": 0.8752,
      "step": 357
    },
    {
      "epoch": 0.89,
      "learning_rate": 1.663165977912221e-05,
      "loss": 0.8185,
      "step": 358
    },
    {
      "epoch": 0.89,
      "learning_rate": 1.5929347246362452e-05,
      "loss": 0.8115,
      "step": 359
    },
    {
      "epoch": 0.89,
      "learning_rate": 1.524135271620317e-05,
      "loss": 0.8062,
      "step": 360
    },
    {
      "epoch": 0.89,
      "learning_rate": 1.456774966264685e-05,
      "loss": 0.8766,
      "step": 361
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.390861002276602e-05,
      "loss": 0.818,
      "step": 362
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.3264004189020777e-05,
      "loss": 0.7928,
      "step": 363
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.2634001001741373e-05,
      "loss": 0.8159,
      "step": 364
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.2018667741776266e-05,
      "loss": 0.8423,
      "step": 365
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.1418070123306989e-05,
      "loss": 0.8281,
      "step": 366
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.0832272286830285e-05,
      "loss": 0.8235,
      "step": 367
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.0261336792308167e-05,
      "loss": 0.8432,
      "step": 368
    },
    {
      "epoch": 0.91,
      "learning_rate": 9.705324612486936e-06,
      "loss": 0.8614,
      "step": 369
    },
    {
      "epoch": 0.91,
      "learning_rate": 9.164295126385562e-06,
      "loss": 0.8353,
      "step": 370
    },
    {
      "epoch": 0.92,
      "learning_rate": 8.638306112954452e-06,
      "loss": 0.8301,
      "step": 371
    },
    {
      "epoch": 0.92,
      "learning_rate": 8.127413744904804e-06,
      "loss": 0.7989,
      "step": 372
    },
    {
      "epoch": 0.92,
      "learning_rate": 7.631672582709808e-06,
      "loss": 0.8164,
      "step": 373
    },
    {
      "epoch": 0.92,
      "learning_rate": 7.151135568777838e-06,
      "loss": 0.8253,
      "step": 374
    },
    {
      "epoch": 0.93,
      "learning_rate": 6.685854021798509e-06,
      "loss": 0.8161,
      "step": 375
    },
    {
      "epoch": 0.93,
      "learning_rate": 6.235877631262093e-06,
      "loss": 0.8208,
      "step": 376
    },
    {
      "epoch": 0.93,
      "learning_rate": 5.801254452153081e-06,
      "loss": 0.8605,
      "step": 377
    },
    {
      "epoch": 0.93,
      "learning_rate": 5.3820308998179575e-06,
      "loss": 0.7993,
      "step": 378
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.978251745008527e-06,
      "loss": 0.8198,
      "step": 379
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.589960109100444e-06,
      "loss": 0.841,
      "step": 380
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.217197459488292e-06,
      "loss": 0.841,
      "step": 381
    },
    {
      "epoch": 0.94,
      "learning_rate": 3.86000360515688e-06,
      "loss": 0.8414,
      "step": 382
    },
    {
      "epoch": 0.95,
      "learning_rate": 3.518416692430076e-06,
      "loss": 0.8413,
      "step": 383
    },
    {
      "epoch": 0.95,
      "learning_rate": 3.192473200896828e-06,
      "loss": 0.8326,
      "step": 384
    },
    {
      "epoch": 0.95,
      "learning_rate": 2.882207939515435e-06,
      "loss": 0.8241,
      "step": 385
    },
    {
      "epoch": 0.95,
      "learning_rate": 2.587654042896087e-06,
      "loss": 0.842,
      "step": 386
    },
    {
      "epoch": 0.96,
      "learning_rate": 2.3088429677623423e-06,
      "loss": 0.8064,
      "step": 387
    },
    {
      "epoch": 0.96,
      "learning_rate": 2.0458044895916513e-06,
      "loss": 0.7889,
      "step": 388
    },
    {
      "epoch": 0.96,
      "learning_rate": 1.7985666994355164e-06,
      "loss": 0.8061,
      "step": 389
    },
    {
      "epoch": 0.96,
      "learning_rate": 1.5671560009195894e-06,
      "loss": 0.8665,
      "step": 390
    },
    {
      "epoch": 0.97,
      "learning_rate": 1.351597107423813e-06,
      "loss": 0.8251,
      "step": 391
    },
    {
      "epoch": 0.97,
      "learning_rate": 1.1519130394432474e-06,
      "loss": 0.8517,
      "step": 392
    },
    {
      "epoch": 0.97,
      "learning_rate": 9.681251221295539e-07,
      "loss": 0.8194,
      "step": 393
    },
    {
      "epoch": 0.97,
      "learning_rate": 8.002529830136162e-07,
      "loss": 0.8388,
      "step": 394
    },
    {
      "epoch": 0.98,
      "learning_rate": 6.483145499094344e-07,
      "loss": 0.8054,
      "step": 395
    },
    {
      "epoch": 0.98,
      "learning_rate": 5.123260489995229e-07,
      "loss": 0.8385,
      "step": 396
    },
    {
      "epoch": 0.98,
      "learning_rate": 3.923020031020296e-07,
      "loss": 0.8401,
      "step": 397
    },
    {
      "epoch": 0.98,
      "learning_rate": 2.8825523011977715e-07,
      "loss": 0.8336,
      "step": 398
    },
    {
      "epoch": 0.99,
      "learning_rate": 2.001968416714572e-07,
      "loss": 0.8376,
      "step": 399
    },
    {
      "epoch": 0.99,
      "learning_rate": 1.2813624190484705e-07,
      "loss": 0.8082,
      "step": 400
    }
  ],
  "logging_steps": 1,
  "max_steps": 404,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "total_flos": 4.495223430237389e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
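
The log above records a linear warmup to 3e-4 over the first 100 steps, followed by cosine decay toward zero at max_steps 404. A minimal sketch of how this file might be loaded and its log_history plotted (not part of the checkpoint itself; the relative path below is an assumption, adjust it to your layout):

import json

import matplotlib.pyplot as plt

# Load the trainer state saved alongside the checkpoint.
with open("checkpoint-400/trainer_state.json") as f:
    state = json.load(f)

# Each log_history entry carries "step", "loss", and "learning_rate".
steps = [e["step"] for e in state["log_history"]]
losses = [e["loss"] for e in state["log_history"]]
lrs = [e["learning_rate"] for e in state["log_history"]]

fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(steps, losses)
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)  # warmup-then-cosine shape should be visible here
ax_lr.set_xlabel("step")
ax_lr.set_ylabel("learning rate")
fig.tight_layout()
plt.show()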