{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.6187161639597835,
  "eval_steps": 200,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "grad_norm": 0.5968947410583496,
      "learning_rate": 9.896800825593395e-05,
      "loss": 2.3935,
      "step": 10
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.6443502306938171,
      "learning_rate": 9.793601651186791e-05,
      "loss": 2.1763,
      "step": 20
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.4056578576564789,
      "learning_rate": 9.690402476780186e-05,
      "loss": 1.9601,
      "step": 30
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.24697279930114746,
      "learning_rate": 9.587203302373582e-05,
      "loss": 1.9009,
      "step": 40
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.24417832493782043,
      "learning_rate": 9.484004127966977e-05,
      "loss": 1.8698,
      "step": 50
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.19971579313278198,
      "learning_rate": 9.380804953560372e-05,
      "loss": 1.8502,
      "step": 60
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.21794500946998596,
      "learning_rate": 9.277605779153768e-05,
      "loss": 1.8139,
      "step": 70
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.20081552863121033,
      "learning_rate": 9.174406604747162e-05,
      "loss": 1.8246,
      "step": 80
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.2045777291059494,
      "learning_rate": 9.071207430340559e-05,
      "loss": 1.8009,
      "step": 90
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.21513579785823822,
      "learning_rate": 8.968008255933953e-05,
      "loss": 1.7745,
      "step": 100
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.2838321924209595,
      "learning_rate": 8.864809081527348e-05,
      "loss": 1.7831,
      "step": 110
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.19812746345996857,
      "learning_rate": 8.761609907120744e-05,
      "loss": 1.7554,
      "step": 120
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.30143260955810547,
      "learning_rate": 8.658410732714138e-05,
      "loss": 1.7707,
      "step": 130
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.21341949701309204,
      "learning_rate": 8.555211558307535e-05,
      "loss": 1.7476,
      "step": 140
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.24006041884422302,
      "learning_rate": 8.452012383900929e-05,
      "loss": 1.7653,
      "step": 150
    },
    {
      "epoch": 0.49,
      "grad_norm": 0.25095027685165405,
      "learning_rate": 8.348813209494324e-05,
      "loss": 1.7438,
      "step": 160
    },
    {
      "epoch": 0.53,
      "grad_norm": 0.2602318525314331,
      "learning_rate": 8.24561403508772e-05,
      "loss": 1.7728,
      "step": 170
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.26738253235816956,
      "learning_rate": 8.142414860681114e-05,
      "loss": 1.7433,
      "step": 180
    },
    {
      "epoch": 0.59,
      "grad_norm": 0.25230053067207336,
      "learning_rate": 8.039215686274511e-05,
      "loss": 1.7304,
      "step": 190
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.2360549122095108,
      "learning_rate": 7.936016511867905e-05,
      "loss": 1.7297,
      "step": 200
    },
    {
      "epoch": 0.62,
      "eval_loss": 1.744746446609497,
      "eval_runtime": 294.0669,
      "eval_samples_per_second": 35.172,
      "eval_steps_per_second": 4.397,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 969,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 200,
  "total_flos": 8.880783974203392e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}