dzanbek committed on
Commit 8241f4a · verified · 1 Parent(s): f014576

Training in progress, step 20, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3558b5f17b9fff7e7424e90889b82673f72fba63ca6ce02b3d2f53c8dd6db8dd
+oid sha256:a9452f8b9dfac05067c81a5ce4311e08d2e7f2c9627cc23c651e223d7792566c
 size 80013120
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1a41e532f7dca479b1520e59da012bffa26af3383bab43d02f08f0aa683fd512
+oid sha256:6837756df16d4b861371d6ae7755d2c5a377b47c1258b2d95da10db2ac011be7
 size 160284754
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c2972d332e3d5fa4abdd6b0429885a3ecc36eabdf15f91d3aebed7cea929537a
+oid sha256:b34fe06338d52b135f06f73901cbbeeea2e9a59fb363b8294881aaf807d72655
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bb578e75c11a81e85dda67a691f96ba4793a02960f1409fd3e1511aac873491a
+oid sha256:1e2ed9259304616a8ecebc61c5d000777b2978635f7a705b8d7081c480ce0bde
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.005963918294319368,
+  "epoch": 0.011927836588638736,
   "eval_steps": 2,
-  "global_step": 10,
+  "global_step": 20,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -125,6 +125,116 @@
       "eval_samples_per_second": 4.298,
       "eval_steps_per_second": 2.149,
       "step": 10
+    },
+    {
+      "epoch": 0.006560310123751305,
+      "grad_norm": 1.138580083847046,
+      "learning_rate": 0.00019510565162951537,
+      "loss": 0.5474,
+      "step": 11
+    },
+    {
+      "epoch": 0.007156701953183241,
+      "grad_norm": 0.8171162605285645,
+      "learning_rate": 0.00018090169943749476,
+      "loss": 0.2495,
+      "step": 12
+    },
+    {
+      "epoch": 0.007156701953183241,
+      "eval_loss": 0.24059152603149414,
+      "eval_runtime": 164.1937,
+      "eval_samples_per_second": 4.3,
+      "eval_steps_per_second": 2.15,
+      "step": 12
+    },
+    {
+      "epoch": 0.007753093782615178,
+      "grad_norm": 0.9625129103660583,
+      "learning_rate": 0.00015877852522924732,
+      "loss": 0.1852,
+      "step": 13
+    },
+    {
+      "epoch": 0.008349485612047115,
+      "grad_norm": 1.333743929862976,
+      "learning_rate": 0.00013090169943749476,
+      "loss": 0.2489,
+      "step": 14
+    },
+    {
+      "epoch": 0.008349485612047115,
+      "eval_loss": 0.19883611798286438,
+      "eval_runtime": 164.7477,
+      "eval_samples_per_second": 4.285,
+      "eval_steps_per_second": 2.143,
+      "step": 14
+    },
+    {
+      "epoch": 0.008945877441479051,
+      "grad_norm": 1.2359386682510376,
+      "learning_rate": 0.0001,
+      "loss": 0.2844,
+      "step": 15
+    },
+    {
+      "epoch": 0.009542269270910989,
+      "grad_norm": 1.2958358526229858,
+      "learning_rate": 6.909830056250527e-05,
+      "loss": 0.2865,
+      "step": 16
+    },
+    {
+      "epoch": 0.009542269270910989,
+      "eval_loss": 0.17618103325366974,
+      "eval_runtime": 164.6292,
+      "eval_samples_per_second": 4.288,
+      "eval_steps_per_second": 2.144,
+      "step": 16
+    },
+    {
+      "epoch": 0.010138661100342925,
+      "grad_norm": 1.5453649759292603,
+      "learning_rate": 4.12214747707527e-05,
+      "loss": 0.1897,
+      "step": 17
+    },
+    {
+      "epoch": 0.010735052929774861,
+      "grad_norm": 1.1022392511367798,
+      "learning_rate": 1.9098300562505266e-05,
+      "loss": 0.0626,
+      "step": 18
+    },
+    {
+      "epoch": 0.010735052929774861,
+      "eval_loss": 0.15503820776939392,
+      "eval_runtime": 164.3594,
+      "eval_samples_per_second": 4.295,
+      "eval_steps_per_second": 2.148,
+      "step": 18
+    },
+    {
+      "epoch": 0.0113314447592068,
+      "grad_norm": 0.8071819543838501,
+      "learning_rate": 4.8943483704846475e-06,
+      "loss": 0.149,
+      "step": 19
+    },
+    {
+      "epoch": 0.011927836588638736,
+      "grad_norm": 0.8944453597068787,
+      "learning_rate": 0.0,
+      "loss": 0.1737,
+      "step": 20
+    },
+    {
+      "epoch": 0.011927836588638736,
+      "eval_loss": 0.15017978847026825,
+      "eval_runtime": 164.4532,
+      "eval_samples_per_second": 4.293,
+      "eval_steps_per_second": 2.147,
+      "step": 20
     }
   ],
   "logging_steps": 1,
@@ -139,12 +249,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 6591291693465600.0,
+  "total_flos": 1.31825833869312e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null