BobaZooba committed
Commit a5c794d
1 Parent(s): b0e8b84

Training in progress, step 50, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cf1ef681bacc89a848700eeaa4ef92416ef6e365bf799a8e2fa4dfe44c59282e
+oid sha256:f0dc74eb4f1664d79e8ca2ad4c0dcaf65b8ebee3025e61594d69d01fb996247d
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:45185dea832327b9d68a06373b811b428124e11655ef59d9751c97288f635b17
+oid sha256:2fa94faad5f7c2fac41c1d3489928c35288395f9537c10cdb8f345784281ca99
 size 42545748
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8342d6f38894c838a543183f90853fe4cfc9a7f7a735ec728b9f9f2daf305e32
+oid sha256:74d3fe4297fd36d6939c92e784b1ab4781c227d97ebf023707e527bcbb1c9fe4
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3102e3be46ad9a9b5ceee743993b1b6ad0818bd4c7d22888f9c7d0317f12b2bd
+oid sha256:d102a8b779e588307a5ff1ccea3e1e9ce6254fd84338a3b83b04cad35c2649e7
 size 1064
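
The four binary files above are stored through Git LFS, so the repository itself only holds small pointer files (spec version, sha256 oid, byte size) while the actual tensors live in LFS storage. As a minimal sketch, assuming the checkpoint has been pulled locally under last-checkpoint/ (path and file choice are illustrative), a downloaded file can be checked against its pointer like this:

import hashlib
from pathlib import Path

def sha256_of(path: Path) -> str:
    # Stream the file through sha256 so large checkpoints do not need to fit in memory.
    digest = hashlib.sha256()
    with path.open("rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Values copied from the adapter_model.safetensors pointer in this commit.
expected_oid = "f0dc74eb4f1664d79e8ca2ad4c0dcaf65b8ebee3025e61594d69d01fb996247d"
expected_size = 83945296

local = Path("last-checkpoint/adapter_model.safetensors")  # assumed local path
assert local.stat().st_size == expected_size, "size mismatch"
assert sha256_of(local) == expected_oid, "sha256 mismatch"
print("pointer matches the downloaded file")
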
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0006218905472636816,
+  "epoch": 0.0012437810945273632,
   "eval_steps": 1000,
-  "global_step": 25,
+  "global_step": 50,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -157,13 +157,163 @@
       "learning_rate": 0.00016421052631578948,
       "loss": 1.7079,
       "step": 25
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.00016210526315789473,
+      "loss": 1.4506,
+      "step": 26
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.00016,
+      "loss": 1.673,
+      "step": 27
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.00015789473684210527,
+      "loss": 1.5159,
+      "step": 28
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.00015578947368421052,
+      "loss": 1.8044,
+      "step": 29
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.0001536842105263158,
+      "loss": 1.311,
+      "step": 30
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.00015157894736842108,
+      "loss": 1.5876,
+      "step": 31
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.00014947368421052633,
+      "loss": 1.5843,
+      "step": 32
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.00014736842105263158,
+      "loss": 1.7256,
+      "step": 33
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.00014526315789473686,
+      "loss": 1.5446,
+      "step": 34
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.0001431578947368421,
+      "loss": 1.9188,
+      "step": 35
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.00014105263157894736,
+      "loss": 1.6284,
+      "step": 36
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.00013894736842105264,
+      "loss": 1.6446,
+      "step": 37
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.0001368421052631579,
+      "loss": 1.5255,
+      "step": 38
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.00013473684210526317,
+      "loss": 1.4796,
+      "step": 39
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.00013263157894736842,
+      "loss": 1.8069,
+      "step": 40
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.0001305263157894737,
+      "loss": 1.5269,
+      "step": 41
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.00012842105263157895,
+      "loss": 1.3387,
+      "step": 42
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.0001263157894736842,
+      "loss": 1.3193,
+      "step": 43
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.00012421052631578949,
+      "loss": 1.6728,
+      "step": 44
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.00012210526315789474,
+      "loss": 1.5991,
+      "step": 45
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.00012,
+      "loss": 1.6619,
+      "step": 46
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.00011789473684210525,
+      "loss": 1.4312,
+      "step": 47
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.00011578947368421053,
+      "loss": 1.4846,
+      "step": 48
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.0001136842105263158,
+      "loss": 1.3929,
+      "step": 49
+    },
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.00011157894736842105,
+      "loss": 1.647,
+      "step": 50
     }
   ],
   "logging_steps": 1,
   "max_steps": 100,
   "num_train_epochs": 1,
   "save_steps": 25,
-  "total_flos": 1319123636748288.0,
+  "total_flos": 2841070876655616.0,
   "trial_name": null,
   "trial_params": null
 }
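
The trainer_state.json diff above advances the run from global_step 25 to 50, appending one log_history record per step (logging_steps is 1) and updating epoch and total_flos accordingly. A small sketch for inspecting the saved state after downloading the checkpoint might look like the following; the path is an assumption and only fields visible in the diff are read:

import json
from pathlib import Path

state = json.loads(Path("last-checkpoint/trainer_state.json").read_text())  # assumed local path

print("global_step:", state["global_step"])  # 50 at this checkpoint
print("epoch:", state["epoch"])              # ~0.0012 of one pass over the data
print("max_steps:", state["max_steps"])      # 100, so this is the halfway checkpoint

# Each log_history record carries epoch, learning_rate, loss and step for one logged step.
for record in state["log_history"][-3:]:
    print(record["step"], record["learning_rate"], record["loss"])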