Pranay17 committed
Commit 0c7e056 · verified
1 Parent(s): 9d6e0bd

Training in progress, step 2000, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7f6c9491adb6089056fda920e27d5868f9c470a97da0823064432f6788c44756
+oid sha256:d748f56c86d9b2ec091d44a85ccd94175df63c0bf630350ae13455b500f5873c
 size 42002584
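The checkpoint binaries in this commit are Git LFS pointer files: only the `oid sha256:` and `size` lines change, while the weights themselves live in LFS storage. A minimal sketch for checking a fetched blob against its pointer (the path and expected digest are taken from the updated pointer above, and it assumes the blob has already been pulled, e.g. with `git lfs pull`):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream-hash the file so large checkpoints never sit fully in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid copied from the updated LFS pointer above.
EXPECTED = "d748f56c86d9b2ec091d44a85ccd94175df63c0bf630350ae13455b500f5873c"
actual = sha256_of("last-checkpoint/adapter_model.safetensors")
print("OK" if actual == EXPECTED else f"digest mismatch: {actual}")
```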
last-checkpoint/global_step2000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81a3519a1a39e227ba1794212ac52f9ee969bfbc9ef603837beadb366458e497
+size 251710672
last-checkpoint/global_step2000/mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:209a7a979e924832a384d3d2fecc6c99d750e116bf2e80357edb2195876eb41a
+size 47955328
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step1500
+global_step2000
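The two new `global_step2000/*.pt` files are DeepSpeed ZeRO shards (bf16 optimizer states for rank 0 plus the rank-0 model states), and the `latest` file is the tag DeepSpeed reads to decide which `global_step*` directory to resume from. A sketch, assuming DeepSpeed is installed and the directory layout matches this commit, of consolidating those shards into a single fp32 state dict with DeepSpeed's `zero_to_fp32` utility:

```python
# Sketch only: assumes the layout shown in this commit, i.e. last-checkpoint/
# containing a `latest` tag file plus the global_step2000/ shard directory.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# With tag=None the utility reads `latest`, which now points at global_step2000.
state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint", tag=None)

# Inspect a few consolidated parameters to confirm the merge worked.
for name, tensor in list(state_dict.items())[:5]:
    print(name, tuple(tensor.shape))
```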
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:721c0a2243fa57a1e6ca760a9478078a0f6bb44a899a4293351badcaa44577ca
+oid sha256:12ff687702c9cb54cfdeb1509074bb19e28d6929ec859a93af35778558181b6e
 size 14244
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 8.547008547008547,
+  "epoch": 11.396011396011396,
   "eval_steps": 1000,
-  "global_step": 1500,
+  "global_step": 2000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -224,6 +224,76 @@
       "learning_rate": 0.00012522522522522524,
       "loss": 0.0708,
       "step": 1500
+    },
+    {
+      "epoch": 8.831908831908832,
+      "grad_norm": 1.3793889284133911,
+      "learning_rate": 0.00012272272272272273,
+      "loss": 0.0748,
+      "step": 1550
+    },
+    {
+      "epoch": 9.116809116809117,
+      "grad_norm": 0.7198516130447388,
+      "learning_rate": 0.00012022022022022023,
+      "loss": 0.0695,
+      "step": 1600
+    },
+    {
+      "epoch": 9.401709401709402,
+      "grad_norm": 0.4231278896331787,
+      "learning_rate": 0.00011771771771771771,
+      "loss": 0.0671,
+      "step": 1650
+    },
+    {
+      "epoch": 9.686609686609687,
+      "grad_norm": 2.0300514698028564,
+      "learning_rate": 0.00011521521521521521,
+      "loss": 0.0659,
+      "step": 1700
+    },
+    {
+      "epoch": 9.971509971509972,
+      "grad_norm": 1.0142486095428467,
+      "learning_rate": 0.00011271271271271271,
+      "loss": 0.0714,
+      "step": 1750
+    },
+    {
+      "epoch": 10.256410256410255,
+      "grad_norm": 0.3013649582862854,
+      "learning_rate": 0.0001102102102102102,
+      "loss": 0.0625,
+      "step": 1800
+    },
+    {
+      "epoch": 10.54131054131054,
+      "grad_norm": 0.974827766418457,
+      "learning_rate": 0.00010770770770770771,
+      "loss": 0.0651,
+      "step": 1850
+    },
+    {
+      "epoch": 10.826210826210826,
+      "grad_norm": 1.0164273977279663,
+      "learning_rate": 0.0001052052052052052,
+      "loss": 0.066,
+      "step": 1900
+    },
+    {
+      "epoch": 11.11111111111111,
+      "grad_norm": 0.12340305745601654,
+      "learning_rate": 0.0001027027027027027,
+      "loss": 0.0658,
+      "step": 1950
+    },
+    {
+      "epoch": 11.396011396011396,
+      "grad_norm": 1.1315168142318726,
+      "learning_rate": 0.0001002002002002002,
+      "loss": 0.0611,
+      "step": 2000
     }
   ],
   "logging_steps": 50,
@@ -243,7 +313,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 4.082014482962842e+16,
+  "total_flos": 5.442536950726656e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null