dimasik87 committed (verified)
Commit 91e20a2
1 Parent(s): e58c6e4

Training in progress, step 40, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:04ee1664bd29ace74cd834abf6756c1c6ad85e31fbf6b86e8e410e92077635c6
+oid sha256:17d8f9775ef2fc3a60893fdca6a2f8fbd01a155a36670674014b038c051fc527
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b31db628080555eedcc22be53e02e0d4a14ffe0bf6a0862b41c6b5703441cc5a
+oid sha256:82c445a33110cf590c00717fb81366f88f5a2e00b4e27ea9ef86d91ed7c3b78a
 size 168149074
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:866fac83e501a78c2e403a48d4ac843b9f01dc29bc6df7eeed2838bc1ca90679
+oid sha256:9ce5912b5c181cd4af1762a4a94b7debbebce7064999c1ea87bd89f515b28a5f
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fbf1d1277664600b4e977089813b848af48515edea03c4bdcf1a506540fabd37
+oid sha256:7c792918044964431737f4cb39f3769dbfd230048b1125ac69a6439eb6c8534b
 size 1064
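
The four files above are stored via Git LFS, so each diff only swaps the pointer's sha256 oid; the recorded sizes are unchanged, as expected when the same tensors are re-serialized with new values after five more steps. As a minimal sketch (the local path is an assumption, not part of this commit), a pulled file could be checked against its new pointer oid like this:

```python
# Minimal sketch (assumed local path): verify that a pulled LFS file matches
# the sha256 oid recorded in the updated pointer for adapter_model.safetensors.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so large checkpoints need not fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

EXPECTED = "17d8f9775ef2fc3a60893fdca6a2f8fbd01a155a36670674014b038c051fc527"
digest = sha256_of("last-checkpoint/adapter_model.safetensors")
print("ok" if digest == EXPECTED else f"mismatch: {digest}")
```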
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.1533406352683461,
+  "epoch": 0.17524644030668127,
   "eval_steps": 5,
-  "global_step": 35,
+  "global_step": 40,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -316,6 +316,49 @@
       "eval_samples_per_second": 9.406,
       "eval_steps_per_second": 4.751,
       "step": 35
+    },
+    {
+      "epoch": 0.15772179627601315,
+      "grad_norm": 0.09998418390750885,
+      "learning_rate": 5.4600950026045326e-05,
+      "loss": 0.0126,
+      "step": 36
+    },
+    {
+      "epoch": 0.16210295728368018,
+      "grad_norm": 0.060268584638834,
+      "learning_rate": 4.7750143528405126e-05,
+      "loss": 0.0054,
+      "step": 37
+    },
+    {
+      "epoch": 0.1664841182913472,
+      "grad_norm": 0.05094964802265167,
+      "learning_rate": 4.12214747707527e-05,
+      "loss": 0.0067,
+      "step": 38
+    },
+    {
+      "epoch": 0.17086527929901424,
+      "grad_norm": 0.07953735440969467,
+      "learning_rate": 3.5055195166981645e-05,
+      "loss": 0.0135,
+      "step": 39
+    },
+    {
+      "epoch": 0.17524644030668127,
+      "grad_norm": 0.07781189680099487,
+      "learning_rate": 2.9289321881345254e-05,
+      "loss": 0.0124,
+      "step": 40
+    },
+    {
+      "epoch": 0.17524644030668127,
+      "eval_loss": 0.01449812576174736,
+      "eval_runtime": 10.3095,
+      "eval_samples_per_second": 9.409,
+      "eval_steps_per_second": 4.753,
+      "step": 40
     }
   ],
   "logging_steps": 1,
@@ -335,7 +378,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.589389947404288e+16,
+  "total_flos": 2.959302797033472e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null