masatochi committed on
Commit f9d84ac
1 Parent(s): 462d8fd

Training in progress, step 65, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fa01364274ded5ec9a4d08f97c52de0069539f51c4125121359aeb7c88679625
+oid sha256:c48988e3503672bcd20efe09c5a1bafa0c5cbe364fd5f649d4030f5561ebd2c3
 size 59827904
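
Each checkpoint blob in this commit is tracked through a Git LFS pointer file like the one above: the commit only swaps the `oid sha256:` digest, while the recorded byte size stays the same. As a minimal sketch (assuming a clone where the pointer has not yet been smudged into the real blob; paths and function names are illustrative, not part of this repo), the three-line pointer format can be parsed and a downloaded blob checked against it:

import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    # A pointer file has three "key value" lines: version, oid, size.
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        if line.strip():
            key, _, value = line.partition(" ")
            fields[key] = value
    algo, _, digest = fields["oid"].partition(":")  # e.g. "sha256:c48988e3..."
    return {"version": fields["version"], "algo": algo,
            "digest": digest, "size": int(fields["size"])}

def verify_lfs_blob(pointer_path: str, blob_path: str) -> bool:
    # True only when the downloaded blob matches both the size and the digest
    # recorded in the pointer file.
    meta = parse_lfs_pointer(pointer_path)
    data = Path(blob_path).read_bytes()
    return (len(data) == meta["size"]
            and hashlib.sha256(data).hexdigest() == meta["digest"])

For the adapter above, verify_lfs_blob("last-checkpoint/adapter_model.safetensors", "/tmp/adapter_model.safetensors") should return True only if the download hashes to the new c48988e3... digest and is exactly 59827904 bytes.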
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8eac30af77f67caccc493ca1ea1cccc925b6e62b50ceaff951fbb31647f07ce1
+oid sha256:63347c4edc0740f2f38642ed559ad0cf3aac574c28ee3881620e2607864a653e
 size 30875540
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7dfa3332294a25a14a5137921fc31f24176a29def678e629a1769ffeb46f154b
+oid sha256:893e1298e33ca8842633f58141fa81768aef1e0b4a74c16a299e4ee46dc662bf
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:78248a64468e8e03af894427063f3f9a858b670b67d13949fb12f06211d294f4
+oid sha256:8977565a0b4b10da6945c32ba36caa3dc40fc9a4cdc4d66a537fb19aba96e0c7
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.029341646799926645,
+  "epoch": 0.03178678403325386,
   "eval_steps": 34,
-  "global_step": 60,
+  "global_step": 65,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -443,6 +443,41 @@
       "learning_rate": 0.00018502171357296144,
       "loss": 10.2129,
       "step": 60
+    },
+    {
+      "epoch": 0.02983067424659209,
+      "grad_norm": Infinity,
+      "learning_rate": 0.00018403440716378928,
+      "loss": 8.8496,
+      "step": 61
+    },
+    {
+      "epoch": 0.030319701693257536,
+      "grad_norm": Infinity,
+      "learning_rate": 0.00018301840308155507,
+      "loss": 10.0162,
+      "step": 62
+    },
+    {
+      "epoch": 0.03080872913992298,
+      "grad_norm": 5.941328178675974e+18,
+      "learning_rate": 0.00018197404829072215,
+      "loss": 9.124,
+      "step": 63
+    },
+    {
+      "epoch": 0.03129775658658842,
+      "grad_norm": Infinity,
+      "learning_rate": 0.00018090169943749476,
+      "loss": 9.6824,
+      "step": 64
+    },
+    {
+      "epoch": 0.03178678403325386,
+      "grad_norm": Infinity,
+      "learning_rate": 0.000179801722728024,
+      "loss": 9.5723,
+      "step": 65
     }
   ],
   "logging_steps": 1,
@@ -462,7 +497,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.3226878055743488e+17,
+  "total_flos": 1.4329117893722112e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null