masatochi committed
Commit e3b1682 · 1 Parent(s): ab06fa1

Training in progress, step 95, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5c33b6f41ff4ceb62efc4638d195dfc5b077d4e62d14f715b777185cfd64bb8a
+oid sha256:61aece05d6fca5a9a455223f54d11788fb69baa3a1bb939dcd1217cf74f42a8c
 size 59827904
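Because the adapter weights are stored through Git LFS, the diff above only swaps the pointer's sha256 oid; the object size stays at 59827904 bytes. Below is a minimal sketch, assuming the LFS object has been pulled locally, that re-hashes the file and compares it against the new pointer. The helper name is made up for illustration; only the path, oid, and size come from this commit.

import hashlib
from pathlib import Path

def verify_lfs_pointer(file_path: str, expected_oid: str, expected_size: int) -> bool:
    """Check that a locally pulled LFS object matches its pointer's sha256 oid and size."""
    path = Path(file_path)
    if path.stat().st_size != expected_size:
        return False
    digest = hashlib.sha256()
    with path.open("rb") as f:
        # Hash in 1 MiB chunks so large checkpoint files need not fit in memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Values copied from the new pointer above.
print(verify_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "61aece05d6fca5a9a455223f54d11788fb69baa3a1bb939dcd1217cf74f42a8c",
    59827904,
))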
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bdea95794d41522f6b051e8432670ba48b0134b62bcbf42c14086457fcc86d03
+oid sha256:595168fc88acc93538dac96ad7075991c7f5e24a1f60aa1e44636d2f2ebc40fe
 size 30875540
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1d7e9b37fba6634a583fc8b1bf58a4648f316317630483e214e684618a5aa0cb
+oid sha256:14a47bb6c7de5708f6f2b75126905ee2455abe93376f11950e796f2aa144c3bf
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dfdc0543e9ce40f0d0b0ee9752d10d130598c759cc5a2bd973736f6096894d17
+oid sha256:3160dd3097641f3bf4d4036c0ddfd8673184925120f088b5ebecc6a1e5c953dd
 size 1064
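The optimizer, scheduler, and RNG files updated above are the pickled PyTorch objects a trainer reloads when resuming from this checkpoint. The sketch below only inspects them; it is not this repository's training code. Only the file names and sizes come from the diff, and the paths are assumed to be relative to a local copy of the repo.

import torch

ckpt_dir = "last-checkpoint"

# These files hold pickled Python objects, so weights_only=False is needed on recent torch versions.
optimizer_state = torch.load(f"{ckpt_dir}/optimizer.pt", map_location="cpu", weights_only=False)
scheduler_state = torch.load(f"{ckpt_dir}/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load(f"{ckpt_dir}/rng_state.pth", map_location="cpu", weights_only=False)

print(list(optimizer_state))  # typically a dict with 'state' and 'param_groups'
print(scheduler_state)        # small LR-scheduler state, matching the 1064-byte file above
print(type(rng_state))        # RNG snapshot used to keep resumed runs reproducible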
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.04401247019988997,
+  "epoch": 0.04645760743321719,
   "eval_steps": 34,
-  "global_step": 90,
+  "global_step": 95,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -661,6 +661,41 @@
       "learning_rate": 0.00014457383557765386,
       "loss": 9.7034,
       "step": 90
+    },
+    {
+      "epoch": 0.044501497646555414,
+      "grad_norm": 1.0282610752728596e+19,
+      "learning_rate": 0.0001429120608772609,
+      "loss": 9.0363,
+      "step": 91
+    },
+    {
+      "epoch": 0.04499052509322086,
+      "grad_norm": Infinity,
+      "learning_rate": 0.00014123563174739037,
+      "loss": 9.3536,
+      "step": 92
+    },
+    {
+      "epoch": 0.0454795525398863,
+      "grad_norm": Infinity,
+      "learning_rate": 0.00013954512068705424,
+      "loss": 10.7834,
+      "step": 93
+    },
+    {
+      "epoch": 0.045968579986551746,
+      "grad_norm": Infinity,
+      "learning_rate": 0.00013784110500423104,
+      "loss": 9.8873,
+      "step": 94
+    },
+    {
+      "epoch": 0.04645760743321719,
+      "grad_norm": Infinity,
+      "learning_rate": 0.00013612416661871533,
+      "loss": 8.2064,
+      "step": 95
     }
   ],
   "logging_steps": 1,
@@ -680,7 +715,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.9840317083615232e+17,
+  "total_flos": 2.0942556921593856e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null