Femboyuwu2000 committed on
Commit 91a805e · verified · 1 Parent(s): b4c2736

Training in progress, step 5720, checkpoint

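The files below are the periodic checkpoint that transformers' Trainer writes during a run. As a rough sketch of how such a save schedule is configured, using only the values visible in trainer_state.json in this commit (save_steps 20, logging_steps 20, train_batch_size 8, 2 epochs); the output path and the model/dataset eventually handed to Trainer are hypothetical, not taken from this repo:

    from transformers import TrainingArguments

    # Only these values are grounded in the trainer_state.json of this commit;
    # output_dir and the model/dataset passed to Trainer are placeholders.
    args = TrainingArguments(
        output_dir="outputs",            # checkpoints land in outputs/checkpoint-<step>
        num_train_epochs=2,              # "num_train_epochs": 2
        per_device_train_batch_size=8,   # "train_batch_size": 8
        logging_steps=20,                # "logging_steps": 20
        save_steps=20,                   # "save_steps": 20, i.e. a checkpoint every 20 steps
    )

    # Trainer(model=..., args=args, train_dataset=...).train() would then write
    # checkpoint-5720 at step 5720, which is the state uploaded in this commit.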
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:59d1f0fae334930ed8079d00f44c27d89643fc0441c8182fd6c384af44315cb0
+oid sha256:544a421be52957d5613621601224858dfd162233e6428573b09ec9d5573dc77a
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dbf70e69484ef4930af6adb3972e5d151c2e40923012c6e59178aa5b08fc0f62
+oid sha256:d9e2d9f85dd6f1706fbaa505cbb386e14ca466c6dd237557e0df4b31602f92b2
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8e9d9c35639598e4263e7ecc7045b5427db463c314e637805772291b54322a1a
+oid sha256:cad25d74c98e57d04d591e95b20be1467f4abee9743346612ed99c4d529ebe19
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0d31553c5ae8347a198cb4d56280c26db3af2cfa5039328d122439d6a52e0f08
+oid sha256:1bad31e19ad1444caf40fc8eed23cdbd3a28f5e4fbb3a28184d66d17da83ca16
 size 1064
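The four files above are Git LFS pointers rather than raw binaries: `oid sha256:<hex>` is the SHA-256 digest of the actual file content and `size` is its byte count, so each diff swaps the digest while the size stays the same. A minimal verification sketch for a locally pulled copy, with the path and expected digest taken from the adapter diff above (purely illustrative):

    import hashlib
    from pathlib import Path

    def content_sha256(path: Path) -> str:
        """SHA-256 of the file's bytes; should equal the oid in its LFS pointer."""
        h = hashlib.sha256()
        with path.open("rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest()

    path = Path("last-checkpoint/adapter_model.safetensors")
    expected = "544a421be52957d5613621601224858dfd162233e6428573b09ec9d5573dc77a"
    if path.exists():
        # Expect: True 13982248 if the LFS object was pulled correctly.
        print(content_sha256(path) == expected, path.stat().st_size)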
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.456,
+  "epoch": 0.4576,
   "eval_steps": 500,
-  "global_step": 5700,
+  "global_step": 5720,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2002,6 +2002,13 @@
       "learning_rate": 2.516862579011255e-05,
       "loss": 3.3665,
       "step": 5700
+    },
+    {
+      "epoch": 0.46,
+      "grad_norm": 28.95624542236328,
+      "learning_rate": 2.5132858131187446e-05,
+      "loss": 3.4688,
+      "step": 5720
     }
   ],
   "logging_steps": 20,
@@ -2009,7 +2016,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.3500387561013248e+16,
+  "total_flos": 1.3547988940161024e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null