ncbateman committed on
Commit
4c276bd
1 Parent(s): 8fbb862

Training in progress, step 290, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bff5ea59540fe9486298d16b1c5e07e830cb7454df8e49fb5701941042f38970
+oid sha256:d7e2e87355763edd3d8d8fc9f16426a52b729ef470fa0ed35fdf6a0d34a1d19e
 size 97307544
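
The checkpoint files in this commit are tracked through Git LFS, so the diff only touches the pointer (the sha256 oid and byte size), not the weights themselves. As a minimal sketch, assuming the repository has been cloned and the LFS objects pulled locally (e.g. with git lfs pull), the following checks the downloaded adapter file against the oid and size from the updated pointer; the expected values are copied from this diff, and the script is illustrative rather than part of the commit.

import hashlib
import os

# Expected values taken from the updated LFS pointer above (state at step 290).
EXPECTED_OID = "d7e2e87355763edd3d8d8fc9f16426a52b729ef470fa0ed35fdf6a0d34a1d19e"
EXPECTED_SIZE = 97307544
FILE_PATH = "last-checkpoint/adapter_model.safetensors"  # path relative to the repo root

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file through SHA-256 so large checkpoints are not loaded into memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == "__main__":
    assert os.path.getsize(FILE_PATH) == EXPECTED_SIZE, "size does not match the LFS pointer"
    assert sha256_of(FILE_PATH) == EXPECTED_OID, "sha256 does not match the LFS pointer"
    print("adapter_model.safetensors matches its Git LFS pointer")
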
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cf8e705be49471bb0c8b7ab4e2654c27c1bbbac7d427de970d6341d4d6a51632
+oid sha256:69b0bf7861f08e4352dac63f09fddd6dae58be12ccfacd2673ea034a1c7bbb52
 size 49846644
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2bd08400b897a4c94f407fc6f069e4949e235fcc43163cb517cd39dcd6f04847
+oid sha256:c4863ee1f3e4eb3a27956688d9477cddde272e35ff8d013a583ff2d67a2b5271
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:00305a50c12f8c5ecbafd69a728a0a7449bc703aaaf50988de0f32f2f4bdb6e8
+oid sha256:37ef35c880ce179d87a8b718acaf0bfe99028fc0acc2a163aedf97a2066df701
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.36881268197994177,
+  "epoch": 0.37528307990941445,
   "eval_steps": 386,
-  "global_step": 285,
+  "global_step": 290,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2010,6 +2010,41 @@
       "learning_rate": 9.906423629072434e-05,
       "loss": 1.0889,
       "step": 285
+    },
+    {
+      "epoch": 0.3701067615658363,
+      "grad_norm": 0.8891452550888062,
+      "learning_rate": 9.905628060031605e-05,
+      "loss": 0.8847,
+      "step": 286
+    },
+    {
+      "epoch": 0.37140084115173083,
+      "grad_norm": 0.8682214021682739,
+      "learning_rate": 9.904829155617945e-05,
+      "loss": 0.9311,
+      "step": 287
+    },
+    {
+      "epoch": 0.3726949207376254,
+      "grad_norm": 1.2218021154403687,
+      "learning_rate": 9.904026916374636e-05,
+      "loss": 0.92,
+      "step": 288
+    },
+    {
+      "epoch": 0.3739890003235199,
+      "grad_norm": 1.0069034099578857,
+      "learning_rate": 9.903221342847125e-05,
+      "loss": 1.0061,
+      "step": 289
+    },
+    {
+      "epoch": 0.37528307990941445,
+      "grad_norm": 0.7723405361175537,
+      "learning_rate": 9.902412435583128e-05,
+      "loss": 0.8627,
+      "step": 290
     }
   ],
   "logging_steps": 1,
@@ -2029,7 +2064,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3.186120939326669e+17,
+  "total_flos": 3.242017797911347e+17,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null