Femboyuwu2000 committed on
Commit 17d15ab
1 Parent(s): eb813b3

Training in progress, step 9260, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f7fe8097e3bf3e1d6200427c42cdec0c2208ac0a54599f9e26cee7d2c0f344fc
+oid sha256:938525068b6affe51d56c10c21cd3683b8cd87b2c9b36021f0fc9f229aa10e06
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3eaf1fbcdbde0059ee1f06c827026e1b557db2f3b07d9f7936261fe98f775bdb
+oid sha256:fef582d6fb7f736aed00910a585ea555f629e0cab2920275497f4ecfb33fa9c0
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e15b7ccc90968bcd87b2edf84a9cd99b1e92aa9a9e59ab6ccd2cb1a860be1afb
+oid sha256:2c27546fd78d101ec781385e794415edc478f7ce4f562ffe7088df05bb23fa05
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:63037139d6ccc8c3ea7fa5702f09392e604fb2ed06455649be80ce23f1ff1ce0
+oid sha256:aa213266e45979e004936b8984e2a9dc7a7156f7190dd7acdf777fd4628091f0
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.7392,
+  "epoch": 0.7408,
   "eval_steps": 500,
-  "global_step": 9240,
+  "global_step": 9260,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -3241,6 +3241,13 @@
       "learning_rate": 1.7562184578517207e-05,
       "loss": 3.4478,
       "step": 9240
+    },
+    {
+      "epoch": 0.74,
+      "grad_norm": 22.82186508178711,
+      "learning_rate": 1.751430386579385e-05,
+      "loss": 3.4226,
+      "step": 9260
     }
   ],
   "logging_steps": 20,
@@ -3248,7 +3255,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 2.184768798641357e+16,
+  "total_flos": 2.190296594910413e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null