Femboyuwu2000 committed
Commit 664a7c8
1 Parent(s): 445db25

Training in progress, step 9280, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:938525068b6affe51d56c10c21cd3683b8cd87b2c9b36021f0fc9f229aa10e06
+ oid sha256:9d33aa6465f9abb34233b6354f37a051718d38bdd7e741298841e4582d0f142d
  size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fef582d6fb7f736aed00910a585ea555f629e0cab2920275497f4ecfb33fa9c0
+ oid sha256:ba5204466ae4487e214855180a1192547e4ac27af7a7f63f134519d46ee40145
  size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2c27546fd78d101ec781385e794415edc478f7ce4f562ffe7088df05bb23fa05
+ oid sha256:aa880c1daade42489548428c80dfc7dd79b5600e67b13b79367956ccd4dd4f41
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:aa213266e45979e004936b8984e2a9dc7a7156f7190dd7acdf777fd4628091f0
+ oid sha256:cd7e7c0b78a68a269a0c2d97fdccfb6ac7781d12da5817b2cd744268907a9d68
  size 1064
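
The four files above are tracked with Git LFS, so the repository stores only a small pointer (spec version, sha256 object id, byte size); this checkpoint update swaps each oid while the sizes stay the same. A minimal sketch of reading such a pointer in Python, assuming a clone where the pointer text has not yet been replaced by the real binary (the helper name is illustrative, not part of this repo):

# Parse a Git LFS pointer file into its key/value fields.
# Only meaningful while the file is still a pointer, i.e. the LFS object
# has not been checked out / smudged into the actual weights.
from pathlib import Path

def read_lfs_pointer(path: str) -> dict:
    fields = {}
    for line in Path(path).read_text().splitlines():
        if line.strip():
            key, _, value = line.partition(" ")
            fields[key] = value
    return fields

pointer = read_lfs_pointer("last-checkpoint/adapter_model.safetensors")
print(pointer["oid"])   # e.g. sha256:9d33aa64...
print(pointer["size"])  # 13982248
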
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.7408,
+ "epoch": 0.7424,
  "eval_steps": 500,
- "global_step": 9260,
+ "global_step": 9280,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -3248,6 +3248,13 @@
  "learning_rate": 1.751430386579385e-05,
  "loss": 3.4226,
  "step": 9260
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 25.349609375,
+ "learning_rate": 1.7466396779212695e-05,
+ "loss": 3.4381,
+ "step": 9280
  }
  ],
  "logging_steps": 20,
@@ -3255,7 +3262,7 @@
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 20,
- "total_flos": 2.190296594910413e+16,
+ "total_flos": 2.1951059416940544e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null