Femboyuwu2000 committed on
Commit 114aab2
1 Parent(s): 1409be2

Training in progress, step 60, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c05b93acda6a7b2f81f0afd85f5badfd9cfa3a6e9e1606092480a0fbff648c88
+oid sha256:888d6b2d183be0ad051b69e15bacc2f610a2138674c1cefb759c0b5ea44fb84b
 size 4725640
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b1a8fd6c5ca33512dfe3433cf2f0529599bf80a58b48b2d72c40c244f1ff4d38
+oid sha256:3da76e9afdf003a898a487820cd7ea1d43e840d64f8865874b93117bab741801
 size 2423738
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:87627699ac0553b87adb61769764b7859db5e6139e1fca8eca54989ab707ce0d
+oid sha256:28d42be630248a5213a70ba5e163d2391a05816dbb273d1b7b48dcd47e871fb4
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ba9088914de78dc99d767c21058d110a360c0bb50564c9ed4886490601d97b6d
+oid sha256:da2b3df137afc8ce0bf3abe9574aa4eb50b6c5bd11e6952c66c439eade1cc134
 size 1064
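
Note: the four files above are stored with Git LFS, so the diffs only touch their pointer files; each new checkpoint changes the sha256 oid while the recorded byte size stays the same. Below is a minimal Python sketch, assuming a local copy of a pointer's text and the resolved binary file (the paths are illustrative, not files in this repo), that checks a downloaded blob against the oid in its pointer:

import hashlib
from pathlib import Path

def lfs_oid(pointer_text: str) -> str:
    # A Git LFS pointer is three lines: version, "oid sha256:<hex>", size.
    for line in pointer_text.splitlines():
        if line.startswith("oid sha256:"):
            return line.split("oid sha256:", 1)[1].strip()
    raise ValueError("no sha256 oid found in pointer text")

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    # Hash the resolved file in chunks so large checkpoints fit in memory.
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical local paths, for illustration only.
pointer_text = Path("adapter_model.safetensors.pointer").read_text()
blob_path = Path("last-checkpoint/adapter_model.safetensors")
assert sha256_of(blob_path) == lfs_oid(pointer_text), "blob does not match its LFS pointer oid"
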
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 3.1130100472399276e-05,
+  "epoch": 4.669515070859891e-05,
   "eval_steps": 500,
-  "global_step": 40,
+  "global_step": 60,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -21,6 +21,13 @@
       "learning_rate": 3.2500000000000002e-06,
       "loss": 3.6056,
       "step": 40
+    },
+    {
+      "epoch": 0.0,
+      "grad_norm": 1.030365228652954,
+      "learning_rate": 4.9166666666666665e-06,
+      "loss": 3.526,
+      "step": 60
     }
   ],
   "logging_steps": 20,
@@ -28,7 +35,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 20,
-  "total_flos": 24304518549504.0,
+  "total_flos": 35326762795008.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null