Femboyuwu2000 committed
Commit 4b7d677
1 Parent(s): 1fa5715

Training in progress, step 6280, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3e5f6360dfa9c2a789402315eb51b17d5c5437a5e36f3a7f4a8ff1180c978bd7
+oid sha256:4952d07d35404125ac82a69ff1d2b691b555ce36f8f5e2491332621aa761003d
 size 13982248
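The adapter weights are stored as a Git LFS pointer, so only the SHA-256 oid changes in the diff above. A minimal sketch, assuming the checkpoint directory has been downloaded locally with LFS content resolved and that the safetensors and torch packages are installed (the path simply mirrors the repository layout and is not part of the commit):

# Hypothetical sketch: inspect the tensors inside the updated adapter file.
from safetensors.torch import load_file

state_dict = load_file("last-checkpoint/adapter_model.safetensors")

# Print each adapter tensor name with its shape and dtype.
for name, tensor in state_dict.items():
    print(f"{name}: {tuple(tensor.shape)} {tensor.dtype}")

print(f"{len(state_dict)} tensors total")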
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ef67c02a39ed9ea9db9f9f9072b9280130156e0f2c168f6d3433e11a691906a0
+oid sha256:8dfa581ec2b543e12a4d9e7daa6a74d185441160dd1412bf1875fce58548b340
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:059a6e0450ff9489b2174c9501bf25eb13b58d453dbd5dc4fed1c213de807539
+oid sha256:c20249241b6c1615d63c46164c68a0aefa907c40365a5dc082bbe417fdd12bf6
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1291563f0ece66a71bc509a68887ccf6e08065ec53a4ab16c369d685623ef0fa
+oid sha256:2ac175a77f9ab524825431337f25ea7bdd98b98a7ae16ff30a1d8c19f59c4b3f
 size 1064
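Each of the files above follows the same Git LFS pointer layout: a spec version line, an "oid sha256:<digest>" line, and a "size <bytes>" line. A small sketch, assuming the files have been fetched with their LFS content, that recomputes a file's SHA-256 and compares it against the pointer's oid (the digest below is the new scheduler.pt oid from this commit):

# Hypothetical sketch: verify a downloaded checkpoint file against its LFS pointer.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "2ac175a77f9ab524825431337f25ea7bdd98b98a7ae16ff30a1d8c19f59c4b3f"
actual = sha256_of("last-checkpoint/scheduler.pt")
print("match" if actual == expected else f"mismatch: {actual}")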
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.4928,
+  "epoch": 0.5024,
   "eval_steps": 500,
-  "global_step": 6160,
+  "global_step": 6280,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2163,6 +2163,48 @@
       "learning_rate": 2.431975467831853e-05,
       "loss": 3.4445,
       "step": 6160
+    },
+    {
+      "epoch": 0.49,
+      "grad_norm": 39.8357048034668,
+      "learning_rate": 2.4281639511169457e-05,
+      "loss": 3.5702,
+      "step": 6180
+    },
+    {
+      "epoch": 0.5,
+      "grad_norm": 31.746124267578125,
+      "learning_rate": 2.424342698401391e-05,
+      "loss": 3.4539,
+      "step": 6200
+    },
+    {
+      "epoch": 0.5,
+      "grad_norm": 53.038482666015625,
+      "learning_rate": 2.4205117497683213e-05,
+      "loss": 3.5491,
+      "step": 6220
+    },
+    {
+      "epoch": 0.5,
+      "grad_norm": 28.155752182006836,
+      "learning_rate": 2.4166711454025754e-05,
+      "loss": 3.4353,
+      "step": 6240
+    },
+    {
+      "epoch": 0.5,
+      "grad_norm": 42.623130798339844,
+      "learning_rate": 2.4128209255902753e-05,
+      "loss": 3.4348,
+      "step": 6260
+    },
+    {
+      "epoch": 0.5,
+      "grad_norm": 44.352596282958984,
+      "learning_rate": 2.408961130718405e-05,
+      "loss": 3.4637,
+      "step": 6280
     }
   ],
   "logging_steps": 20,
@@ -2170,7 +2212,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.4563889634902016e+16,
+  "total_flos": 1.4845134723416064e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null