MohamedAhmedAE committed
Commit
b75a263
1 Parent(s): 5dc0d5e

Training in progress, step 5000, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8136d419b8890434688f361ff9055f226cad86b60136d37a16ab511ecdb95732
+ oid sha256:d3898deded357ae3f624f6da11d5a240cdabb03ebb6c20aaa1514ff2afc28f42
  size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8ae7ca943292c1885fd2fa8fc4a0d7048e58e847fa6978c2dee6a1669f1d7daf
+ oid sha256:8b6589201aabf4474a11e89f074f30710b9c14f130975414126d44fac37ab285
  size 84581014
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:450d4161861142b0c607b082cb7975db64bce31f3c9c4ed471faa6a87cc3c651
+ oid sha256:6c2a46d21812f033b59f8253d10c55fd001488b63093ce2c9a28dd3b2087601c
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cf6116349e1acbc51c06feeb07eabf38596d13779c8b6455b06cf034d85b895b
+ oid sha256:dadb6c8212cca45778812f2e418e14e8bb1e227cbf578c8c3f0b8b32adfef7b7
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.003569582693473241,
+  "epoch": 0.0037183153057012927,
   "eval_steps": 2000,
-  "global_step": 4800,
+  "global_step": 5000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -175,6 +175,13 @@
       "learning_rate": 1.9999974953119e-05,
       "loss": 1.597,
       "step": 4800
+    },
+    {
+      "epoch": 0.0,
+      "grad_norm": 2.4163918495178223,
+      "learning_rate": 1.999997281785647e-05,
+      "loss": 1.5405,
+      "step": 5000
     }
   ],
   "logging_steps": 200,
@@ -182,7 +189,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 5,
   "save_steps": 200,
-  "total_flos": 6.297651302741606e+16,
+  "total_flos": 6.541947938699674e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null