Hanzalwi committed on
Commit
8781b4d
1 Parent(s): dce76d6

Training in progress, step 1200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6855c333ac1c697e6d06a877d9e75bb8a598912c0b6bd089af9789e465ca3dac
+oid sha256:58eadfa6617e3e1d0ff3394dcf553eabd47aba1de02ab15fdac8eaab8c08cf66
 size 25191576
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4e8039c70b87ed8bb202c149c44024b2e5af04921eef6a357d9e91ca56ccf729
+oid sha256:07eeb9ee2ce78c05124fba829a27d09a26082535809e93a9d89c7a7c42331fb5
 size 50444805
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cf0a8d4e9a6653930f212c4ac0808e06254e1f11ce9e27417a57091c7e3e0282
+oid sha256:0215b9b0c756446a0a4927e0fe6fa21e780ad4150be2177b0182afb17c78ac6a
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1f5f3a2d7191245a70e1410d8951fd2f5c94b0b91f13bf39c3f76fe8aaf57fd4
+oid sha256:3d8d6be7898f87772ccbc5c732e900fe63a643c4595ce6af3d6bc6f811ba4b65
 size 627
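
Each of the four checkpoint files above is stored through Git LFS, so the commit only changes the pointer text (the `oid sha256:` hash and the byte `size`), not the binary itself. As a rough illustration only, a small script like the sketch below could check that a locally downloaded copy matches the pointer recorded in this commit. The expected hash and size are copied from the adapter_model.safetensors hunk; the local path and the script itself are assumptions and are not part of this repository.

import hashlib
from pathlib import Path

# Assumed local path to the downloaded checkpoint file (not part of this commit).
FILE = Path("last-checkpoint/adapter_model.safetensors")
# Expected oid and size copied from the new LFS pointer in this diff.
EXPECTED_SHA256 = "58eadfa6617e3e1d0ff3394dcf553eabd47aba1de02ab15fdac8eaab8c08cf66"
EXPECTED_SIZE = 25191576

def matches_pointer(path: Path, expected_sha256: str, expected_size: int) -> bool:
    """Hash the file in chunks and compare against the LFS pointer fields."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256 and path.stat().st_size == expected_size

if __name__ == "__main__":
    print("match" if matches_pointer(FILE, EXPECTED_SHA256, EXPECTED_SIZE) else "mismatch")
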
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.9988044500350952,
-  "best_model_checkpoint": "./outputs/checkpoint-1100",
-  "epoch": 1.4666666666666668,
+  "best_metric": 0.994540810585022,
+  "best_model_checkpoint": "./outputs/checkpoint-1200",
+  "epoch": 1.6,
   "eval_steps": 100,
-  "global_step": 1100,
+  "global_step": 1200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -161,6 +161,20 @@
       "eval_samples_per_second": 5.438,
       "eval_steps_per_second": 0.682,
       "step": 1100
+    },
+    {
+      "epoch": 1.6,
+      "learning_rate": 0.0002,
+      "loss": 0.8584,
+      "step": 1200
+    },
+    {
+      "epoch": 1.6,
+      "eval_loss": 0.994540810585022,
+      "eval_runtime": 353.5912,
+      "eval_samples_per_second": 5.458,
+      "eval_steps_per_second": 0.684,
+      "step": 1200
     }
   ],
   "logging_steps": 100,
@@ -168,7 +182,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 3.110486213260984e+17,
+  "total_flos": 3.392066003002245e+17,
   "trial_name": null,
   "trial_params": null
 }
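
The trainer_state.json hunks above record the new best metric (0.994540810585022 at ./outputs/checkpoint-1200), the step-1200 training log entry, and the step-1200 evaluation entry. As a minimal sketch, assuming the checkpoint directory from this commit has been downloaded locally, the following could be used to inspect that state programmatically; the field names and values come from the diff, while the local path and the script itself are illustrative assumptions, not part of this repository.

import json
from pathlib import Path

# Assumed local copy of the checkpoint directory from this commit.
state = json.loads(Path("last-checkpoint/trainer_state.json").read_text())

# Top-level fields updated in this commit.
print("best metric:          ", state["best_metric"])            # 0.994540810585022
print("best model checkpoint:", state["best_model_checkpoint"])  # ./outputs/checkpoint-1200
print("global step:          ", state["global_step"])            # 1200

# The newest evaluation record in log_history is the step-1200 entry added here.
latest_eval = [e for e in state["log_history"] if "eval_loss" in e][-1]
print("latest eval loss:     ", latest_eval["eval_loss"], "at step", latest_eval["step"])
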