bhuvanmdev committed
Commit: 95ae1c8
Parent(s): bbbe232
Training in progress, step 260, checkpoint
last-checkpoint/adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:2136a266464d0b9812183c2c7ec272f51ec8b4b2a73d98b99839dce69364c4ad
 size 100697728
last-checkpoint/optimizer.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:7b78d6de461909c4794f6a8ea0bace8d45003ec511a44c6c02656a3209c5b403
 size 201541754
last-checkpoint/rng_state.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:93d073fb3e4a146a793ea03f10d54796a90197d4dfbe3f76b45137b6f4230b74
 size 14244
last-checkpoint/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:12e5cd7fc3da76dcdde9887af968b8b3aba1c1bb4ddd30a416d2f9a0e9367d84
 size 1064
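The four files above are Git LFS pointers (version / oid sha256 / size lines) rather than the binary blobs themselves; only the new-side oids change at each checkpoint. As a hedged sketch, one way to sanity-check a pulled blob against its pointer, using the adapter_model.safetensors values from this commit (the local path and the helper name are illustrative, not part of this repo):

import hashlib
import os

def verify_lfs_pointer(local_path: str, expected_sha256: str, expected_size: int) -> bool:
    """Hypothetical helper: compare a pulled LFS blob against its pointer's oid and size."""
    if os.path.getsize(local_path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(local_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256

# Values taken from the adapter_model.safetensors pointer in this commit.
print(verify_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "2136a266464d0b9812183c2c7ec272f51ec8b4b2a73d98b99839dce69364c4ad",
    100697728,
))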
last-checkpoint/trainer_state.json
CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.
+  "epoch": 0.09302325581395349,
   "eval_steps": 500,
-  "global_step":
+  "global_step": 260,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -199,14 +199,30 @@
       "loss": 0.446,
       "num_input_tokens_seen": 162267,
       "step": 240
+    },
+    {
+      "epoch": 0.08944543828264759,
+      "grad_norm": 0.360166996717453,
+      "learning_rate": 0.0001821109123434705,
+      "loss": 0.4549,
+      "num_input_tokens_seen": 168817,
+      "step": 250
+    },
+    {
+      "epoch": 0.09302325581395349,
+      "grad_norm": 0.342385470867157,
+      "learning_rate": 0.0001813953488372093,
+      "loss": 0.4297,
+      "num_input_tokens_seen": 174828,
+      "step": 260
     }
   ],
   "logging_steps": 10,
   "max_steps": 2795,
-  "num_input_tokens_seen":
+  "num_input_tokens_seen": 174828,
   "num_train_epochs": 1,
   "save_steps": 20,
-  "total_flos":
+  "total_flos": 3931264333799424.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
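The trainer_state.json above is what transformers' Trainer restores when training resumes from this checkpoint (e.g. trainer.train(resume_from_checkpoint="last-checkpoint")). A minimal sketch for inspecting it, assuming the repo is checked out locally with the checkpoint files present; note that the logged epoch values equal global_step / max_steps (260 / 2795 ≈ 0.09302):

import json

# Assumes last-checkpoint/trainer_state.json from this commit is available locally.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print("global_step:", state["global_step"])            # 260 in this commit
print("epoch:", state["epoch"])                         # 0.09302325581395349 == 260 / 2795
print("tokens seen:", state["num_input_tokens_seen"])   # 174828

# The three most recent log_history records (steps 240, 250, 260 here).
for record in state["log_history"][-3:]:
    print(record["step"], record["loss"], record.get("learning_rate"))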