Saving weights and logs of step 1000
- events.out.tfevents.1635838353.t1v-n-f6f5b6cc-w-0.849668.0.v2 +3 -0
- flax_model.msgpack +1 -1
- merges.txt +0 -0
- run.sh +1 -1
events.out.tfevents.1635838353.t1v-n-f6f5b6cc-w-0.849668.0.v2
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:940b0993bc86940ea93e9db37903fe45c989201a9f7f2d5e0b04674d946ddc8b
+size 147207
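The new events file is the TensorBoard log pushed at step 1000; it is stored as a Git LFS pointer, with the actual payload being 147207 bytes. A minimal sketch for inspecting such a log locally, assuming the real file has been pulled from LFS and that a scalar tag like "train/loss" exists (the tag name is an assumption, not something this commit shows):

# Minimal sketch: list and read scalar tags from the TensorBoard event file.
# Assumes the real payload has been fetched from Git LFS; the tag name
# "train/loss" is a hypothetical example and may differ in this run.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

path = "events.out.tfevents.1635838353.t1v-n-f6f5b6cc-w-0.849668.0.v2"
acc = EventAccumulator(path)
acc.Reload()  # parse the event file from disk

print(acc.Tags()["scalars"])             # scalar tags actually logged
for event in acc.Scalars("train/loss"):  # hypothetical tag name
    print(event.step, event.value)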
flax_model.msgpack
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:28ca6cf8ae91921d3bbdd2181a884ee4416f6c680529d3e228347d56e886c020
 size 497764120
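flax_model.msgpack holds the serialized Flax parameter tree for the step-1000 checkpoint; only its LFS pointer (SHA-256 and size) changes in this diff. A minimal sketch for inspecting the checkpoint locally, assuming the roughly 498 MB payload has been fetched from LFS; flax.serialization.msgpack_restore is a standard Flax utility, and the parameter names it returns depend on the model architecture, which this diff does not show:

# Minimal sketch: load the raw parameter tree from the Flax checkpoint.
# Assumes flax_model.msgpack has been fetched from Git LFS (about 498 MB).
from flax.serialization import msgpack_restore

with open("flax_model.msgpack", "rb") as f:
    state = msgpack_restore(f.read())  # nested dict of arrays

print(list(state.keys()))  # top-level parameter groups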
merges.txt
ADDED
The diff for this file is too large to render.
run.sh
CHANGED
@@ -15,7 +15,7 @@ python run_clm_flax.py \
  --warmup_steps="1000" \
  --adam_beta1="0.9" --adam_beta2="0.98" --weight_decay="0.01" \
  --overwrite_output_dir \
- --num_train_epochs="
+ --num_train_epochs="30" \
  --logging_steps="500" \
  --save_steps="1000" \
  --eval_steps="1000" \
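With --save_steps="1000" and --eval_steps="1000", run_clm_flax.py checkpoints and evaluates every 1000 optimizer steps, so this commit is the run's first periodic push. A minimal sketch for pulling the step-1000 artifacts back down, assuming a placeholder repo id (the repository name is not shown in this view); huggingface_hub.hf_hub_download resolves the LFS pointers above to the real payloads:

# Minimal sketch: download the step-1000 artifacts from the Hub.
# "user/model-repo" is a hypothetical placeholder; the real repo id is not
# visible in this commit view.
from huggingface_hub import hf_hub_download

repo_id = "user/model-repo"  # hypothetical placeholder
weights = hf_hub_download(repo_id=repo_id, filename="flax_model.msgpack")
events = hf_hub_download(
    repo_id=repo_id,
    filename="events.out.tfevents.1635838353.t1v-n-f6f5b6cc-w-0.849668.0.v2",
)
print(weights, events)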