pere committed
Commit ceb891b
1 Parent(s): 84ead3c

Saving weights and logs of step 1000

events.out.tfevents.1642748650.t1v-n-ccbf3e94-w-0.1812628.3.v2 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:65b2aa750afc378e6c1e0312baa298581ab407c4d4755726b5f00c2b901c6e2b
- size 13552832
+ oid sha256:1fda2929c8ecb145d559f38f289ce71595c6bb5bee9a7277c7dead9fee0f92c2
+ size 13702044
events.out.tfevents.1642860109.t1v-n-ccbf3e94-w-0.1929147.3.v2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c16c363c5b73e3712b3357517bdd1f1b30c385011cecdbcc4bd28fa0eafc3c7
+ size 146996
events.out.tfevents.1642861064.t1v-n-ccbf3e94-w-0.1931366.3.v2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d15ef0560e48098ae925c8c8da96ea7f88d33b7468389879f03f587b776c4e8f
+ size 147136
flax_model.msgpack CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5a8dfbba88992df82eaced28bc4267b817d61d09df4f33a3692905bc625c74b5
+ oid sha256:57204ed91836522ae201803961e2c406b3c5a6ab32b5fe35434f1d5c3b0d1ec5
  size 498796983
run_128_recover_1e.sh ADDED
@@ -0,0 +1,28 @@
+ python run_mlm_flax.py \
+ --output_dir="./" \
+ --model_type="roberta" \
+ --model_name_or_path="./" \
+ --config_name="roberta-base" \
+ --tokenizer_name="NbAiLab/nb-roberta-base" \
+ --dataset_name="NbAiLab/NCC" \
+ --cache_dir="/mnt/disks/flaxdisk/cache/" \
+ --max_seq_length="128" \
+ --weight_decay="0.01" \
+ --per_device_train_batch_size="232" \
+ --per_device_eval_batch_size="232" \
+ --pad_to_max_length \
+ --learning_rate="0.00031997141195461154" \
+ --warmup_steps="0" \
+ --overwrite_output_dir \
+ --num_train_epochs="9" \
+ --adam_beta1="0.9" \
+ --adam_beta2="0.98" \
+ --adam_epsilon="1e-6" \
+ --logging_steps="1000" \
+ --save_steps="1000" \
+ --eval_steps="1000" \
+ --auth_token="True" \
+ --do_train \
+ --do_eval \
+ --dtype="bfloat16" \
+ --push_to_hub
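
Not part of the commit, but as a usage sketch: the flax_model.msgpack updated above can be loaded for a quick sanity check with the Hugging Face transformers Flax classes. This assumes a local clone of this repository (the "./" path) and the NbAiLab/nb-roberta-base tokenizer referenced in run_128_recover_1e.sh; the example sentence is a placeholder.

# Sketch only: load the Flax checkpoint saved at step 1000 and run one
# masked-LM forward pass. Requires transformers with JAX/Flax support.
from transformers import AutoTokenizer, FlaxRobertaForMaskedLM

model = FlaxRobertaForMaskedLM.from_pretrained("./")     # reads flax_model.msgpack
tokenizer = AutoTokenizer.from_pretrained("NbAiLab/nb-roberta-base")

text = f"Oslo er hovedstaden i {tokenizer.mask_token}."  # placeholder sentence
inputs = tokenizer(text, return_tensors="np")
logits = model(**inputs).logits                          # (batch, seq_len, vocab_size)
print(logits.shape)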