aapot committed on
Commit cf40949
1 Parent(s): 5f9fdcd

Saving weights and logs of step 10000

events.out.tfevents.1629902662.t1v-n-1ae8dadb-w-0.181842.0.v2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:830bfc14ddfb9e53f03a7c79030b4fc5ddee472ee809578fca2e3d227a8cc3c7
+ size 1470757
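The added events file is the TensorBoard log written during this run; only a Git LFS pointer is committed, with the ~1.4 MB payload stored in LFS. A minimal sketch for inspecting its scalars locally, assuming the file has been fetched with `git lfs pull` and that the `tensorboard` package is installed (the scalar tag names are an assumption, not taken from this commit):

    from tensorboard.backend.event_processing import event_accumulator

    # Path is the file added in this commit; adjust to your local checkout.
    ea = event_accumulator.EventAccumulator(
        "events.out.tfevents.1629902662.t1v-n-1ae8dadb-w-0.181842.0.v2"
    )
    ea.Reload()                     # parse the event records from disk
    tags = ea.Tags()["scalars"]     # e.g. train_loss / eval_loss tags logged during training
    print(tags)
    for event in ea.Scalars(tags[0]):
        print(event.step, event.value)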
flax_model.msgpack CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:314d30f9c83fc85ad87e87e0d996870c813bd69c8c04678a1aadeae47b00397c
+ oid sha256:9c8274c4dd16c72c5f36f1e4441851a0dd44dd2f1fe464977bbf43174e6b6a12
  size 711588089
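flax_model.msgpack holds the updated model parameters at step 10000, again committed as an LFS pointer (the real payload is ~712 MB). A minimal sketch for restoring the raw parameter tree with Flax, assuming the file has been pulled via `git lfs pull`:

    from flax import serialization

    # Read the serialized parameter pytree saved at step 10000.
    with open("flax_model.msgpack", "rb") as f:
        params = serialization.msgpack_restore(f.read())

    # Top-level keys are the model's module names (layout depends on the architecture).
    print(list(params.keys()))

Alternatively, loading the repository directory with transformers' FlaxAutoModelForMaskedLM.from_pretrained rebuilds the full model around these weights, using the config.json stored alongside them.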
start_train.sh CHANGED
@@ -10,8 +10,8 @@ python3 run_mlm_flax.py \
  --max_seq_length="128" \
  --pad_to_max_length \
  --preprocessing_num_workers="96" \
- --per_device_train_batch_size="128" \
- --per_device_eval_batch_size="128" \
+ --per_device_train_batch_size="64" \
+ --per_device_eval_batch_size="64" \
  --learning_rate="3e-4" \
  --warmup_steps="1000" \
  --overwrite_output_dir \
@@ -22,5 +22,4 @@ python3 run_mlm_flax.py \
  --eval_steps="10000" \
  --logging_steps="1000" \
  --dtype="bfloat16" \
- --adafactor \
  --push_to_hub
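The script changes halve the per-device batch size from 128 to 64 and drop the --adafactor flag, so run_mlm_flax.py falls back to its AdamW optimizer. A rough optax sketch of what that switch amounts to; the total step count and weight decay below are assumptions for illustration, not values taken from this diff:

    import optax

    use_adafactor = False        # the removed --adafactor flag
    learning_rate = 3e-4         # --learning_rate
    warmup_steps = 1000          # --warmup_steps
    total_steps = 100_000        # assumed total number of training steps

    # Linear warmup to the peak rate, then linear decay back to zero.
    schedule = optax.join_schedules(
        schedules=[
            optax.linear_schedule(0.0, learning_rate, warmup_steps),
            optax.linear_schedule(learning_rate, 0.0, total_steps - warmup_steps),
        ],
        boundaries=[warmup_steps],
    )

    if use_adafactor:
        # Memory-light optimizer; what --adafactor selected before this commit.
        optimizer = optax.adafactor(learning_rate=schedule)
    else:
        # Path taken after removing the flag; weight_decay value assumed.
        optimizer = optax.adamw(learning_rate=schedule, weight_decay=0.01)

AdamW keeps two moment estimates per parameter, so it needs noticeably more device memory than Adafactor; reducing the per-device batch size is a common way to compensate when making this switch.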