yhavinga committed

Commit 66661d5
Parent: a1155d2

Saving weights at step 180k, loss 1.489, acc 0.684

config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_name_or_path": ".",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "d_ff": 1920,
+   "d_kv": 64,
+   "d_model": 512,
+   "decoder_start_token_id": 0,
+   "dropout_rate": 0.0,
+   "eos_token_id": 1,
+   "feed_forward_proj": "gated-gelu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "num_decoder_layers": 24,
+   "num_heads": 8,
+   "num_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_num_buckets": 32,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.17.0",
+   "use_cache": true,
+   "vocab_size": 32103
+ }
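The config declares `T5ForConditionalGeneration`, and the weights below are a Flax msgpack checkpoint. A minimal loading sketch; the Hub repo id is an assumption inferred from `HF_PROJECT` in run.sh below:

```python
from transformers import T5Config, FlaxT5ForConditionalGeneration

repo_id = "yhavinga/t5-small-24L-dutch-english"  # assumed Hub repo id (HF_PROJECT in run.sh)

config = T5Config.from_pretrained(repo_id)
assert config.num_layers == 24 and config.d_model == 512  # matches config.json above

model = FlaxT5ForConditionalGeneration.from_pretrained(repo_id)
```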
flax_model.msgpack ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0af4cca6d7da446582149b32ad4a41a8a1cba79ebc211a301aa10c33efbffe9
+ size 999990807
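The file above is only a Git LFS pointer; the ~1 GB weights file itself lives in LFS storage. A sketch for fetching the real file with `huggingface_hub` (repo id again assumed from run.sh):

```python
from huggingface_hub import hf_hub_download

# Downloads the actual 999,990,807-byte msgpack, not the 3-line LFS pointer.
path = hf_hub_download(
    repo_id="yhavinga/t5-small-24L-dutch-english",  # assumed Hub repo id
    filename="flax_model.msgpack",
)
print(path)  # path into the local huggingface_hub cache
```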
info.txt ADDED
@@ -0,0 +1,9 @@
+ INFO:__main__: Optimizer = adafactor
+ INFO:__main__: Learning rate (peak) = 0.005
+ INFO:__main__: Num examples = 94558172
+ INFO:__main__: Num tokenized group examples 109037136
+ INFO:__main__: Num Epochs = 1
+ INFO:__main__: Instantaneous batch size per device = 16
+ INFO:__main__: Total train batch size (w. parallel & grad accum) = 128
+ INFO:__main__: Steps per epoch = 851852
+ INFO:__main__: Total optimization steps = 851852
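The logged step counts are internally consistent; a quick arithmetic check:

```python
# 109,037,136 tokenized-and-grouped examples consumed in global batches of 128
# ("Total train batch size (w. parallel & grad accum) = 128" in the log) gives
# the logged 851,852 steps per epoch; with one epoch of training this is also
# the total number of optimization steps.
num_grouped_examples = 109_037_136
global_batch_size = 128
steps_per_epoch = num_grouped_examples // global_batch_size
assert steps_per_epoch == 851_852
```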
opt_state.msgpack ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b35e578bdb0696cf36739fc3289200fbee7dee13f505d396cb5b6caf7907bf72
+ size 3156539
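The optimizer state is strikingly small next to the weights, which is expected for Adafactor: it keeps factored second-moment statistics rather than full per-parameter moments. A rough comparison using the two LFS sizes recorded in this commit:

```python
# Both byte counts come straight from the LFS pointers above.
weights_bytes = 999_990_807  # flax_model.msgpack (float32 parameters)
opt_state_bytes = 3_156_539  # opt_state.msgpack (factored Adafactor state)
print(f"optimizer state is {opt_state_bytes / weights_bytes:.2%} of the weights")  # ~0.32%
```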
run.sh ADDED
@@ -0,0 +1,41 @@
+ export HF_PROJECT="t5-small-24L-dutch-english"
+ export DATASET="yhavinga/mc4_nl_cleaned"        # Name of the dataset on the Hugging Face Hub
+ export DATASET_CONFIG="large_en_nl"             # Config of the dataset on the Hugging Face Hub
+ export DATASET_SPLIT="train"                    # Split to use for training tokenizer and model
+ export CONFIG_NAME="../config/config-small-24L.json"
+ export TOKENIZER_NAME="yhavinga/net5-v1.1-base-cased-500"
+ export MODEL_PATH="${HOME}/data/${HF_PROJECT}"  # Path to the model
+
+ python3 ../train/run_t5_mlm_flax_pmap.py \
+     --output_dir="${MODEL_PATH}" \
+     --model_type="t5" \
+     --config_name="${CONFIG_NAME}" \
+     --tokenizer_name="${TOKENIZER_NAME}" \
+     --auth_token="$(cat ~/.huggingface/token)" \
+     --preprocessing_num_workers="96" \
+     --do_train --do_eval \
+     --dataset_name="${DATASET}" \
+     --dataset_config_name="${DATASET_CONFIG}" \
+     --max_seq_length="512" \
+     --per_device_train_batch_size="16" \
+     --per_device_eval_batch_size="32" \
+     --dtype="float32" \
+     --optim="adafactor" \
+     --learning_rate="0.005" \
+     --lr_decay="linear" \
+     --overwrite_output_dir \
+     --num_train_epochs="1" \
+     --logging_steps="100" \
+     --save_steps="5000" \
+     --eval_steps="5000" \
+     --warmup_steps="20000" \
+     --validation_split_count="15000" \
+     --wandb_project="t5-small-24L-dutch-english" \
+     --wandb_job_type="pmap"
+
+ # \
+ #     --resume_from_checkpoint="${MODEL_PATH}"
+ #     --lr_decay="exponential" \
+ #     --lr_transition_steps="200000" \
+ #     --lr_decay_rate="0.7" \
+ #     --lr_staircase="false" \
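The flags select Adafactor with a 0.005 peak learning rate, 20,000 warmup steps, and linear decay (`--lr_decay="linear"`). The actual schedule is built inside run_t5_mlm_flax_pmap.py, which is not part of this diff; a plausible optax sketch under those assumptions, decaying to zero over the 851,852 total steps from info.txt:

```python
import optax

peak_lr = 0.005        # --learning_rate
warmup_steps = 20_000  # --warmup_steps
total_steps = 851_852  # "Total optimization steps" from info.txt

# Linear warmup to the peak, then linear decay to 0 for the rest of the epoch.
schedule = optax.join_schedules(
    schedules=[
        optax.linear_schedule(0.0, peak_lr, transition_steps=warmup_steps),
        optax.linear_schedule(peak_lr, 0.0, transition_steps=total_steps - warmup_steps),
    ],
    boundaries=[warmup_steps],
)
optimizer = optax.adafactor(learning_rate=schedule)  # --optim="adafactor"
```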
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"]}
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "extra_ids": 100, "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"], "special_tokens_map_file": null, "name_or_path": "yhavinga/net5-v1.1-base-cased-500", "tokenizer_class": "T5Tokenizer"}
training_state.json ADDED
@@ -0,0 +1 @@
+ {"step": 180001}