pere committed on
Commit eff8801
1 Parent(s): dc30f42

Commit from model create scripts

.gitattributes CHANGED
@@ -25,3 +25,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
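
The new attribute line registers tokenizer.json with Git LFS, so the large JSON blob is stored as a pointer rather than in the Git history. A minimal sketch of producing this change from Python, not part of the commit itself (assumes git and git-lfs are installed and the working directory is the repository root; the shell equivalent is: git lfs track "tokenizer.json"):

# Hypothetical sketch: register tokenizer.json with Git LFS.
import subprocess

subprocess.run(["git", "lfs", "track", "tokenizer.json"], check=True)   # appends the attribute line above
subprocess.run(["git", "add", ".gitattributes", "tokenizer.json"], check=True)
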
config.gin ADDED
@@ -0,0 +1,151 @@
+ from __gin__ import dynamic_registration
+ import __main__ as train_script
+ import seqio
+ import t5.data.mixtures
+ from t5x import adafactor
+ from t5x.examples.t5 import network
+ from t5x import gin_utils
+ from t5x import models
+ from t5x import partitioning
+ from t5x import trainer
+ from t5x import utils
+ import tasks
+
+ # Macros:
+ # ==============================================================================
+ BATCH_SIZE = 128
+ DROPOUT_RATE = 0.0
+ INITIAL_CHECKPOINT_PATH = \
+ 'gs://t5-data/pretrained_models/t5x/mt5_base/checkpoint_1000000'
+ LABEL_SMOOTHING = 0.0
+ LOSS_NORMALIZING_FACTOR = None
+ MIXTURE_OR_TASK_MODULE = None
+ MIXTURE_OR_TASK_NAME = 'ncc_english_span_corruption_stream'
+ MODEL = @models.EncoderDecoderModel()
+ MODEL_DIR = 'gs://nb-t5x-us-central2/norwegian_NCC_plus_English_t5x_base'
+ OPTIMIZER = @adafactor.Adafactor()
+ RANDOM_SEED = None
+ SHUFFLE_TRAIN_EXAMPLES = True
+ TASK_FEATURE_LENGTHS = {'inputs': 512, 'targets': 512}
+ TRAIN_STEPS = 1500000
+ USE_CACHED_TASKS = True
+ USE_HARDWARE_RNG = False
+ VOCABULARY = @seqio.SentencePieceVocabulary()
+ Z_LOSS = 0.0001
+
+ # Parameters for adafactor.Adafactor:
+ # ==============================================================================
+ adafactor.Adafactor.decay_rate = 0.8
+ adafactor.Adafactor.logical_factor_rules = \
+ @adafactor.standard_logical_factor_rules()
+ adafactor.Adafactor.step_offset = 0
+
+ # Parameters for utils.CheckpointConfig:
+ # ==============================================================================
+ utils.CheckpointConfig.restore = @utils.RestoreCheckpointConfig()
+ utils.CheckpointConfig.save = @utils.SaveCheckpointConfig()
+
+ # Parameters for utils.create_learning_rate_scheduler:
+ # ==============================================================================
+ utils.create_learning_rate_scheduler.base_learning_rate = 0.5
+ utils.create_learning_rate_scheduler.factors = 'constant * rsqrt_decay'
+ utils.create_learning_rate_scheduler.warmup_steps = 10000
+
+ # Parameters for train/utils.DatasetConfig:
+ # ==============================================================================
+ train/utils.DatasetConfig.batch_size = %BATCH_SIZE
+ train/utils.DatasetConfig.mixture_or_task_name = %MIXTURE_OR_TASK_NAME
+ train/utils.DatasetConfig.module = %MIXTURE_OR_TASK_MODULE
+ train/utils.DatasetConfig.pack = True
+ train/utils.DatasetConfig.seed = None
+ train/utils.DatasetConfig.shuffle = %SHUFFLE_TRAIN_EXAMPLES
+ train/utils.DatasetConfig.split = 'train'
+ train/utils.DatasetConfig.task_feature_lengths = %TASK_FEATURE_LENGTHS
+ train/utils.DatasetConfig.use_cached = %USE_CACHED_TASKS
+
+ # Parameters for train_eval/utils.DatasetConfig:
+ # ==============================================================================
+ train_eval/utils.DatasetConfig.batch_size = %BATCH_SIZE
+ train_eval/utils.DatasetConfig.mixture_or_task_name = %MIXTURE_OR_TASK_NAME
+ train_eval/utils.DatasetConfig.module = %MIXTURE_OR_TASK_MODULE
+ train_eval/utils.DatasetConfig.pack = True
+ train_eval/utils.DatasetConfig.seed = 42
+ train_eval/utils.DatasetConfig.shuffle = False
+ train_eval/utils.DatasetConfig.split = 'validation'
+ train_eval/utils.DatasetConfig.task_feature_lengths = %TASK_FEATURE_LENGTHS
+ train_eval/utils.DatasetConfig.use_cached = %USE_CACHED_TASKS
+
+ # Parameters for models.EncoderDecoderModel:
+ # ==============================================================================
+ models.EncoderDecoderModel.input_vocabulary = %VOCABULARY
+ models.EncoderDecoderModel.label_smoothing = %LABEL_SMOOTHING
+ models.EncoderDecoderModel.loss_normalizing_factor = %LOSS_NORMALIZING_FACTOR
+ models.EncoderDecoderModel.module = @network.Transformer()
+ models.EncoderDecoderModel.optimizer_def = %OPTIMIZER
+ models.EncoderDecoderModel.output_vocabulary = %VOCABULARY
+ models.EncoderDecoderModel.z_loss = %Z_LOSS
+
+ # Parameters for partitioning.PjitPartitioner:
+ # ==============================================================================
+ partitioning.PjitPartitioner.logical_axis_rules = \
+ @partitioning.standard_logical_axis_rules()
+ partitioning.PjitPartitioner.model_parallel_submesh = None
+ partitioning.PjitPartitioner.num_partitions = 4
+
+ # Parameters for utils.RestoreCheckpointConfig:
+ # ==============================================================================
+ utils.RestoreCheckpointConfig.dtype = 'float32'
+ utils.RestoreCheckpointConfig.mode = 'specific'
+ utils.RestoreCheckpointConfig.path = %INITIAL_CHECKPOINT_PATH
+
+ # Parameters for utils.SaveCheckpointConfig:
+ # ==============================================================================
+ utils.SaveCheckpointConfig.dtype = 'float32'
+ utils.SaveCheckpointConfig.keep = None
+ utils.SaveCheckpointConfig.period = 1000
+ utils.SaveCheckpointConfig.save_dataset = False
+
+ # Parameters for seqio.SentencePieceVocabulary:
+ # ==============================================================================
+ seqio.SentencePieceVocabulary.sentencepiece_model_file = \
+ 'gs://t5-data/vocabs/mc4.250000.100extra/sentencepiece.model'
+
+ # Parameters for network.T5Config:
+ # ==============================================================================
+ network.T5Config.dropout_rate = %DROPOUT_RATE
+ network.T5Config.dtype = 'bfloat16'
+ network.T5Config.emb_dim = 768
+ network.T5Config.head_dim = 64
+ network.T5Config.logits_via_embedding = False
+ network.T5Config.mlp_activations = ('gelu', 'linear')
+ network.T5Config.mlp_dim = 2048
+ network.T5Config.num_decoder_layers = 12
+ network.T5Config.num_encoder_layers = 12
+ network.T5Config.num_heads = 12
+ network.T5Config.vocab_size = 250112
+
+ # Parameters for train_script.train:
+ # ==============================================================================
+ train_script.train.checkpoint_cfg = @utils.CheckpointConfig()
+ train_script.train.eval_period = 1000
+ train_script.train.eval_steps = 20
+ train_script.train.infer_eval_dataset_cfg = None
+ train_script.train.model = %MODEL
+ train_script.train.model_dir = %MODEL_DIR
+ train_script.train.partitioner = @partitioning.PjitPartitioner()
+ train_script.train.random_seed = %RANDOM_SEED
+ train_script.train.summarize_config_fn = @gin_utils.summarize_gin_config
+ train_script.train.total_steps = %TRAIN_STEPS
+ train_script.train.train_dataset_cfg = @train/utils.DatasetConfig()
+ train_script.train.train_eval_dataset_cfg = @train_eval/utils.DatasetConfig()
+ train_script.train.trainer_cls = @trainer.Trainer
+ train_script.train.use_hardware_rng = %USE_HARDWARE_RNG
+
+ # Parameters for trainer.Trainer:
+ # ==============================================================================
+ trainer.Trainer.learning_rate_fn = @utils.create_learning_rate_scheduler()
+ trainer.Trainer.num_microbatches = None
+
+ # Parameters for network.Transformer:
+ # ==============================================================================
+ network.Transformer.config = @network.T5Config()
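
Taken together, these bindings describe continued span-corruption pretraining of the public mT5-base checkpoint (restored at step 1,000,000) on the ncc_english_span_corruption_stream mixture up to step 1,500,000. A back-of-the-envelope sketch of the extra training volume, assuming every batch is fully packed to the configured feature lengths:

# Rough estimate only; values are copied from the macros in config.gin above.
batch_size   = 128          # BATCH_SIZE
target_len   = 512          # TASK_FEATURE_LENGTHS['targets']
restore_step = 1_000_000    # step of INITIAL_CHECKPOINT_PATH (mt5_base)
total_steps  = 1_500_000    # TRAIN_STEPS

extra_steps   = total_steps - restore_step
target_tokens = extra_steps * batch_size * target_len
print(f"{extra_steps:,} additional steps")            # 500,000
print(f"~{target_tokens / 1e9:.1f}B target tokens")   # ~32.8B, assuming full packing
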
config.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "_name_or_path": "/home/patrick/hugging_face/t5/mt5-base",
+ "architectures": [
+ "T5ForConditionalGeneration"
+ ],
+ "d_ff": 2048,
+ "d_kv": 64,
+ "d_model": 768,
+ "decoder_start_token_id": 0,
+ "dropout_rate": 0.1,
+ "eos_token_id": 1,
+ "feed_forward_proj": "gated-gelu",
+ "initializer_factor": 1.0,
+ "is_encoder_decoder": true,
+ "layer_norm_epsilon": 1e-06,
+ "model_type": "t5",
+ "num_decoder_layers": 12,
+ "num_heads": 12,
+ "num_layers": 12,
+ "output_past": true,
+ "pad_token_id": 0,
+ "relative_attention_max_distance": 128,
+ "relative_attention_num_buckets": 32,
+ "tie_word_embeddings": false,
+ "tokenizer_class": "T5Tokenizer",
+ "torch_dtype": "float32",
+ "transformers_version": "4.19.2",
+ "use_cache": true,
+ "vocab_size": 250112
+ }
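
config.json is the Hugging Face Transformers counterpart of the T5X model above, with the mT5-base geometry (d_model 768, 12 encoder and 12 decoder layers, vocab_size 250112). A minimal loading sketch, assuming the repository has been downloaded locally; the directory name below is illustrative, not part of the commit:

# Minimal sketch; requires transformers and torch. The local path is hypothetical.
from transformers import T5ForConditionalGeneration

local_dir = "./norwegian_NCC_plus_English_t5x_base"            # wherever this repo was downloaded
model = T5ForConditionalGeneration.from_pretrained(local_dir)  # reads config.json + pytorch_model.bin
print(model.config.d_model, model.config.num_layers, model.config.vocab_size)  # 768 12 250112
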
flax_model.msgpack ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf2aa12e9aff5e8ab5f8a4acc72191cfc1c16d62f1e7c857c6582f58d6266c9d
+ size 2329617315
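
flax_model.msgpack, like the other large files below, is committed as a Git LFS pointer: the oid is the SHA-256 of the real payload and size is its byte count. After a git lfs pull, the downloaded file can be checked against the pointer; a small sketch:

# Sketch: verify a pulled LFS file against the oid recorded in its pointer.
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "bf2aa12e9aff5e8ab5f8a4acc72191cfc1c16d62f1e7c857c6582f58d6266c9d"
print(sha256_of("flax_model.msgpack") == expected)   # True if the pull is intact
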
model-info.txt ADDED
The diff for this file is too large to render.
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b5fb9fc9f1e8db5af8b67ed9408cb4fa0201245261c52af7474ab3e22e766a9
+ size 2329696333
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef78f86560d809067d12bac6c09f19a462cb3af3f54d2b8acbba26e1433125d6
+ size 4309802
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13edf9346263c231557abbd4b42fd847571a957f938e4f48b2d23ec2f1c58acc
+ size 9760
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:93c3578052e1605d8332eb961bc08d72e246071974e4cc54aa6991826b802aa5
+ size 16330369
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "extra_ids": 0, "additional_special_tokens": null, "special_tokens_map_file": "/home/patrick/.cache/torch/transformers/685ac0ca8568ec593a48b61b0a3c272beee9bc194a3c7241d15dcadb5f875e53.f76030f3ec1b96a8199b2593390c610e76ca8028ef3d24680000619ffb646276", "name_or_path": "/home/perk/models/t5_base_NCC", "sp_model_kwargs": {}, "tokenizer_class": "T5Tokenizer"}
train/events.out.tfevents.1649362222.t1v-n-94d01c37-w-3.827182.0.v2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b5f4421b7211a0376fa66ae305adfd3b460a7787941f5c3a90c4b451f20a962
+ size 6406
train/events.out.tfevents.1649395704.t1v-n-94d01c37-w-3.956965.0.v2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c5e29d45b468231e0324e55435e46c3cf9ac78213bfbe76d10a58203e240cc4b
+ size 377495
train/events.out.tfevents.1649485192.t1v-n-94d01c37-w-3.1859834.0.v2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f616a96ba153eb5c48c0e280dd79332617ff8a3b60f093ce81d607539d92c3c
+ size 26803
train/events.out.tfevents.1649496451.t1v-n-94d01c37-w-3.1935179.0.v2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a18cd43f7f7cc56e99520e57c7bd8696c0668ccc83591ffa2a23bac7677145d3
+ size 114583
train/events.out.tfevents.1649516926.t1v-n-94d01c37-w-3.2175409.0.v2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5984ce17ac144f7cb642308957175826d031ee7b27a1373a8ee61b849aa13129
+ size 6406
train/events.out.tfevents.1649517323.t1v-n-94d01c37-w-3.2180017.0.v2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a3937b812b026bc38e782910739daceefdcd60d3f7074a7f456fe07744f15904
+ size 241949
training_eval/ncc_english_span_corruption_stream/events.out.tfevents.1649362223.t1v-n-94d01c37-w-3.827182.1.v2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a20ac495b312cab31afeaed8b2ec16ba30d936370ac0699f053719a3d2f7d235
+ size 40
training_eval/ncc_english_span_corruption_stream/events.out.tfevents.1649395704.t1v-n-94d01c37-w-3.956965.1.v2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6601f76cc4016ce4aa9c4c230f680eb3f9efdf02b4a7d29fd2e472f7a9ceb7b0
+ size 330291
training_eval/ncc_english_span_corruption_stream/events.out.tfevents.1649485192.t1v-n-94d01c37-w-3.1859834.1.v2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3de54634d14196c45990f575014e0e7bbe67d33c5d371288fc591cd5380863e4
+ size 17091
training_eval/ncc_english_span_corruption_stream/events.out.tfevents.1649496451.t1v-n-94d01c37-w-3.1935179.1.v2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32c9e68e9ac4f6fd4bc0e2c95c3b826eeced1e52606cdd8f66b4e79d8be5255a
+ size 95391
training_eval/ncc_english_span_corruption_stream/events.out.tfevents.1649516926.t1v-n-94d01c37-w-3.2175409.1.v2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ccb0ba8c7c12a26a6920d9e534c1691c6b89bd89842c37aa955130fcafb23a78
+ size 40
training_eval/ncc_english_span_corruption_stream/events.out.tfevents.1649517323.t1v-n-94d01c37-w-3.2180017.1.v2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aafe1ea5f46c41bbac9cb160a06ab338ed6c5e545ee02f590cba484b8f44adac
+ size 210231
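
The train/ and training_eval/ event files are TensorBoard logs written during the pretraining run. Besides pointing TensorBoard at the two directories, they can be read programmatically; a small sketch, assuming a local download and an installed tensorboard package (the exact tag names depend on what T5X logged):

# Sketch: inspect one of the event files above with TensorBoard's reader API.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("train")   # directory holding the events.out.tfevents.* files
acc.Reload()
print(acc.Tags())                 # TF2-style summaries typically appear under 'tensors'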