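# Gin operative config for a tiny smoke-test pretraining run of the scalable
# T5 network (prot_gin.scalable_t5). The bindings below are the fully
# resolved values as dumped by gin. A typical launch is a sketch along these
# lines (assuming the standard t5x entry point and that this file is saved
# as scalable_tiny_test_pretrain.gin):
#
#   python -m t5x.train \
#     --gin_file=scalable_tiny_test_pretrain.gin \
#     --gin.MODEL_DIR="'/tmp/t5x/scalable_tiny_test_pretrain/'"
#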
from __gin__ import dynamic_registration
import __main__ as train_script
import prot_gin.scalable_t5.scalablet5network as network
import seqio
from t5x import adafactor
from t5x import gin_utils
from t5x import models
from t5x import partitioning
from t5x import trainer
from t5x import utils
import tasks

# Macros:
# ==============================================================================
BATCH_SIZE = 2
DROPOUT_RATE = 0.0
LABEL_SMOOTHING = 0.0
LOSS_NORMALIZING_FACTOR = None
MIXTURE_OR_TASK_MODULE = None
MIXTURE_OR_TASK_NAME = 'test_task'
MODEL = @models.EncoderDecoderModel()
MODEL_DIR = '/tmp/t5x/scalable_tiny_test_pretrain/'
OPTIMIZER = @adafactor.Adafactor()
RANDOM_SEED = None
SHUFFLE_TRAIN_EXAMPLES = True
TASK_FEATURE_LENGTHS = {'inputs': 512, 'targets': 512}
TRAIN_STEPS = 6000000
USE_CACHED_TASKS = False
USE_HARDWARE_RNG = False
VOCABULARY = @seqio.SentencePieceVocabulary()
Z_LOSS = 0.0001
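# Any macro above can be overridden at launch without editing this file,
# e.g. (assuming the standard t5x --gin.* flags): --gin.TRAIN_STEPS=100000
# or --gin.BATCH_SIZE=32.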

# Parameters for adafactor.Adafactor:
# ==============================================================================
adafactor.Adafactor.decay_rate = 0.8
adafactor.Adafactor.logical_factor_rules = \
    @adafactor.standard_logical_factor_rules()
adafactor.Adafactor.skip_nan_updates = True
adafactor.Adafactor.step_offset = 0
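# Standard T5 Adafactor recipe: second-moment decay rate 0.8, with factoring
# of the second-moment statistics driven by standard_logical_factor_rules.
# skip_nan_updates = True skips any update that would write non-finite
# values into the parameters.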

# Parameters for utils.CheckpointConfig:
# ==============================================================================
utils.CheckpointConfig.restore = @utils.RestoreCheckpointConfig()
utils.CheckpointConfig.save = @utils.SaveCheckpointConfig()

# Parameters for utils.create_learning_rate_scheduler:
# ==============================================================================
utils.create_learning_rate_scheduler.base_learning_rate = 1.0
utils.create_learning_rate_scheduler.factors = 'constant * rsqrt_decay'
utils.create_learning_rate_scheduler.warmup_steps = 10000
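# 'constant * rsqrt_decay' gives
# lr = base_learning_rate / sqrt(max(step, warmup_steps)): flat at
# 1.0 / sqrt(10000) = 0.01 for the first 10k steps, then decaying as
# 1/sqrt(step).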

# Parameters for train/utils.DatasetConfig:
# ==============================================================================
train/utils.DatasetConfig.batch_size = %BATCH_SIZE
train/utils.DatasetConfig.mixture_or_task_name = %MIXTURE_OR_TASK_NAME
train/utils.DatasetConfig.module = %MIXTURE_OR_TASK_MODULE
train/utils.DatasetConfig.pack = False
train/utils.DatasetConfig.seed = None
train/utils.DatasetConfig.shuffle = %SHUFFLE_TRAIN_EXAMPLES
train/utils.DatasetConfig.split = 'train'
train/utils.DatasetConfig.task_feature_lengths = %TASK_FEATURE_LENGTHS
train/utils.DatasetConfig.use_cached = %USE_CACHED_TASKS
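# pack = False feeds one example per row rather than packing several short
# sequences into each (inputs, targets) pair; seed = None leaves training
# shuffles non-deterministic across runs.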

# Parameters for train_eval/utils.DatasetConfig:
# ==============================================================================
train_eval/utils.DatasetConfig.batch_size = %BATCH_SIZE
train_eval/utils.DatasetConfig.mixture_or_task_name = %MIXTURE_OR_TASK_NAME
train_eval/utils.DatasetConfig.module = %MIXTURE_OR_TASK_MODULE
train_eval/utils.DatasetConfig.pack = False
train_eval/utils.DatasetConfig.seed = 42
train_eval/utils.DatasetConfig.shuffle = False
train_eval/utils.DatasetConfig.split = 'validation'
train_eval/utils.DatasetConfig.task_feature_lengths = %TASK_FEATURE_LENGTHS
train_eval/utils.DatasetConfig.use_cached = %USE_CACHED_TASKS
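# Fixed seed and shuffle = False make the 'train_eval' stream (loss/metrics
# on the validation split) deterministic, so eval curves are comparable
# across runs.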

# Parameters for models.EncoderDecoderModel:
# ==============================================================================
models.EncoderDecoderModel.input_vocabulary = %VOCABULARY
models.EncoderDecoderModel.label_smoothing = %LABEL_SMOOTHING
models.EncoderDecoderModel.loss_normalizing_factor = %LOSS_NORMALIZING_FACTOR
models.EncoderDecoderModel.module = @network.Transformer()
models.EncoderDecoderModel.optimizer_def = %OPTIMIZER
models.EncoderDecoderModel.output_vocabulary = %VOCABULARY
models.EncoderDecoderModel.z_loss = %Z_LOSS
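# z_loss = 1e-4 adds a small penalty on the softmax log-normalizer to keep
# logits from drifting; loss_normalizing_factor = None falls back to the
# model's default loss normalization.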

# Parameters for partitioning.PjitPartitioner:
# ==============================================================================
partitioning.PjitPartitioner.logical_axis_rules = \
    @partitioning.standard_logical_axis_rules()
partitioning.PjitPartitioner.model_parallel_submesh = None
partitioning.PjitPartitioner.num_partitions = 1
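# num_partitions = 1 with no explicit submesh: pure data parallelism, no
# model-parallel sharding of parameters or activations.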

# Parameters for utils.RestoreCheckpointConfig:
# ==============================================================================
utils.RestoreCheckpointConfig.path = []

# Parameters for utils.SaveCheckpointConfig:
# ==============================================================================
utils.SaveCheckpointConfig.dtype = 'float32'
utils.SaveCheckpointConfig.keep = 3
utils.SaveCheckpointConfig.period = 40000
utils.SaveCheckpointConfig.save_dataset = False
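# Checkpoints are written every 40k steps in float32, keeping only the 3
# most recent; dataset iterator state is not saved, so a restart replays
# the input pipeline from scratch.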

# Parameters for seqio.SentencePieceVocabulary:
# ==============================================================================
seqio.SentencePieceVocabulary.extra_ids = 225
seqio.SentencePieceVocabulary.sentencepiece_model_file = 'm.model'
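# extra_ids = 225 appends 225 sentinel tokens to the SentencePiece vocab
# (used as mask tokens by span-corruption tasks). 'm.model' is a relative
# path, resolved from the working directory at launch.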

# Parameters for partitioning.standard_logical_axis_rules:
# ==============================================================================
partitioning.standard_logical_axis_rules.activation_partitioning_dims = 1
partitioning.standard_logical_axis_rules.parameter_partitioning_dims = 1
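# With 1-D activation and parameter partitioning (and num_partitions = 1
# above), the standard rules reduce to plain data-parallel sharding.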

# Parameters for network.T5Config:
# ==============================================================================
network.T5Config.dropout_rate = %DROPOUT_RATE
network.T5Config.dtype = 'bfloat16'
network.T5Config.emb_dim = 8
network.T5Config.head_dim = 6
network.T5Config.logits_via_embedding = False
network.T5Config.mlp_activations = ('silu', 'linear')
network.T5Config.mlp_dim = 16
network.T5Config.num_decoder_layers = 3
network.T5Config.num_encoder_layers = 3
network.T5Config.num_heads = 4
network.T5Config.remat_policy = 'minimal'
network.T5Config.scan_layers = True
network.T5Config.vocab_size = 256
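# Deliberately tiny network for a smoke test (cf. MODEL_DIR): 3+3 layers,
# emb_dim 8, mlp_dim 16. ('silu', 'linear') is a gated-SiLU (SwiGLU-style)
# MLP; num_heads * head_dim = 24 need not match emb_dim = 8, since the
# attention output projection maps back to emb_dim. scan_layers = True runs
# the layer stack under lax.scan to cut compile time, and the 'minimal'
# remat policy recomputes most activations in the backward pass (keeping
# matmul outputs) to trade compute for memory. Note vocab_size = 256 must
# be at least the SentencePiece vocab size plus the 225 extra ids.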

# Parameters for train_script.train:
# ==============================================================================
train_script.train.checkpoint_cfg = @utils.CheckpointConfig()
train_script.train.eval_period = 20000
train_script.train.eval_steps = 200
train_script.train.infer_eval_dataset_cfg = None
train_script.train.model = %MODEL
train_script.train.model_dir = %MODEL_DIR
train_script.train.partitioner = @partitioning.PjitPartitioner()
train_script.train.random_seed = %RANDOM_SEED
train_script.train.summarize_config_fn = @gin_utils.summarize_gin_config
train_script.train.total_steps = %TRAIN_STEPS
train_script.train.train_dataset_cfg = @train/utils.DatasetConfig()
train_script.train.train_eval_dataset_cfg = @train_eval/utils.DatasetConfig()
train_script.train.trainer_cls = @trainer.Trainer
train_script.train.use_hardware_rng = %USE_HARDWARE_RNG
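# Training cadence: 6M total steps, evaluating every 20k steps over 200
# batches; inference-style eval (infer_eval) is disabled.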

# Parameters for trainer.Trainer:
# ==============================================================================
trainer.Trainer.learning_rate_fn = @utils.create_learning_rate_scheduler()
trainer.Trainer.num_microbatches = 1
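# num_microbatches = 1 disables gradient accumulation: each step processes
# the full BATCH_SIZE = 2 batch at once.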

# Parameters for network.Transformer:
# ==============================================================================
network.Transformer.config = @network.T5Config()