wei committed
Commit cfee926
1 parent: b1c65c7

Update from weiding

config.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "architectures": [
+ "T5Model"
+ ],
+ "d_ff": 4096,
+ "d_kv": 64,
+ "d_model": 1024,
+ "decoder_start_token_id": 0,
+ "dropout_rate": 0.1,
+ "eos_token_id": 1,
+ "initializer_factor": 1.0,
+ "is_encoder_decoder": true,
+ "layer_norm_epsilon": 1e-06,
+ "model_type": "t5",
+ "n_positions": 512,
+ "num_decoder_layers": 24,
+ "num_heads": 16,
+ "num_layers": 24,
+ "output_past": true,
+ "pad_token_id": 0,
+ "relative_attention_num_buckets": 32,
+ "task_specific_params": {
+ "summarization": {
+ "max_length": 512,
+ "num_beams": 4,
+ "prefix": "code comment java: "
+ }
+ },
+ "vocab_size": 32128
+ }
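
The task_specific_params block points to the intended use: Java code-comment generation through the T5 text-to-text interface, with the "code comment java: " prefix and beam search. A minimal usage sketch with Hugging Face transformers follows; the local path and the example Java method are assumptions rather than part of this commit, and the conditional-generation head is a standard way to drive a T5 checkpoint even though config.json lists T5Model.

    # Minimal sketch: load this checkpoint and generate a Java code comment.
    # Assumptions: the committed files live in ./code-comment-t5, and the
    # example Java snippet below is invented for illustration.
    from transformers import T5ForConditionalGeneration, T5Tokenizer

    model_dir = "./code-comment-t5"
    tokenizer = T5Tokenizer.from_pretrained(model_dir)   # reads spiece.model
    model = T5ForConditionalGeneration.from_pretrained(model_dir)

    java = "public int add(int a, int b) { return a + b; }"
    # Prefix and decoding settings taken from task_specific_params above.
    inputs = tokenizer("code comment java: " + java, return_tensors="pt")
    out = model.generate(**inputs, max_length=512, num_beams=4)
    print(tokenizer.decode(out[0], skip_special_tokens=True))
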
operative_config.gin ADDED
@@ -0,0 +1,326 @@
+ import mesh_tensorflow.optimize
+ import mesh_tensorflow.transformer.learning_rate_schedules
+ import mesh_tensorflow.transformer.transformer
+ import mesh_tensorflow.transformer.transformer_layers
+ import t5.data.mixtures
+ import t5.data.sentencepiece_vocabulary
+ import t5.models.mesh_transformer
+
+ # Macros:
+ # ==============================================================================
+ d_ff = 4096
+ d_kv = 64
+ d_model = 1024
+ dropout_rate = 0.1
+ inputs_length = 512
+ mean_noise_span_length = 3.0
+ MIXTURE_NAME = 'code_comment_generation'
+ noise_density = 0.15
+ num_heads = 16
+ num_layers = 24
+ targets_length = @preprocessors.random_spans_targets_length()
+
+ # Parameters for AdafactorOptimizer:
+ # ==============================================================================
+ AdafactorOptimizer.beta1 = 0.0
+ AdafactorOptimizer.clipping_threshold = 1.0
+ AdafactorOptimizer.decay_rate = None
+ AdafactorOptimizer.epsilon1 = 1e-30
+ AdafactorOptimizer.epsilon2 = 0.001
+ AdafactorOptimizer.factored = True
+ AdafactorOptimizer.min_dim_size_to_factor = 128
+ AdafactorOptimizer.multiply_by_parameter_scale = True
+
+ # Parameters for Bitransformer:
+ # ==============================================================================
+ Bitransformer.shared_embedding = True
+
+ # Parameters for decoder/DenseReluDense:
+ # ==============================================================================
+ decoder/DenseReluDense.activation = 'relu'
+ decoder/DenseReluDense.dropout_rate = %dropout_rate
+ decoder/DenseReluDense.hidden_size = %d_ff
+ decoder/DenseReluDense.use_bias = False
+
+ # Parameters for encoder/DenseReluDense:
+ # ==============================================================================
+ encoder/DenseReluDense.activation = 'relu'
+ encoder/DenseReluDense.dropout_rate = %dropout_rate
+ encoder/DenseReluDense.hidden_size = %d_ff
+ encoder/DenseReluDense.use_bias = False
+
+ # Parameters for enc_dec_attention:
+ # ==============================================================================
+ # None.
+
+ # Parameters for enc_dec_attention_bias:
+ # ==============================================================================
+ # None.
+
+ # Parameters for decoder/EncDecAttention:
+ # ==============================================================================
+ decoder/EncDecAttention.relative_attention_type = None
+
+ # Parameters for get_variable_dtype:
+ # ==============================================================================
+ get_variable_dtype.activation_dtype = 'bfloat16'
+
+ # Parameters for get_vocab_embedding_cls:
+ # ==============================================================================
+ # None.
+
+ # Parameters for get_vocabulary:
+ # ==============================================================================
+ get_vocabulary.mixture_or_task_name = %MIXTURE_NAME
+
+ # Parameters for decoder/LayerStack:
+ # ==============================================================================
+ decoder/LayerStack.dropout_rate = None
+ decoder/LayerStack.norm_epsilon = None
+ decoder/LayerStack.recompute_grads = False
+ decoder/LayerStack.sublayers_final = \
+     [@transformer.sublayer_rms_norm, @transformer.sublayer_dropout]
+ decoder/LayerStack.sublayers_initial = [@transformer.sublayer_dropout]
+ decoder/LayerStack.sublayers_per_layer = \
+     [@transformer.sublayer_rms_norm,
+      @transformer.sublayer_call_layer,
+      @transformer.sublayer_dropout,
+      @transformer.sublayer_residual]
+
+ # Parameters for encoder/LayerStack:
+ # ==============================================================================
+ encoder/LayerStack.dropout_rate = None
+ encoder/LayerStack.norm_epsilon = None
+ encoder/LayerStack.recompute_grads = False
+ encoder/LayerStack.sublayers_final = \
+     [@transformer.sublayer_rms_norm, @transformer.sublayer_dropout]
+ encoder/LayerStack.sublayers_initial = [@transformer.sublayer_dropout]
+ encoder/LayerStack.sublayers_per_layer = \
+     [@transformer.sublayer_rms_norm,
+      @transformer.sublayer_call_layer,
+      @transformer.sublayer_dropout,
+      @transformer.sublayer_residual]
+
+ # Parameters for learning_rate_schedule_noam:
+ # ==============================================================================
+ learning_rate_schedule_noam.linear_decay_fraction = 0.0
+ learning_rate_schedule_noam.multiplier = 1.0
+ learning_rate_schedule_noam.offset = 0
+ learning_rate_schedule_noam.warmup_steps = 10000
+
+ # Parameters for make_bitransformer:
+ # ==============================================================================
+ make_bitransformer.decoder_name = 'decoder'
+ make_bitransformer.encoder_name = 'encoder'
+
+ # Parameters for decoder/make_layer_stack:
+ # ==============================================================================
+ decoder/make_layer_stack.block_scope = True
+ decoder/make_layer_stack.layers = \
+     [@mesh_tensorflow.transformer.transformer_layers.SelfAttention,
+      @mesh_tensorflow.transformer.transformer_layers.EncDecAttention,
+      @mesh_tensorflow.transformer.transformer_layers.DenseReluDense]
+ decoder/make_layer_stack.num_layers = %num_layers
+
+ # Parameters for encoder/make_layer_stack:
+ # ==============================================================================
+ encoder/make_layer_stack.block_scope = True
+ encoder/make_layer_stack.layers = \
+     [@mesh_tensorflow.transformer.transformer_layers.SelfAttention,
+      @mesh_tensorflow.transformer.transformer_layers.DenseReluDense]
+ encoder/make_layer_stack.num_layers = %num_layers
+
+ # Parameters for maybe_print_dataset:
+ # ==============================================================================
+ maybe_print_dataset.should_print = False
+
+ # Parameters for mesh_train_dataset_fn:
+ # ==============================================================================
+ mesh_train_dataset_fn.mixture_or_task_name = %MIXTURE_NAME
+ mesh_train_dataset_fn.use_cached = False
+
+ # Parameters for pack_dataset:
+ # ==============================================================================
+ pack_dataset.use_custom_ops = False
+
+ # Parameters for pack_or_pad:
+ # ==============================================================================
+ # None.
+
+ # Parameters for random_spans_helper:
+ # ==============================================================================
+ # None.
+
+ # Parameters for targets_length/random_spans_helper:
+ # ==============================================================================
+ targets_length/random_spans_helper.extra_tokens_per_span_inputs = 1
+ targets_length/random_spans_helper.extra_tokens_per_span_targets = 1
+ targets_length/random_spans_helper.inputs_length = %inputs_length
+ targets_length/random_spans_helper.mean_noise_span_length = %mean_noise_span_length
+ targets_length/random_spans_helper.noise_density = %noise_density
+
+ # Parameters for targets_length/random_spans_targets_length:
+ # ==============================================================================
+ # None.
+
+ # Parameters for run:
+ # ==============================================================================
+ run.autostack = True
+ run.batch_size = ('tokens_per_batch', 131072)
+ run.dataset_split = 'train'
+ run.ensemble_inputs = None
+ run.eval_checkpoint_step = None
+ run.eval_dataset_fn = None
+ run.eval_summary_dir = None
+ run.export_checkpoint_step = None
+ run.export_path = ''
+ run.init_checkpoint = \
+     'gs://t5example/ms/models/unsupervised_large_4096/model.ckpt-240000'
+ run.iterations_per_loop = 1000
+ run.keep_checkpoint_max = None
+ run.layout_rules = \
+     'ensemble:ensemble,batch:batch,d_ff:model,heads:model,vocab:model,experts:batch'
+ run.learning_rate_schedule = @learning_rate_schedules.learning_rate_schedule_noam
+ run.mesh_devices = None
+ run.mesh_shape = @mesh_tensorflow.transformer.utils.tpu_mesh_shape()
+ run.mode = 'train'
+ run.model_type = 'bitransformer'
+ run.optimizer = @optimize.AdafactorOptimizer
+ run.perplexity_eval_steps = 100
+ run.predict_fn = None
+ run.save_checkpoints_steps = 9000
+ run.sequence_length = {'inputs': %inputs_length, 'targets': %targets_length}
+ run.train_dataset_fn = @t5.models.mesh_transformer.mesh_train_dataset_fn
+ run.train_steps = 390000
+ run.variable_filter = None
+ run.vocabulary = @mesh_transformer.get_vocabulary()
+
+ # Parameters for decoder/SelfAttention:
+ # ==============================================================================
+ decoder/SelfAttention.attention_func = None
+ decoder/SelfAttention.attention_kwargs = None
+ decoder/SelfAttention.combine_dims = True
+ decoder/SelfAttention.dropout_rate = %dropout_rate
+ decoder/SelfAttention.fold_scaling_into_initializer = True
+ decoder/SelfAttention.keep_query_heads_dims = False
+ decoder/SelfAttention.key_value_size = %d_kv
+ decoder/SelfAttention.num_heads = %num_heads
+ decoder/SelfAttention.num_memory_heads = 0
+ decoder/SelfAttention.relative_attention_num_buckets = 32
+ decoder/SelfAttention.relative_attention_type = 'bias_shared'
+ decoder/SelfAttention.shared_kv = False
+
+ # Parameters for encoder/SelfAttention:
+ # ==============================================================================
+ encoder/SelfAttention.attention_func = None
+ encoder/SelfAttention.attention_kwargs = None
+ encoder/SelfAttention.combine_dims = True
+ encoder/SelfAttention.dropout_rate = %dropout_rate
+ encoder/SelfAttention.fold_scaling_into_initializer = True
+ encoder/SelfAttention.keep_query_heads_dims = False
+ encoder/SelfAttention.key_value_size = %d_kv
+ encoder/SelfAttention.num_heads = %num_heads
+ encoder/SelfAttention.num_memory_heads = 0
+ encoder/SelfAttention.relative_attention_num_buckets = 32
+ encoder/SelfAttention.relative_attention_type = 'bias_shared'
+ encoder/SelfAttention.shared_kv = False
+
+ # Parameters for SentencePieceVocabulary:
+ # ==============================================================================
+ # None.
+
+ # Parameters for serialize_num_microbatches:
+ # ==============================================================================
+ serialize_num_microbatches.tokens_per_microbatch_per_replica = 2048
+
+ # Parameters for shift_targets:
+ # ==============================================================================
+ shift_targets.bos_id = 0
+ shift_targets.eos_id = 1
+
+ # Parameters for sublayer_call_layer:
+ # ==============================================================================
+ # None.
+
+ # Parameters for sublayer_dropout:
+ # ==============================================================================
+ sublayer_dropout.dropout_rate = %dropout_rate
+
+ # Parameters for sublayer_mask_padding:
+ # ==============================================================================
+ # None.
+
+ # Parameters for sublayer_residual:
+ # ==============================================================================
+ # None.
+
+ # Parameters for sublayer_rms_norm:
+ # ==============================================================================
+ sublayer_rms_norm.epsilon = 1e-06
+ sublayer_rms_norm.name = 'rms_norm'
+
+ # Parameters for tpu_estimator_model_fn:
+ # ==============================================================================
+ tpu_estimator_model_fn.hierarchical_tiling_spec = None
+ tpu_estimator_model_fn.model_info_file = None
+ tpu_estimator_model_fn.outer_batch_size = 1
+ tpu_estimator_model_fn.score_in_predict_mode = False
+ tpu_estimator_model_fn.tpu_summaries = False
+
+ # Parameters for tpu_mesh_shape:
+ # ==============================================================================
+ tpu_mesh_shape.ensemble_parallelism = None
+ tpu_mesh_shape.model_parallelism = 1
+ tpu_mesh_shape.tpu_topology = 'v3-8'
+
+ # Parameters for unit_scaling_convention:
+ # ==============================================================================
+ unit_scaling_convention.value = False
+
+ # Parameters for decoder/Unitransformer:
+ # ==============================================================================
+ decoder/Unitransformer.d_model = %d_model
+ decoder/Unitransformer.ensemble = None
+ decoder/Unitransformer.input_full_attention = False
+ decoder/Unitransformer.label_smoothing = 0.0
+ decoder/Unitransformer.loss_denominator = None
+ decoder/Unitransformer.loss_fn = None
+ decoder/Unitransformer.loss_on_targets_only = False
+ decoder/Unitransformer.max_length = 512
+ decoder/Unitransformer.positional_embedding = False
+ decoder/Unitransformer.shared_embedding_and_softmax_weights = True
+ decoder/Unitransformer.sinusoid_positional_embedding = False
+ decoder/Unitransformer.token_dropout_rate = 0.0
+ decoder/Unitransformer.vocab_divisor = 128
+ decoder/Unitransformer.z_loss = 0.0001
+
+ # Parameters for encoder/Unitransformer:
+ # ==============================================================================
+ encoder/Unitransformer.d_model = %d_model
+ encoder/Unitransformer.ensemble = None
+ encoder/Unitransformer.input_full_attention = False
+ encoder/Unitransformer.label_smoothing = 0.0
+ encoder/Unitransformer.loss_denominator = None
+ encoder/Unitransformer.loss_fn = None
+ encoder/Unitransformer.loss_on_targets_only = False
+ encoder/Unitransformer.max_length = 512
+ encoder/Unitransformer.positional_embedding = False
+ encoder/Unitransformer.shared_embedding_and_softmax_weights = True
+ encoder/Unitransformer.sinusoid_positional_embedding = False
+ encoder/Unitransformer.token_dropout_rate = 0.0
+ encoder/Unitransformer.vocab_divisor = 128
+ encoder/Unitransformer.z_loss = 0.0001
+
+ # Parameters for VarianceScalingInitializer:
+ # ==============================================================================
+ VarianceScalingInitializer.distribution = 'normal'
+ VarianceScalingInitializer.mode = 'fan_in'
+ VarianceScalingInitializer.scale = 1.0
+
+ # Parameters for VocabEmbedding:
+ # ==============================================================================
+ VocabEmbedding.scale_variable_like_classifier_weights = False
+
+ # Parameters for Vocabulary:
+ # ==============================================================================
+ # None.
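
In the macros above, targets_length is not a literal: it is derived from inputs_length = 512, noise_density = 0.15, and mean_noise_span_length = 3.0 by the random_spans_helper configured above. The sketch below is a rough paraphrase of t5.data.preprocessors.random_spans_helper as I understand it, so treat it as illustrative rather than the exact library code.

    # Sketch of the span-corruption length arithmetic behind
    # targets_length = @preprocessors.random_spans_targets_length().
    # Paraphrased from t5.data.preprocessors.random_spans_helper; illustrative only.

    def corrupted_lengths(raw_length, noise_density=0.15, mean_span=3.0,
                          extra_inputs=1, extra_targets=1):
        num_noise = int(round(raw_length * noise_density))
        num_spans = max(1, int(round(num_noise / mean_span)))
        # Inputs keep the non-noise tokens, one sentinel per noise span, and EOS;
        # targets hold the noise tokens, one sentinel per span, and EOS.
        inputs = raw_length - num_noise + num_spans * extra_inputs + 1
        targets = num_noise + num_spans * extra_targets + 1
        return inputs, targets

    # Grow the raw length until the corrupted inputs would exceed 512 tokens.
    raw = 512
    while corrupted_lengths(raw + 1)[0] <= 512:
        raw += 1
    print(corrupted_lengths(raw))  # -> (512, 114) with the macro values above

With these values %targets_length should resolve to about 114, which is what run.sequence_length feeds the decoder during pre-training.
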
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ca24f11e0bb3efe2fd53e4b2ee1086d341b6b716c065f5635e40229db499976
+ size 2950910481
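
The three lines above are a Git LFS pointer, not the weights themselves: the sha256 oid identifies the blob and size gives its byte count (~2.95 GB). One hedged way to fetch the resolved file in Python; the repo id is a placeholder, not something stated in this commit.

    from huggingface_hub import hf_hub_download

    # "<user>/<repo>" is a placeholder; substitute the repo this commit lives in.
    local_path = hf_hub_download(repo_id="<user>/<repo>",
                                 filename="pytorch_model.bin",
                                 revision="cfee926")
    print(local_path)  # cached path to the resolved 2,950,910,481-byte file
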
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"]}
spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9856b76e9978cc5805f0566cedabd2fc7bdb1a3ee22d52545100c056cb09a59c
+ size 797030
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": false}