stefan-it committed on
Commit
1b28e49
1 Parent(s): 03e0a04

model: add initial version

config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "t5-efficient-large-nl36",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "d_ff": 4096,
+   "d_kv": 64,
+   "d_model": 1024,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "relu",
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "relu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": false,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "n_positions": 512,
+   "num_decoder_layers": 36,
+   "num_heads": 16,
+   "num_layers": 36,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "torch_dtype": "float32",
+   "transformers_version": "4.23.0.dev0",
+   "use_cache": true,
+   "vocab_size": 32128
+ }
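
The configuration above describes a T5 encoder-decoder with 36 layers on each side (d_model 1024, 16 heads, d_ff 4096). A minimal sketch of loading the checkpoint with transformers, assuming a local Git LFS clone of this repository (the directory name below is illustrative, not part of this commit):

from transformers import T5ForConditionalGeneration, T5Tokenizer

# Hypothetical local path to a `git lfs` clone of this repository.
model_dir = "./t5-efficient-large-nl36"

tokenizer = T5Tokenizer.from_pretrained(model_dir)
model = T5ForConditionalGeneration.from_pretrained(model_dir)

# config.json above implies a 36-layer encoder and a 36-layer decoder.
assert model.config.num_layers == 36
assert model.config.num_decoder_layers == 36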
events.out.tfevents.1662714395.t5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3007a86c1559d58d6dbc091c2a1add83ac162d0945228d9ed3a9a58607b5bd64
+ size 145181617
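
This training-events file, like the other large binaries in this commit, is stored as a Git LFS pointer: three key-value lines giving the spec version, the SHA-256 object id, and the byte size. A minimal sketch of reading such a pointer, assuming only the v1 layout shown above:

# Parse a Git LFS v1 pointer file into a dict of its key-value lines.
# Real pointers may carry additional keys; this passes them through untouched.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:3007a86c1559d58d6dbc091c2a1add83ac162d0945228d9ed3a9a58607b5bd64\n"
    "size 145181617\n"
)
assert pointer["size"] == "145181617"  # ~145 MB of TensorBoard event data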
operative_config.gin ADDED
@@ -0,0 +1,380 @@
+ import mesh_tensorflow.optimize
+ import mesh_tensorflow.transformer.dataset as mesh_tensorflow2
+ import mesh_tensorflow.transformer.learning_rate_schedules as mesh_tensorflow3
+ import mesh_tensorflow.transformer.t2t_vocabulary as mesh_tensorflow4
+ import mesh_tensorflow.transformer.transformer as mesh_tensorflow5
+ import mesh_tensorflow.transformer.transformer_layers as mesh_tensorflow6
+ import mesh_tensorflow.transformer.utils as mesh_tensorflow7
+ import t5.models.mesh_transformer
+
+ # Macros:
+ # ==============================================================================
+ d_ff = 4096
+ d_kv = 64
+ d_model = 1024
+ dropout_rate = 0.0
+ inputs_length = 512
+ mean_noise_span_length = 3.0
+ MIXTURE_NAME = 'tr_corpus'
+ noise_density = 0.15
+ num_heads = 16
+ num_layers = 36
+
+ # Parameters for adafactor_decay_rate_pow:
+ # ==============================================================================
+ adafactor_decay_rate_pow.exponent = 0.8
+ adafactor_decay_rate_pow.offset = 0
+
+ # Parameters for AdafactorOptimizer:
+ # ==============================================================================
+ AdafactorOptimizer.beta1 = 0.0
+ AdafactorOptimizer.clipping_threshold = 1.0
+ AdafactorOptimizer.decay_rate = None
+ AdafactorOptimizer.epsilon1 = 1e-30
+ AdafactorOptimizer.epsilon2 = 0.001
+ AdafactorOptimizer.exclude_from_parameter_scale = None
+ AdafactorOptimizer.factored = True
+ AdafactorOptimizer.min_dim_size_to_factor = 128
+ AdafactorOptimizer.multiply_by_parameter_scale = True
+ AdafactorOptimizer.stacked_dim_names = None
+
+ # Parameters for Bitransformer:
+ # ==============================================================================
+ Bitransformer.shared_embedding = True
+
+ # Parameters for denoise:
+ # ==============================================================================
+ denoise.passthrough_feature_keys = None
+
+ # Parameters for decoder/DenseReluDense:
+ # ==============================================================================
+ decoder/DenseReluDense.activation = 'relu'
+ decoder/DenseReluDense.dropout_rate = %dropout_rate
+ decoder/DenseReluDense.hidden_size = %d_ff
+ decoder/DenseReluDense.use_bias = False
+
+ # Parameters for encoder/DenseReluDense:
+ # ==============================================================================
+ encoder/DenseReluDense.activation = 'relu'
+ encoder/DenseReluDense.dropout_rate = %dropout_rate
+ encoder/DenseReluDense.hidden_size = %d_ff
+ encoder/DenseReluDense.use_bias = False
+
+ # Parameters for enc_dec_attention:
+ # ==============================================================================
+ # None.
+
+ # Parameters for enc_dec_attention_bias:
+ # ==============================================================================
+ # None.
+
+ # Parameters for decoder/EncDecAttention:
+ # ==============================================================================
+ decoder/EncDecAttention.relative_attention_type = None
+
+ # Parameters for get_variable_dtype:
+ # ==============================================================================
+ get_variable_dtype.activation_dtype = 'bfloat16'
+
+ # Parameters for get_vocab_embedding_cls:
+ # ==============================================================================
+ # None.
+
+ # Parameters for get_vocabulary:
+ # ==============================================================================
+ get_vocabulary.mixture_or_task_name = %MIXTURE_NAME
+
+ # Parameters for decoder/LayerStack:
+ # ==============================================================================
+ decoder/LayerStack.dropout_rate = None
+ decoder/LayerStack.norm_epsilon = None
+ decoder/LayerStack.recompute_grads = False
+ decoder/LayerStack.sublayers_final = \
+     [@transformer.sublayer_rms_norm, @transformer.sublayer_dropout]
+ decoder/LayerStack.sublayers_initial = [@transformer.sublayer_dropout]
+ decoder/LayerStack.sublayers_per_layer = \
+     [@transformer.sublayer_rms_norm,
+      @transformer.sublayer_call_layer,
+      @transformer.sublayer_dropout,
+      @transformer.sublayer_residual]
+
+ # Parameters for encoder/LayerStack:
+ # ==============================================================================
+ encoder/LayerStack.dropout_rate = None
+ encoder/LayerStack.norm_epsilon = None
+ encoder/LayerStack.recompute_grads = False
+ encoder/LayerStack.sublayers_final = \
+     [@transformer.sublayer_rms_norm, @transformer.sublayer_dropout]
+ encoder/LayerStack.sublayers_initial = [@transformer.sublayer_dropout]
+ encoder/LayerStack.sublayers_per_layer = \
+     [@transformer.sublayer_rms_norm,
+      @transformer.sublayer_call_layer,
+      @transformer.sublayer_dropout,
+      @transformer.sublayer_residual]
+
+ # Parameters for learning_rate_schedule_noam:
+ # ==============================================================================
+ learning_rate_schedule_noam.linear_decay_fraction = 0.0
+ learning_rate_schedule_noam.multiplier = 1.0
+ learning_rate_schedule_noam.offset = 0
+ learning_rate_schedule_noam.warmup_steps = 10000
+
+ # Parameters for make_bitransformer:
+ # ==============================================================================
+ make_bitransformer.decoder_name = 'decoder'
+ make_bitransformer.encoder_name = 'encoder'
+
+ # Parameters for decoder/make_layer_stack:
+ # ==============================================================================
+ decoder/make_layer_stack.block_scope = True
+ decoder/make_layer_stack.layers = \
+     [@mesh_tensorflow.transformer.transformer_layers.SelfAttention,
+      @mesh_tensorflow.transformer.transformer_layers.EncDecAttention,
+      @mesh_tensorflow.transformer.transformer_layers.DenseReluDense]
+ decoder/make_layer_stack.num_layers = %num_layers
+
+ # Parameters for encoder/make_layer_stack:
+ # ==============================================================================
+ encoder/make_layer_stack.block_scope = True
+ encoder/make_layer_stack.layers = \
+     [@mesh_tensorflow.transformer.transformer_layers.SelfAttention,
+      @mesh_tensorflow.transformer.transformer_layers.DenseReluDense]
+ encoder/make_layer_stack.num_layers = %num_layers
+
+ # Parameters for mesh_train_dataset_fn:
+ # ==============================================================================
+ mesh_train_dataset_fn.mixture_or_task_name = %MIXTURE_NAME
+ mesh_train_dataset_fn.pack = True
+ mesh_train_dataset_fn.seed = None
+ mesh_train_dataset_fn.shuffle = True
+ mesh_train_dataset_fn.use_cached = False
+
+ # Parameters for noise_span_to_unique_sentinel:
+ # ==============================================================================
+ # None.
+
+ # Parameters for nonnoise_span_to_unique_sentinel:
+ # ==============================================================================
+ # None.
+
+ # Parameters for pack_dataset:
+ # ==============================================================================
+ pack_dataset.use_custom_ops = False
+
+ # Parameters for pack_or_pad:
+ # ==============================================================================
+ # None.
+
+ # Parameters for random_spans_helper:
+ # ==============================================================================
+ random_spans_helper.extra_tokens_per_span_inputs = 1
+ random_spans_helper.extra_tokens_per_span_targets = 1
+ random_spans_helper.inputs_length = %inputs_length
+ random_spans_helper.mean_noise_span_length = %mean_noise_span_length
+ random_spans_helper.noise_density = %noise_density
+ random_spans_helper.verbose = False
+
+ # Parameters for random_spans_noise_mask:
+ # ==============================================================================
+ # None.
+
+ # Parameters for random_spans_tokens_length:
+ # ==============================================================================
+ # None.
+
+ # Parameters for reduce_concat_tokens:
+ # ==============================================================================
+ # None.
+
+ # Parameters for rewrite_stack_variables:
+ # ==============================================================================
+ rewrite_stack_variables.max_combined_variable_size = 536870912
+
+ # Parameters for run:
+ # ==============================================================================
+ run.autostack = True
+ run.batch_size = ('tokens_per_batch', 65536)
+ run.checkpoint_input_pipeline = False
+ run.dataset_split = 'train'
+ run.ensemble_inputs = None
+ run.eval_checkpoint_step = None
+ run.eval_dataset_fn = None
+ run.eval_dir_suffix = None
+ run.eval_summary_dir = None
+ run.export_checkpoint_step = None
+ run.export_path = ''
+ run.init_checkpoint = None
+ run.iterations_per_loop = 100
+ run.keep_checkpoint_max = None
+ run.layout_rules = \
+     'ensemble:ensemble,batch:batch,d_ff:model,heads:model,vocab:model,experts:batch'
+ run.learning_rate_schedule = @learning_rate_schedules.learning_rate_schedule_noam
+ run.mesh_devices = None
+ run.mesh_shape = @mesh_tensorflow.transformer.utils.tpu_mesh_shape()
+ run.mode = 'train'
+ run.model_type = 'bitransformer'
+ run.optimizer = @optimize.AdafactorOptimizer
+ run.output_eval_examples = True
+ run.perplexity_eval_steps = 100
+ run.predict_fn = None
+ run.save_checkpoints_steps = 10000
+ run.seen_data_init_step = 0
+ run.sequence_length = {'inputs': 512, 'targets': 128}
+ run.skip_seen_data = False
+ run.total_run_steps = None
+ run.train_dataset_fn = @t5.models.mesh_transformer.mesh_train_dataset_fn
+ run.train_steps = 524288
+ run.variable_filter = None
+
+ # Parameters for select_random_chunk:
+ # ==============================================================================
+ select_random_chunk.additional_feature_keys = None
+ select_random_chunk.min_length = None
+ select_random_chunk.passthrough_feature_keys = None
+ select_random_chunk.sequence_length = None
+ select_random_chunk.uniform_random_start = False
+
+ # Parameters for decoder/SelfAttention:
+ # ==============================================================================
+ decoder/SelfAttention.attention_func = None
+ decoder/SelfAttention.attention_kwargs = None
+ decoder/SelfAttention.combine_dims = True
+ decoder/SelfAttention.dropout_rate = %dropout_rate
+ decoder/SelfAttention.fold_scaling_into_initializer = True
+ decoder/SelfAttention.hyperprompt_hidden_dim = None
+ decoder/SelfAttention.hyperprompt_length_decoder = None
+ decoder/SelfAttention.hyperprompt_length_encoder = None
+ decoder/SelfAttention.hyperprompt_mtlshare = False
+ decoder/SelfAttention.hyperprompt_task_num = 8
+ decoder/SelfAttention.keep_query_heads_dims = False
+ decoder/SelfAttention.key_value_size = %d_kv
+ decoder/SelfAttention.num_heads = %num_heads
+ decoder/SelfAttention.num_memory_heads = 0
+ decoder/SelfAttention.relative_attention_num_buckets = 32
+ decoder/SelfAttention.relative_attention_type = 'bias_shared'
+ decoder/SelfAttention.shared_kv = False
+ decoder/SelfAttention.use_hyperprompt = False
+ decoder/SelfAttention.z_loss_coeff = None
+
+ # Parameters for encoder/SelfAttention:
+ # ==============================================================================
+ encoder/SelfAttention.attention_func = None
+ encoder/SelfAttention.attention_kwargs = None
+ encoder/SelfAttention.combine_dims = True
+ encoder/SelfAttention.dropout_rate = %dropout_rate
+ encoder/SelfAttention.fold_scaling_into_initializer = True
+ encoder/SelfAttention.hyperprompt_hidden_dim = None
+ encoder/SelfAttention.hyperprompt_length_decoder = None
+ encoder/SelfAttention.hyperprompt_length_encoder = None
+ encoder/SelfAttention.hyperprompt_mtlshare = False
+ encoder/SelfAttention.hyperprompt_task_num = 8
+ encoder/SelfAttention.keep_query_heads_dims = False
+ encoder/SelfAttention.key_value_size = %d_kv
+ encoder/SelfAttention.num_heads = %num_heads
+ encoder/SelfAttention.num_memory_heads = 0
+ encoder/SelfAttention.relative_attention_num_buckets = 32
+ encoder/SelfAttention.relative_attention_type = 'bias_shared'
+ encoder/SelfAttention.shared_kv = False
+ encoder/SelfAttention.use_hyperprompt = False
+ encoder/SelfAttention.z_loss_coeff = None
+
+ # Parameters for sentinel_id:
+ # ==============================================================================
+ sentinel_id.return_value = None
+
+ # Parameters for serialize_num_microbatches:
+ # ==============================================================================
+ serialize_num_microbatches.tokens_per_microbatch_per_replica = 4096
+
+ # Parameters for SimdMeshImpl:
+ # ==============================================================================
+ SimdMeshImpl.allreduce_in_bfloat16_max_group_size = 8
+
+ # Parameters for split_tokens:
+ # ==============================================================================
+ split_tokens.additional_feature_keys = None
+ split_tokens.passthrough_feature_keys = None
+
+ # Parameters for sublayer_call_layer:
+ # ==============================================================================
+ # None.
+
+ # Parameters for sublayer_dropout:
+ # ==============================================================================
+ sublayer_dropout.dropout_rate = %dropout_rate
+
+ # Parameters for sublayer_mask_padding:
+ # ==============================================================================
+ # None.
+
+ # Parameters for sublayer_residual:
+ # ==============================================================================
+ # None.
+
+ # Parameters for sublayer_rms_norm:
+ # ==============================================================================
+ sublayer_rms_norm.epsilon = 1e-06
+ sublayer_rms_norm.name = 'rms_norm'
+
+ # Parameters for tpu_estimator_model_fn:
+ # ==============================================================================
+ tpu_estimator_model_fn.hierarchical_tiling_spec = None
+ tpu_estimator_model_fn.init_variable_filter = ''
+ tpu_estimator_model_fn.model_info_file = ''
+ tpu_estimator_model_fn.outer_batch_size = 1
+ tpu_estimator_model_fn.tpu_summaries = False
+ tpu_estimator_model_fn.weight_decay_checkpoint = None
+
+ # Parameters for tpu_mesh_shape:
+ # ==============================================================================
+ tpu_mesh_shape.ensemble_parallelism = None
+ tpu_mesh_shape.model_parallelism = 4
+ tpu_mesh_shape.tpu_topology = 'v3-32'
+
+ # Parameters for unit_scaling_convention:
+ # ==============================================================================
+ unit_scaling_convention.value = False
+
+ # Parameters for decoder/Unitransformer:
+ # ==============================================================================
+ decoder/Unitransformer.d_model = %d_model
+ decoder/Unitransformer.ensemble = None
+ decoder/Unitransformer.input_full_attention = False
+ decoder/Unitransformer.label_smoothing = 0.0
+ decoder/Unitransformer.loss_denominator = None
+ decoder/Unitransformer.loss_fn = None
+ decoder/Unitransformer.loss_on_targets_only = False
+ decoder/Unitransformer.max_length = 512
+ decoder/Unitransformer.positional_embedding = False
+ decoder/Unitransformer.shared_embedding_and_softmax_weights = True
+ decoder/Unitransformer.sinusoid_positional_embedding = False
+ decoder/Unitransformer.token_dropout_rate = 0.0
+ decoder/Unitransformer.vocab_divisor = 128
+ decoder/Unitransformer.z_loss = 0.0001
+
+ # Parameters for encoder/Unitransformer:
+ # ==============================================================================
+ encoder/Unitransformer.d_model = %d_model
+ encoder/Unitransformer.ensemble = None
+ encoder/Unitransformer.input_full_attention = False
+ encoder/Unitransformer.label_smoothing = 0.0
+ encoder/Unitransformer.loss_denominator = None
+ encoder/Unitransformer.loss_fn = None
+ encoder/Unitransformer.loss_on_targets_only = False
+ encoder/Unitransformer.max_length = 512
+ encoder/Unitransformer.positional_embedding = False
+ encoder/Unitransformer.shared_embedding_and_softmax_weights = True
+ encoder/Unitransformer.sinusoid_positional_embedding = False
+ encoder/Unitransformer.token_dropout_rate = 0.0
+ encoder/Unitransformer.vocab_divisor = 128
+ encoder/Unitransformer.z_loss = 0.0001
+
+ # Parameters for VarianceScalingInitializer:
+ # ==============================================================================
+ VarianceScalingInitializer.distribution = 'normal'
+ VarianceScalingInitializer.mode = 'fan_in'
+ VarianceScalingInitializer.scale = 1.0
+
+ # Parameters for VocabEmbedding:
+ # ==============================================================================
+ VocabEmbedding.scale_variable_like_classifier_weights = False
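
The objective encoded above is T5-style span corruption on the 'tr_corpus' mixture: noise_density 0.15, mean_noise_span_length 3.0, inputs packed to 512 tokens, trained for 524288 steps with Adafactor and a Noam schedule on a v3-32 TPU (model parallelism 4). A minimal sketch of the length arithmetic behind random_spans_helper, mirroring (not reproducing) the t5.data implementation:

# Span-corruption length arithmetic implied by random_spans_helper above:
# noise_density=0.15, mean_noise_span_length=3.0, inputs_length=512, and one
# extra sentinel token per span on both the input and target side.
def lengths(tokens_length, noise_density=0.15, mean_span=3.0):
    num_noise = int(round(tokens_length * noise_density))
    num_spans = int(round(num_noise / mean_span))
    inputs = (tokens_length - num_noise) + num_spans + 1   # nonnoise + sentinels + EOS
    targets = num_noise + num_spans + 1                    # noise + sentinels + EOS
    return inputs, targets

# Find the longest raw segment whose corrupted inputs still fit in 512 tokens.
tokens_length = 512
while lengths(tokens_length + 1)[0] <= 512:
    tokens_length += 1

print(tokens_length, lengths(tokens_length))  # 568 (512, 114)
# run.sequence_length = {'inputs': 512, 'targets': 128} comfortably covers
# the computed targets length of 114.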
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a9bf2a75ed52ec68cc8bb59de248f6e9b5681b290d2363d950d98f3facc2a01
+ size 4360452147
spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ca0ade8cf006bd16c9d7dafebce0f7fad8ba4018f3e70947e972cf10a2b3b03
+ size 839200
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "extra_ids": 100, "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"], "special_tokens_map_file": null, "name_or_path": "./", "sp_model_kwargs": {}, "tokenizer_class": "T5Tokenizer"}