from __gin__ import dynamic_registration
import tasks
import seqio
import optax
import __main__ as train_script
from t5.data import mixtures
from t5x import models
from t5x import partitioning
from t5x import utils
include 't5x/examples/t5/mt5/base.gin'
include "t5x/configs/runs/finetune.gin"
MIXTURE_OR_TASK_NAME = %gin.REQUIRED
TASK_FEATURE_LENGTHS = {"inputs": 256, "targets": 256}
INITIAL_CHECKPOINT_PATH = %gin.REQUIRED
LR = %gin.REQUIRED
TRAIN_STEPS = %gin.REQUIRED # 1000000 pre-trained steps + 10000 fine-tuning steps.
USE_CACHED_TASKS = False
DROPOUT_RATE = 0.1
RANDOM_SEED = 0
BATCH_SIZE = 32
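# Illustrative values for the %gin.REQUIRED fields above (hypothetical; set them
# here or override them on the command line via --gin.<NAME>=<value>):
# MIXTURE_OR_TASK_NAME = 'my_finetune_task'  # a task/mixture registered in tasks.py
# INITIAL_CHECKPOINT_PATH = 'gs://t5-data/pretrained_models/t5x/mt5_base/checkpoint_1000000'
# LR = 0.001
# TRAIN_STEPS = 1010000  # 1,000,000 pre-trained + 10,000 fine-tuning steps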
# Make inference evaluation use the same feature lengths as training.
infer_eval/utils.DatasetConfig:
  task_feature_lengths = %TASK_FEATURE_LENGTHS
# Save a checkpoint every 1000 steps.
utils.SaveCheckpointConfig:
  period = 1000
  keep = 1  # number of checkpoints to keep
# Optional: switch to AdamW instead of the default optimizer from base.gin.
# Left disabled; uncomment these lines (including the import) to enable it.
#import t5x.optimizers
#OPTIMIZER = @optax.adamw
#optax.adamw.learning_rate = %LR
#optax.adamw.weight_decay = 0.1
utils.create_learning_rate_scheduler:
  factors = 'constant * rsqrt_decay'
  base_learning_rate = %LR
  warmup_steps = 1000
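# Note (my reading of t5x.utils.create_learning_rate_scheduler): with these
# factors the rate is roughly %LR / sqrt(max(step, warmup_steps)), i.e. it stays
# constant for the first 1000 warmup steps and then follows inverse-sqrt decay.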
# Might have to be changed based on architecture.
# partitioning.PjitPartitioner.num_partitions = 1
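# Example launch command (hypothetical paths and values, shown only for illustration):
#   python -m t5x.train \
#     --gin_file="path/to/this_finetune.gin" \
#     --gin.MODEL_DIR="'gs://my-bucket/mt5_base_finetune'" \
#     --gin.MIXTURE_OR_TASK_NAME="'my_finetune_task'" \
#     --gin.INITIAL_CHECKPOINT_PATH="'gs://t5-data/pretrained_models/t5x/mt5_base/checkpoint_1000000'" \
#     --gin.LR=0.001 \
#     --gin.TRAIN_STEPS=1010000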