r"""Pre-training ViT on ILSVRC-2012 as in https://arxiv.org/abs/2106.10270

This config does NOT include regularization (dropout, stochastic depth), which
was shown to help with B/32, B/16, L/16 models in the paper (Figure 4).

This configuration makes use of the "arg" passed to get_config to select which
model to run, so a few examples are given below:

Run training of a B/16 model:

big_vision.train \
    --config big_vision/configs/vit_i1k.py:variant=B/16 \
    --workdir gs://[your_bucket]/big_vision/`date '+%m-%d_%H%M'`

Run training of a B/32 model with custom aug-strength and 300 epochs:

big_vision.train \
    --config big_vision/configs/vit_i1k.py:variant=B/32,aug=light1 \
    --workdir gs://[your_bucket]/big_vision/`date '+%m-%d_%H%M'` \
    --config.total_epochs 300
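
For a quick local test run, you can additionally pass runlocal=True, which
shrinks the batch size, shuffle buffer and eval splits (see the arg.runlocal
branches below); the workdir here is just a placeholder:

big_vision.train \
    --config big_vision/configs/vit_i1k.py:variant=S/16,runlocal=True \
    --workdir /tmp/vit_i1k_test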
"""

import big_vision.configs.common as bvcc
from big_vision.configs.common_fewshot import get_fewshot_lsr
import ml_collections as mlc
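
# Mixup and RandAugment settings per augmentation-strength preset; the preset
# is picked per model variant below, or overridden via the `aug` arg.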
MIXUP_DEF = {
    'none': dict(p=0.0, fold_in=None),
    'light1': dict(p=0.0, fold_in=None),
    'light2': dict(p=0.2, fold_in=None),
    'medium1': dict(p=0.2, fold_in=None),
    'medium2': dict(p=0.5, fold_in=None),
    'strong1': dict(p=0.5, fold_in=None),
    'strong2': dict(p=0.8, fold_in=None),
}

RANDAUG_DEF = {
    'none': '',
    'light1': 'randaug(2,0)',
    'light2': 'randaug(2,10)',
    'medium1': 'randaug(2,15)',
    'medium2': 'randaug(2,15)',
    'strong1': 'randaug(2,20)',
    'strong2': 'randaug(2,20)',
}


def get_config(arg=None):
  """Config for training."""
  arg = bvcc.parse_arg(arg, variant='B/16', runlocal=False, aug='')
  config = mlc.ConfigDict()

  config.seed = 0
  config.total_epochs = 300
  config.num_classes = 1000
  config.loss = 'sigmoid_xent'
  config.init_head_bias = -6.9
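
  # Default augmentation strength per model variant; an explicit `aug=` preset
  # (see MIXUP_DEF/RANDAUG_DEF above) overrides it.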
  aug_setting = arg.aug or {
      'Ti/16': 'light1',
      'S/32': 'medium1',
      'S/16': 'medium2',
      'B/32': 'medium2',
      'B/16': 'medium2',
      'L/16': 'medium2',
  }[arg.variant]
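
  # Input section: ImageNet-1k training split, batching and caching.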
  config.input = dict()
  config.input.data = dict(
      name='imagenet2012',
      split='train[:99%]',
  )
  config.input.batch_size = 4096
  config.input.cache_raw = not arg.runlocal  # Caching raw data needs a lot of host RAM.
  config.input.shuffle_buffer_size = 250_000
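
  # pp_common is the tail of the preprocessing string shared by train and eval.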
  pp_common = (
      '|value_range(-1, 1)'
      '|onehot(1000, key="{lbl}", key_result="labels")'
      '|keep("image", "labels")'
  )
  config.input.pp = (
      'decode_jpeg_and_inception_crop(224)|flip_lr|' +
      RANDAUG_DEF[aug_setting] +
      pp_common.format(lbl='label')
  )
  pp_eval = 'decode|resize_small(256)|central_crop(224)' + pp_common
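
  # The randaug op used in the training pp string lives in the archived
  # pp modules, so list them explicitly.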
  config.pp_modules = ['ops_general', 'ops_image', 'ops_text', 'archive.randaug']
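
  # Prefetch fairly aggressively so training is not input-bound.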
  config.input.prefetch = 8
  config.prefetch_to_device = 4

  config.log_training_steps = 50
  config.ckpt_steps = 1000
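
  # Model section.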
  config.model_name = 'vit'
  config.model = dict(
      variant=arg.variant,
      rep_size=True,
      pool_type='tok',
  )
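
  # Optimizer section: Adam, with its first-moment state kept in bfloat16
  # to save optimizer memory.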
  config.grad_clip_norm = 1.0
  config.optax_name = 'scale_by_adam'
  config.optax = dict(mu_dtype='bfloat16')
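
  # Learning-rate schedule: warmup followed by cosine decay.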
  config.lr = 0.001
  config.wd = 0.0001
  config.schedule = dict(warmup_steps=10_000, decay_type='cosine')

  config.mixup = MIXUP_DEF[aug_setting]
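
  # Eval section: classification evals on ImageNet variants plus few-shot
  # linear probes.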
  def get_eval(split, dataset='imagenet2012'):
    return dict(
        type='classification',
        data=dict(name=dataset, split=split),
        pp_fn=pp_eval.format(lbl='label'),
        loss_name=config.loss,
        log_steps=2500,
        cache_final=not arg.runlocal,
    )
  config.evals = {}
  config.evals.train = get_eval('train[:2%]')
  config.evals.minival = get_eval('train[99%:]')
  config.evals.val = get_eval('validation')
  config.evals.v2 = get_eval('test', dataset='imagenet_v2')
  config.evals.real = get_eval('validation', dataset='imagenet2012_real')
  config.evals.real.pp_fn = pp_eval.format(lbl='real_label')

  config.fewshot = get_fewshot_lsr(runlocal=arg.runlocal)
  config.fewshot.log_steps = 10_000
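
  # Shrink everything for quick local debugging runs (runlocal=True).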
  if arg.runlocal:
    config.input.shuffle_buffer_size = 10
    config.input.batch_size = 8
    config.input.cache_raw = False
    config.evals.train.data.split = 'train[:16]'
    config.evals.minival.data.split = 'train[:16]'
    config.evals.val.data.split = 'validation[:16]'
    config.evals.v2.data.split = 'test[:16]'
    config.evals.real.data.split = 'validation[:16]'

  return config