diff --git a/pytorch-image-models/timm/optim/_types.py b/pytorch-image-models/timm/optim/_types.py new file mode 100644 index 0000000000000000000000000000000000000000..c24eddd108f661327f50d27e85f993ac213152a5 --- /dev/null +++ b/pytorch-image-models/timm/optim/_types.py @@ -0,0 +1,25 @@ +from typing import Any, Dict,
Iterable, Union, Protocol, Type +try: + from typing import TypeAlias, TypeVar +except ImportError: + from typing_extensions import TypeAlias, TypeVar + +import torch +import torch.optim + +try: + from torch.optim.optimizer import ParamsT +except (ImportError, TypeError): + ParamsT: TypeAlias = Union[Iterable[torch.Tensor], Iterable[Dict[str, Any]]] + + +OptimType = Type[torch.optim.Optimizer] + + +class OptimizerCallable(Protocol): + """Protocol for optimizer constructor signatures.""" + + def __call__(self, params: ParamsT, **kwargs) -> torch.optim.Optimizer: ... + + +__all__ = ['ParamsT', 'OptimType', 'OptimizerCallable'] \ No newline at end of file diff --git a/pytorch-image-models/timm/optim/adafactor_bv.py b/pytorch-image-models/timm/optim/adafactor_bv.py new file mode 100644 index 0000000000000000000000000000000000000000..298d43bb7e041c0e988508afe411bec8ee95842f --- /dev/null +++ b/pytorch-image-models/timm/optim/adafactor_bv.py @@ -0,0 +1,320 @@ +""" Adafactor (Big Vision variant) for PyTorch + +Adapted from the implementation in big vision: https://github.com/google-research/big_vision + +Described in 'Scaling Vision Transformers': https://arxiv.org/abs/2106.04560 + +Adaptation and PyTorch modifications by Ross Wightman +""" +from typing import List, Optional, Tuple, Union + +import torch +from torch import Tensor +from torch.optim import Optimizer + +from ._types import ParamsT + + +def _get_scalar_dtype(): + """Get the scalar dtype that the optimizer uses for state""" + return torch.float64 + + +def _factored_dims( + shape: Tuple[int, ...], + factored: bool, + min_dim_size_to_factor: int +) -> Optional[tuple[int, int]]: + """Whether to use a factored second moment estimator. + + This function returns a tuple with the two largest axes to reduce over. + If no two dimensions have size >= min_dim_size_to_factor, return None. + + Args: + shape: an input shape + factored: whether to use factored second-moment estimator for > 2d vars. + min_dim_size_to_factor: only factor accumulator if two array dimensions have at least this size. + + Returns: + None or a tuple of ints + """ + if not factored or len(shape) < 2: + return None + sorted_dims = sorted(((x, i) for i, x in enumerate(shape))) + if shape[sorted_dims[-2][1]] < min_dim_size_to_factor: + return None + return int(sorted_dims[-2][1]), int(sorted_dims[-1][1]) + + +class AdafactorBigVision(Optimizer): + """ + PyTorch implementation of BigVision's Adafactor variant with both single and multi tensor implementations. + + Adapted from https://github.com/google-research/big_vision by Ross Wightman + """ + + def __init__( + self, + params: ParamsT, + lr: float = 1.0, + min_dim_size_to_factor: int = 16, + decay_rate: float = 0.8, + decay_offset: int = 0, + beta2_cap: float = 0.999, + momentum: Optional[float] = 0.9, + momentum_dtype: Union[str, torch.dtype] = torch.bfloat16, + eps: Optional[float] = None, + weight_decay: float = 0.0, + clipping_threshold: Optional[float] = None, + unscaled_wd: bool = False, + caution: bool = False, + *, + foreach: Optional[bool] = False, + ): + if isinstance(momentum_dtype, str): + if momentum_dtype == 'float16': + momentum_dtype = torch.float16 + elif momentum_dtype == 'bfloat16': + momentum_dtype = torch.bfloat16 + else: + assert momentum_dtype == 'float32', f'{momentum_dtype} dtype not supported' + momentum_dtype = torch.float32 + # FIXME try to check if momentum dtype is appropriate for device? Torch API not great for this. 
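# Illustrative note (editor's sketch, not part of the diff): as handled just above, momentum_dtype
# may be passed either as a torch.dtype or as one of the strings 'float16' / 'bfloat16' / 'float32'.
# A hypothetical caller (the `model` object here is assumed, not from this diff) might write:
#   opt = AdafactorBigVision(model.parameters(), lr=1.0, momentum=0.9, momentum_dtype='bfloat16')
# which keeps the first-moment EMA (the 'exp_avg' state below) in bfloat16 to shrink optimizer-state memory.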
+ + defaults = dict( + lr=lr, + min_dim_size_to_factor=min_dim_size_to_factor, + decay_rate=decay_rate, + decay_offset=decay_offset, + beta2_cap=beta2_cap, + momentum=momentum, + momentum_dtype=momentum_dtype, + eps=eps, + weight_decay=weight_decay, + clipping_threshold=clipping_threshold, + unscaled_wd=unscaled_wd, + caution=caution, + foreach=foreach, + ) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault('caution', False) + group.setdefault('foreach', None) + for p in group['params']: + p_state = self.state.get(p, {}) + if len(p_state) != 0 and not torch.is_tensor(p_state['step']): + p_state['step'] = torch.tensor(float(p_state['step']), dtype=_get_scalar_dtype()) + + if 'exp_avg' in p_state and torch.is_tensor(p_state['exp_avg']): + # FIXME this is a bit of a hack, optimizer.load_state_dict appears to upcast + # the momentum to float32 (it's half precision in the state_dict), need to + # look into this further. Better to override _process_value_according_to_param_policy? + p_state['exp_avg'] = p_state['exp_avg'].to(dtype=self.defaults['momentum_dtype']) + + @torch.no_grad() + def step(self, closure=None): + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad = [] + grads = [] + exp_avg_sq_rs = [] + exp_avg_sq_cs = [] + exp_avg_sqs = [] + state_steps = [] + exp_avgs = [] # For momentum + + for p in group['params']: + if p.grad is None: + continue + + if p.grad.is_sparse: + raise RuntimeError("Sparse gradients not supported") + + params_with_grad.append(p) + grads.append(p.grad) + + state = self.state[p] + + if len(state) == 0: + # NOTE step on CPU, probably need some more though to make capturable + state['step'] = torch.tensor(0.0, dtype=_get_scalar_dtype()) + + shape = p.grad.shape + factored_dims = _factored_dims( + shape, + factored=True, + min_dim_size_to_factor=self.defaults['min_dim_size_to_factor'] + ) + + if factored_dims is not None: + dc, dr = factored_dims + row_shape = list(p.grad.shape) + row_shape[dr] = 1 + col_shape = list(p.grad.shape) + col_shape[dc] = 1 + state['exp_avg_sq_r'] = p.grad.new_zeros(row_shape) + state['exp_avg_sq_c'] = p.grad.new_zeros(col_shape) + else: + state['exp_avg_sq'] = torch.zeros_like(p.grad, memory_format=torch.preserve_format) + + if self.defaults['momentum'] is not None: + state['exp_avg'] = torch.zeros_like(p.grad, dtype=self.defaults['momentum_dtype']) + + state_steps.append(state['step']) + exp_avg_sq_rs.append(state.get('exp_avg_sq_r', None)) + exp_avg_sq_cs.append(state.get('exp_avg_sq_c', None)) + exp_avg_sqs.append(state.get('exp_avg_sq', None)) + exp_avgs.append(state.get('exp_avg', None)) + + if group['foreach']: + func = _multi_tensor_adafactor + else: + func = _single_tensor_adafactor + + func( + params=params_with_grad, + grads=grads, + exp_avg_sq_rs=exp_avg_sq_rs, + exp_avg_sq_cs=exp_avg_sq_cs, + exp_avg_sqs=exp_avg_sqs, + exp_avgs=exp_avgs, + state_steps=state_steps, + beta2_decay=group['decay_rate'], + beta2_cap=group['beta2_cap'], + min_dim_size_to_factor=group['min_dim_size_to_factor'], + eps=group['eps'], + lr=group['lr'], + weight_decay=group['weight_decay'], + momentum=group['momentum'], + momentum_dtype=group['momentum_dtype'], + clipping_threshold=group['clipping_threshold'], + unscaled_wd=group['unscaled_wd'], + caution=group['caution'], + ) + + return loss + + +def _single_tensor_adafactor( + params: List[Tensor], + grads: 
List[Tensor], + exp_avg_sq_rs: List[Optional[Tensor]], + exp_avg_sq_cs: List[Optional[Tensor]], + exp_avg_sqs: List[Optional[Tensor]], + exp_avgs: List[Optional[Tensor]], + state_steps: List[Tensor], + *, + beta2_decay: float, + beta2_cap: float, + min_dim_size_to_factor: int, + eps: float, + lr: float, + weight_decay: float, + momentum: Optional[float], + momentum_dtype: Union[str, torch.dtype], + clipping_threshold: Optional[float], + unscaled_wd: bool, + caution: bool, +): + for i, param in enumerate(params): + grad = grads[i] + exp_avg_sq_r = exp_avg_sq_rs[i] + exp_avg_sq_c = exp_avg_sq_cs[i] + exp_avg_sq = exp_avg_sqs[i] + exp_avg = exp_avgs[i] + step_t = state_steps[i] + if eps is None: + # default eps for avoiding div by zero, diff from float type eps + eps = 1e-7 if grad.dtype == torch.float16 else 1e-30 + + # Update step + step_t += 1 + beta2_t = min(beta2_cap, 1.0 - float(step_t) ** (-beta2_decay)) + one_minus_beta2_t = 1 - beta2_t + + grad_sqr = torch.square(grad) + eps + # NOTE application of eps (epsilon1) mirrors the optax/big vision/t5x approach + if exp_avg_sq is None: + # factorized second moment + dc, dr = _factored_dims(grad.shape, True, min_dim_size_to_factor=min_dim_size_to_factor) + exp_avg_sq_r.lerp_(grad_sqr.mean(dim=dr, keepdim=True), one_minus_beta2_t) + exp_avg_sq_c.lerp_(grad_sqr.mean(dim=dc, keepdim=True), one_minus_beta2_t) + + reduce_dc = dc - 1 if dc > dr else dc + row_col_mean = exp_avg_sq_r.mean(dim=reduce_dc, keepdim=True) + row_factor = (exp_avg_sq_r / row_col_mean).rsqrt() + col_factor = exp_avg_sq_c.rsqrt() + + update = grad * row_factor * col_factor + else: + # non-factorized second moment + assert exp_avg_sq_r is None and exp_avg_sq_c is None + exp_avg_sq.lerp_(grad_sqr, one_minus_beta2_t) + update = grad * exp_avg_sq.rsqrt() + + # Clip by RMS value + if clipping_threshold is not None: + denom = (update.norm(2) / ((update.numel() ** 0.5) / clipping_threshold)).clamp_(max=1.0) + update.div_(denom) + + # Apply momentum (in different dtype) + if momentum is not None and exp_avg is not None: + if momentum_dtype != grad.dtype: + exp_avg.lerp_(update.to(momentum_dtype), 1 - momentum) # ema + update = exp_avg.to(grad.dtype) + else: + exp_avg.lerp_(update, 1 - momentum) # ema + update = exp_avg.clone() + + if caution: + # apply caution as per 'Cautious Optimizers': https://arxiv.org/abs/2411.16085 + mask = (update * grad > 0).to(grad.dtype) + mask.div_(mask.mean().clamp_(min=1e-3)) + update.mul_(mask) + + # Scale by learning rate + update.mul_(lr) + + # Perform weight decay + if weight_decay != 0: + if unscaled_wd: + # match big vision impl, 'fully decoupled' decay w/o LR scaling + param.mul_(1. - weight_decay) + else: + # match typical pytorch behaviour for decoupled decay, eg adamw where wd is scaled by LR + param.mul_(1. 
- lr * weight_decay) + + # Update parameters + param.add_(update, alpha=-1.0) + + +def _multi_tensor_adafactor( + params: List[Tensor], + grads: List[Tensor], + exp_avg_sq_rs: List[Optional[Tensor]], + exp_avg_sq_cs: List[Optional[Tensor]], + exp_avg_sqs: List[Optional[Tensor]], + exp_avgs: List[Optional[Tensor]], + state_steps: List[Tensor], + *, + beta2_decay: float, + beta2_cap: float, + min_dim_size_to_factor: int, + eps: float, + lr: float, + weight_decay: float, + momentum: Optional[float], + momentum_dtype: Union[str, torch.dtype], + clipping_threshold: Optional[float], + unscaled_wd: bool, + caution: bool, +): + # FIXME TODO + assert False, 'multi-tensor fn (foreach=True) not implemented yet' diff --git a/pytorch-image-models/timm/optim/adamp.py b/pytorch-image-models/timm/optim/adamp.py new file mode 100644 index 0000000000000000000000000000000000000000..5a9ac3395d233219835108da323f2c20bbd9b5f3 --- /dev/null +++ b/pytorch-image-models/timm/optim/adamp.py @@ -0,0 +1,120 @@ +""" +AdamP Optimizer Implementation copied from https://github.com/clovaai/AdamP/blob/master/adamp/adamp.py + +Paper: `Slowing Down the Weight Norm Increase in Momentum-based Optimizers` - https://arxiv.org/abs/2006.08217 +Code: https://github.com/clovaai/AdamP + +Copyright (c) 2020-present NAVER Corp. +MIT license +""" + +import torch +import torch.nn.functional as F +from torch.optim.optimizer import Optimizer +import math + + +def _channel_view(x) -> torch.Tensor: + return x.reshape(x.size(0), -1) + + +def _layer_view(x) -> torch.Tensor: + return x.reshape(1, -1) + + +def projection(p, grad, perturb, delta: float, wd_ratio: float, eps: float): + wd = 1. + expand_size = (-1,) + (1,) * (len(p.shape) - 1) + for view_func in [_channel_view, _layer_view]: + param_view = view_func(p) + grad_view = view_func(grad) + cosine_sim = F.cosine_similarity(grad_view, param_view, dim=1, eps=eps).abs_() + + # FIXME this is a problem for PyTorch XLA + if cosine_sim.max() < delta / math.sqrt(param_view.size(1)): + p_n = p / param_view.norm(p=2, dim=1).add_(eps).reshape(expand_size) + perturb -= p_n * view_func(p_n * perturb).sum(dim=1).reshape(expand_size) + wd = wd_ratio + return perturb, wd + + return perturb, wd + + +class AdamP(Optimizer): + def __init__( + self, + params, + lr=1e-3, + betas=(0.9, 0.999), + eps=1e-8, + weight_decay=0, + delta=0.1, + wd_ratio=0.1, + nesterov=False, + ): + defaults = dict( + lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + delta=delta, + wd_ratio=wd_ratio, + nesterov=nesterov, + ) + super(AdamP, self).__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + + grad = p.grad + beta1, beta2 = group['betas'] + nesterov = group['nesterov'] + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + state['exp_avg'] = torch.zeros_like(p) + state['exp_avg_sq'] = torch.zeros_like(p) + + # Adam + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + + state['step'] += 1 + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + step_size = group['lr'] / bias_correction1 + + if nesterov: + perturb = (beta1 * 
exp_avg + (1 - beta1) * grad) / denom + else: + perturb = exp_avg / denom + + # Projection + wd_ratio = 1. + if len(p.shape) > 1: + perturb, wd_ratio = projection(p, grad, perturb, group['delta'], group['wd_ratio'], group['eps']) + + # Weight decay + if group['weight_decay'] > 0: + p.mul_(1. - group['lr'] * group['weight_decay'] * wd_ratio) + + # Step + p.add_(perturb, alpha=-step_size) + + return loss diff --git a/pytorch-image-models/timm/optim/adamw.py b/pytorch-image-models/timm/optim/adamw.py new file mode 100644 index 0000000000000000000000000000000000000000..07299ad63ef1bc712ccad016b6f2506403d92f36 --- /dev/null +++ b/pytorch-image-models/timm/optim/adamw.py @@ -0,0 +1,140 @@ +""" AdamW Optimizer +Impl copied from PyTorch master + +NOTE: This impl has been deprecated in favour of torch.optim.AdamW and remains as a reference +""" +import math +from typing import Tuple + +import torch +from torch.optim.optimizer import Optimizer + +from ._types import ParamsT + + +class AdamWLegacy(Optimizer): + r"""Implements AdamW algorithm. + + NOTE: This impl has been deprecated in favour of torch.optim.NAdam and remains as a reference + + References: + - Adam: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 + - Decoupled Weight Decay Regularization: https://arxiv.org/abs/1711.05101 + - On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ + + Args: + params: iterable of parameters to optimize or dicts defining parameter groups + lr: learning rate + betas: coefficients used for computing running averages of gradient and its square + eps: term added to the denominator to improve numerical stability + weight_decay: weight decay coefficient + amsgrad: whether to use the AMSGrad variant of this algorithm + from the paper `On the Convergence of Adam and Beyond` + caution: apply caution when using AdamW + """ + + def __init__( + self, + params: ParamsT, + lr: float = 1e-3, + betas: Tuple[float, float] = (0.9, 0.999), + eps: float = 1e-8, + weight_decay: float = 1e-2, + amsgrad: bool = False, + caution: bool = False, + ): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + defaults = dict( + lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + amsgrad=amsgrad, + caution=caution, + ) + super(AdamWLegacy, self).__init__(params, defaults) + + def __setstate__(self, state): + super(AdamWLegacy, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + group.setdefault('caution', False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + + # Perform stepweight decay + p.data.mul_(1 - group['lr'] * group['weight_decay']) + + # Perform optimization step + grad = p.grad + if grad.is_sparse: + raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') + amsgrad = group['amsgrad'] + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_sq'] = torch.zeros_like(p) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + if amsgrad: + max_exp_avg_sq = state['max_exp_avg_sq'] + beta1, beta2 = group['betas'] + + state['step'] += 1 + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) + # Use the max. for normalizing running avg. of gradient + denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + else: + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + + step_size = group['lr'] / bias_correction1 + + if group['caution']: + # Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085 + mask = (exp_avg * grad > 0).to(grad.dtype) + mask.div_(mask.mean().clamp_(min=1e-3)) + exp_avg = exp_avg * mask + + p.addcdiv_(exp_avg, denom, value=-step_size) + + return loss diff --git a/pytorch-image-models/timm/optim/adan.py b/pytorch-image-models/timm/optim/adan.py new file mode 100644 index 0000000000000000000000000000000000000000..94fa9ef208329317178f1e3fad58d970a2ac48c4 --- /dev/null +++ b/pytorch-image-models/timm/optim/adan.py @@ -0,0 +1,295 @@ +""" Adan Optimizer + +Adan: Adaptive Nesterov Momentum Algorithm for Faster Optimizing Deep Models[J]. arXiv preprint arXiv:2208.06677, 2022. + https://arxiv.org/abs/2208.06677 + +Implementation adapted from https://github.com/sail-sg/Adan +""" +# Copyright 2022 Garena Online Private Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import math +from typing import List, Tuple + +import torch +from torch import Tensor +from torch.optim.optimizer import Optimizer + + +class MultiTensorApply(object): + available = False + warned = False + + def __init__(self, chunk_size): + try: + MultiTensorApply.available = True + self.chunk_size = chunk_size + except ImportError as err: + MultiTensorApply.available = False + MultiTensorApply.import_err = err + + def __call__(self, op, noop_flag_buffer, tensor_lists, *args): + return op(self.chunk_size, noop_flag_buffer, tensor_lists, *args) + + +class Adan(Optimizer): + """ Implements a pytorch variant of Adan. + + Adan was proposed in Adan: Adaptive Nesterov Momentum Algorithm for Faster Optimizing Deep Models + https://arxiv.org/abs/2208.06677 + + Arguments: + params: Iterable of parameters to optimize or dicts defining parameter groups. + lr: Learning rate. + betas: Coefficients used for first- and second-order moments. + eps: Term added to the denominator to improve numerical stability. + weight_decay: Decoupled weight decay (L2 penalty) + no_prox: How to perform the weight decay + foreach: If True would use torch._foreach implementation. Faster but uses slightly more memory. + """ + + def __init__(self, + params, + lr: float = 1e-3, + betas: Tuple[float, float, float] = (0.98, 0.92, 0.99), + eps: float = 1e-8, + weight_decay: float = 0.0, + no_prox: bool = False, + foreach: bool = True, + ): + if not 0.0 <= lr: + raise ValueError('Invalid learning rate: {}'.format(lr)) + if not 0.0 <= eps: + raise ValueError('Invalid epsilon value: {}'.format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1])) + if not 0.0 <= betas[2] < 1.0: + raise ValueError('Invalid beta parameter at index 2: {}'.format(betas[2])) + + defaults = dict( + lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + no_prox=no_prox, + foreach=foreach, + ) + super().__init__(params, defaults) + + def __setstate__(self, state): + super(Adan, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('no_prox', False) + + @torch.no_grad() + def restart_opt(self): + for group in self.param_groups: + group['step'] = 0 + for p in group['params']: + if p.requires_grad: + state = self.state[p] + # State initialization + + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p) + # Exponential moving average of gradient difference + state['exp_avg_diff'] = torch.zeros_like(p) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step.""" + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_avg_sqs = [] + exp_avg_diffs = [] + neg_pre_grads = [] + + beta1, beta2, beta3 = group['betas'] + # assume same step across group now to simplify things + # per parameter step can be easily supported by making it a tensor, or pass list into kernel + if 'step' in group: + group['step'] += 1 + else: + group['step'] = 1 + + bias_correction1 = 1.0 - beta1 ** group['step'] + bias_correction2 = 1.0 - beta2 ** group['step'] + bias_correction3 = 1.0 - beta3 ** group['step'] + + for p in group['params']: + if p.grad is None: + continue + 
params_with_grad.append(p) + grads.append(p.grad) + + state = self.state[p] + if len(state) == 0: + state['exp_avg'] = torch.zeros_like(p) + state['exp_avg_sq'] = torch.zeros_like(p) + state['exp_avg_diff'] = torch.zeros_like(p) + + if 'neg_pre_grad' not in state or group['step'] == 1: + state['neg_pre_grad'] = -p.grad.clone() + + exp_avgs.append(state['exp_avg']) + exp_avg_sqs.append(state['exp_avg_sq']) + exp_avg_diffs.append(state['exp_avg_diff']) + neg_pre_grads.append(state['neg_pre_grad']) + + if not params_with_grad: + continue + + kwargs = dict( + params=params_with_grad, + grads=grads, + exp_avgs=exp_avgs, + exp_avg_sqs=exp_avg_sqs, + exp_avg_diffs=exp_avg_diffs, + neg_pre_grads=neg_pre_grads, + beta1=beta1, + beta2=beta2, + beta3=beta3, + bias_correction1=bias_correction1, + bias_correction2=bias_correction2, + bias_correction3_sqrt=math.sqrt(bias_correction3), + lr=group['lr'], + weight_decay=group['weight_decay'], + eps=group['eps'], + no_prox=group['no_prox'], + ) + + if group['foreach']: + _multi_tensor_adan(**kwargs) + else: + _single_tensor_adan(**kwargs) + + return loss + + +def _single_tensor_adan( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + exp_avg_diffs: List[Tensor], + neg_pre_grads: List[Tensor], + *, + beta1: float, + beta2: float, + beta3: float, + bias_correction1: float, + bias_correction2: float, + bias_correction3_sqrt: float, + lr: float, + weight_decay: float, + eps: float, + no_prox: bool, +): + for i, param in enumerate(params): + grad = grads[i] + exp_avg = exp_avgs[i] + exp_avg_sq = exp_avg_sqs[i] + exp_avg_diff = exp_avg_diffs[i] + neg_grad_or_diff = neg_pre_grads[i] + + # for memory saving, we use `neg_grad_or_diff` to get some temp variable in an inplace way + neg_grad_or_diff.add_(grad) + + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) # m_t + exp_avg_diff.mul_(beta2).add_(neg_grad_or_diff, alpha=1 - beta2) # diff_t + + neg_grad_or_diff.mul_(beta2).add_(grad) + exp_avg_sq.mul_(beta3).addcmul_(neg_grad_or_diff, neg_grad_or_diff, value=1 - beta3) # n_t + + denom = (exp_avg_sq.sqrt() / bias_correction3_sqrt).add_(eps) + step_size_diff = lr * beta2 / bias_correction2 + step_size = lr / bias_correction1 + + if no_prox: + param.mul_(1 - lr * weight_decay) + param.addcdiv_(exp_avg, denom, value=-step_size) + param.addcdiv_(exp_avg_diff, denom, value=-step_size_diff) + else: + param.addcdiv_(exp_avg, denom, value=-step_size) + param.addcdiv_(exp_avg_diff, denom, value=-step_size_diff) + param.div_(1 + lr * weight_decay) + + neg_grad_or_diff.zero_().add_(grad, alpha=-1.0) + + +def _multi_tensor_adan( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + exp_avg_diffs: List[Tensor], + neg_pre_grads: List[Tensor], + *, + beta1: float, + beta2: float, + beta3: float, + bias_correction1: float, + bias_correction2: float, + bias_correction3_sqrt: float, + lr: float, + weight_decay: float, + eps: float, + no_prox: bool, +): + if len(params) == 0: + return + + # for memory saving, we use `neg_pre_grads` to get some temp variable in a inplace way + torch._foreach_add_(neg_pre_grads, grads) + + torch._foreach_mul_(exp_avgs, beta1) + torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) # m_t + + torch._foreach_mul_(exp_avg_diffs, beta2) + torch._foreach_add_(exp_avg_diffs, neg_pre_grads, alpha=1 - beta2) # diff_t + + torch._foreach_mul_(neg_pre_grads, beta2) + torch._foreach_add_(neg_pre_grads, grads) + torch._foreach_mul_(exp_avg_sqs, beta3) + 
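# Descriptive note (editor annotation): at this point each neg_pre_grads[i] holds
# g_t + beta2 * (g_t - g_{t-1}) and exp_avg_sqs has already been scaled by beta3; the
# addcmul on the next line accumulates the elementwise square to complete n_t.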
torch._foreach_addcmul_(exp_avg_sqs, neg_pre_grads, neg_pre_grads, value=1 - beta3) # n_t + + denom = torch._foreach_sqrt(exp_avg_sqs) + torch._foreach_div_(denom, bias_correction3_sqrt) + torch._foreach_add_(denom, eps) + + step_size_diff = lr * beta2 / bias_correction2 + step_size = lr / bias_correction1 + + if no_prox: + torch._foreach_mul_(params, 1 - lr * weight_decay) + torch._foreach_addcdiv_(params, exp_avgs, denom, value=-step_size) + torch._foreach_addcdiv_(params, exp_avg_diffs, denom, value=-step_size_diff) + else: + torch._foreach_addcdiv_(params, exp_avgs, denom, value=-step_size) + torch._foreach_addcdiv_(params, exp_avg_diffs, denom, value=-step_size_diff) + torch._foreach_div_(params, 1 + lr * weight_decay) + + torch._foreach_zero_(neg_pre_grads) + torch._foreach_add_(neg_pre_grads, grads, alpha=-1.0) diff --git a/pytorch-image-models/timm/optim/lamb.py b/pytorch-image-models/timm/optim/lamb.py new file mode 100644 index 0000000000000000000000000000000000000000..ee89225ec6cd92457c7d9b2198aa7b250cacde3a --- /dev/null +++ b/pytorch-image-models/timm/optim/lamb.py @@ -0,0 +1,224 @@ +""" PyTorch Lamb optimizer w/ behaviour similar to NVIDIA FusedLamb + +This optimizer code was adapted from the following (starting with latest) +* https://github.com/HabanaAI/Model-References/blob/2b435114fe8e31f159b1d3063b8280ae37af7423/PyTorch/nlp/bert/pretraining/lamb.py +* https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py +* https://github.com/cybertronai/pytorch-lamb + +Use FusedLamb if you can (GPU). The reason for including this variant of Lamb is to have a version that is +similar in behaviour to APEX FusedLamb if you aren't using NVIDIA GPUs or cannot install/use APEX. + +In addition to some cleanup, this Lamb impl has been modified to support PyTorch XLA and has been tested on TPU. + +Original copyrights for above sources are below. + +Modifications Copyright 2021 Ross Wightman +""" +# Copyright (c) 2021, Habana Labs Ltd. All rights reserved. + +# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# MIT License +# +# Copyright (c) 2019 cybertronai +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +import math +from typing import Optional, Tuple + +import torch +from torch.optim import Optimizer + +from ._types import ParamsT + + +class Lamb(Optimizer): + """Implements a pure pytorch variant of FuseLAMB (NvLamb variant) optimizer from apex.optimizers.FusedLAMB + reference: https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py + + LAMB was proposed in: + - Large Batch Optimization for Deep Learning - Training BERT in 76 minutes: https://arxiv.org/abs/1904.00962 + - On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ + + Args: + params: Iterable of parameters to optimize or dicts defining parameter groups. + lr: Learning rate + betas: Coefficients used for computing running averages of gradient and its norm. + eps: Term added to the denominator to improve numerical stability. + weight_decay: Weight decay + grad_averaging: Whether apply (1-beta2) to grad when calculating running averages of gradient. + max_grad_norm: Value used to clip global grad norm. + trust_clip: Enable LAMBC trust ratio clipping. + always_adapt: Apply adaptive learning rate to 0.0 weight decay parameter. + caution: Apply caution. + """ + + def __init__( + self, + params: ParamsT, + lr: float = 1e-3, + bias_correction: bool = True, + betas: Tuple[float, float] = (0.9, 0.999), + eps: float = 1e-6, + weight_decay: float = 0.01, + grad_averaging: bool = True, + max_grad_norm: Optional[float] = 1.0, + trust_clip: bool = False, + always_adapt: bool = False, + caution: bool = False, + ): + defaults = dict( + lr=lr, + bias_correction=bias_correction, + betas=betas, + eps=eps, + weight_decay=weight_decay, + grad_averaging=grad_averaging, + max_grad_norm=max_grad_norm, + trust_clip=trust_clip, + always_adapt=always_adapt, + caution=caution, + ) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault('caution', False) + + def _get_clip_grad_norm(self): + max_grad_norm = self.defaults['max_grad_norm'] + if max_grad_norm is None: + return None + + norms = [] + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instead.') + norms.append(torch.linalg.vector_norm(grad)) + global_norm = torch.linalg.vector_norm(torch.stack(norms)) + clip_global_norm = (global_norm / max_grad_norm).clamp_(min=1.0) + return clip_global_norm + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + clip_grad_norm = self._get_clip_grad_norm() # None if disabled + + for group in self.param_groups: + bias_correction = 1 if group['bias_correction'] else 0 + beta1, beta2 = group['betas'] + grad_averaging = 1 if group['grad_averaging'] else 0 + beta3 = 1 - beta1 if grad_averaging else 1.0 + + # assume same step across group now to simplify things + # per parameter step can be easily support by making it tensor, or pass list into kernel + if 'step' in group: + group['step'] += 1 + else: + group['step'] = 1 + + if bias_correction: + bias_correction1 = 1 - beta1 ** group['step'] + bias_correction2 = 1 - beta2 ** group['step'] + else: + bias_correction1, bias_correction2 = 1.0, 1.0 + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + + if clip_grad_norm is not None: + grad.div_(clip_grad_norm) + + state = self.state[p] + + # State initialization + if len(state) == 0: + # Exponential moving average of gradient valuesa + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=beta3) # m_t + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) # v_t + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + update = (exp_avg / bias_correction1).div_(denom) + + if group['caution']: + # Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085 + mask = (update * grad > 0).to(grad.dtype) + mask.div_(mask.mean().clamp_(min=1e-3)) + update.mul_(mask) + + weight_decay = group['weight_decay'] + if weight_decay != 0: + update.add_(p, alpha=weight_decay) + + if weight_decay != 0 or group['always_adapt']: + # Layer-wise LR adaptation. By default, skip adaptation on parameters that are + # excluded from weight decay, unless always_adapt == True, then always enabled. + w_norm = p.norm(2.0) + g_norm = update.norm(2.0) + trust_ratio = w_norm / g_norm + # FIXME nested where required since logical and/or not working in PT XLA + # Set the ratio to 1.0 (no change) if either weight norm or grad norm is zero + trust_ratio = torch.where( + w_norm > 0, + torch.where(g_norm > 0, trust_ratio, 1.0), + 1.0, + ) + if group['trust_clip']: + # LAMBC trust clipping, upper bound fixed at one + trust_ratio = torch.clamp(trust_ratio, max=1.0) + update.mul_(trust_ratio) + + p.add_(update, alpha=-group['lr']) + + return loss diff --git a/pytorch-image-models/timm/optim/nadam.py b/pytorch-image-models/timm/optim/nadam.py new file mode 100644 index 0000000000000000000000000000000000000000..46f6150bcb0cbe04b8eabc524283f4e9ce60a3a0 --- /dev/null +++ b/pytorch-image-models/timm/optim/nadam.py @@ -0,0 +1,106 @@ +import math + +import torch +from torch.optim.optimizer import Optimizer + + +class NAdamLegacy(Optimizer): + """Implements Nadam algorithm (a variant of Adam based on Nesterov momentum). + + NOTE: This impl has been deprecated in favour of torch.optim.NAdam and remains as a reference + + It has been proposed in `Incorporating Nesterov Momentum into Adam`__. 
+ + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 2e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + schedule_decay (float, optional): momentum schedule decay (default: 4e-3) + + __ http://cs229.stanford.edu/proj2015/054_report.pdf + __ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf + + Originally taken from: https://github.com/pytorch/pytorch/pull/1408 + NOTE: Has potential issues but does work well on some problems. + """ + + def __init__( + self, + params, + lr=2e-3, + betas=(0.9, 0.999), + eps=1e-8, + weight_decay=0, + schedule_decay=4e-3, + ): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + defaults = dict( + lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + schedule_decay=schedule_decay, + ) + super(NAdamLegacy, self).__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + state['m_schedule'] = 1. + state['exp_avg'] = torch.zeros_like(p) + state['exp_avg_sq'] = torch.zeros_like(p) + + # Warming momentum schedule + m_schedule = state['m_schedule'] + schedule_decay = group['schedule_decay'] + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + eps = group['eps'] + state['step'] += 1 + t = state['step'] + bias_correction2 = 1 - beta2 ** t + + if group['weight_decay'] != 0: + grad = grad.add(p, alpha=group['weight_decay']) + + momentum_cache_t = beta1 * (1. - 0.5 * (0.96 ** (t * schedule_decay))) + momentum_cache_t_1 = beta1 * (1. - 0.5 * (0.96 ** ((t + 1) * schedule_decay))) + m_schedule_new = m_schedule * momentum_cache_t + m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1 + state['m_schedule'] = m_schedule_new + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=1. - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1. - beta2) + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps) + p.addcdiv_(grad, denom, value=-group['lr'] * (1. - momentum_cache_t) / (1. - m_schedule_new)) + p.addcdiv_(exp_avg, denom, value=-group['lr'] * momentum_cache_t_1 / (1. 
- m_schedule_next)) + + return loss diff --git a/pytorch-image-models/timm/optim/rmsprop_tf.py b/pytorch-image-models/timm/optim/rmsprop_tf.py new file mode 100644 index 0000000000000000000000000000000000000000..07b0279c857e2697d5f4c445dd51ff0e6e7fce29 --- /dev/null +++ b/pytorch-image-models/timm/optim/rmsprop_tf.py @@ -0,0 +1,169 @@ +""" RMSProp modified to behave like Tensorflow impl + +Originally cut & paste from PyTorch RMSProp +https://github.com/pytorch/pytorch/blob/063946d2b3f3f1e953a2a3b54e0b34f1393de295/torch/optim/rmsprop.py +Licensed under BSD-Clause 3 (ish), https://github.com/pytorch/pytorch/blob/master/LICENSE + +Modifications Copyright 2021 Ross Wightman +""" + +import torch +from torch.optim import Optimizer + +from ._types import ParamsT + + +class RMSpropTF(Optimizer): + """Implements RMSprop algorithm (TensorFlow style epsilon) + + NOTE: This is a direct cut-and-paste of PyTorch RMSprop with eps applied before sqrt + and a few other modifications to closer match Tensorflow for matching hyper-params. + + Noteworthy changes include: + 1. Epsilon applied inside square-root + 2. square_avg initialized to ones + 3. LR scaling of update accumulated in momentum buffer + + Proposed by G. Hinton in his + `course `_. + + The centered version first appears in `Generating Sequences + With Recurrent Neural Networks `_. + + Args: + params: iterable of parameters to optimize or dicts defining parameter groups + lr: learning rate + momentum: momentum factor + alpha: smoothing (decay) constant + eps: term added to the denominator to improve numerical stability + centered: if ``True``, compute the centered RMSProp, the gradient is normalized by an estimation of its variance + weight_decay: weight decay (L2 penalty) (default: 0) + decoupled_decay: decoupled weight decay as per https://arxiv.org/abs/1711.05101 + lr_in_momentum: learning rate scaling is included in the momentum buffer update as per defaults in Tensorflow + caution: apply caution + """ + + def __init__( + self, + params: ParamsT, + lr: float = 1e-2, + alpha: float = 0.9, + eps: float = 1e-10, + weight_decay: float = 0, + momentum: float = 0., + centered: bool = False, + decoupled_decay: bool = False, + lr_in_momentum: bool = True, + caution: bool = False, + ): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= momentum: + raise ValueError("Invalid momentum value: {}".format(momentum)) + if not 0.0 <= weight_decay: + raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) + if not 0.0 <= alpha: + raise ValueError("Invalid alpha value: {}".format(alpha)) + + defaults = dict( + lr=lr, + momentum=momentum, + alpha=alpha, + eps=eps, + centered=centered, + weight_decay=weight_decay, + decoupled_decay=decoupled_decay, + lr_in_momentum=lr_in_momentum, + caution=caution, + ) + super(RMSpropTF, self).__init__(params, defaults) + + def __setstate__(self, state): + super(RMSpropTF, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('momentum', 0) + group.setdefault('centered', False) + group.setdefault('caution', False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError('RMSprop does not support sparse gradients') + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + state['square_avg'] = torch.ones_like(p) # PyTorch inits to zero + if group['momentum'] > 0: + state['momentum_buffer'] = torch.zeros_like(p) + if group['centered']: + state['grad_avg'] = torch.zeros_like(p) + + square_avg = state['square_avg'] + one_minus_alpha = 1. - group['alpha'] + + state['step'] += 1 + + if group['weight_decay'] != 0: + if group['decoupled_decay']: + p.mul_(1. - group['lr'] * group['weight_decay']) + else: + grad = grad.add(p, alpha=group['weight_decay']) + + # Tensorflow order of ops for updating squared avg + square_avg.add_(grad.pow(2) - square_avg, alpha=one_minus_alpha) + # square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha) # PyTorch original + + if group['centered']: + grad_avg = state['grad_avg'] + grad_avg.add_(grad - grad_avg, alpha=one_minus_alpha) + avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).add(group['eps']).sqrt_() # eps in sqrt + # grad_avg.mul_(alpha).add_(grad, alpha=1 - alpha) # PyTorch original + else: + avg = square_avg.add(group['eps']).sqrt_() # eps moved in sqrt + + if group['momentum'] > 0: + buf = state['momentum_buffer'] + buf.mul_(group['momentum']) + + def _apply_caution(_m, _g): + # Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085 + mask = (_m * _g > 0).to(_g.dtype) + mask.div_(mask.mean().clamp_(min=1e-3)) + return _m * mask + + if group['lr_in_momentum']: + # Tensorflow accumulates the LR scaling in the momentum buffer + buf.addcdiv_(grad, avg, value=group['lr']) + if group['caution']: + buf = _apply_caution(buf, grad) + p.add_(-buf) + else: + # PyTorch scales the param update by LR + buf.addcdiv_(grad, avg) + if group['caution']: + buf = _apply_caution(buf, grad) + p.add_(buf, alpha=-group['lr']) + else: + p.addcdiv_(grad, avg, value=-group['lr']) + + return loss diff --git a/pytorch-image-models/timm/scheduler/__init__.py b/pytorch-image-models/timm/scheduler/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9f7191bb0f1c921a5e214b1414cd07269297db95 --- /dev/null +++ b/pytorch-image-models/timm/scheduler/__init__.py @@ -0,0 +1,8 @@ +from .cosine_lr import CosineLRScheduler +from .multistep_lr import MultiStepLRScheduler +from .plateau_lr import PlateauLRScheduler +from .poly_lr import PolyLRScheduler +from .step_lr import StepLRScheduler +from .tanh_lr import TanhLRScheduler + +from .scheduler_factory import create_scheduler, create_scheduler_v2, scheduler_kwargs diff --git a/pytorch-image-models/timm/scheduler/__pycache__/__init__.cpython-39.pyc b/pytorch-image-models/timm/scheduler/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03949a621b1b6da8a467f9e52f0c2ba5f3de8da3 Binary files /dev/null and b/pytorch-image-models/timm/scheduler/__pycache__/__init__.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/scheduler/__pycache__/cosine_lr.cpython-39.pyc b/pytorch-image-models/timm/scheduler/__pycache__/cosine_lr.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2fccf8840fd3092013612274c83ea4865fc6d55b Binary files /dev/null and 
b/pytorch-image-models/timm/scheduler/__pycache__/cosine_lr.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/scheduler/__pycache__/plateau_lr.cpython-39.pyc b/pytorch-image-models/timm/scheduler/__pycache__/plateau_lr.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..608ef6d96363e2e0daf8c92116ffe4922b7b3fee Binary files /dev/null and b/pytorch-image-models/timm/scheduler/__pycache__/plateau_lr.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/scheduler/__pycache__/poly_lr.cpython-39.pyc b/pytorch-image-models/timm/scheduler/__pycache__/poly_lr.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51961e28573e62c68f467b196e36f4bb5b358ea5 Binary files /dev/null and b/pytorch-image-models/timm/scheduler/__pycache__/poly_lr.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/scheduler/__pycache__/scheduler.cpython-39.pyc b/pytorch-image-models/timm/scheduler/__pycache__/scheduler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2291d3834540d74a245ce5ece54087240486065 Binary files /dev/null and b/pytorch-image-models/timm/scheduler/__pycache__/scheduler.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/scheduler/__pycache__/scheduler_factory.cpython-39.pyc b/pytorch-image-models/timm/scheduler/__pycache__/scheduler_factory.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5fcbe0afc54de6e97aba1413f6b34fdcf8033b50 Binary files /dev/null and b/pytorch-image-models/timm/scheduler/__pycache__/scheduler_factory.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/scheduler/__pycache__/step_lr.cpython-39.pyc b/pytorch-image-models/timm/scheduler/__pycache__/step_lr.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87c5cd0abab4b0fc3a29577beedfea9d44d75b0a Binary files /dev/null and b/pytorch-image-models/timm/scheduler/__pycache__/step_lr.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/scheduler/__pycache__/tanh_lr.cpython-39.pyc b/pytorch-image-models/timm/scheduler/__pycache__/tanh_lr.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57cf65ddc4edd4ef91272e8974be03cdd0f73fb0 Binary files /dev/null and b/pytorch-image-models/timm/scheduler/__pycache__/tanh_lr.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/scheduler/cosine_lr.py b/pytorch-image-models/timm/scheduler/cosine_lr.py new file mode 100644 index 0000000000000000000000000000000000000000..00dd9357d974f0781478459879cbbfe2efbeb425 --- /dev/null +++ b/pytorch-image-models/timm/scheduler/cosine_lr.py @@ -0,0 +1,117 @@ +""" Cosine Scheduler + +Cosine LR schedule with warmup, cycle/restarts, noise, k-decay. + +Hacked together by / Copyright 2021 Ross Wightman +""" +import logging +import math +import numpy as np +import torch +from typing import List + +from .scheduler import Scheduler + + +_logger = logging.getLogger(__name__) + + +class CosineLRScheduler(Scheduler): + """ + Cosine decay with restarts. + This is described in the paper https://arxiv.org/abs/1608.03983. 
+ + Inspiration from + https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py + + k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909 + """ + + def __init__( + self, + optimizer: torch.optim.Optimizer, + t_initial: int, + lr_min: float = 0., + cycle_mul: float = 1., + cycle_decay: float = 1., + cycle_limit: int = 1, + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=False, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + k_decay=1.0, + initialize=True, + ) -> None: + super().__init__( + optimizer, + param_group_field="lr", + t_in_epochs=t_in_epochs, + noise_range_t=noise_range_t, + noise_pct=noise_pct, + noise_std=noise_std, + noise_seed=noise_seed, + initialize=initialize, + ) + + assert t_initial > 0 + assert lr_min >= 0 + if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1: + _logger.warning( + "Cosine annealing scheduler will have no effect on the learning " + "rate since t_initial = t_mul = eta_mul = 1.") + self.t_initial = t_initial + self.lr_min = lr_min + self.cycle_mul = cycle_mul + self.cycle_decay = cycle_decay + self.cycle_limit = cycle_limit + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.warmup_prefix = warmup_prefix + self.k_decay = k_decay + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t: int) -> List[float]: + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + if self.warmup_prefix: + t = t - self.warmup_t + + if self.cycle_mul != 1: + i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) + t_i = self.cycle_mul ** i * self.t_initial + t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial + else: + i = t // self.t_initial + t_i = self.t_initial + t_curr = t - (self.t_initial * i) + + gamma = self.cycle_decay ** i + lr_max_values = [v * gamma for v in self.base_values] + k = self.k_decay + + if i < self.cycle_limit: + lrs = [ + self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 + math.cos(math.pi * t_curr ** k / t_i ** k)) + for lr_max in lr_max_values + ] + else: + lrs = [self.lr_min for _ in self.base_values] + + return lrs + + def get_cycle_length(self, cycles=0): + cycles = max(1, cycles or self.cycle_limit) + if self.cycle_mul == 1.0: + t = self.t_initial * cycles + else: + t = int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul))) + return t + self.warmup_t if self.warmup_prefix else t \ No newline at end of file diff --git a/pytorch-image-models/timm/scheduler/multistep_lr.py b/pytorch-image-models/timm/scheduler/multistep_lr.py new file mode 100644 index 0000000000000000000000000000000000000000..e5db556d430e13c3536e43afadc32a5cfc3140f4 --- /dev/null +++ b/pytorch-image-models/timm/scheduler/multistep_lr.py @@ -0,0 +1,63 @@ +""" MultiStep LR Scheduler + +Basic multi step LR schedule with warmup, noise. 
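A short usage sketch for the CosineLRScheduler added above, imported via the timm.scheduler package exported later in this diff; epoch counts and LR values are placeholders.

import torch
from timm.scheduler import CosineLRScheduler

optimizer = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=0.1)
scheduler = CosineLRScheduler(
    optimizer,
    t_initial=100,        # length of the first cosine cycle, in epochs
    lr_min=1e-5,
    warmup_t=5,
    warmup_lr_init=1e-6,
    cycle_limit=1,
)

for epoch in range(100):
    # ... train one epoch ...
    scheduler.step(epoch + 1)   # called at epoch end with the next epoch index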
+""" +import torch +import bisect +from timm.scheduler.scheduler import Scheduler +from typing import List + +class MultiStepLRScheduler(Scheduler): + """ + """ + + def __init__( + self, + optimizer: torch.optim.Optimizer, + decay_t: List[int], + decay_rate: float = 1., + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=True, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + initialize=True, + ) -> None: + super().__init__( + optimizer, + param_group_field="lr", + t_in_epochs=t_in_epochs, + noise_range_t=noise_range_t, + noise_pct=noise_pct, + noise_std=noise_std, + noise_seed=noise_seed, + initialize=initialize, + ) + + self.decay_t = decay_t + self.decay_rate = decay_rate + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.warmup_prefix = warmup_prefix + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def get_curr_decay_steps(self, t): + # find where in the array t goes, + # assumes self.decay_t is sorted + return bisect.bisect_right(self.decay_t, t + 1) + + def _get_lr(self, t: int) -> List[float]: + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + if self.warmup_prefix: + t = t - self.warmup_t + lrs = [v * (self.decay_rate ** self.get_curr_decay_steps(t)) for v in self.base_values] + return lrs diff --git a/pytorch-image-models/timm/scheduler/plateau_lr.py b/pytorch-image-models/timm/scheduler/plateau_lr.py new file mode 100644 index 0000000000000000000000000000000000000000..e868bd5e58afcc0dbf9c9ce1e89a866a125ead57 --- /dev/null +++ b/pytorch-image-models/timm/scheduler/plateau_lr.py @@ -0,0 +1,111 @@ +""" Plateau Scheduler + +Adapts PyTorch plateau scheduler and allows application of noise, warmup. 
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +from typing import List + +from .scheduler import Scheduler + + +class PlateauLRScheduler(Scheduler): + """Decay the LR by a factor every time the validation loss plateaus.""" + + def __init__( + self, + optimizer, + decay_rate=0.1, + patience_t=10, + verbose=True, + threshold=1e-4, + cooldown_t=0, + warmup_t=0, + warmup_lr_init=0, + lr_min=0, + mode='max', + noise_range_t=None, + noise_type='normal', + noise_pct=0.67, + noise_std=1.0, + noise_seed=None, + initialize=True, + ): + super().__init__( + optimizer, + 'lr', + noise_range_t=noise_range_t, + noise_type=noise_type, + noise_pct=noise_pct, + noise_std=noise_std, + noise_seed=noise_seed, + initialize=initialize, + ) + + self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( + self.optimizer, + patience=patience_t, + factor=decay_rate, + verbose=verbose, + threshold=threshold, + cooldown=cooldown_t, + mode=mode, + min_lr=lr_min + ) + + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + self.restore_lr = None + + def state_dict(self): + return { + 'best': self.lr_scheduler.best, + 'last_epoch': self.lr_scheduler.last_epoch, + } + + def load_state_dict(self, state_dict): + self.lr_scheduler.best = state_dict['best'] + if 'last_epoch' in state_dict: + self.lr_scheduler.last_epoch = state_dict['last_epoch'] + + # override the base class step fn completely + def step(self, epoch, metric=None): + if epoch <= self.warmup_t: + lrs = [self.warmup_lr_init + epoch * s for s in self.warmup_steps] + super().update_groups(lrs) + else: + if self.restore_lr is not None: + # restore actual LR from before our last noise perturbation before stepping base + for i, param_group in enumerate(self.optimizer.param_groups): + param_group['lr'] = self.restore_lr[i] + self.restore_lr = None + + self.lr_scheduler.step(metric, epoch) # step the base scheduler + + if self._is_apply_noise(epoch): + self._apply_noise(epoch) + + def step_update(self, num_updates: int, metric: float = None): + return None + + def _apply_noise(self, epoch): + noise = self._calculate_noise(epoch) + + # apply the noise on top of previous LR, cache the old value so we can restore for normal + # stepping of base scheduler + restore_lr = [] + for i, param_group in enumerate(self.optimizer.param_groups): + old_lr = float(param_group['lr']) + restore_lr.append(old_lr) + new_lr = old_lr + old_lr * noise + param_group['lr'] = new_lr + self.restore_lr = restore_lr + + def _get_lr(self, t: int) -> List[float]: + assert False, 'should not be called as step is overridden' diff --git a/pytorch-image-models/timm/scheduler/poly_lr.py b/pytorch-image-models/timm/scheduler/poly_lr.py new file mode 100644 index 0000000000000000000000000000000000000000..f7971302ed4ef5fb9a15d5bde5269bd636a6fc1d --- /dev/null +++ b/pytorch-image-models/timm/scheduler/poly_lr.py @@ -0,0 +1,113 @@ +""" Polynomial Scheduler + +Polynomial LR schedule with warmup, noise. 
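A usage sketch for the PlateauLRScheduler added above; unlike the other schedulers it needs the monitored metric passed to step() each epoch (the metric below is a placeholder).

import torch
from timm.scheduler import PlateauLRScheduler

optimizer = torch.optim.AdamW([torch.zeros(1, requires_grad=True)], lr=1e-3)
scheduler = PlateauLRScheduler(
    optimizer,
    decay_rate=0.5,
    patience_t=5,
    mode='min',          # 'min' when tracking a loss, 'max' for accuracy-style metrics
    warmup_t=3,
    warmup_lr_init=1e-5,
)

for epoch in range(1, 51):
    val_loss = 1.0 / epoch                  # placeholder validation metric
    scheduler.step(epoch, metric=val_loss)  # steps the wrapped ReduceLROnPlateau after warmup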
+ +Hacked together by / Copyright 2021 Ross Wightman +""" +import math +import logging +from typing import List + +import torch + +from .scheduler import Scheduler + + +_logger = logging.getLogger(__name__) + + +class PolyLRScheduler(Scheduler): + """ Polynomial LR Scheduler w/ warmup, noise, and k-decay + + k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909 + """ + + def __init__( + self, + optimizer: torch.optim.Optimizer, + t_initial: int, + power: float = 0.5, + lr_min: float = 0., + cycle_mul: float = 1., + cycle_decay: float = 1., + cycle_limit: int = 1, + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=False, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + k_decay=1.0, + initialize=True, + ) -> None: + super().__init__( + optimizer, + param_group_field="lr", + t_in_epochs=t_in_epochs, + noise_range_t=noise_range_t, + noise_pct=noise_pct, + noise_std=noise_std, + noise_seed=noise_seed, + initialize=initialize + ) + + assert t_initial > 0 + assert lr_min >= 0 + if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1: + _logger.warning("Cosine annealing scheduler will have no effect on the learning " + "rate since t_initial = t_mul = eta_mul = 1.") + self.t_initial = t_initial + self.power = power + self.lr_min = lr_min + self.cycle_mul = cycle_mul + self.cycle_decay = cycle_decay + self.cycle_limit = cycle_limit + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.warmup_prefix = warmup_prefix + self.k_decay = k_decay + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t: int) -> List[float]: + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + if self.warmup_prefix: + t = t - self.warmup_t + + if self.cycle_mul != 1: + i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) + t_i = self.cycle_mul ** i * self.t_initial + t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial + else: + i = t // self.t_initial + t_i = self.t_initial + t_curr = t - (self.t_initial * i) + + gamma = self.cycle_decay ** i + lr_max_values = [v * gamma for v in self.base_values] + k = self.k_decay + + if i < self.cycle_limit: + lrs = [ + self.lr_min + (lr_max - self.lr_min) * (1 - t_curr ** k / t_i ** k) ** self.power + for lr_max in lr_max_values + ] + else: + lrs = [self.lr_min for _ in self.base_values] + + return lrs + + def get_cycle_length(self, cycles=0): + cycles = max(1, cycles or self.cycle_limit) + if self.cycle_mul == 1.0: + t = self.t_initial * cycles + else: + t = int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul))) + return t + self.warmup_t if self.warmup_prefix else t diff --git a/pytorch-image-models/timm/scheduler/scheduler.py b/pytorch-image-models/timm/scheduler/scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..583357f7c522a0ce5091e435ac04b8e468751342 --- /dev/null +++ b/pytorch-image-models/timm/scheduler/scheduler.py @@ -0,0 +1,127 @@ +import abc +from abc import ABC +from typing import Any, Dict, List, Optional + +import torch + + +class Scheduler(ABC): + """ Parameter Scheduler Base Class + A scheduler base class that can be used to schedule any optimizer parameter groups. 
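A usage sketch for the PolyLRScheduler added above; with k_decay=1 the post-warmup LR follows lr_min + (lr_max - lr_min) * (1 - t / t_initial) ** power. Values are placeholders.

import torch
from timm.scheduler import PolyLRScheduler

optimizer = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=0.1)
scheduler = PolyLRScheduler(
    optimizer,
    t_initial=100,
    power=2.0,           # illustrative; the default is 0.5
    lr_min=1e-5,
    warmup_t=5,
    warmup_lr_init=1e-6,
)
for epoch in range(100):
    scheduler.step(epoch + 1)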
+ + Unlike the builtin PyTorch schedulers, this is intended to be consistently called + * At the END of each epoch, before incrementing the epoch count, to calculate next epoch's value + * At the END of each optimizer update, after incrementing the update count, to calculate next update's value + + The schedulers built on this should try to remain as stateless as possible (for simplicity). + + This family of schedulers is attempting to avoid the confusion of the meaning of 'last_epoch' + and -1 values for special behaviour. All epoch and update counts must be tracked in the training + code and explicitly passed in to the schedulers on the corresponding step or step_update call. + + Based on ideas from: + * https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler + * https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers + """ + + def __init__( + self, + optimizer: torch.optim.Optimizer, + param_group_field: str, + t_in_epochs: bool = True, + noise_range_t=None, + noise_type='normal', + noise_pct=0.67, + noise_std=1.0, + noise_seed=None, + initialize: bool = True, + ) -> None: + self.optimizer = optimizer + self.param_group_field = param_group_field + self._initial_param_group_field = f"initial_{param_group_field}" + if initialize: + for i, group in enumerate(self.optimizer.param_groups): + if param_group_field not in group: + raise KeyError(f"{param_group_field} missing from param_groups[{i}]") + group.setdefault(self._initial_param_group_field, group[param_group_field]) + else: + for i, group in enumerate(self.optimizer.param_groups): + if self._initial_param_group_field not in group: + raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]") + self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups] + self.metric = None # any point to having this for all? 
+ self.t_in_epochs = t_in_epochs + self.noise_range_t = noise_range_t + self.noise_pct = noise_pct + self.noise_type = noise_type + self.noise_std = noise_std + self.noise_seed = noise_seed if noise_seed is not None else 42 + self.update_groups(self.base_values) + + def state_dict(self) -> Dict[str, Any]: + return {key: value for key, value in self.__dict__.items() if key != 'optimizer'} + + def load_state_dict(self, state_dict: Dict[str, Any]) -> None: + self.__dict__.update(state_dict) + + @abc.abstractmethod + def _get_lr(self, t: int) -> List[float]: + pass + + def _get_values(self, t: int, on_epoch: bool = True) -> Optional[List[float]]: + proceed = (on_epoch and self.t_in_epochs) or (not on_epoch and not self.t_in_epochs) + if not proceed: + return None + return self._get_lr(t) + + def step(self, epoch: int, metric: float = None) -> None: + self.metric = metric + values = self._get_values(epoch, on_epoch=True) + if values is not None: + values = self._add_noise(values, epoch) + self.update_groups(values) + + def step_update(self, num_updates: int, metric: float = None): + self.metric = metric + values = self._get_values(num_updates, on_epoch=False) + if values is not None: + values = self._add_noise(values, num_updates) + self.update_groups(values) + + def update_groups(self, values): + if not isinstance(values, (list, tuple)): + values = [values] * len(self.optimizer.param_groups) + for param_group, value in zip(self.optimizer.param_groups, values): + if 'lr_scale' in param_group: + param_group[self.param_group_field] = value * param_group['lr_scale'] + else: + param_group[self.param_group_field] = value + + def _add_noise(self, lrs, t): + if self._is_apply_noise(t): + noise = self._calculate_noise(t) + lrs = [v + v * noise for v in lrs] + return lrs + + def _is_apply_noise(self, t) -> bool: + """Return True if scheduler in noise range.""" + apply_noise = False + if self.noise_range_t is not None: + if isinstance(self.noise_range_t, (list, tuple)): + apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1] + else: + apply_noise = t >= self.noise_range_t + return apply_noise + + def _calculate_noise(self, t) -> float: + g = torch.Generator() + g.manual_seed(self.noise_seed + t) + if self.noise_type == 'normal': + while True: + # resample if noise out of percent limit, brute force but shouldn't spin much + noise = torch.randn(1, generator=g).item() + if abs(noise) < self.noise_pct: + return noise + else: + noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct + return noise diff --git a/pytorch-image-models/timm/scheduler/scheduler_factory.py b/pytorch-image-models/timm/scheduler/scheduler_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..08c5e180c6447c4728384ce6eae32a5b9db4a954 --- /dev/null +++ b/pytorch-image-models/timm/scheduler/scheduler_factory.py @@ -0,0 +1,210 @@ +""" Scheduler Factory +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import List, Optional, Union + +from torch.optim import Optimizer + +from .cosine_lr import CosineLRScheduler +from .multistep_lr import MultiStepLRScheduler +from .plateau_lr import PlateauLRScheduler +from .poly_lr import PolyLRScheduler +from .step_lr import StepLRScheduler +from .tanh_lr import TanhLRScheduler + + +def scheduler_kwargs(cfg, decreasing_metric: Optional[bool] = None): + """ cfg/argparse to kwargs helper + Convert scheduler args in argparse args or cfg (.dot) like object to keyword args. 
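The base Scheduler above only requires subclasses to implement _get_lr(t); a toy linear-decay subclass (illustrative, not part of the diff) shows the contract.

from typing import List

import torch

from timm.scheduler.scheduler import Scheduler


class LinearLRScheduler(Scheduler):
    """Toy scheduler: linear decay of lr to zero over t_initial epochs."""

    def __init__(self, optimizer, t_initial: int, t_in_epochs: bool = True):
        super().__init__(optimizer, param_group_field="lr", t_in_epochs=t_in_epochs)
        self.t_initial = t_initial

    def _get_lr(self, t: int) -> List[float]:
        frac = max(0., 1. - t / self.t_initial)
        return [v * frac for v in self.base_values]


optimizer = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=0.1)
scheduler = LinearLRScheduler(optimizer, t_initial=10)
scheduler.step(epoch=5)   # sets lr to 0.05 in every param group (noise is disabled by default)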
+ """ + eval_metric = getattr(cfg, 'eval_metric', 'top1') + if decreasing_metric is not None: + plateau_mode = 'min' if decreasing_metric else 'max' + else: + plateau_mode = 'min' if 'loss' in eval_metric else 'max' + kwargs = dict( + sched=cfg.sched, + num_epochs=getattr(cfg, 'epochs', 100), + decay_epochs=getattr(cfg, 'decay_epochs', 30), + decay_milestones=getattr(cfg, 'decay_milestones', [30, 60]), + warmup_epochs=getattr(cfg, 'warmup_epochs', 5), + cooldown_epochs=getattr(cfg, 'cooldown_epochs', 0), + patience_epochs=getattr(cfg, 'patience_epochs', 10), + decay_rate=getattr(cfg, 'decay_rate', 0.1), + min_lr=getattr(cfg, 'min_lr', 0.), + warmup_lr=getattr(cfg, 'warmup_lr', 1e-5), + warmup_prefix=getattr(cfg, 'warmup_prefix', False), + noise=getattr(cfg, 'lr_noise', None), + noise_pct=getattr(cfg, 'lr_noise_pct', 0.67), + noise_std=getattr(cfg, 'lr_noise_std', 1.), + noise_seed=getattr(cfg, 'seed', 42), + cycle_mul=getattr(cfg, 'lr_cycle_mul', 1.), + cycle_decay=getattr(cfg, 'lr_cycle_decay', 0.1), + cycle_limit=getattr(cfg, 'lr_cycle_limit', 1), + k_decay=getattr(cfg, 'lr_k_decay', 1.0), + plateau_mode=plateau_mode, + step_on_epochs=not getattr(cfg, 'sched_on_updates', False), + ) + return kwargs + + +def create_scheduler( + args, + optimizer: Optimizer, + updates_per_epoch: int = 0, +): + return create_scheduler_v2( + optimizer=optimizer, + **scheduler_kwargs(args), + updates_per_epoch=updates_per_epoch, + ) + + +def create_scheduler_v2( + optimizer: Optimizer, + sched: str = 'cosine', + num_epochs: int = 300, + decay_epochs: int = 90, + decay_milestones: List[int] = (90, 180, 270), + cooldown_epochs: int = 0, + patience_epochs: int = 10, + decay_rate: float = 0.1, + min_lr: float = 0, + warmup_lr: float = 1e-5, + warmup_epochs: int = 0, + warmup_prefix: bool = False, + noise: Union[float, List[float]] = None, + noise_pct: float = 0.67, + noise_std: float = 1., + noise_seed: int = 42, + cycle_mul: float = 1., + cycle_decay: float = 0.1, + cycle_limit: int = 1, + k_decay: float = 1.0, + plateau_mode: str = 'max', + step_on_epochs: bool = True, + updates_per_epoch: int = 0, +): + t_initial = num_epochs + warmup_t = warmup_epochs + decay_t = decay_epochs + cooldown_t = cooldown_epochs + + if not step_on_epochs: + assert updates_per_epoch > 0, 'updates_per_epoch must be set to number of dataloader batches' + t_initial = t_initial * updates_per_epoch + warmup_t = warmup_t * updates_per_epoch + decay_t = decay_t * updates_per_epoch + decay_milestones = [d * updates_per_epoch for d in decay_milestones] + cooldown_t = cooldown_t * updates_per_epoch + + # warmup args + warmup_args = dict( + warmup_lr_init=warmup_lr, + warmup_t=warmup_t, + warmup_prefix=warmup_prefix, + ) + + # setup noise args for supporting schedulers + if noise is not None: + if isinstance(noise, (list, tuple)): + noise_range = [n * t_initial for n in noise] + if len(noise_range) == 1: + noise_range = noise_range[0] + else: + noise_range = noise * t_initial + else: + noise_range = None + noise_args = dict( + noise_range_t=noise_range, + noise_pct=noise_pct, + noise_std=noise_std, + noise_seed=noise_seed, + ) + + # setup cycle args for supporting schedulers + cycle_args = dict( + cycle_mul=cycle_mul, + cycle_decay=cycle_decay, + cycle_limit=cycle_limit, + ) + + lr_scheduler = None + if sched == 'cosine': + lr_scheduler = CosineLRScheduler( + optimizer, + t_initial=t_initial, + lr_min=min_lr, + t_in_epochs=step_on_epochs, + **cycle_args, + **warmup_args, + **noise_args, + k_decay=k_decay, + ) + elif sched == 'tanh': + 
lr_scheduler = TanhLRScheduler( + optimizer, + t_initial=t_initial, + lr_min=min_lr, + t_in_epochs=step_on_epochs, + **cycle_args, + **warmup_args, + **noise_args, + ) + elif sched == 'step': + lr_scheduler = StepLRScheduler( + optimizer, + decay_t=decay_t, + decay_rate=decay_rate, + t_in_epochs=step_on_epochs, + **warmup_args, + **noise_args, + ) + elif sched == 'multistep': + lr_scheduler = MultiStepLRScheduler( + optimizer, + decay_t=decay_milestones, + decay_rate=decay_rate, + t_in_epochs=step_on_epochs, + **warmup_args, + **noise_args, + ) + elif sched == 'plateau': + assert step_on_epochs, 'Plateau LR only supports step per epoch.' + warmup_args.pop('warmup_prefix', False) + lr_scheduler = PlateauLRScheduler( + optimizer, + decay_rate=decay_rate, + patience_t=patience_epochs, + cooldown_t=0, + **warmup_args, + lr_min=min_lr, + mode=plateau_mode, + **noise_args, + ) + elif sched == 'poly': + lr_scheduler = PolyLRScheduler( + optimizer, + power=decay_rate, # overloading 'decay_rate' as polynomial power + t_initial=t_initial, + lr_min=min_lr, + t_in_epochs=step_on_epochs, + k_decay=k_decay, + **cycle_args, + **warmup_args, + **noise_args, + ) + + if hasattr(lr_scheduler, 'get_cycle_length'): + # For cycle based schedulers (cosine, tanh, poly) recalculate total epochs w/ cycles & cooldown + # NOTE: Warmup prefix added in get_cycle_lengths() if enabled + t_with_cycles_and_cooldown = lr_scheduler.get_cycle_length() + cooldown_t + if step_on_epochs: + num_epochs = t_with_cycles_and_cooldown + else: + num_epochs = t_with_cycles_and_cooldown // updates_per_epoch + else: + if warmup_prefix: + num_epochs += warmup_epochs + + return lr_scheduler, num_epochs diff --git a/pytorch-image-models/timm/scheduler/step_lr.py b/pytorch-image-models/timm/scheduler/step_lr.py new file mode 100644 index 0000000000000000000000000000000000000000..c205d437153f0960f864fddaa043f4028db3de3a --- /dev/null +++ b/pytorch-image-models/timm/scheduler/step_lr.py @@ -0,0 +1,63 @@ +""" Step Scheduler + +Basic step LR schedule with warmup, noise. 
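A usage sketch for create_scheduler_v2 above, stepping per optimizer update rather than per epoch; updates_per_epoch would normally be len(train_loader) and all values here are placeholders.

import torch
from timm.scheduler import create_scheduler_v2

optimizer = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=0.5)
updates_per_epoch = 1000   # placeholder for len(train_loader)

scheduler, num_epochs = create_scheduler_v2(
    optimizer,
    sched='cosine',
    num_epochs=100,
    warmup_epochs=5,
    min_lr=1e-5,
    cooldown_epochs=10,
    step_on_epochs=False,               # convert the schedule to per-update granularity
    updates_per_epoch=updates_per_epoch,
)

num_updates = 0
for epoch in range(num_epochs):
    for _ in range(updates_per_epoch):
        # ... forward / backward / optimizer.step() ...
        num_updates += 1
        scheduler.step_update(num_updates)
    scheduler.step(epoch + 1)           # no-op here since t_in_epochs=False, but harmless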
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +import math +import torch +from typing import List + + +from .scheduler import Scheduler + + +class StepLRScheduler(Scheduler): + """ + """ + + def __init__( + self, + optimizer: torch.optim.Optimizer, + decay_t: float, + decay_rate: float = 1., + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=True, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + initialize=True, + ) -> None: + super().__init__( + optimizer, + param_group_field="lr", + t_in_epochs=t_in_epochs, + noise_range_t=noise_range_t, + noise_pct=noise_pct, + noise_std=noise_std, + noise_seed=noise_seed, + initialize=initialize, + ) + + self.decay_t = decay_t + self.decay_rate = decay_rate + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.warmup_prefix = warmup_prefix + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t: int) -> List[float]: + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + if self.warmup_prefix: + t = t - self.warmup_t + lrs = [v * (self.decay_rate ** (t // self.decay_t)) for v in self.base_values] + return lrs diff --git a/pytorch-image-models/timm/utils/__init__.py b/pytorch-image-models/timm/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9093b75a821145b055f809e01def6b9c3afce849 --- /dev/null +++ b/pytorch-image-models/timm/utils/__init__.py @@ -0,0 +1,16 @@ +from .agc import adaptive_clip_grad +from .attention_extract import AttentionExtract +from .checkpoint_saver import CheckpointSaver +from .clip_grad import dispatch_clip_grad +from .cuda import ApexScaler, NativeScaler +from .decay_batch import decay_batch_step, check_batch_size_retry +from .distributed import distribute_bn, reduce_tensor, init_distributed_device,\ + world_info_from_env, is_distributed_env, is_primary +from .jit import set_jit_legacy, set_jit_fuser +from .log import setup_default_logging, FormatterNoInfo +from .metrics import AverageMeter, accuracy +from .misc import natural_key, add_bool_arg, ParseKwargs +from .model import unwrap_model, get_state_dict, freeze, unfreeze, reparameterize_model +from .model_ema import ModelEma, ModelEmaV2, ModelEmaV3 +from .random import random_seed +from .summary import update_summary, get_outdir diff --git a/pytorch-image-models/timm/utils/agc.py b/pytorch-image-models/timm/utils/agc.py new file mode 100644 index 0000000000000000000000000000000000000000..f51401726ff6810d97d0fa567f4e31b474325a59 --- /dev/null +++ b/pytorch-image-models/timm/utils/agc.py @@ -0,0 +1,42 @@ +""" Adaptive Gradient Clipping + +An impl of AGC, as per (https://arxiv.org/abs/2102.06171): + +@article{brock2021high, + author={Andrew Brock and Soham De and Samuel L. 
Smith and Karen Simonyan}, + title={High-Performance Large-Scale Image Recognition Without Normalization}, + journal={arXiv preprint arXiv:}, + year={2021} +} + +Code references: + * Official JAX impl (paper authors): https://github.com/deepmind/deepmind-research/tree/master/nfnets + * Phil Wang's PyTorch gist: https://gist.github.com/lucidrains/0d6560077edac419ab5d3aa29e674d5c + +Hacked together by / Copyright 2021 Ross Wightman +""" +import torch + + +def unitwise_norm(x, norm_type=2.0): + if x.ndim <= 1: + return x.norm(norm_type) + else: + # works for nn.ConvNd and nn,Linear where output dim is first in the kernel/weight tensor + # might need special cases for other weights (possibly MHA) where this may not be true + return x.norm(norm_type, dim=tuple(range(1, x.ndim)), keepdim=True) + + +def adaptive_clip_grad(parameters, clip_factor=0.01, eps=1e-3, norm_type=2.0): + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + for p in parameters: + if p.grad is None: + continue + p_data = p.detach() + g_data = p.grad.detach() + max_norm = unitwise_norm(p_data, norm_type=norm_type).clamp_(min=eps).mul_(clip_factor) + grad_norm = unitwise_norm(g_data, norm_type=norm_type) + clipped_grad = g_data * (max_norm / grad_norm.clamp(min=1e-6)) + new_grads = torch.where(grad_norm < max_norm, g_data, clipped_grad) + p.grad.detach().copy_(new_grads) diff --git a/pytorch-image-models/timm/utils/attention_extract.py b/pytorch-image-models/timm/utils/attention_extract.py new file mode 100644 index 0000000000000000000000000000000000000000..e813d42a0d3675383a9514e24d1eeeabc5924d69 --- /dev/null +++ b/pytorch-image-models/timm/utils/attention_extract.py @@ -0,0 +1,85 @@ +import fnmatch +import re +from collections import OrderedDict +from typing import Union, Optional, List + +import torch + + +class AttentionExtract(torch.nn.Module): + # defaults should cover a significant number of timm models with attention maps. + default_node_names = ['*attn.softmax'] + default_module_names = ['*attn_drop'] + + def __init__( + self, + model: Union[torch.nn.Module], + names: Optional[List[str]] = None, + mode: str = 'eval', + method: str = 'fx', + hook_type: str = 'forward', + use_regex: bool = False, + ): + """ Extract attention maps (or other activations) from a model by name. + + Args: + model: Instantiated model to extract from. + names: List of concrete or wildcard names to extract. Names are nodes for fx and modules for hooks. + mode: 'train' or 'eval' model mode. + method: 'fx' or 'hook' extraction method. + hook_type: 'forward' or 'forward_pre' hooks used. 
+ use_regex: Use regex instead of fnmatch + """ + super().__init__() + assert mode in ('train', 'eval') + if mode == 'train': + model = model.train() + else: + model = model.eval() + + assert method in ('fx', 'hook') + if method == 'fx': + # names are activation node names + from timm.models._features_fx import get_graph_node_names, GraphExtractNet + + node_names = get_graph_node_names(model)[0 if mode == 'train' else 1] + names = names or self.default_node_names + if use_regex: + regexes = [re.compile(r) for r in names] + matched = [g for g in node_names if any([r.match(g) for r in regexes])] + else: + matched = [g for g in node_names if any([fnmatch.fnmatch(g, n) for n in names])] + if not matched: + raise RuntimeError(f'No node names found matching {names}.') + + self.model = GraphExtractNet(model, matched, return_dict=True) + self.hooks = None + else: + # names are module names + assert hook_type in ('forward', 'forward_pre') + from timm.models._features import FeatureHooks + + module_names = [n for n, m in model.named_modules()] + names = names or self.default_module_names + if use_regex: + regexes = [re.compile(r) for r in names] + matched = [m for m in module_names if any([r.match(m) for r in regexes])] + else: + matched = [m for m in module_names if any([fnmatch.fnmatch(m, n) for n in names])] + if not matched: + raise RuntimeError(f'No module names found matching {names}.') + + self.model = model + self.hooks = FeatureHooks(matched, model.named_modules(), default_hook_type=hook_type) + + self.names = matched + self.mode = mode + self.method = method + + def forward(self, x): + if self.hooks is not None: + self.model(x) + output = self.hooks.get_output(device=x.device) + else: + output = self.model(x) + return output diff --git a/pytorch-image-models/timm/utils/clip_grad.py b/pytorch-image-models/timm/utils/clip_grad.py new file mode 100644 index 0000000000000000000000000000000000000000..7eb40697a221edd6d8e622ff3306dad5e58afd94 --- /dev/null +++ b/pytorch-image-models/timm/utils/clip_grad.py @@ -0,0 +1,23 @@ +import torch + +from timm.utils.agc import adaptive_clip_grad + + +def dispatch_clip_grad(parameters, value: float, mode: str = 'norm', norm_type: float = 2.0): + """ Dispatch to gradient clipping method + + Args: + parameters (Iterable): model parameters to clip + value (float): clipping value/factor/norm, mode dependant + mode (str): clipping mode, one of 'norm', 'value', 'agc' + norm_type (float): p-norm, default 2.0 + """ + if mode == 'norm': + torch.nn.utils.clip_grad_norm_(parameters, value, norm_type=norm_type) + elif mode == 'value': + torch.nn.utils.clip_grad_value_(parameters, value) + elif mode == 'agc': + adaptive_clip_grad(parameters, value, norm_type=norm_type) + else: + assert False, f"Unknown clip mode ({mode})." 
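A usage sketch for dispatch_clip_grad above; mode='agc' routes to the adaptive_clip_grad implementation from agc.py, while 'norm' and 'value' wrap the standard torch.nn.utils clip functions. The toy model is illustrative.

import torch
import torch.nn as nn
from timm.utils import dispatch_clip_grad

model = nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

loss = model(torch.randn(4, 10)).sum()
loss.backward()
dispatch_clip_grad(model.parameters(), value=0.01, mode='agc')  # value acts as the AGC clip_factor
optimizer.step()
optimizer.zero_grad()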
+ diff --git a/pytorch-image-models/timm/utils/cuda.py b/pytorch-image-models/timm/utils/cuda.py new file mode 100644 index 0000000000000000000000000000000000000000..a0a2770c7515ff57c3aca7d46f4ce6cf62a2e67b --- /dev/null +++ b/pytorch-image-models/timm/utils/cuda.py @@ -0,0 +1,78 @@ +""" CUDA / AMP utils + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch + +try: + from apex import amp + has_apex = True +except ImportError: + amp = None + has_apex = False + +from .clip_grad import dispatch_clip_grad + + +class ApexScaler: + state_dict_key = "amp" + + def __call__( + self, + loss, + optimizer, + clip_grad=None, + clip_mode='norm', + parameters=None, + create_graph=False, + need_update=True, + ): + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward(create_graph=create_graph) + if need_update: + if clip_grad is not None: + dispatch_clip_grad(amp.master_params(optimizer), clip_grad, mode=clip_mode) + optimizer.step() + + def state_dict(self): + if 'state_dict' in amp.__dict__: + return amp.state_dict() + + def load_state_dict(self, state_dict): + if 'load_state_dict' in amp.__dict__: + amp.load_state_dict(state_dict) + + +class NativeScaler: + state_dict_key = "amp_scaler" + + def __init__(self, device='cuda'): + try: + self._scaler = torch.amp.GradScaler(device=device) + except (AttributeError, TypeError) as e: + self._scaler = torch.cuda.amp.GradScaler() + + def __call__( + self, + loss, + optimizer, + clip_grad=None, + clip_mode='norm', + parameters=None, + create_graph=False, + need_update=True, + ): + self._scaler.scale(loss).backward(create_graph=create_graph) + if need_update: + if clip_grad is not None: + assert parameters is not None + self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place + dispatch_clip_grad(parameters, clip_grad, mode=clip_mode) + self._scaler.step(optimizer) + self._scaler.update() + + def state_dict(self): + return self._scaler.state_dict() + + def load_state_dict(self, state_dict): + self._scaler.load_state_dict(state_dict) diff --git a/pytorch-image-models/timm/utils/decay_batch.py b/pytorch-image-models/timm/utils/decay_batch.py new file mode 100644 index 0000000000000000000000000000000000000000..852fa4b8dc3d46932b67ed3e42170a5de92415d9 --- /dev/null +++ b/pytorch-image-models/timm/utils/decay_batch.py @@ -0,0 +1,43 @@ +""" Batch size decay and retry helpers. 
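A usage sketch for NativeScaler above in a mixed-precision training step; the device selection and toy model are placeholders, and the CPU fallback assumes a recent torch with torch.amp.GradScaler.

import torch
import torch.nn as nn
from timm.utils import NativeScaler

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = nn.Linear(10, 2).to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
loss_scaler = NativeScaler(device=device)

x = torch.randn(8, 10, device=device)
y = torch.randint(0, 2, (8,), device=device)
with torch.autocast(device_type=device, enabled=(device == 'cuda')):
    loss = nn.functional.cross_entropy(model(x), y)

optimizer.zero_grad()
# backward, unscale, optional clipping, optimizer step and scaler update in one call
loss_scaler(loss, optimizer, clip_grad=1.0, clip_mode='norm', parameters=model.parameters())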
+ +Copyright 2022 Ross Wightman +""" +import math + + +def decay_batch_step(batch_size, num_intra_steps=2, no_odd=False): + """ power of two batch-size decay with intra steps + + Decay by stepping between powers of 2: + * determine power-of-2 floor of current batch size (base batch size) + * divide above value by num_intra_steps to determine step size + * floor batch_size to nearest multiple of step_size (from base batch size) + Examples: + num_steps == 4 --> 64, 56, 48, 40, 32, 28, 24, 20, 16, 14, 12, 10, 8, 7, 6, 5, 4, 3, 2, 1 + num_steps (no_odd=True) == 4 --> 64, 56, 48, 40, 32, 28, 24, 20, 16, 14, 12, 10, 8, 6, 4, 2 + num_steps == 2 --> 64, 48, 32, 24, 16, 12, 8, 6, 4, 3, 2, 1 + num_steps == 1 --> 64, 32, 16, 8, 4, 2, 1 + """ + if batch_size <= 1: + # return 0 for stopping value so easy to use in loop + return 0 + base_batch_size = int(2 ** (math.log(batch_size - 1) // math.log(2))) + step_size = max(base_batch_size // num_intra_steps, 1) + batch_size = base_batch_size + ((batch_size - base_batch_size - 1) // step_size) * step_size + if no_odd and batch_size % 2: + batch_size -= 1 + return batch_size + + +def check_batch_size_retry(error_str): + """ check failure error string for conditions where batch decay retry should not be attempted + """ + error_str = error_str.lower() + if 'required rank' in error_str: + # Errors involving phrase 'required rank' typically happen when a conv is used that's + # not compatible with channels_last memory format. + return False + if 'illegal' in error_str: + # 'Illegal memory access' errors in CUDA typically leave process in unusable state + return False + return True diff --git a/pytorch-image-models/timm/utils/log.py b/pytorch-image-models/timm/utils/log.py new file mode 100644 index 0000000000000000000000000000000000000000..c99469e0884f3e45905ef7c7f0d1e491092697ad --- /dev/null +++ b/pytorch-image-models/timm/utils/log.py @@ -0,0 +1,28 @@ +""" Logging helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +import logging +import logging.handlers + + +class FormatterNoInfo(logging.Formatter): + def __init__(self, fmt='%(levelname)s: %(message)s'): + logging.Formatter.__init__(self, fmt) + + def format(self, record): + if record.levelno == logging.INFO: + return str(record.getMessage()) + return logging.Formatter.format(self, record) + + +def setup_default_logging(default_level=logging.INFO, log_path=''): + console_handler = logging.StreamHandler() + console_handler.setFormatter(FormatterNoInfo()) + logging.root.addHandler(console_handler) + logging.root.setLevel(default_level) + if log_path: + file_handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=(1024 ** 2 * 2), backupCount=3) + file_formatter = logging.Formatter("%(asctime)s - %(name)20s: [%(levelname)8s] - %(message)s") + file_handler.setFormatter(file_formatter) + logging.root.addHandler(file_handler) diff --git a/pytorch-image-models/timm/utils/misc.py b/pytorch-image-models/timm/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..326a50f7274fa6c4e25b2e00dda5a9b388aec2a0 --- /dev/null +++ b/pytorch-image-models/timm/utils/misc.py @@ -0,0 +1,32 @@ +""" Misc utils + +Hacked together by / Copyright 2020 Ross Wightman +""" +import argparse +import ast +import re + + +def natural_key(string_): + """See http://www.codinghorror.com/blog/archives/001018.html""" + return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] + + +def add_bool_arg(parser, name, default=False, help=''): + dest_name = name.replace('-', '_') + 
group = parser.add_mutually_exclusive_group(required=False) + group.add_argument('--' + name, dest=dest_name, action='store_true', help=help) + group.add_argument('--no-' + name, dest=dest_name, action='store_false', help=help) + parser.set_defaults(**{dest_name: default}) + + +class ParseKwargs(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): + kw = {} + for value in values: + key, value = value.split('=') + try: + kw[key] = ast.literal_eval(value) + except ValueError: + kw[key] = str(value) # fallback to string (avoid need to escape on command line) + setattr(namespace, self.dest, kw)
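A usage sketch for the argparse helpers above; ParseKwargs turns key=value tokens into a dict and add_bool_arg registers paired --flag/--no-flag options. The argument names are illustrative.

import argparse
from timm.utils import ParseKwargs, add_bool_arg

parser = argparse.ArgumentParser()
parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs)
add_bool_arg(parser, 'pin-mem', default=True, help='pin CPU memory in DataLoader')

args = parser.parse_args(['--model-kwargs', 'depth=12', 'drop_rate=0.1', '--no-pin-mem'])
print(args.model_kwargs)   # {'depth': 12, 'drop_rate': 0.1}
print(args.pin_mem)        # False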