Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes. See the raw diff for the complete change set.
- pytorch-image-models/timm/models/__pycache__/_factory.cpython-39.pyc +0 -0
- pytorch-image-models/timm/models/__pycache__/coat.cpython-39.pyc +0 -0
- pytorch-image-models/timm/models/__pycache__/crossvit.cpython-39.pyc +0 -0
- pytorch-image-models/timm/models/__pycache__/cspnet.cpython-39.pyc +0 -0
- pytorch-image-models/timm/models/__pycache__/davit.cpython-39.pyc +0 -0
- pytorch-image-models/timm/models/__pycache__/hardcorenas.cpython-39.pyc +0 -0
- pytorch-image-models/timm/models/__pycache__/pnasnet.cpython-39.pyc +0 -0
- pytorch-image-models/timm/models/__pycache__/rdnet.cpython-39.pyc +0 -0
- pytorch-image-models/timm/models/__pycache__/regnet.cpython-39.pyc +0 -0
- pytorch-image-models/timm/models/__pycache__/repghost.cpython-39.pyc +0 -0
- pytorch-image-models/timm/models/__pycache__/swin_transformer.cpython-39.pyc +0 -0
- pytorch-image-models/timm/models/__pycache__/vitamin.cpython-39.pyc +0 -0
- pytorch-image-models/timm/models/__pycache__/xception.cpython-39.pyc +0 -0
- pytorch-image-models/timm/models/__pycache__/xception_aligned.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/__init__.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/_optim_factory.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/_param_groups.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/_types.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/adafactor.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/adafactor_bv.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/adahessian.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/adamp.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/adamw.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/adan.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/adopt.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/lamb.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/laprop.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/lars.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/lion.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/lookahead.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/madgrad.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/mars.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/nadamw.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/nvnovograd.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/radam.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/rmsprop_tf.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/sgdp.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/__pycache__/sgdw.cpython-39.pyc +0 -0
- pytorch-image-models/timm/optim/_types.py +25 -0
- pytorch-image-models/timm/optim/adafactor_bv.py +320 -0
- pytorch-image-models/timm/optim/adamp.py +120 -0
- pytorch-image-models/timm/optim/adamw.py +140 -0
- pytorch-image-models/timm/optim/adan.py +295 -0
- pytorch-image-models/timm/optim/lamb.py +224 -0
- pytorch-image-models/timm/optim/nadam.py +106 -0
- pytorch-image-models/timm/optim/rmsprop_tf.py +169 -0
- pytorch-image-models/timm/scheduler/__init__.py +8 -0
- pytorch-image-models/timm/scheduler/__pycache__/__init__.cpython-39.pyc +0 -0
- pytorch-image-models/timm/scheduler/__pycache__/cosine_lr.cpython-39.pyc +0 -0
- pytorch-image-models/timm/scheduler/__pycache__/plateau_lr.cpython-39.pyc +0 -0
pytorch-image-models/timm/models/__pycache__/_factory.cpython-39.pyc  ADDED  (binary file, 4.93 kB)
pytorch-image-models/timm/models/__pycache__/coat.cpython-39.pyc  ADDED  (binary file, 21.6 kB)
pytorch-image-models/timm/models/__pycache__/crossvit.cpython-39.pyc  ADDED  (binary file, 19.4 kB)
pytorch-image-models/timm/models/__pycache__/cspnet.cpython-39.pyc  ADDED  (binary file, 28.3 kB)
pytorch-image-models/timm/models/__pycache__/davit.cpython-39.pyc  ADDED  (binary file, 22.9 kB)
pytorch-image-models/timm/models/__pycache__/hardcorenas.cpython-39.pyc  ADDED  (binary file, 4.99 kB)
pytorch-image-models/timm/models/__pycache__/pnasnet.cpython-39.pyc  ADDED  (binary file, 11.4 kB)
pytorch-image-models/timm/models/__pycache__/rdnet.cpython-39.pyc  ADDED  (binary file, 15 kB)
pytorch-image-models/timm/models/__pycache__/regnet.cpython-39.pyc  ADDED  (binary file, 35.1 kB)
pytorch-image-models/timm/models/__pycache__/repghost.cpython-39.pyc  ADDED  (binary file, 12.5 kB)
pytorch-image-models/timm/models/__pycache__/swin_transformer.cpython-39.pyc  ADDED  (binary file, 34.2 kB)
pytorch-image-models/timm/models/__pycache__/vitamin.cpython-39.pyc  ADDED  (binary file, 16.2 kB)
pytorch-image-models/timm/models/__pycache__/xception.cpython-39.pyc  ADDED  (binary file, 7.52 kB)
pytorch-image-models/timm/models/__pycache__/xception_aligned.cpython-39.pyc  ADDED  (binary file, 12 kB)
pytorch-image-models/timm/optim/__pycache__/__init__.cpython-39.pyc  ADDED  (binary file, 1.6 kB)
pytorch-image-models/timm/optim/__pycache__/_optim_factory.cpython-39.pyc  ADDED  (binary file, 28 kB)
pytorch-image-models/timm/optim/__pycache__/_param_groups.cpython-39.pyc  ADDED  (binary file, 3.85 kB)
pytorch-image-models/timm/optim/__pycache__/_types.cpython-39.pyc  ADDED  (binary file, 1.09 kB)
pytorch-image-models/timm/optim/__pycache__/adafactor.cpython-39.pyc  ADDED  (binary file, 6.36 kB)
pytorch-image-models/timm/optim/__pycache__/adafactor_bv.cpython-39.pyc  ADDED  (binary file, 7 kB)
pytorch-image-models/timm/optim/__pycache__/adahessian.cpython-39.pyc  ADDED  (binary file, 5.86 kB)
pytorch-image-models/timm/optim/__pycache__/adamp.cpython-39.pyc  ADDED  (binary file, 3.14 kB)
pytorch-image-models/timm/optim/__pycache__/adamw.cpython-39.pyc  ADDED  (binary file, 4.1 kB)
pytorch-image-models/timm/optim/__pycache__/adan.cpython-39.pyc  ADDED  (binary file, 6.42 kB)
pytorch-image-models/timm/optim/__pycache__/adopt.cpython-39.pyc  ADDED  (binary file, 10.8 kB)
pytorch-image-models/timm/optim/__pycache__/lamb.cpython-39.pyc  ADDED  (binary file, 5.44 kB)
pytorch-image-models/timm/optim/__pycache__/laprop.cpython-39.pyc  ADDED  (binary file, 3.27 kB)
pytorch-image-models/timm/optim/__pycache__/lars.cpython-39.pyc  ADDED  (binary file, 3.78 kB)
pytorch-image-models/timm/optim/__pycache__/lion.cpython-39.pyc  ADDED  (binary file, 5.82 kB)
pytorch-image-models/timm/optim/__pycache__/lookahead.cpython-39.pyc  ADDED  (binary file, 2.62 kB)
pytorch-image-models/timm/optim/__pycache__/madgrad.cpython-39.pyc  ADDED  (binary file, 4.92 kB)
pytorch-image-models/timm/optim/__pycache__/mars.cpython-39.pyc  ADDED  (binary file, 4.84 kB)
pytorch-image-models/timm/optim/__pycache__/nadamw.cpython-39.pyc  ADDED  (binary file, 9.55 kB)
pytorch-image-models/timm/optim/__pycache__/nvnovograd.cpython-39.pyc  ADDED  (binary file, 3.79 kB)
pytorch-image-models/timm/optim/__pycache__/radam.cpython-39.pyc  ADDED  (binary file, 2.93 kB)
pytorch-image-models/timm/optim/__pycache__/rmsprop_tf.cpython-39.pyc  ADDED  (binary file, 4.97 kB)
pytorch-image-models/timm/optim/__pycache__/sgdp.cpython-39.pyc  ADDED  (binary file, 2.01 kB)
pytorch-image-models/timm/optim/__pycache__/sgdw.cpython-39.pyc  ADDED  (binary file, 6.2 kB)
pytorch-image-models/timm/optim/_types.py
ADDED
@@ -0,0 +1,25 @@

from typing import Any, Dict, Iterable, Union, Protocol, Type
try:
    from typing import TypeAlias, TypeVar
except ImportError:
    from typing_extensions import TypeAlias, TypeVar

import torch
import torch.optim

try:
    from torch.optim.optimizer import ParamsT
except (ImportError, TypeError):
    ParamsT: TypeAlias = Union[Iterable[torch.Tensor], Iterable[Dict[str, Any]]]


OptimType = Type[torch.optim.Optimizer]


class OptimizerCallable(Protocol):
    """Protocol for optimizer constructor signatures."""

    def __call__(self, params: ParamsT, **kwargs) -> torch.optim.Optimizer: ...


__all__ = ['ParamsT', 'OptimType', 'OptimizerCallable']
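A minimal sketch of how these typing helpers might be consumed, assuming the checkout from this commit is importable as timm; the build_optimizer helper and toy model below are illustrative, not part of the diff.

from functools import partial

import torch
import torch.nn as nn

from timm.optim._types import OptimizerCallable  # assumes this commit is installed


def build_optimizer(model: nn.Module, opt_fn: OptimizerCallable) -> torch.optim.Optimizer:
    # Any callable that takes the params iterable first satisfies the protocol,
    # e.g. an optimizer class or a functools.partial with kwargs pre-bound.
    return opt_fn(model.parameters())


opt = build_optimizer(nn.Linear(4, 2), partial(torch.optim.AdamW, lr=1e-3, weight_decay=0.01))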
pytorch-image-models/timm/optim/adafactor_bv.py
ADDED
@@ -0,0 +1,320 @@

""" Adafactor (Big Vision variant) for PyTorch

Adapted from the implementation in big vision: https://github.com/google-research/big_vision

Described in 'Scaling Vision Transformers': https://arxiv.org/abs/2106.04560

Adaptation and PyTorch modifications by Ross Wightman
"""
from typing import List, Optional, Tuple, Union

import torch
from torch import Tensor
from torch.optim import Optimizer

from ._types import ParamsT


def _get_scalar_dtype():
    """Get the scalar dtype that the optimizer uses for state"""
    return torch.float64


def _factored_dims(
        shape: Tuple[int, ...],
        factored: bool,
        min_dim_size_to_factor: int
) -> Optional[tuple[int, int]]:
    """Whether to use a factored second moment estimator.

    This function returns a tuple with the two largest axes to reduce over.
    If no two dimensions have size >= min_dim_size_to_factor, return None.

    Args:
        shape: an input shape
        factored: whether to use factored second-moment estimator for > 2d vars.
        min_dim_size_to_factor: only factor accumulator if two array dimensions have at least this size.

    Returns:
        None or a tuple of ints
    """
    if not factored or len(shape) < 2:
        return None
    sorted_dims = sorted(((x, i) for i, x in enumerate(shape)))
    if shape[sorted_dims[-2][1]] < min_dim_size_to_factor:
        return None
    return int(sorted_dims[-2][1]), int(sorted_dims[-1][1])


class AdafactorBigVision(Optimizer):
    """
    PyTorch implementation of BigVision's Adafactor variant with both single and multi tensor implementations.

    Adapted from https://github.com/google-research/big_vision by Ross Wightman
    """

    def __init__(
            self,
            params: ParamsT,
            lr: float = 1.0,
            min_dim_size_to_factor: int = 16,
            decay_rate: float = 0.8,
            decay_offset: int = 0,
            beta2_cap: float = 0.999,
            momentum: Optional[float] = 0.9,
            momentum_dtype: Union[str, torch.dtype] = torch.bfloat16,
            eps: Optional[float] = None,
            weight_decay: float = 0.0,
            clipping_threshold: Optional[float] = None,
            unscaled_wd: bool = False,
            caution: bool = False,
            *,
            foreach: Optional[bool] = False,
    ):
        if isinstance(momentum_dtype, str):
            if momentum_dtype == 'float16':
                momentum_dtype = torch.float16
            elif momentum_dtype == 'bfloat16':
                momentum_dtype = torch.bfloat16
            else:
                assert momentum_dtype == 'float32', f'{momentum_dtype} dtype not supported'
                momentum_dtype = torch.float32
        # FIXME try to check if momentum dtype is appropriate for device? Torch API not great for this.

        defaults = dict(
            lr=lr,
            min_dim_size_to_factor=min_dim_size_to_factor,
            decay_rate=decay_rate,
            decay_offset=decay_offset,
            beta2_cap=beta2_cap,
            momentum=momentum,
            momentum_dtype=momentum_dtype,
            eps=eps,
            weight_decay=weight_decay,
            clipping_threshold=clipping_threshold,
            unscaled_wd=unscaled_wd,
            caution=caution,
            foreach=foreach,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault('caution', False)
            group.setdefault('foreach', None)
            for p in group['params']:
                p_state = self.state.get(p, {})
                if len(p_state) != 0 and not torch.is_tensor(p_state['step']):
                    p_state['step'] = torch.tensor(float(p_state['step']), dtype=_get_scalar_dtype())

                if 'exp_avg' in p_state and torch.is_tensor(p_state['exp_avg']):
                    # FIXME this is a bit of a hack, optimizer.load_state_dict appears to upcast
                    # the momentum to float32 (it's half precision in the state_dict), need to
                    # look into this further. Better to override _process_value_according_to_param_policy?
                    p_state['exp_avg'] = p_state['exp_avg'].to(dtype=self.defaults['momentum_dtype'])

    @torch.no_grad()
    def step(self, closure=None):
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avg_sq_rs = []
            exp_avg_sq_cs = []
            exp_avg_sqs = []
            state_steps = []
            exp_avgs = []  # For momentum

            for p in group['params']:
                if p.grad is None:
                    continue

                if p.grad.is_sparse:
                    raise RuntimeError("Sparse gradients not supported")

                params_with_grad.append(p)
                grads.append(p.grad)

                state = self.state[p]

                if len(state) == 0:
                    # NOTE step on CPU, probably needs some more thought to make capturable
                    state['step'] = torch.tensor(0.0, dtype=_get_scalar_dtype())

                    shape = p.grad.shape
                    factored_dims = _factored_dims(
                        shape,
                        factored=True,
                        min_dim_size_to_factor=self.defaults['min_dim_size_to_factor']
                    )

                    if factored_dims is not None:
                        dc, dr = factored_dims
                        row_shape = list(p.grad.shape)
                        row_shape[dr] = 1
                        col_shape = list(p.grad.shape)
                        col_shape[dc] = 1
                        state['exp_avg_sq_r'] = p.grad.new_zeros(row_shape)
                        state['exp_avg_sq_c'] = p.grad.new_zeros(col_shape)
                    else:
                        state['exp_avg_sq'] = torch.zeros_like(p.grad, memory_format=torch.preserve_format)

                    if self.defaults['momentum'] is not None:
                        state['exp_avg'] = torch.zeros_like(p.grad, dtype=self.defaults['momentum_dtype'])

                state_steps.append(state['step'])
                exp_avg_sq_rs.append(state.get('exp_avg_sq_r', None))
                exp_avg_sq_cs.append(state.get('exp_avg_sq_c', None))
                exp_avg_sqs.append(state.get('exp_avg_sq', None))
                exp_avgs.append(state.get('exp_avg', None))

            if group['foreach']:
                func = _multi_tensor_adafactor
            else:
                func = _single_tensor_adafactor

            func(
                params=params_with_grad,
                grads=grads,
                exp_avg_sq_rs=exp_avg_sq_rs,
                exp_avg_sq_cs=exp_avg_sq_cs,
                exp_avg_sqs=exp_avg_sqs,
                exp_avgs=exp_avgs,
                state_steps=state_steps,
                beta2_decay=group['decay_rate'],
                beta2_cap=group['beta2_cap'],
                min_dim_size_to_factor=group['min_dim_size_to_factor'],
                eps=group['eps'],
                lr=group['lr'],
                weight_decay=group['weight_decay'],
                momentum=group['momentum'],
                momentum_dtype=group['momentum_dtype'],
                clipping_threshold=group['clipping_threshold'],
                unscaled_wd=group['unscaled_wd'],
                caution=group['caution'],
            )

        return loss


def _single_tensor_adafactor(
        params: List[Tensor],
        grads: List[Tensor],
        exp_avg_sq_rs: List[Optional[Tensor]],
        exp_avg_sq_cs: List[Optional[Tensor]],
        exp_avg_sqs: List[Optional[Tensor]],
        exp_avgs: List[Optional[Tensor]],
        state_steps: List[Tensor],
        *,
        beta2_decay: float,
        beta2_cap: float,
        min_dim_size_to_factor: int,
        eps: float,
        lr: float,
        weight_decay: float,
        momentum: Optional[float],
        momentum_dtype: Union[str, torch.dtype],
        clipping_threshold: Optional[float],
        unscaled_wd: bool,
        caution: bool,
):
    for i, param in enumerate(params):
        grad = grads[i]
        exp_avg_sq_r = exp_avg_sq_rs[i]
        exp_avg_sq_c = exp_avg_sq_cs[i]
        exp_avg_sq = exp_avg_sqs[i]
        exp_avg = exp_avgs[i]
        step_t = state_steps[i]
        if eps is None:
            # default eps for avoiding div by zero, diff from float type eps
            eps = 1e-7 if grad.dtype == torch.float16 else 1e-30

        # Update step
        step_t += 1
        beta2_t = min(beta2_cap, 1.0 - float(step_t) ** (-beta2_decay))
        one_minus_beta2_t = 1 - beta2_t

        grad_sqr = torch.square(grad) + eps
        # NOTE application of eps (epsilon1) mirrors the optax/big vision/t5x approach
        if exp_avg_sq is None:
            # factorized second moment
            dc, dr = _factored_dims(grad.shape, True, min_dim_size_to_factor=min_dim_size_to_factor)
            exp_avg_sq_r.lerp_(grad_sqr.mean(dim=dr, keepdim=True), one_minus_beta2_t)
            exp_avg_sq_c.lerp_(grad_sqr.mean(dim=dc, keepdim=True), one_minus_beta2_t)

            reduce_dc = dc - 1 if dc > dr else dc
            row_col_mean = exp_avg_sq_r.mean(dim=reduce_dc, keepdim=True)
            row_factor = (exp_avg_sq_r / row_col_mean).rsqrt()
            col_factor = exp_avg_sq_c.rsqrt()

            update = grad * row_factor * col_factor
        else:
            # non-factorized second moment
            assert exp_avg_sq_r is None and exp_avg_sq_c is None
            exp_avg_sq.lerp_(grad_sqr, one_minus_beta2_t)
            update = grad * exp_avg_sq.rsqrt()

        # Clip by RMS value
        if clipping_threshold is not None:
            denom = (update.norm(2) / ((update.numel() ** 0.5) / clipping_threshold)).clamp_(max=1.0)
            update.div_(denom)

        # Apply momentum (in different dtype)
        if momentum is not None and exp_avg is not None:
            if momentum_dtype != grad.dtype:
                exp_avg.lerp_(update.to(momentum_dtype), 1 - momentum)  # ema
                update = exp_avg.to(grad.dtype)
            else:
                exp_avg.lerp_(update, 1 - momentum)  # ema
                update = exp_avg.clone()

        if caution:
            # apply caution as per 'Cautious Optimizers': https://arxiv.org/abs/2411.16085
            mask = (update * grad > 0).to(grad.dtype)
            mask.div_(mask.mean().clamp_(min=1e-3))
            update.mul_(mask)

        # Scale by learning rate
        update.mul_(lr)

        # Perform weight decay
        if weight_decay != 0:
            if unscaled_wd:
                # match big vision impl, 'fully decoupled' decay w/o LR scaling
                param.mul_(1. - weight_decay)
            else:
                # match typical pytorch behaviour for decoupled decay, eg adamw where wd is scaled by LR
                param.mul_(1. - lr * weight_decay)

        # Update parameters
        param.add_(update, alpha=-1.0)


def _multi_tensor_adafactor(
        params: List[Tensor],
        grads: List[Tensor],
        exp_avg_sq_rs: List[Optional[Tensor]],
        exp_avg_sq_cs: List[Optional[Tensor]],
        exp_avg_sqs: List[Optional[Tensor]],
        exp_avgs: List[Optional[Tensor]],
        state_steps: List[Tensor],
        *,
        beta2_decay: float,
        beta2_cap: float,
        min_dim_size_to_factor: int,
        eps: float,
        lr: float,
        weight_decay: float,
        momentum: Optional[float],
        momentum_dtype: Union[str, torch.dtype],
        clipping_threshold: Optional[float],
        unscaled_wd: bool,
        caution: bool,
):
    # FIXME TODO
    assert False, 'multi-tensor fn (foreach=True) not implemented yet'
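A minimal usage sketch for the optimizer added above, assuming the timm checkout from this commit is importable; the toy model and hyperparameters are illustrative only.

import torch
import torch.nn as nn

from timm.optim.adafactor_bv import AdafactorBigVision  # assumes this commit is installed

model = nn.Sequential(nn.Linear(32, 64), nn.ReLU(), nn.Linear(64, 10))
opt = AdafactorBigVision(model.parameters(), lr=1.0, momentum=0.9, weight_decay=1e-4)

x, y = torch.randn(8, 32), torch.randint(0, 10, (8,))
loss = nn.functional.cross_entropy(model(x), y)
loss.backward()
opt.step()       # 2D weights use the factored second moment; momentum is kept in bfloat16 by default
opt.zero_grad()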
pytorch-image-models/timm/optim/adamp.py
ADDED
@@ -0,0 +1,120 @@

"""
AdamP Optimizer Implementation copied from https://github.com/clovaai/AdamP/blob/master/adamp/adamp.py

Paper: `Slowing Down the Weight Norm Increase in Momentum-based Optimizers` - https://arxiv.org/abs/2006.08217
Code: https://github.com/clovaai/AdamP

Copyright (c) 2020-present NAVER Corp.
MIT license
"""

import torch
import torch.nn.functional as F
from torch.optim.optimizer import Optimizer
import math


def _channel_view(x) -> torch.Tensor:
    return x.reshape(x.size(0), -1)


def _layer_view(x) -> torch.Tensor:
    return x.reshape(1, -1)


def projection(p, grad, perturb, delta: float, wd_ratio: float, eps: float):
    wd = 1.
    expand_size = (-1,) + (1,) * (len(p.shape) - 1)
    for view_func in [_channel_view, _layer_view]:
        param_view = view_func(p)
        grad_view = view_func(grad)
        cosine_sim = F.cosine_similarity(grad_view, param_view, dim=1, eps=eps).abs_()

        # FIXME this is a problem for PyTorch XLA
        if cosine_sim.max() < delta / math.sqrt(param_view.size(1)):
            p_n = p / param_view.norm(p=2, dim=1).add_(eps).reshape(expand_size)
            perturb -= p_n * view_func(p_n * perturb).sum(dim=1).reshape(expand_size)
            wd = wd_ratio
            return perturb, wd

    return perturb, wd


class AdamP(Optimizer):
    def __init__(
            self,
            params,
            lr=1e-3,
            betas=(0.9, 0.999),
            eps=1e-8,
            weight_decay=0,
            delta=0.1,
            wd_ratio=0.1,
            nesterov=False,
    ):
        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            delta=delta,
            wd_ratio=wd_ratio,
            nesterov=nesterov,
        )
        super(AdamP, self).__init__(params, defaults)

    @torch.no_grad()
    def step(self, closure=None):
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue

                grad = p.grad
                beta1, beta2 = group['betas']
                nesterov = group['nesterov']

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p)
                    state['exp_avg_sq'] = torch.zeros_like(p)

                # Adam
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']

                state['step'] += 1
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']

                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

                denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                step_size = group['lr'] / bias_correction1

                if nesterov:
                    perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom
                else:
                    perturb = exp_avg / denom

                # Projection
                wd_ratio = 1.
                if len(p.shape) > 1:
                    perturb, wd_ratio = projection(p, grad, perturb, group['delta'], group['wd_ratio'], group['eps'])

                # Weight decay
                if group['weight_decay'] > 0:
                    p.mul_(1. - group['lr'] * group['weight_decay'] * wd_ratio)

                # Step
                p.add_(perturb, alpha=-step_size)

        return loss
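A brief usage sketch for AdamP, assuming the timm checkout from this commit is importable; the toy conv layer and hyperparameters are illustrative.

import torch
import torch.nn as nn

from timm.optim.adamp import AdamP  # assumes this commit is installed

model = nn.Conv2d(3, 8, kernel_size=3)
opt = AdamP(model.parameters(), lr=1e-3, weight_decay=1e-2, wd_ratio=0.1)

x = torch.randn(2, 3, 16, 16)
model(x).mean().backward()
opt.step()  # the projection step applies to the conv weight (ndim > 1) but not the 1D bias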
pytorch-image-models/timm/optim/adamw.py
ADDED
@@ -0,0 +1,140 @@

""" AdamW Optimizer
Impl copied from PyTorch master

NOTE: This impl has been deprecated in favour of torch.optim.AdamW and remains as a reference
"""
import math
from typing import Tuple

import torch
from torch.optim.optimizer import Optimizer

from ._types import ParamsT


class AdamWLegacy(Optimizer):
    r"""Implements AdamW algorithm.

    NOTE: This impl has been deprecated in favour of torch.optim.AdamW and remains as a reference

    References:
        - Adam: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980
        - Decoupled Weight Decay Regularization: https://arxiv.org/abs/1711.05101
        - On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ

    Args:
        params: iterable of parameters to optimize or dicts defining parameter groups
        lr: learning rate
        betas: coefficients used for computing running averages of gradient and its square
        eps: term added to the denominator to improve numerical stability
        weight_decay: weight decay coefficient
        amsgrad: whether to use the AMSGrad variant of this algorithm
            from the paper `On the Convergence of Adam and Beyond`
        caution: apply caution when using AdamW
    """

    def __init__(
            self,
            params: ParamsT,
            lr: float = 1e-3,
            betas: Tuple[float, float] = (0.9, 0.999),
            eps: float = 1e-8,
            weight_decay: float = 1e-2,
            amsgrad: bool = False,
            caution: bool = False,
    ):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            amsgrad=amsgrad,
            caution=caution,
        )
        super(AdamWLegacy, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(AdamWLegacy, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)
            group.setdefault('caution', False)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue

                # Perform stepweight decay
                p.data.mul_(1 - group['lr'] * group['weight_decay'])

                # Perform optimization step
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']

                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                else:
                    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])

                step_size = group['lr'] / bias_correction1

                if group['caution']:
                    # Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
                    mask = (exp_avg * grad > 0).to(grad.dtype)
                    mask.div_(mask.mean().clamp_(min=1e-3))
                    exp_avg = exp_avg * mask

                p.addcdiv_(exp_avg, denom, value=-step_size)

        return loss
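A usage sketch for the reference AdamWLegacy class, assuming the timm checkout from this commit is importable; torch.optim.AdamW remains the recommended default, and the toy model below is illustrative.

import torch
import torch.nn as nn

from timm.optim.adamw import AdamWLegacy  # assumes this commit is installed

model = nn.Linear(16, 4)
opt = AdamWLegacy(model.parameters(), lr=1e-3, weight_decay=1e-2, caution=True)

x = torch.randn(4, 16)
model(x).sum().backward()
opt.step()  # with caution=True, momentum entries whose sign disagrees with the gradient are masked out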
pytorch-image-models/timm/optim/adan.py
ADDED
@@ -0,0 +1,295 @@

""" Adan Optimizer

Adan: Adaptive Nesterov Momentum Algorithm for Faster Optimizing Deep Models[J]. arXiv preprint arXiv:2208.06677, 2022.
https://arxiv.org/abs/2208.06677

Implementation adapted from https://github.com/sail-sg/Adan
"""
# Copyright 2022 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from typing import List, Tuple

import torch
from torch import Tensor
from torch.optim.optimizer import Optimizer


class MultiTensorApply(object):
    available = False
    warned = False

    def __init__(self, chunk_size):
        try:
            MultiTensorApply.available = True
            self.chunk_size = chunk_size
        except ImportError as err:
            MultiTensorApply.available = False
            MultiTensorApply.import_err = err

    def __call__(self, op, noop_flag_buffer, tensor_lists, *args):
        return op(self.chunk_size, noop_flag_buffer, tensor_lists, *args)


class Adan(Optimizer):
    """ Implements a pytorch variant of Adan.

    Adan was proposed in Adan: Adaptive Nesterov Momentum Algorithm for Faster Optimizing Deep Models
    https://arxiv.org/abs/2208.06677

    Arguments:
        params: Iterable of parameters to optimize or dicts defining parameter groups.
        lr: Learning rate.
        betas: Coefficients used for first- and second-order moments.
        eps: Term added to the denominator to improve numerical stability.
        weight_decay: Decoupled weight decay (L2 penalty)
        no_prox: How to perform the weight decay
        foreach: If True would use torch._foreach implementation. Faster but uses slightly more memory.
    """

    def __init__(self,
                 params,
                 lr: float = 1e-3,
                 betas: Tuple[float, float, float] = (0.98, 0.92, 0.99),
                 eps: float = 1e-8,
                 weight_decay: float = 0.0,
                 no_prox: bool = False,
                 foreach: bool = True,
                 ):
        if not 0.0 <= lr:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if not 0.0 <= eps:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        if not 0.0 <= betas[2] < 1.0:
            raise ValueError('Invalid beta parameter at index 2: {}'.format(betas[2]))

        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            no_prox=no_prox,
            foreach=foreach,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super(Adan, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('no_prox', False)

    @torch.no_grad()
    def restart_opt(self):
        for group in self.param_groups:
            group['step'] = 0
            for p in group['params']:
                if p.requires_grad:
                    state = self.state[p]
                    # State initialization

                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p)
                    # Exponential moving average of gradient difference
                    state['exp_avg_diff'] = torch.zeros_like(p)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step."""
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            exp_avg_diffs = []
            neg_pre_grads = []

            beta1, beta2, beta3 = group['betas']
            # assume same step across group now to simplify things
            # per parameter step can be easily supported by making it a tensor, or pass list into kernel
            if 'step' in group:
                group['step'] += 1
            else:
                group['step'] = 1

            bias_correction1 = 1.0 - beta1 ** group['step']
            bias_correction2 = 1.0 - beta2 ** group['step']
            bias_correction3 = 1.0 - beta3 ** group['step']

            for p in group['params']:
                if p.grad is None:
                    continue
                params_with_grad.append(p)
                grads.append(p.grad)

                state = self.state[p]
                if len(state) == 0:
                    state['exp_avg'] = torch.zeros_like(p)
                    state['exp_avg_sq'] = torch.zeros_like(p)
                    state['exp_avg_diff'] = torch.zeros_like(p)

                if 'neg_pre_grad' not in state or group['step'] == 1:
                    state['neg_pre_grad'] = -p.grad.clone()

                exp_avgs.append(state['exp_avg'])
                exp_avg_sqs.append(state['exp_avg_sq'])
                exp_avg_diffs.append(state['exp_avg_diff'])
                neg_pre_grads.append(state['neg_pre_grad'])

            if not params_with_grad:
                continue

            kwargs = dict(
                params=params_with_grad,
                grads=grads,
                exp_avgs=exp_avgs,
                exp_avg_sqs=exp_avg_sqs,
                exp_avg_diffs=exp_avg_diffs,
                neg_pre_grads=neg_pre_grads,
                beta1=beta1,
                beta2=beta2,
                beta3=beta3,
                bias_correction1=bias_correction1,
                bias_correction2=bias_correction2,
                bias_correction3_sqrt=math.sqrt(bias_correction3),
                lr=group['lr'],
                weight_decay=group['weight_decay'],
                eps=group['eps'],
                no_prox=group['no_prox'],
            )

            if group['foreach']:
                _multi_tensor_adan(**kwargs)
            else:
                _single_tensor_adan(**kwargs)

        return loss


def _single_tensor_adan(
        params: List[Tensor],
        grads: List[Tensor],
        exp_avgs: List[Tensor],
        exp_avg_sqs: List[Tensor],
        exp_avg_diffs: List[Tensor],
        neg_pre_grads: List[Tensor],
        *,
        beta1: float,
        beta2: float,
        beta3: float,
        bias_correction1: float,
        bias_correction2: float,
        bias_correction3_sqrt: float,
        lr: float,
        weight_decay: float,
        eps: float,
        no_prox: bool,
):
    for i, param in enumerate(params):
        grad = grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        exp_avg_diff = exp_avg_diffs[i]
        neg_grad_or_diff = neg_pre_grads[i]

        # for memory saving, we use `neg_grad_or_diff` to get some temp variable in an inplace way
        neg_grad_or_diff.add_(grad)

        exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)  # m_t
        exp_avg_diff.mul_(beta2).add_(neg_grad_or_diff, alpha=1 - beta2)  # diff_t

        neg_grad_or_diff.mul_(beta2).add_(grad)
        exp_avg_sq.mul_(beta3).addcmul_(neg_grad_or_diff, neg_grad_or_diff, value=1 - beta3)  # n_t

        denom = (exp_avg_sq.sqrt() / bias_correction3_sqrt).add_(eps)
        step_size_diff = lr * beta2 / bias_correction2
        step_size = lr / bias_correction1

        if no_prox:
            param.mul_(1 - lr * weight_decay)
            param.addcdiv_(exp_avg, denom, value=-step_size)
            param.addcdiv_(exp_avg_diff, denom, value=-step_size_diff)
        else:
            param.addcdiv_(exp_avg, denom, value=-step_size)
            param.addcdiv_(exp_avg_diff, denom, value=-step_size_diff)
            param.div_(1 + lr * weight_decay)

        neg_grad_or_diff.zero_().add_(grad, alpha=-1.0)


def _multi_tensor_adan(
        params: List[Tensor],
        grads: List[Tensor],
        exp_avgs: List[Tensor],
        exp_avg_sqs: List[Tensor],
        exp_avg_diffs: List[Tensor],
        neg_pre_grads: List[Tensor],
        *,
        beta1: float,
        beta2: float,
        beta3: float,
        bias_correction1: float,
        bias_correction2: float,
        bias_correction3_sqrt: float,
        lr: float,
        weight_decay: float,
        eps: float,
        no_prox: bool,
):
    if len(params) == 0:
        return

    # for memory saving, we use `neg_pre_grads` to get some temp variable in an inplace way
    torch._foreach_add_(neg_pre_grads, grads)

    torch._foreach_mul_(exp_avgs, beta1)
    torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)  # m_t

    torch._foreach_mul_(exp_avg_diffs, beta2)
    torch._foreach_add_(exp_avg_diffs, neg_pre_grads, alpha=1 - beta2)  # diff_t

    torch._foreach_mul_(neg_pre_grads, beta2)
    torch._foreach_add_(neg_pre_grads, grads)
    torch._foreach_mul_(exp_avg_sqs, beta3)
    torch._foreach_addcmul_(exp_avg_sqs, neg_pre_grads, neg_pre_grads, value=1 - beta3)  # n_t

    denom = torch._foreach_sqrt(exp_avg_sqs)
    torch._foreach_div_(denom, bias_correction3_sqrt)
    torch._foreach_add_(denom, eps)

    step_size_diff = lr * beta2 / bias_correction2
    step_size = lr / bias_correction1

    if no_prox:
        torch._foreach_mul_(params, 1 - lr * weight_decay)
        torch._foreach_addcdiv_(params, exp_avgs, denom, value=-step_size)
        torch._foreach_addcdiv_(params, exp_avg_diffs, denom, value=-step_size_diff)
    else:
        torch._foreach_addcdiv_(params, exp_avgs, denom, value=-step_size)
        torch._foreach_addcdiv_(params, exp_avg_diffs, denom, value=-step_size_diff)
        torch._foreach_div_(params, 1 + lr * weight_decay)

    torch._foreach_zero_(neg_pre_grads)
    torch._foreach_add_(neg_pre_grads, grads, alpha=-1.0)
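A usage sketch for Adan, assuming the timm checkout from this commit is importable; the toy model, loss, and settings are illustrative.

import torch
import torch.nn as nn

from timm.optim.adan import Adan  # assumes this commit is installed

model = nn.Linear(16, 4)
opt = Adan(model.parameters(), lr=1e-3, weight_decay=0.02, foreach=True)

for _ in range(2):
    opt.zero_grad()
    x = torch.randn(4, 16)
    model(x).pow(2).mean().backward()
    opt.step()  # updates m_t, the gradient-difference term diff_t, and n_t as described in the Adan paper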
pytorch-image-models/timm/optim/lamb.py
ADDED
@@ -0,0 +1,224 @@

""" PyTorch Lamb optimizer w/ behaviour similar to NVIDIA FusedLamb

This optimizer code was adapted from the following (starting with latest)
* https://github.com/HabanaAI/Model-References/blob/2b435114fe8e31f159b1d3063b8280ae37af7423/PyTorch/nlp/bert/pretraining/lamb.py
* https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py
* https://github.com/cybertronai/pytorch-lamb

Use FusedLamb if you can (GPU). The reason for including this variant of Lamb is to have a version that is
similar in behaviour to APEX FusedLamb if you aren't using NVIDIA GPUs or cannot install/use APEX.

In addition to some cleanup, this Lamb impl has been modified to support PyTorch XLA and has been tested on TPU.

Original copyrights for above sources are below.

Modifications Copyright 2021 Ross Wightman
"""
# Copyright (c) 2021, Habana Labs Ltd. All rights reserved.

# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# MIT License
#
# Copyright (c) 2019 cybertronai
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
from typing import Optional, Tuple

import torch
from torch.optim import Optimizer

from ._types import ParamsT


class Lamb(Optimizer):
    """Implements a pure pytorch variant of FuseLAMB (NvLamb variant) optimizer from apex.optimizers.FusedLAMB
    reference: https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py

    LAMB was proposed in:
    - Large Batch Optimization for Deep Learning - Training BERT in 76 minutes: https://arxiv.org/abs/1904.00962
    - On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ

    Args:
        params: Iterable of parameters to optimize or dicts defining parameter groups.
        lr: Learning rate
        betas: Coefficients used for computing running averages of gradient and its norm.
        eps: Term added to the denominator to improve numerical stability.
        weight_decay: Weight decay
        grad_averaging: Whether apply (1-beta2) to grad when calculating running averages of gradient.
        max_grad_norm: Value used to clip global grad norm.
        trust_clip: Enable LAMBC trust ratio clipping.
        always_adapt: Apply adaptive learning rate to 0.0 weight decay parameter.
        caution: Apply caution.
    """

    def __init__(
            self,
            params: ParamsT,
            lr: float = 1e-3,
            bias_correction: bool = True,
            betas: Tuple[float, float] = (0.9, 0.999),
            eps: float = 1e-6,
            weight_decay: float = 0.01,
            grad_averaging: bool = True,
            max_grad_norm: Optional[float] = 1.0,
            trust_clip: bool = False,
            always_adapt: bool = False,
            caution: bool = False,
    ):
        defaults = dict(
            lr=lr,
            bias_correction=bias_correction,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            grad_averaging=grad_averaging,
            max_grad_norm=max_grad_norm,
            trust_clip=trust_clip,
            always_adapt=always_adapt,
            caution=caution,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault('caution', False)

    def _get_clip_grad_norm(self):
        max_grad_norm = self.defaults['max_grad_norm']
        if max_grad_norm is None:
            return None

        norms = []
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instead.')
                norms.append(torch.linalg.vector_norm(grad))
        global_norm = torch.linalg.vector_norm(torch.stack(norms))
        clip_global_norm = (global_norm / max_grad_norm).clamp_(min=1.0)
        return clip_global_norm

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        clip_grad_norm = self._get_clip_grad_norm()  # None if disabled

        for group in self.param_groups:
            bias_correction = 1 if group['bias_correction'] else 0
            beta1, beta2 = group['betas']
            grad_averaging = 1 if group['grad_averaging'] else 0
            beta3 = 1 - beta1 if grad_averaging else 1.0

            # assume same step across group now to simplify things
            # per parameter step can be easily supported by making it a tensor, or pass list into kernel
            if 'step' in group:
                group['step'] += 1
            else:
                group['step'] = 1

            if bias_correction:
                bias_correction1 = 1 - beta1 ** group['step']
                bias_correction2 = 1 - beta2 ** group['step']
            else:
                bias_correction1, bias_correction2 = 1.0, 1.0

            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad

                if clip_grad_norm is not None:
                    grad.div_(clip_grad_norm)

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']

                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(grad, alpha=beta3)  # m_t
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)  # v_t

                denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                update = (exp_avg / bias_correction1).div_(denom)

                if group['caution']:
                    # Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
                    mask = (update * grad > 0).to(grad.dtype)
                    mask.div_(mask.mean().clamp_(min=1e-3))
                    update.mul_(mask)

                weight_decay = group['weight_decay']
                if weight_decay != 0:
                    update.add_(p, alpha=weight_decay)

                if weight_decay != 0 or group['always_adapt']:
                    # Layer-wise LR adaptation. By default, skip adaptation on parameters that are
                    # excluded from weight decay, unless always_adapt == True, then always enabled.
                    w_norm = p.norm(2.0)
                    g_norm = update.norm(2.0)
                    trust_ratio = w_norm / g_norm
                    # FIXME nested where required since logical and/or not working in PT XLA
                    # Set the ratio to 1.0 (no change) if either weight norm or grad norm is zero
                    trust_ratio = torch.where(
                        w_norm > 0,
                        torch.where(g_norm > 0, trust_ratio, 1.0),
                        1.0,
                    )
                    if group['trust_clip']:
                        # LAMBC trust clipping, upper bound fixed at one
                        trust_ratio = torch.clamp(trust_ratio, max=1.0)
                    update.mul_(trust_ratio)

                p.add_(update, alpha=-group['lr'])

        return loss
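A usage sketch for Lamb, assuming the timm checkout from this commit is importable; the toy model and settings are illustrative.

import torch
import torch.nn as nn

from timm.optim.lamb import Lamb  # assumes this commit is installed

model = nn.Linear(32, 8)
opt = Lamb(model.parameters(), lr=2e-3, weight_decay=0.01, trust_clip=True)

x = torch.randn(16, 32)
model(x).sum().backward()
# Gradients are first scaled so the global norm stays within max_grad_norm (1.0 by default),
# then each parameter's update is scaled by its trust ratio, capped at 1.0 because trust_clip=True.
opt.step()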
pytorch-image-models/timm/optim/nadam.py
ADDED
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import math

import torch
from torch.optim.optimizer import Optimizer


class NAdamLegacy(Optimizer):
    """Implements Nadam algorithm (a variant of Adam based on Nesterov momentum).

    NOTE: This impl has been deprecated in favour of torch.optim.NAdam and remains as a reference

    It has been proposed in `Incorporating Nesterov Momentum into Adam`__.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 2e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        schedule_decay (float, optional): momentum schedule decay (default: 4e-3)

    __ http://cs229.stanford.edu/proj2015/054_report.pdf
    __ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf

    Originally taken from: https://github.com/pytorch/pytorch/pull/1408
    NOTE: Has potential issues but does work well on some problems.
    """

    def __init__(
            self,
            params,
            lr=2e-3,
            betas=(0.9, 0.999),
            eps=1e-8,
            weight_decay=0,
            schedule_decay=4e-3,
    ):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            schedule_decay=schedule_decay,
        )
        super(NAdamLegacy, self).__init__(params, defaults)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    state['m_schedule'] = 1.
                    state['exp_avg'] = torch.zeros_like(p)
                    state['exp_avg_sq'] = torch.zeros_like(p)

                # Warming momentum schedule
                m_schedule = state['m_schedule']
                schedule_decay = group['schedule_decay']
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                eps = group['eps']
                state['step'] += 1
                t = state['step']
                bias_correction2 = 1 - beta2 ** t

                if group['weight_decay'] != 0:
                    grad = grad.add(p, alpha=group['weight_decay'])

                momentum_cache_t = beta1 * (1. - 0.5 * (0.96 ** (t * schedule_decay)))
                momentum_cache_t_1 = beta1 * (1. - 0.5 * (0.96 ** ((t + 1) * schedule_decay)))
                m_schedule_new = m_schedule * momentum_cache_t
                m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1
                state['m_schedule'] = m_schedule_new

                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(grad, alpha=1. - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1. - beta2)

                denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
                p.addcdiv_(grad, denom, value=-group['lr'] * (1. - momentum_cache_t) / (1. - m_schedule_new))
                p.addcdiv_(exp_avg, denom, value=-group['lr'] * momentum_cache_t_1 / (1. - m_schedule_next))

        return loss
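A minimal sketch exercising NAdamLegacy as defined above (the docstring itself points new code at torch.optim.NAdam); the toy model and hyper-parameter values are illustrative only.

    # Illustrative usage of the legacy NAdam variant defined in this file.
    import torch

    model = torch.nn.Linear(10, 2)
    optimizer = NAdamLegacy(model.parameters(), lr=2e-3, betas=(0.9, 0.999), schedule_decay=4e-3)

    for _ in range(3):
        optimizer.zero_grad()
        loss = model(torch.randn(4, 10)).pow(2).mean()
        loss.backward()
        optimizer.step()  # Nesterov-style update using the warming momentum schedule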
pytorch-image-models/timm/optim/rmsprop_tf.py
ADDED
@@ -0,0 +1,169 @@
""" RMSProp modified to behave like Tensorflow impl

Originally cut & paste from PyTorch RMSProp
https://github.com/pytorch/pytorch/blob/063946d2b3f3f1e953a2a3b54e0b34f1393de295/torch/optim/rmsprop.py
Licensed under BSD-Clause 3 (ish), https://github.com/pytorch/pytorch/blob/master/LICENSE

Modifications Copyright 2021 Ross Wightman
"""
import torch
from torch.optim import Optimizer

from ._types import ParamsT


class RMSpropTF(Optimizer):
    """Implements RMSprop algorithm (TensorFlow style epsilon)

    NOTE: This is a direct cut-and-paste of PyTorch RMSprop with eps applied before sqrt
    and a few other modifications to closer match Tensorflow for matching hyper-params.

    Noteworthy changes include:
    1. Epsilon applied inside square-root
    2. square_avg initialized to ones
    3. LR scaling of update accumulated in momentum buffer

    Proposed by G. Hinton in his
    `course <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.

    The centered version first appears in `Generating Sequences
    With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.

    Args:
        params: iterable of parameters to optimize or dicts defining parameter groups
        lr: learning rate
        momentum: momentum factor
        alpha: smoothing (decay) constant
        eps: term added to the denominator to improve numerical stability
        centered: if ``True``, compute the centered RMSProp, the gradient is normalized by an estimation of its variance
        weight_decay: weight decay (L2 penalty) (default: 0)
        decoupled_decay: decoupled weight decay as per https://arxiv.org/abs/1711.05101
        lr_in_momentum: learning rate scaling is included in the momentum buffer update as per defaults in Tensorflow
        caution: apply caution
    """

    def __init__(
            self,
            params: ParamsT,
            lr: float = 1e-2,
            alpha: float = 0.9,
            eps: float = 1e-10,
            weight_decay: float = 0,
            momentum: float = 0.,
            centered: bool = False,
            decoupled_decay: bool = False,
            lr_in_momentum: bool = True,
            caution: bool = False,
    ):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= momentum:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        if not 0.0 <= alpha:
            raise ValueError("Invalid alpha value: {}".format(alpha))

        defaults = dict(
            lr=lr,
            momentum=momentum,
            alpha=alpha,
            eps=eps,
            centered=centered,
            weight_decay=weight_decay,
            decoupled_decay=decoupled_decay,
            lr_in_momentum=lr_in_momentum,
            caution=caution,
        )
        super(RMSpropTF, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(RMSpropTF, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('momentum', 0)
            group.setdefault('centered', False)
            group.setdefault('caution', False)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('RMSprop does not support sparse gradients')
                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    state['square_avg'] = torch.ones_like(p)  # PyTorch inits to zero
                    if group['momentum'] > 0:
                        state['momentum_buffer'] = torch.zeros_like(p)
                    if group['centered']:
                        state['grad_avg'] = torch.zeros_like(p)

                square_avg = state['square_avg']
                one_minus_alpha = 1. - group['alpha']

                state['step'] += 1

                if group['weight_decay'] != 0:
                    if group['decoupled_decay']:
                        p.mul_(1. - group['lr'] * group['weight_decay'])
                    else:
                        grad = grad.add(p, alpha=group['weight_decay'])

                # Tensorflow order of ops for updating squared avg
                square_avg.add_(grad.pow(2) - square_avg, alpha=one_minus_alpha)
                # square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha)  # PyTorch original

                if group['centered']:
                    grad_avg = state['grad_avg']
                    grad_avg.add_(grad - grad_avg, alpha=one_minus_alpha)
                    avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).add(group['eps']).sqrt_()  # eps in sqrt
                    # grad_avg.mul_(alpha).add_(grad, alpha=1 - alpha)  # PyTorch original
                else:
                    avg = square_avg.add(group['eps']).sqrt_()  # eps moved in sqrt

                if group['momentum'] > 0:
                    buf = state['momentum_buffer']
                    buf.mul_(group['momentum'])

                    def _apply_caution(_m, _g):
                        # Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
                        mask = (_m * _g > 0).to(_g.dtype)
                        mask.div_(mask.mean().clamp_(min=1e-3))
                        return _m * mask

                    if group['lr_in_momentum']:
                        # Tensorflow accumulates the LR scaling in the momentum buffer
                        buf.addcdiv_(grad, avg, value=group['lr'])
                        if group['caution']:
                            buf = _apply_caution(buf, grad)
                        p.add_(-buf)
                    else:
                        # PyTorch scales the param update by LR
                        buf.addcdiv_(grad, avg)
                        if group['caution']:
                            buf = _apply_caution(buf, grad)
                        p.add_(buf, alpha=-group['lr'])
                else:
                    p.addcdiv_(grad, avg, value=-group['lr'])

        return loss
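A minimal sketch using RMSpropTF as defined above; the hyper-parameter values are illustrative examples in the TF style, not values taken from this diff.

    # Illustrative usage of the TF-flavoured RMSprop defined in this file.
    import torch

    model = torch.nn.Conv2d(3, 8, kernel_size=3)
    optimizer = RMSpropTF(model.parameters(), lr=1e-2, alpha=0.9, eps=1e-3, momentum=0.9, weight_decay=1e-5)

    optimizer.zero_grad()
    loss = model(torch.randn(2, 3, 32, 32)).mean()
    loss.backward()
    optimizer.step()  # with lr_in_momentum=True the LR scaling accumulates in the momentum buffer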
pytorch-image-models/timm/scheduler/__init__.py
ADDED
@@ -0,0 +1,8 @@
from .cosine_lr import CosineLRScheduler
from .multistep_lr import MultiStepLRScheduler
from .plateau_lr import PlateauLRScheduler
from .poly_lr import PolyLRScheduler
from .step_lr import StepLRScheduler
from .tanh_lr import TanhLRScheduler

from .scheduler_factory import create_scheduler, create_scheduler_v2, scheduler_kwargs
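A minimal sketch of driving one of the schedulers re-exported here; the CosineLRScheduler keyword names are assumed from the timm scheduler API and should be checked against cosine_lr.py in this upload.

    # Illustrative epoch-based scheduling loop; keyword names assumed, values arbitrary.
    import torch
    from timm.scheduler import CosineLRScheduler

    optimizer = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
    scheduler = CosineLRScheduler(
        optimizer,
        t_initial=100,       # length of the cosine cycle in epochs
        lr_min=1e-5,
        warmup_t=5,          # linear warmup epochs
        warmup_lr_init=1e-6,
    )

    for epoch in range(100):
        # ... train one epoch ...
        scheduler.step(epoch + 1)  # timm schedulers are stepped with an explicit epoch index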
pytorch-image-models/timm/scheduler/__pycache__/__init__.cpython-39.pyc
ADDED
Binary file (560 Bytes).
pytorch-image-models/timm/scheduler/__pycache__/cosine_lr.cpython-39.pyc
ADDED
Binary file (3.88 kB).
pytorch-image-models/timm/scheduler/__pycache__/plateau_lr.cpython-39.pyc
ADDED
Binary file (3.5 kB).