| prompt (string, 94 – 42.6k chars) | completion (string, 6 – 120 chars) | api (string, 14 – 68 chars) |
|---|---|---|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import os
from typing import Iterable, Union
from megengine import Parameter, tensor
from megengine.functional.inplace import _inplace_add_
from megengine.optimizer import Optimizer
class SGD(Optimizer):
r"""Implements stochastic gradient descent.
Nesterov momentum is based on the formula from
`"On the importance of initialization and momentum in deep learning"
<http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf>`_.
Args:
params: iterable of parameters to optimize or dicts defining parameter groups.
lr: learning rate.
momentum: momentum factor. Default: ``0.0``
nesterov: enables Nesterov momentum. Default: ``False``
weight_decay: weight decay (L2 penalty). Default: ``0.0``
"""
def __init__(
self,
params: Union[Iterable[Parameter], dict],
lr: float,
momentum: float = 0.0,
nesterov: bool = False,
weight_decay: float = 0.0,
):
if lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
if nesterov and momentum <= 0:
raise ValueError("Nesterov momentum requires a momentum")
defaults = dict(lr=lr, momentum=momentum, weight_decay=weight_decay)
super().__init__(params, defaults)
self.nesterov = nesterov
self._disable_type_convert = True
def _create_state(self, param_group):
if param_group["momentum"] != 0.0:
for param in param_group["params"]:
self._add_state(param, "momentum_buffer")
def _updates(self, param_group):
lr = param_group["lr"]
weight_decay = param_group["weight_decay"]
momentum = param_group["momentum"]
# since `convert_inputs` is disabled for param updates,
# scalars should be explicitly converted to tensors
_lr = tensor(lr)
_weight_decay = tensor(weight_decay)
_momentum = tensor(momentum)
inplace_mode = int(os.getenv("MEGENGINE_INPLACE_UPDATE", "0"))
if inplace_mode:
_neg_lr = tensor(-lr)
c1 = tensor([1.0])
for param in param_group["params"]:
if param.grad is None:
continue
grad = param.grad
if weight_decay != 0.0:
grad = grad + param * _weight_decay
if inplace_mode:
if momentum != 0.0:
v = self._state[param]["momentum_buffer"]
| _inplace_add_(v, grad, alpha=_momentum, beta=c1) | megengine.functional.inplace._inplace_add_ |
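The completion above finishes the in-place momentum-buffer update. As a hedged illustration of the update rule this optimizer implements (momentum, optional Nesterov, L2 weight decay), here is a minimal NumPy sketch; the function and array names are illustrative, not MegEngine API.

```python
import numpy as np

def sgd_step(param, grad, buf, lr, momentum=0.9, weight_decay=1e-4, nesterov=False):
    """One SGD step on NumPy arrays: momentum buffer, optional Nesterov, L2 penalty."""
    if weight_decay != 0.0:
        grad = grad + weight_decay * param       # fold the L2 penalty into the gradient
    if momentum != 0.0:
        buf = momentum * buf + grad              # momentum_buffer update
        step = grad + momentum * buf if nesterov else buf
    else:
        step = grad
    return param - lr * step, buf

p, g = np.ones(3, np.float32), np.full(3, 0.5, np.float32)
p, buf = sgd_step(p, g, np.zeros_like(p), lr=0.1)
print(p, buf)
```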
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from collections import namedtuple
from typing import Iterable, Union
import megengine as mge
import megengine.distributed as dist
import megengine.module as M
import megengine.optimizer as optim
from basecore.config import ConfigDict
from megengine import Parameter
from megengine.amp import GradScaler
from megengine.autodiff import GradManager
from pkg_resources import packaging
from basecls.utils import registers
from .optimizer import LAMB, LARS, SGD
from .weight_decay import get_param_groups
__all__ = ["Solver", "BaseSolver", "DefaultSolver"]
Solver = namedtuple("Solver", ["optimizer", "grad_manager", "grad_scaler"])
class BaseSolver:
"""Base class for solver factory.
A solver factory should return a :py:class:`~Solver` object, which combines
an :py:class:`~megengine.optimizer.Optimizer` and
a :py:class:`~megengine.autodiff.GradManager`.
"""
@classmethod
def build(cls, cfg: ConfigDict, model: M.Module) -> Solver:
"""Abstract build function
Args:
cfg: config for training.
model: model for training.
Returns:
A solver.
"""
raise NotImplementedError
@registers.solvers.register()
class DefaultSolver(BaseSolver):
"""The default solver factory.
According to ``cfg.reduce_mode``, learning rate and weight decay will be scaled automatically
following the linear scaling rule, see
`"Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour"
<https://arxiv.org/abs/1706.02677>`_ for more details.
It supports ``"sgd"``, ``"adam"`` and ``"adamw"``.
Note:
This linear scaling rule can only work well with SGD. We are still looking for
the applicable scaling rule for Adam and AdamW. Thus we recommend keeping default
training settings (like learning rate and world size) when using Adam and AdamW.
"""
@classmethod
def build(cls, cfg: ConfigDict, model: M.Module) -> Solver:
"""Build function with the linear scaling strategy.
Args:
cfg: config for training.
model: model for training.
Returns:
A solver.
"""
amp_cfg = cfg.amp
cfg = cfg.solver
world_size = | dist.get_world_size() | megengine.distributed.get_world_size |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from collections import namedtuple
from typing import Iterable, Union
import megengine as mge
import megengine.distributed as dist
import megengine.module as M
import megengine.optimizer as optim
from basecore.config import ConfigDict
from megengine import Parameter
from megengine.amp import GradScaler
from megengine.autodiff import GradManager
from pkg_resources import packaging
from basecls.utils import registers
from .optimizer import LAMB, LARS, SGD
from .weight_decay import get_param_groups
__all__ = ["Solver", "BaseSolver", "DefaultSolver"]
Solver = namedtuple("Solver", ["optimizer", "grad_manager", "grad_scaler"])
class BaseSolver:
"""Base class for solver factory.
A solver factory should return a :py:class:`~Solver` object, which combines
an :py:class:`~megengine.optimizer.Optimizer` and
a :py:class:`~megengine.autodiff.GradManager`.
"""
@classmethod
def build(cls, cfg: ConfigDict, model: M.Module) -> Solver:
"""Abstract build function
Args:
cfg: config for training.
model: model for training.
Returns:
A solver.
"""
raise NotImplementedError
@registers.solvers.register()
class DefaultSolver(BaseSolver):
"""The default solver factory.
According to ``cfg.reduce_mode``, learning rate and weight decay will be scaled automatically
following the linear scaling rule, see
`"Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour"
<https://arxiv.org/abs/1706.02677>`_ for more details.
It supports ``"sgd"``, ``"adam"`` and ``"adamw"``.
Note:
This linear scaling rule can only work well with SGD. We are still looking for
the applicable scaling rule for Adam and AdamW. Thus we recommend keeping default
training settings (like learning rate and world size) when using Adam and AdamW.
"""
@classmethod
def build(cls, cfg: ConfigDict, model: M.Module) -> Solver:
"""Build function with the linear scaling strategy.
Args:
cfg: config for training.
model: model for training.
Returns:
A solver.
"""
amp_cfg = cfg.amp
cfg = cfg.solver
world_size = dist.get_world_size()
# build optimizer
lr = cfg.basic_lr * world_size # linear scaling rule
optim_params = get_param_groups(model, cfg.weight_decay)
optimizer = cls.build_optimizer(cfg, optim_params, lr, 0)
# build grad_manager
gm = | GradManager() | megengine.autodiff.GradManager |
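The row above stops at constructing the `GradManager`. As a hedged sketch (MegEngine 1.x assumed), this is how the pieces built by `DefaultSolver.build` are typically wired in a training step: attach parameters, record the forward pass, backward to fill gradients, then step the optimizer. The toy model, data, and loss below are placeholders for illustration, not part of the source.

```python
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager

basic_lr, world_size = 0.0125, 8
lr = basic_lr * world_size                      # linear scaling rule used by build()

model = M.Linear(4, 2)
gm = GradManager()
gm.attach(model.parameters())                   # same attach call as DefaultSolver.build
opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=1e-4)

x = mge.tensor(np.random.randn(8, 4).astype("float32"))
with gm:                                        # record the forward pass
    loss = (model(x) ** 2).mean()               # placeholder loss
    gm.backward(loss)                           # populate param.grad
opt.step().clear_grad()
```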
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from collections import namedtuple
from typing import Iterable, Union
import megengine as mge
import megengine.distributed as dist
import megengine.module as M
import megengine.optimizer as optim
from basecore.config import ConfigDict
from megengine import Parameter
from megengine.amp import GradScaler
from megengine.autodiff import GradManager
from pkg_resources import packaging
from basecls.utils import registers
from .optimizer import LAMB, LARS, SGD
from .weight_decay import get_param_groups
__all__ = ["Solver", "BaseSolver", "DefaultSolver"]
Solver = namedtuple("Solver", ["optimizer", "grad_manager", "grad_scaler"])
class BaseSolver:
"""Base class for solver factory.
A solver factory should return a :py:class:`~Solver` object, which combines
an :py:class:`~megengine.optimizer.Optimizer` and
a :py:class:`~megengine.autodiff.GradManager`.
"""
@classmethod
def build(cls, cfg: ConfigDict, model: M.Module) -> Solver:
"""Abstract build function
Args:
cfg: config for training.
model: model for training.
Returns:
A solver.
"""
raise NotImplementedError
@registers.solvers.register()
class DefaultSolver(BaseSolver):
"""The default solver factory.
According to ``cfg.reduce_mode``, learning rate and weight decay will be scaled automatically
following the linear scaling rule, see
`"Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour"
<https://arxiv.org/abs/1706.02677>`_ for more details.
It supports ``"sgd"``, ``"adam"`` and ``"adamw"``.
Note:
This linear scaling rule can only work well with SGD. We are still looking for
the applicable scaling rule for Adam and AdamW. Thus we recommend keeping default
training settings (like learning rate and world size) when using Adam and AdamW.
"""
@classmethod
def build(cls, cfg: ConfigDict, model: M.Module) -> Solver:
"""Build function with the linear scaling strategy.
Args:
cfg: config for training.
model: model for training.
Returns:
A solver.
"""
amp_cfg = cfg.amp
cfg = cfg.solver
world_size = dist.get_world_size()
# build optimizer
lr = cfg.basic_lr * world_size # linear scaling rule
optim_params = get_param_groups(model, cfg.weight_decay)
optimizer = cls.build_optimizer(cfg, optim_params, lr, 0)
# build grad_manager
gm = GradManager()
callbacks = [dist.make_allreduce_cb("mean", dist.WORLD)] if world_size > 1 else None
gm.attach(model.parameters(), callbacks=callbacks)
# build grad_scaler
scaler = (
| GradScaler(init_scale=65536.0, growth_interval=2000) | megengine.amp.GradScaler |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from collections import namedtuple
from typing import Iterable, Union
import megengine as mge
import megengine.distributed as dist
import megengine.module as M
import megengine.optimizer as optim
from basecore.config import ConfigDict
from megengine import Parameter
from megengine.amp import GradScaler
from megengine.autodiff import GradManager
from pkg_resources import packaging
from basecls.utils import registers
from .optimizer import LAMB, LARS, SGD
from .weight_decay import get_param_groups
__all__ = ["Solver", "BaseSolver", "DefaultSolver"]
Solver = namedtuple("Solver", ["optimizer", "grad_manager", "grad_scaler"])
class BaseSolver:
"""Base class for solver factory.
A solver factory should return a :py:class:`~Solver` object, which combines
an :py:class:`~megengine.optimizer.Optimizer` and
a :py:class:`~megengine.autodiff.GradManager`.
"""
@classmethod
def build(cls, cfg: ConfigDict, model: M.Module) -> Solver:
"""Abstract build function
Args:
cfg: config for training.
model: model for training.
Returns:
A solver.
"""
raise NotImplementedError
@registers.solvers.register()
class DefaultSolver(BaseSolver):
"""The default solver factory.
According to ``cfg.reduce_mode``, learning rate and weight decay will be scaled automatically
following the linear scaling rule, see
`"Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour"
<https://arxiv.org/abs/1706.02677>`_ for more details.
It supports ``"sgd"``, ``"adam"`` and ``"adamw"``.
Note:
This linear scaling rule can only work well with SGD. We are still looking for
the applicable scaling rule for Adam and AdamW. Thus we recommend keeping default
training settings (like learning rate and world size) when using Adam and AdamW.
"""
@classmethod
def build(cls, cfg: ConfigDict, model: M.Module) -> Solver:
"""Build function with the linear scaling strategy.
Args:
cfg: config for training.
model: model for training.
Returns:
A solver.
"""
amp_cfg = cfg.amp
cfg = cfg.solver
world_size = dist.get_world_size()
# build optimizer
lr = cfg.basic_lr * world_size # linear scaling rule
optim_params = get_param_groups(model, cfg.weight_decay)
optimizer = cls.build_optimizer(cfg, optim_params, lr, 0)
# build grad_manager
gm = GradManager()
callbacks = [dist.make_allreduce_cb("mean", dist.WORLD)] if world_size > 1 else None
gm.attach(model.parameters(), callbacks=callbacks)
# build grad_scaler
scaler = (
GradScaler(init_scale=65536.0, growth_interval=2000)
if amp_cfg.dynamic_scale
else | GradScaler(init_scale=128.0, growth_interval=0) | megengine.amp.GradScaler |
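The two `GradScaler` configurations above differ only in whether the loss scale adapts: `growth_interval=2000` lets the scale grow after a run of overflow-free steps, while `growth_interval=0` keeps a fixed scale of 128. The framework-free sketch below illustrates that dynamic-scaling logic; it is a conceptual model, not MegEngine's `GradScaler` implementation.

```python
import numpy as np

class ToyLossScaler:
    """Conceptual model of dynamic loss scaling; not MegEngine's GradScaler."""

    def __init__(self, init_scale=65536.0, growth_interval=2000):
        self.scale = init_scale
        self.growth_interval = growth_interval
        self._good_steps = 0

    def step(self, scaled_grads):
        """scaled_grads are gradients of (loss * scale); returns unscaled grads or None."""
        if any(not np.isfinite(g).all() for g in scaled_grads):
            self.scale /= 2.0            # overflow: skip this step and back off
            self._good_steps = 0
            return None
        unscaled = [g / self.scale for g in scaled_grads]
        self._good_steps += 1
        if self.growth_interval and self._good_steps >= self.growth_interval:
            self.scale *= 2.0            # grow again after a clean run of steps
            self._good_steps = 0
        return unscaled

scaler = ToyLossScaler()
print(scaler.step([np.ones(3, np.float32) * scaler.scale]), scaler.scale)
```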
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from collections import namedtuple
from typing import Iterable, Union
import megengine as mge
import megengine.distributed as dist
import megengine.module as M
import megengine.optimizer as optim
from basecore.config import ConfigDict
from megengine import Parameter
from megengine.amp import GradScaler
from megengine.autodiff import GradManager
from pkg_resources import packaging
from basecls.utils import registers
from .optimizer import LAMB, LARS, SGD
from .weight_decay import get_param_groups
__all__ = ["Solver", "BaseSolver", "DefaultSolver"]
Solver = namedtuple("Solver", ["optimizer", "grad_manager", "grad_scaler"])
class BaseSolver:
"""Base class for solver factory.
A solver factory should return a :py:class:`~Solver` object, which combines
an :py:class:`~megengine.optimizer.Optimizer` and
a :py:class:`~megengine.autodiff.GradManager`.
"""
@classmethod
def build(cls, cfg: ConfigDict, model: M.Module) -> Solver:
"""Abstract build function
Args:
cfg: config for training.
model: model for training.
Returns:
A solver.
"""
raise NotImplementedError
@registers.solvers.register()
class DefaultSolver(BaseSolver):
"""The default solver factory.
According to ``cfg.reduce_mode``, learning rate and weight decay will be scaled automatically
following the linear scaling rule, see
`"Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour"
<https://arxiv.org/abs/1706.02677>`_ for more details.
It supports ``"sgd"``, ``"adam"`` and ``"adamw"``.
Note:
This linear scaling rule can only work well with SGD. We are still looking for
the applicable scaling rule for Adam and AdamW. Thus we recommend keeping default
training settings (like learning rate and world size) when using Adam and AdamW.
"""
@classmethod
def build(cls, cfg: ConfigDict, model: M.Module) -> Solver:
"""Build function with the linear scaling strategy.
Args:
cfg: config for training.
model: model for training.
Returns:
A solver.
"""
amp_cfg = cfg.amp
cfg = cfg.solver
world_size = dist.get_world_size()
# build optimizer
lr = cfg.basic_lr * world_size # linear scaling rule
optim_params = get_param_groups(model, cfg.weight_decay)
optimizer = cls.build_optimizer(cfg, optim_params, lr, 0)
# build grad_manager
gm = GradManager()
callbacks = [dist.make_allreduce_cb("mean", dist.WORLD)] if world_size > 1 else None
gm.attach(model.parameters(), callbacks=callbacks)
# build grad_scaler
scaler = (
GradScaler(init_scale=65536.0, growth_interval=2000)
if amp_cfg.dynamic_scale
else GradScaler(init_scale=128.0, growth_interval=0)
)
return Solver(optimizer, gm, scaler)
@classmethod
def build_optimizer(
cls, cfg: ConfigDict, params: Union[Iterable[Parameter], dict], lr: float, wd: float
) -> optim.Optimizer:
"""Build optimizer according to training config.
Args:
cfg: config for training.
params: iterable of parameters to optimize or dicts defining parameter groups.
lr: learning rate.
wd: weight decay (L2 penalty).
Returns:
An optimizer.
"""
if cfg.optimizer == "adam":
return | optim.Adam(params, lr=lr, weight_decay=wd, betas=cfg.betas) | megengine.optimizer.Adam |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from collections import namedtuple
from typing import Iterable, Union
import megengine as mge
import megengine.distributed as dist
import megengine.module as M
import megengine.optimizer as optim
from basecore.config import ConfigDict
from megengine import Parameter
from megengine.amp import GradScaler
from megengine.autodiff import GradManager
from pkg_resources import packaging
from basecls.utils import registers
from .optimizer import LAMB, LARS, SGD
from .weight_decay import get_param_groups
__all__ = ["Solver", "BaseSolver", "DefaultSolver"]
Solver = namedtuple("Solver", ["optimizer", "grad_manager", "grad_scaler"])
class BaseSolver:
"""Base class for solver factory.
A solver factory should return a :py:class:`~Solver` object, which combines
an :py:class:`~megengine.optimizer.Optimizer` and
a :py:class:`~megengine.autodiff.GradManager`.
"""
@classmethod
def build(cls, cfg: ConfigDict, model: M.Module) -> Solver:
"""Abstract build function
Args:
cfg: config for training.
model: model for training.
Returns:
A solver.
"""
raise NotImplementedError
@registers.solvers.register()
class DefaultSolver(BaseSolver):
"""The default solver factory.
According to ``cfg.reduce_mode``, learning rate and weight decay will be scaled automatically
following the linear scaling rule, see
`"Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour"
<https://arxiv.org/abs/1706.02677>`_ for more details.
It supports ``"sgd"``, ``"adam"`` and ``"adamw"``.
Note:
This linear scaling rule can only work well with SGD. We are still looking for
the applicable scaling rule for Adam and AdamW. Thus we recommend keeping default
training settings (like learning rate and world size) when using Adam and AdamW.
"""
@classmethod
def build(cls, cfg: ConfigDict, model: M.Module) -> Solver:
"""Build function with the linear scaling strategy.
Args:
cfg: config for training.
model: model for training.
Returns:
A solver.
"""
amp_cfg = cfg.amp
cfg = cfg.solver
world_size = dist.get_world_size()
# build optimizer
lr = cfg.basic_lr * world_size # linear scaling rule
optim_params = get_param_groups(model, cfg.weight_decay)
optimizer = cls.build_optimizer(cfg, optim_params, lr, 0)
# build grad_manager
gm = GradManager()
callbacks = [ | dist.make_allreduce_cb("mean", dist.WORLD) | megengine.distributed.make_allreduce_cb |
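The callback registered above mean-allreduces each parameter's gradient across workers when `world_size > 1`, which is what makes data-parallel training behave like a larger-batch step (and motivates the linear lr scaling earlier). Conceptually it computes the following; this is a framework-free sketch, not the MegEngine collective itself.

```python
import numpy as np

def allreduce_mean(local_grads):
    """One gradient array per worker in, the synchronized (averaged) gradient out."""
    return np.mean(np.stack(local_grads, axis=0), axis=0)

g_worker0 = np.array([1.0, 2.0], dtype=np.float32)
g_worker1 = np.array([3.0, 4.0], dtype=np.float32)
print(allreduce_mean([g_worker0, g_worker1]))   # -> [2. 3.]
```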
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from collections import namedtuple
from typing import Iterable, Union
import megengine as mge
import megengine.distributed as dist
import megengine.module as M
import megengine.optimizer as optim
from basecore.config import ConfigDict
from megengine import Parameter
from megengine.amp import GradScaler
from megengine.autodiff import GradManager
from pkg_resources import packaging
from basecls.utils import registers
from .optimizer import LAMB, LARS, SGD
from .weight_decay import get_param_groups
__all__ = ["Solver", "BaseSolver", "DefaultSolver"]
Solver = namedtuple("Solver", ["optimizer", "grad_manager", "grad_scaler"])
class BaseSolver:
"""Base class for solver factory.
A solver factory should return a :py:class:`~Solver` object, which combines
an :py:class:`~megengine.optimizer.Optimizer` and
a :py:class:`~megengine.autodiff.GradManager`.
"""
@classmethod
def build(cls, cfg: ConfigDict, model: M.Module) -> Solver:
"""Abstract build function
Args:
cfg: config for training.
model: model for training.
Returns:
A solver.
"""
raise NotImplementedError
@registers.solvers.register()
class DefaultSolver(BaseSolver):
"""The default solver factory.
According to ``cfg.reduce_mode``, learning rate and weight decay will be scaled automatically
following the linear scaling rule, see
`"Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour"
<https://arxiv.org/abs/1706.02677>`_ for more details.
It supports ``"sgd"``, ``"adam"`` and ``"adamw"``.
Note:
This linear scaling rule can only work well with SGD. We are still looking for
the applicable scaling rule for Adam and AdamW. Thus we recommend keeping default
training settings (like learning rate and world size) when using Adam and AdamW.
"""
@classmethod
def build(cls, cfg: ConfigDict, model: M.Module) -> Solver:
"""Build function with the linear scaling strategy.
Args:
cfg: config for training.
model: model for training.
Returns:
A solver.
"""
amp_cfg = cfg.amp
cfg = cfg.solver
world_size = dist.get_world_size()
# build optimizer
lr = cfg.basic_lr * world_size # linear scaling rule
optim_params = get_param_groups(model, cfg.weight_decay)
optimizer = cls.build_optimizer(cfg, optim_params, lr, 0)
# build grad_manager
gm = GradManager()
callbacks = [dist.make_allreduce_cb("mean", dist.WORLD)] if world_size > 1 else None
gm.attach(model.parameters(), callbacks=callbacks)
# build grad_scaler
scaler = (
GradScaler(init_scale=65536.0, growth_interval=2000)
if amp_cfg.dynamic_scale
else GradScaler(init_scale=128.0, growth_interval=0)
)
return Solver(optimizer, gm, scaler)
@classmethod
def build_optimizer(
cls, cfg: ConfigDict, params: Union[Iterable[Parameter], dict], lr: float, wd: float
) -> optim.Optimizer:
"""Build optimizer according to training config.
Args:
cfg: config for training.
params: iterable of parameters to optimize or dicts defining parameter groups.
lr: learning rate.
wd: weight decay (L2 penalty).
Returns:
An optimizer.
"""
if cfg.optimizer == "adam":
return optim.Adam(params, lr=lr, weight_decay=wd, betas=cfg.betas)
elif cfg.optimizer == "adamw":
return | optim.AdamW(params, lr=lr, weight_decay=wd, betas=cfg.betas) | megengine.optimizer.AdamW |
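Only the `"adam"` and `"adamw"` branches of `build_optimizer` appear in the rows above. The sketch below shows how the dispatch plausibly continues for `"sgd"`; the import path and the extra config fields it reads (`cfg.momentum`, `cfg.nesterov`) are assumptions for illustration, not the verbatim basecls implementation.

```python
# from basecls.solver.optimizer import SGD   # hypothetical path for the SGD class above
import megengine.optimizer as optim

def build_optimizer_sketch(cfg, params, lr, wd, sgd_cls):
    """Dispatch on cfg.optimizer; sgd_cls is the SGD class defined at the top of this page."""
    if cfg.optimizer == "adam":
        return optim.Adam(params, lr=lr, weight_decay=wd, betas=cfg.betas)
    elif cfg.optimizer == "adamw":
        return optim.AdamW(params, lr=lr, weight_decay=wd, betas=cfg.betas)
    elif cfg.optimizer == "sgd":
        # cfg.momentum and cfg.nesterov are assumed config fields, for illustration only
        return sgd_cls(params, lr=lr, weight_decay=wd,
                       momentum=cfg.momentum, nesterov=cfg.nesterov)
    raise NotImplementedError(f"Optimizer {cfg.optimizer} is not supported")
```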
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
    assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = | Buffer(v) | megengine.Buffer |
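The `test_flatten` cases in the row above only assert output shapes. The small NumPy helper below restates that contract: `flatten(start_axis, end_axis)` merges the axes in that inclusive range into one axis and leaves the rest untouched; the helper itself is illustrative, not part of MegEngine.

```python
import numpy as np

def flatten_shape(shape, start_axis=0, end_axis=-1):
    """Shape arithmetic only; mirrors the expected outputs checked in test_flatten."""
    end_axis = end_axis % len(shape)
    merged = int(np.prod(shape[start_axis:end_axis + 1]))
    return shape[:start_axis] + (merged,) + shape[end_axis + 1:]

assert flatten_shape((2, 3, 4, 5)) == (2 * 3 * 4 * 5,)
assert flatten_shape((2, 3, 4, 5), start_axis=1) == (2, 3 * 4 * 5)
assert flatten_shape((2, 3, 4, 5), start_axis=2) == (2, 3, 4 * 5)
assert flatten_shape((2, 3, 4, 5), start_axis=1, end_axis=2) == (2, 3 * 4, 5)
```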
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
    assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = | F.add_update(b, 1) | megengine.functional.add_update |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
    assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = | F.add_update(b, 1) | megengine.functional.add_update |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
    assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = | tensor(x) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
    assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = | tensor(y) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
    assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = | Buffer(b) | megengine.Buffer |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
    # check the i-th batch slice against the numpy reference
    assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
| F.add_update(y, z, beta=0.1) | megengine.functional.add_update |
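The completion above is megengine.functional.add_update. As the assertions in these prompts show, F.add_update(dest, delta, alpha, beta, bias) updates dest in place to alpha * dest + beta * delta + bias and returns the new value. A minimal sketch of that behaviour, assuming the legacy MegEngine 0.x API these prompts target:

import numpy as np
import megengine.functional as F
from megengine import Buffer, tensor

dest = Buffer(np.ones((2, 2), dtype=np.float32))        # buffer updated in place
delta = tensor(np.full((2, 2), 0.5, dtype=np.float32))
# dest <- 0.9 * dest + 0.1 * delta + 0.1, i.e. 0.9 + 0.05 + 0.1 = 1.05 everywhere
out = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
print(out.numpy())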
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
    # check the i-th batch slice against the numpy reference
    assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
| assertTensorClose(res, b + 1) | megengine.test.assertTensorClose |
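The completion above is megengine.test.assertTensorClose, the comparison helper used throughout this file. A minimal sketch of how it is called here, assuming the same helper and the max_err keyword seen in test_binary_cross_entropy:

import numpy as np
from megengine.test import assertTensorClose

expected = np.ones((2, 3), dtype=np.float32)
actual = expected + 1e-6                             # tiny numerical difference
assertTensorClose(actual, expected, max_err=5e-4)    # passes: difference within tolerance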
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
    # check the i-th batch slice against the numpy reference
    assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = | mgb.dtype.qint8(inp_scale) | megengine._internal.dtype.qint8 |
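The completion above builds a quantized dtype with megengine._internal.dtype.qint8. In the skipped test_conv_bias that follows in each prompt, the scales combine as below; a minimal sketch, assuming the legacy megengine._internal dtype helpers shown in these prompts:

import megengine._internal as mgb

inp_scale, w_scale, outp_scale = 0.01, 0.02, 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)            # 8-bit quantized input
w_dtype = mgb.dtype.qint8(w_scale)                # 8-bit quantized weight
b_dtype = mgb.dtype.qint32(inp_scale * w_scale)   # bias scale is the product of input and weight scales
out_dtype = mgb.dtype.qint8(outp_scale)           # requantized 8-bit output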
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
    # check the i-th batch slice against the numpy reference
    assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)
w_dtype = | mgb.dtype.qint8(w_scale) | megengine._internal.dtype.qint8 |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
    # check the i-th batch slice against the numpy reference
    assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)
w_dtype = mgb.dtype.qint8(w_scale)
b_dtype = | mgb.dtype.qint32(inp_scale * w_scale) | megengine._internal.dtype.qint32 |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
    # check the i-th batch slice against the numpy reference
    assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)
w_dtype = mgb.dtype.qint8(w_scale)
b_dtype = mgb.dtype.qint32(inp_scale * w_scale)
out_dtype = | mgb.dtype.qint8(outp_scale) | megengine._internal.dtype.qint8 |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return | F.concat([data1, data2]) | megengine.functional.concat |
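The completion above is megengine.functional.concat, which test_concat checks against np.concatenate along the leading axis. A minimal usage sketch under the same legacy API:

import numpy as np
import megengine.functional as F
from megengine import tensor

a = tensor(np.random.random((5, 2, 3)).astype("float32"))
b = tensor(np.random.random((6, 2, 3)).astype("float32"))
out = F.concat([a, b])                   # concatenates along axis 0 by default
assert out.numpy().shape == (11, 2, 3)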
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return | F.add_update(y, x) | megengine.functional.add_update |
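A minimal sketch of the in-place update completed above (assuming the legacy MegEngine API used in these prompts; the assertions in test_add_update imply dest receives alpha * dest + beta * delta + bias, with apparent defaults alpha=1, beta=1, bias=0):
import numpy as np
import megengine.functional as F
from megengine import Buffer
v = np.ones((2, 3), dtype=np.float32)
buf = Buffer(v)
out = F.add_update(buf, 1)  # adds 1 to the buffer in place and returns the updated value
assert np.allclose(out.numpy(), v + 1)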
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)
w_dtype = mgb.dtype.qint8(w_scale)
b_dtype = mgb.dtype.qint32(inp_scale * w_scale)
out_dtype = mgb.dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="IDENTITY",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KW, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = | mgb.dtype.get_scale(inp_dtype) | megengine._internal.dtype.get_scale |
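A minimal sketch of the quantized-dtype helpers these completions exercise (assuming the legacy megengine._internal dtype module aliased as mgb in these prompts; the behavior is inferred from how test_conv_bias builds its operands, not from separate documentation):
import megengine._internal as mgb
inp_dtype = mgb.dtype.qint8(0.01)       # qint8 dtype carrying a quantization scale of 0.01
scale = mgb.dtype.get_scale(inp_dtype)  # recovers the scale stored in the dtype (0.01 here)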
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)
w_dtype = mgb.dtype.qint8(w_scale)
b_dtype = mgb.dtype.qint32(inp_scale * w_scale)
out_dtype = mgb.dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="IDENTITY",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KW, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = mgb.dtype.get_scale(inp_dtype)
w_scale = | mgb.dtype.get_scale(w_dtype) | megengine._internal.dtype.get_scale |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)
w_dtype = mgb.dtype.qint8(w_scale)
b_dtype = mgb.dtype.qint32(inp_scale * w_scale)
out_dtype = mgb.dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="IDENTITY",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KW, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = mgb.dtype.get_scale(inp_dtype)
w_scale = mgb.dtype.get_scale(w_dtype)
b_scale = | mgb.dtype.get_scale(b_dtype) | megengine._internal.dtype.get_scale |
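One detail worth noting from the prompt above: the bias dtype is built as qint32 with a scale equal to the product of the input and weight scales, so the int32 accumulator of an int8 by int8 product stays on a consistent scale. A small check under the same assumptions about the legacy mgb.dtype helpers as the earlier sketch:
import numpy as np
import megengine._internal as mgb
inp_dtype = mgb.dtype.qint8(0.01)
w_dtype = mgb.dtype.qint8(0.02)
b_dtype = mgb.dtype.qint32(mgb.dtype.get_scale(inp_dtype) * mgb.dtype.get_scale(w_dtype))
assert np.isclose(mgb.dtype.get_scale(b_dtype), 0.01 * 0.02)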
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)
w_dtype = mgb.dtype.qint8(w_scale)
b_dtype = mgb.dtype.qint32(inp_scale * w_scale)
out_dtype = mgb.dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="IDENTITY",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KW, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = mgb.dtype.get_scale(inp_dtype)
w_scale = mgb.dtype.get_scale(w_dtype)
b_scale = mgb.dtype.get_scale(b_dtype)
inpv = | mgb.dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype) | megengine._internal.dtype.convert_to_qint8 |
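A minimal sketch of the conversion completed above (same assumptions about the legacy mgb.dtype helpers; convert_to_qint8 is assumed to quantize the given float array into the qint8 storage dtype, mirroring how the prompt builds inpv):
import numpy as np
import megengine._internal as mgb
inp_dtype = mgb.dtype.qint8(0.01)
scale = mgb.dtype.get_scale(inp_dtype)
x = np.random.normal(size=(2, 3, 4, 4)).astype(np.float32)
xq = mgb.dtype.convert_to_qint8(x * scale, inp_dtype)  # quantized int8 payload, as in the prompt above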
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)
w_dtype = mgb.dtype.qint8(w_scale)
b_dtype = mgb.dtype.qint32(inp_scale * w_scale)
out_dtype = mgb.dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="IDENTITY",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KW, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = mgb.dtype.get_scale(inp_dtype)
w_scale = mgb.dtype.get_scale(w_dtype)
b_scale = mgb.dtype.get_scale(b_dtype)
inpv = mgb.dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = | mgb.dtype.convert_to_qint8(w_v * w_scale, w_dtype) | megengine._internal.dtype.convert_to_qint8 |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assert np.allclose(x.numpy()[i, ...], y)  # the bare == discarded its result; assert the i-th batch matches
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)
w_dtype = mgb.dtype.qint8(w_scale)
b_dtype = mgb.dtype.qint32(inp_scale * w_scale)
out_dtype = mgb.dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="IDENTITY",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))  # kernel shape uses KH for height (was KW twice)
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = mgb.dtype.get_scale(inp_dtype)
w_scale = mgb.dtype.get_scale(w_dtype)
b_scale = mgb.dtype.get_scale(b_dtype)
inpv = mgb.dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = mgb.dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = | mgb.dtype.convert_to_qint32(b_v * b_scale, b_dtype) | megengine._internal.dtype.convert_to_qint32 |
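# --- Illustrative sketch, not part of the dataset row above ---
# The qint8/qint32 dtypes in the prompt carry a scale. A generic symmetric
# quantize/dequantize round trip in plain numpy looks roughly like this; the exact
# rounding and clipping rules of mgb.dtype.convert_to_qint8/convert_to_qint32 may differ.
import numpy as np

def quantize(x, scale, qmin, qmax):
    # store x as integers of x / scale, clipped to the integer range
    return np.clip(np.round(x / scale), qmin, qmax).astype(np.int32)

def dequantize(q, scale):
    return q.astype(np.float32) * scale

x = np.random.uniform(-1, 1, size=4).astype(np.float32)
q8 = quantize(x, 0.01, -128, 127)                       # int8-style storage
assert np.allclose(dequantize(q8, 0.01), x, atol=0.01)   # error bounded by the scale
# A conv bias is commonly kept in int32 with scale = inp_scale * w_scale, which is
# exactly how b_dtype = mgb.dtype.qint32(inp_scale * w_scale) is built in the prompt.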
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assert np.allclose(x.numpy()[i, ...], y)  # the bare == discarded its result; assert the i-th batch matches
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)
w_dtype = mgb.dtype.qint8(w_scale)
b_dtype = mgb.dtype.qint32(inp_scale * w_scale)
out_dtype = mgb.dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="IDENTITY",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))  # kernel shape uses KH for height (was KW twice)
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = mgb.dtype.get_scale(inp_dtype)
w_scale = mgb.dtype.get_scale(w_dtype)
b_scale = mgb.dtype.get_scale(b_dtype)
inpv = mgb.dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = mgb.dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = mgb.dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = | tensor(inpv, dtype=inp_dtype) | megengine.tensor |
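# --- Illustrative sketch, not part of the dataset row above ---
# The completion here targets megengine.tensor. Assuming the legacy MegEngine API used
# throughout these prompts is available, a tensor wraps a numpy array and can be read
# back with .numpy() or cast with .astype(), mirroring tensor(x) in test_add_update above.
import numpy as np
from megengine import tensor

x = tensor(np.arange(6, dtype=np.float32).reshape(2, 3))
assert x.numpy().shape == (2, 3)
x_fp32 = x.astype("float32")  # a no-op cast here, but the same pattern the conv_bias test uses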
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assert np.allclose(x.numpy()[i, ...], y)  # the bare == discarded its result; assert the i-th batch matches
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)
w_dtype = mgb.dtype.qint8(w_scale)
b_dtype = mgb.dtype.qint32(inp_scale * w_scale)
out_dtype = mgb.dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="IDENTITY",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))  # kernel shape uses KH for height (was KW twice)
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = mgb.dtype.get_scale(inp_dtype)
w_scale = mgb.dtype.get_scale(w_dtype)
b_scale = mgb.dtype.get_scale(b_dtype)
inpv = mgb.dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = mgb.dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = mgb.dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = tensor(inpv, dtype=inp_dtype)
w_int8 = | Parameter(wv, dtype=w_dtype) | megengine.Parameter |
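# --- Illustrative sketch, not part of the dataset row above ---
# The completion here targets megengine.Parameter. Assuming the same legacy API, a
# Parameter is built from a numpy array just like a tensor (optionally with an explicit
# dtype, as in Parameter(wv, dtype=w_dtype) above) and is the state optimizers update.
import numpy as np
from megengine import Parameter

w = Parameter(np.random.random((3, 3)).astype(np.float32))
assert w.numpy().shape == (3, 3)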
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assert np.allclose(x.numpy()[i, ...], y)  # the bare == discarded its result; assert the i-th batch matches
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)
w_dtype = mgb.dtype.qint8(w_scale)
b_dtype = mgb.dtype.qint32(inp_scale * w_scale)
out_dtype = mgb.dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="IDENTITY",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))  # kernel shape uses KH for height (was KW twice)
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = mgb.dtype.get_scale(inp_dtype)
w_scale = mgb.dtype.get_scale(w_dtype)
b_scale = mgb.dtype.get_scale(b_dtype)
inpv = mgb.dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = mgb.dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = mgb.dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = tensor(inpv, dtype=inp_dtype)
w_int8 = Parameter(wv, dtype=w_dtype)
b_int32 = | Parameter(bv, dtype=b_dtype) | megengine.Parameter |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assert np.allclose(x.numpy()[i, ...], y)  # the bare == discarded its result; assert the i-th batch matches
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)
w_dtype = mgb.dtype.qint8(w_scale)
b_dtype = mgb.dtype.qint32(inp_scale * w_scale)
out_dtype = mgb.dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="IDENTITY",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))  # kernel shape uses KH for height (was KW twice)
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = mgb.dtype.get_scale(inp_dtype)
w_scale = mgb.dtype.get_scale(w_dtype)
b_scale = mgb.dtype.get_scale(b_dtype)
inpv = mgb.dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = mgb.dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = mgb.dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = tensor(inpv, dtype=inp_dtype)
w_int8 = Parameter(wv, dtype=w_dtype)
b_int32 = Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
jit.trace.enabled = True
b_symbolic = True
def convert_to_nchw4(var):
return var.reshape(
var.shapeof(0), var.shapeof(1) // 4, 4, var.shapeof(2), var.shapeof(3)
).dimshuffle(0, 1, 3, 4, 2)
@ | jit.trace(symbolic=b_symbolic) | megengine.jit.trace |
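# --- Illustrative sketch, not part of the dataset row above ---
# The completion here targets megengine.jit.trace. In these prompts it appears both bare
# (@jit.trace in test_add_update_params) and with arguments (@jit.trace(symbolic=...)).
# A minimal use, assuming the legacy behaviour shown in the prompts:
import numpy as np
from megengine import jit

@jit.trace(symbolic=True)
def double(x):
    return x * 2

res = double(np.ones((2, 2), dtype=np.float32))  # the prompts call traced functions with numpy inputs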
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assert np.allclose(x.numpy()[i, ...], y)  # the bare == discarded its result; assert the i-th batch matches
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)
w_dtype = mgb.dtype.qint8(w_scale)
b_dtype = mgb.dtype.qint32(inp_scale * w_scale)
out_dtype = mgb.dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="IDENTITY",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))  # kernel shape uses KH for height (was KW twice)
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = mgb.dtype.get_scale(inp_dtype)
w_scale = mgb.dtype.get_scale(w_dtype)
b_scale = mgb.dtype.get_scale(b_dtype)
inpv = mgb.dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = mgb.dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = mgb.dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = tensor(inpv, dtype=inp_dtype)
w_int8 = Parameter(wv, dtype=w_dtype)
b_int32 = Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
jit.trace.enabled = True
b_symbolic = True
def convert_to_nchw4(var):
return var.reshape(
var.shapeof(0), var.shapeof(1) // 4, 4, var.shapeof(2), var.shapeof(3)
).dimshuffle(0, 1, 3, 4, 2)
@jit.trace(symbolic=b_symbolic)
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "RELU":
return F.relu(O)
else:
return O
@ | jit.trace(symbolic=b_symbolic) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)
w_dtype = mgb.dtype.qint8(w_scale)
b_dtype = mgb.dtype.qint32(inp_scale * w_scale)
out_dtype = mgb.dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="IDENTITY",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = mgb.dtype.get_scale(inp_dtype)
w_scale = mgb.dtype.get_scale(w_dtype)
b_scale = mgb.dtype.get_scale(b_dtype)
inpv = mgb.dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = mgb.dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = mgb.dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = tensor(inpv, dtype=inp_dtype)
w_int8 = Parameter(wv, dtype=w_dtype)
b_int32 = Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
jit.trace.enabled = True
b_symbolic = True
def convert_to_nchw4(var):
return var.reshape(
var.shapeof(0), var.shapeof(1) // 4, 4, var.shapeof(2), var.shapeof(3)
).dimshuffle(0, 1, 3, 4, 2)
@jit.trace(symbolic=b_symbolic)
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "RELU":
return F.relu(O)
else:
return O
@jit.trace(symbolic=b_symbolic)
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else np.zeros_like(b)
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = F.flatten(b)
return F.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = result.dimshuffle(0, 1, 4, 2, 3)
expected = | F.flatten(expected) | megengine.functional.flatten |
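A small, self-contained sketch of `megengine.functional.flatten` (the API completed above), mirroring the shape expectations exercised in test_flatten; the input shape is an illustrative assumption.
import numpy as np
import megengine.functional as F
from megengine import tensor
# assumed example input; flatten collapses axes from start_axis through end_axis
x = tensor(np.random.random((2, 3, 4, 5)).astype(np.float32))
assert F.flatten(x).numpy().shape == (2 * 3 * 4 * 5,)
assert F.flatten(x, start_axis=1).numpy().shape == (2, 3 * 4 * 5)
assert F.flatten(x, start_axis=1, end_axis=2).numpy().shape == (2, 3 * 4, 5)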
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)
w_dtype = mgb.dtype.qint8(w_scale)
b_dtype = mgb.dtype.qint32(inp_scale * w_scale)
out_dtype = mgb.dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="IDENTITY",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = mgb.dtype.get_scale(inp_dtype)
w_scale = mgb.dtype.get_scale(w_dtype)
b_scale = mgb.dtype.get_scale(b_dtype)
inpv = mgb.dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = mgb.dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = mgb.dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = tensor(inpv, dtype=inp_dtype)
w_int8 = Parameter(wv, dtype=w_dtype)
b_int32 = Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
jit.trace.enabled = True
b_symbolic = True
def convert_to_nchw4(var):
return var.reshape(
var.shapeof(0), var.shapeof(1) // 4, 4, var.shapeof(2), var.shapeof(3)
).dimshuffle(0, 1, 3, 4, 2)
@jit.trace(symbolic=b_symbolic)
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "RELU":
return F.relu(O)
else:
return O
@jit.trace(symbolic=b_symbolic)
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else np.zeros_like(b)
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = F.flatten(b)
return F.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = result.dimshuffle(0, 1, 4, 2, 3)
expected = F.flatten(expected)
result = | F.flatten(result) | megengine.functional.flatten |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)
w_dtype = mgb.dtype.qint8(w_scale)
b_dtype = mgb.dtype.qint32(inp_scale * w_scale)
out_dtype = mgb.dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="IDENTITY",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = mgb.dtype.get_scale(inp_dtype)
w_scale = mgb.dtype.get_scale(w_dtype)
b_scale = mgb.dtype.get_scale(b_dtype)
inpv = mgb.dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = mgb.dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = mgb.dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = tensor(inpv, dtype=inp_dtype)
w_int8 = Parameter(wv, dtype=w_dtype)
b_int32 = Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
jit.trace.enabled = True
b_symbolic = True
def convert_to_nchw4(var):
return var.reshape(
var.shapeof(0), var.shapeof(1) // 4, 4, var.shapeof(2), var.shapeof(3)
).dimshuffle(0, 1, 3, 4, 2)
@jit.trace(symbolic=b_symbolic)
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "RELU":
return F.relu(O)
else:
return O
@jit.trace(symbolic=b_symbolic)
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else np.zeros_like(b)
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = F.flatten(b)
return F.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = result.dimshuffle(0, 1, 4, 2, 3)
expected = F.flatten(expected)
result = F.flatten(result)
assertTensorClose(result.numpy(), expected.numpy())
if not | is_cuda_available() | megengine.is_cuda_available |
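The API completed above, `megengine.is_cuda_available`, is a plain boolean query; a short sketch of how the test uses it to choose a tensor layout, with the fallback message here being an assumption.
from megengine import is_cuda_available
# NCHW4 is the CUDA-only layout expected by the fused conv_bias path in the test
format = "NCHW4" if is_cuda_available() else "NCHW"
if not is_cuda_available():
    print("CUDA not available; only the NCHW reference path will run")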
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha= | tensor(0.9) | megengine.tensor |
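A sketch of wrapping a Python scalar with `megengine.tensor` (the API completed above) so it can be passed as `alpha` to `F.add_update`, following test_add_update; the concrete numbers are illustrative.
import numpy as np
import megengine.functional as F
from megengine import tensor
from megengine.test import assertTensorClose
x = np.ones((2, 2), dtype=np.float32)
dest, delta = tensor(x), tensor(x * 0.5)
# alpha is passed as a 0-dim tensor, while beta and bias stay plain floats
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + (x * 0.5) * 0.1 + 0.1)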
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)
w_dtype = mgb.dtype.qint8(w_scale)
b_dtype = mgb.dtype.qint32(inp_scale * w_scale)
out_dtype = mgb.dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="IDENTITY",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = mgb.dtype.get_scale(inp_dtype)
w_scale = mgb.dtype.get_scale(w_dtype)
b_scale = mgb.dtype.get_scale(b_dtype)
inpv = mgb.dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = mgb.dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = mgb.dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = tensor(inpv, dtype=inp_dtype)
w_int8 = Parameter(wv, dtype=w_dtype)
b_int32 = Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
jit.trace.enabled = True
b_symbolic = True
def convert_to_nchw4(var):
return var.reshape(
var.shapeof(0), var.shapeof(1) // 4, 4, var.shapeof(2), var.shapeof(3)
).dimshuffle(0, 1, 3, 4, 2)
@jit.trace(symbolic=b_symbolic)
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "RELU":
return F.relu(O)
else:
return O
@jit.trace(symbolic=b_symbolic)
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else np.zeros_like(b)
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = F.flatten(b)
return F.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if | is_cuda_available() | megengine.is_cuda_available |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), | tensor(label1) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assert np.allclose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
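# F.add_update updates the buffer in place as dest = alpha * dest + beta * delta + bias; the check below uses non-default alpha, beta and bias.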
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), | tensor(label2) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assert np.allclose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
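# Expected outputs are built from the equivalent two-step computation: softmax followed by plain cross_entropy.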
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
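# Seeding below makes the random inputs reproducible, so expect1 and expect2 can be fixed precomputed losses.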
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)
w_dtype = mgb.dtype.qint8(w_scale)
b_dtype = mgb.dtype.qint32(inp_scale * w_scale)
out_dtype = mgb.dtype.qint8(outp_scale)
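# Quantization setup: qint8 inputs and weights accumulate into a qint32 bias whose scale is inp_scale * w_scale, and results are requantized to qint8 with outp_scale.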
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="IDENTITY",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KW, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = mgb.dtype.get_scale(inp_dtype)
w_scale = mgb.dtype.get_scale(w_dtype)
b_scale = mgb.dtype.get_scale(b_dtype)
inpv = mgb.dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = mgb.dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = mgb.dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = tensor(inpv, dtype=inp_dtype)
w_int8 = Parameter(wv, dtype=w_dtype)
b_int32 = Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
jit.trace.enabled = True
b_symbolic = True
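# convert_to_nchw4 below reshapes (N, C, H, W) into (N, C//4, H, W, 4): channels are split into groups of four carried in the last dimension (the NCHW4 layout used on the quantized path).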
def convert_to_nchw4(var):
return var.reshape(
var.shapeof(0), var.shapeof(1) // 4, 4, var.shapeof(2), var.shapeof(3)
).dimshuffle(0, 1, 3, 4, 2)
@jit.trace(symbolic=b_symbolic)
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "RELU":
return | F.relu(O) | megengine.functional.relu |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assert np.allclose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)
w_dtype = mgb.dtype.qint8(w_scale)
b_dtype = mgb.dtype.qint32(inp_scale * w_scale)
out_dtype = mgb.dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="IDENTITY",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KW, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = mgb.dtype.get_scale(inp_dtype)
w_scale = mgb.dtype.get_scale(w_dtype)
b_scale = mgb.dtype.get_scale(b_dtype)
inpv = mgb.dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = mgb.dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = mgb.dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = tensor(inpv, dtype=inp_dtype)
w_int8 = Parameter(wv, dtype=w_dtype)
b_int32 = Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
jit.trace.enabled = True
b_symbolic = True
def convert_to_nchw4(var):
return var.reshape(
var.shapeof(0), var.shapeof(1) // 4, 4, var.shapeof(2), var.shapeof(3)
).dimshuffle(0, 1, 3, 4, 2)
@jit.trace(symbolic=b_symbolic)
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "RELU":
return F.relu(O)
else:
return O
@jit.trace(symbolic=b_symbolic)
def run_conv_bias(inp, w, b, format="NCHW"):
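# On the NCHW4 path, activations and weights are converted to the blocked layout first and the bias is flattened before being passed on.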
b = b if has_bias else np.zeros_like(b)
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = | F.flatten(b) | megengine.functional.flatten |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assert np.allclose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax( | tensor(data1) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assert np.allclose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax( | tensor(data2) | megengine.tensor |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import json
import logging
import megengine as mge
import coloredlogs
import numpy as np
import megengine.functional as F
class Params():
"""Class that loads hyperparameters from a json file.
Example:
```
params = Params(json_path)
print(params.learning_rate)
params.learning_rate = 0.5 # change the value of learning_rate in params
```
"""
def __init__(self, json_path):
with open(json_path) as f:
params = json.load(f)
self.update(params)
def save(self, json_path):
with open(json_path, 'w') as f:
json.dump(self.__dict__, f, indent=4)
def update(self, dict):
"""Loads parameters from json file"""
self.__dict__.update(dict)
@property
def dict(self):
"""Gives dict-like access to Params instance by `params.dict['learning_rate']"""
return self.__dict__
class RunningAverage():
"""A simple class that maintains the running average of a quantity
Example:
```
loss_avg = RunningAverage()
loss_avg.update(2)
loss_avg.update(4)
loss_avg()  # returns 3.0
```
"""
def __init__(self):
self.steps = 0
self.total = 0
def update(self, val):
self.total += val
self.steps += 1
def __call__(self):
return self.total / float(self.steps)
class AverageMeter():
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.val_previous = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, num):
self.val_previous = self.val
self.val = val
self.sum += val * num
self.count += num
self.avg = self.sum / self.count
def loss_meter_manager_intial(loss_meter_names):
loss_meters = []
for name in loss_meter_names:
exec("%s = %s" % (name, 'AverageMeter()'))
exec("loss_meters.append(%s)" % name)
return loss_meters
def tensor_mge(batch, check_on=True):
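# With check_on=True, every numpy array in the batch dict becomes a megengine Tensor; otherwise each entry is converted back to a numpy array.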
if check_on:
for k, v in batch.items():
if isinstance(v, np.ndarray):
batch[k] = mge.Tensor(v)
else:
for k, v in batch.items():
batch[k] = v.numpy()
return batch
def set_logger(log_path):
"""Set the logger to log info in terminal and file `log_path`.
In general, it is useful to have a logger so that every output to the terminal is saved
in a permanent file. Here we save it to `model_dir/train.log`.
Example:
```
logging.info("Starting training...")
```
Args:
log_path: (string) where to log
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
coloredlogs.install(level='INFO', logger=logger, fmt='%(asctime)s %(name)s %(message)s')
file_handler = logging.FileHandler(log_path)
log_formatter = logging.Formatter('%(asctime)s - %(message)s')
file_handler.setFormatter(log_formatter)
logger.addHandler(file_handler)
logger.info('Output and logs will be saved to {}'.format(log_path))
return logger
def save_dict_to_json(d, json_path):
"""Saves dict of floats in json file
Args:
d: (dict) of float-castable values (np.float, int, float, etc.)
json_path: (string) path to json file
"""
save_dict = {}
with open(json_path, "w") as f:
# We need to convert the values to float for json (it doesn't accept np.array, np.float, etc.)
for k, v in d.items():
if isinstance(v, AverageMeter):
save_dict[k] = float(v.avg)
else:
save_dict[k] = float(v)
json.dump(save_dict, f, indent=4)
def upsample2d_flow_as(inputs, target_as, mode="bilinear", if_rate=False):
_, _, h, w = target_as.shape
res = | F.vision.interpolate(inputs, [h, w], mode=mode, align_corners=True) | megengine.functional.vision.interpolate |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import json
import logging
import megengine as mge
import coloredlogs
import numpy as np
import megengine.functional as F
class Params():
"""Class that loads hyperparameters from a json file.
Example:
```
params = Params(json_path)
print(params.learning_rate)
params.learning_rate = 0.5 # change the value of learning_rate in params
```
"""
def __init__(self, json_path):
with open(json_path) as f:
params = json.load(f)
self.update(params)
def save(self, json_path):
with open(json_path, 'w') as f:
json.dump(self.__dict__, f, indent=4)
def update(self, dict):
"""Loads parameters from json file"""
self.__dict__.update(dict)
@property
def dict(self):
"""Gives dict-like access to Params instance by `params.dict['learning_rate']"""
return self.__dict__
class RunningAverage():
"""A simple class that maintains the running average of a quantity
Example:
```
loss_avg = RunningAverage()
loss_avg.update(2)
loss_avg.update(4)
loss_avg()  # returns 3.0
```
"""
def __init__(self):
self.steps = 0
self.total = 0
def update(self, val):
self.total += val
self.steps += 1
def __call__(self):
return self.total / float(self.steps)
class AverageMeter():
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.val_previous = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, num):
self.val_previous = self.val
self.val = val
self.sum += val * num
self.count += num
self.avg = self.sum / self.count
def loss_meter_manager_intial(loss_meter_names):
loss_meters = []
for name in loss_meter_names:
exec("%s = %s" % (name, 'AverageMeter()'))
exec("loss_meters.append(%s)" % name)
return loss_meters
def tensor_mge(batch, check_on=True):
if check_on:
for k, v in batch.items():
if isinstance(v, np.ndarray):
batch[k] = mge.Tensor(v)
else:
for k, v in batch.items():
batch[k] = v.numpy()
return batch
def set_logger(log_path):
"""Set the logger to log info in terminal and file `log_path`.
In general, it is useful to have a logger so that every output to the terminal is saved
in a permanent file. Here we save it to `model_dir/train.log`.
Example:
```
logging.info("Starting training...")
```
Args:
log_path: (string) where to log
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
coloredlogs.install(level='INFO', logger=logger, fmt='%(asctime)s %(name)s %(message)s')
file_handler = logging.FileHandler(log_path)
log_formatter = logging.Formatter('%(asctime)s - %(message)s')
file_handler.setFormatter(log_formatter)
logger.addHandler(file_handler)
logger.info('Output and logs will be saved to {}'.format(log_path))
return logger
def save_dict_to_json(d, json_path):
"""Saves dict of floats in json file
Args:
d: (dict) of float-castable values (np.float, int, float, etc.)
json_path: (string) path to json file
"""
save_dict = {}
with open(json_path, "w") as f:
# We need to convert the values to float for json (it doesn't accept np.array, np.float, etc.)
for k, v in d.items():
if isinstance(v, AverageMeter):
save_dict[k] = float(v.avg)
else:
save_dict[k] = float(v)
json.dump(save_dict, f, indent=4)
def upsample2d_flow_as(inputs, target_as, mode="bilinear", if_rate=False):
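# Resize a flow field to the spatial size of target_as; with if_rate=True the u/v channels are also
# rescaled by the width/height ratios so the flow magnitudes match the new resolution.
# Illustrative call (hypothetical names): up_flow = upsample2d_flow_as(coarse_flow, image, if_rate=True)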
_, _, h, w = target_as.shape
res = F.vision.interpolate(inputs, [h, w], mode=mode, align_corners=True)
_, _, h_, w_ = inputs.shape
if if_rate:
u_scale = (w / w_)
v_scale = (h / h_)
res[:, 0] *= u_scale
res[:, 1] *= v_scale
return res
def mesh_grid(B, H, W):
# mesh grid
x_base = | F.arange(0, W) | megengine.functional.arange |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import json
import logging
import megengine as mge
import coloredlogs
import numpy as np
import megengine.functional as F
class Params():
"""Class that loads hyperparameters from a json file.
Example:
```
params = Params(json_path)
print(params.learning_rate)
params.learning_rate = 0.5 # change the value of learning_rate in params
```
"""
def __init__(self, json_path):
with open(json_path) as f:
params = json.load(f)
self.update(params)
def save(self, json_path):
with open(json_path, 'w') as f:
json.dump(self.__dict__, f, indent=4)
def update(self, dict):
"""Loads parameters from json file"""
self.__dict__.update(dict)
@property
def dict(self):
"""Gives dict-like access to Params instance by `params.dict['learning_rate']"""
return self.__dict__
class RunningAverage():
"""A simple class that maintains the running average of a quantity
Example:
```
loss_avg = RunningAverage()
loss_avg.update(2)
loss_avg.update(4)
loss_avg()  # returns 3.0
```
"""
def __init__(self):
self.steps = 0
self.total = 0
def update(self, val):
self.total += val
self.steps += 1
def __call__(self):
return self.total / float(self.steps)
class AverageMeter():
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.val_previous = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, num):
self.val_previous = self.val
self.val = val
self.sum += val * num
self.count += num
self.avg = self.sum / self.count
def loss_meter_manager_intial(loss_meter_names):
loss_meters = []
for name in loss_meter_names:
exec("%s = %s" % (name, 'AverageMeter()'))
exec("loss_meters.append(%s)" % name)
return loss_meters
def tensor_mge(batch, check_on=True):
if check_on:
for k, v in batch.items():
if isinstance(v, np.ndarray):
batch[k] = mge.Tensor(v)
else:
for k, v in batch.items():
batch[k] = v.numpy()
return batch
def set_logger(log_path):
"""Set the logger to log info in terminal and file `log_path`.
In general, it is useful to have a logger so that every output to the terminal is saved
in a permanent file. Here we save it to `model_dir/train.log`.
Example:
```
logging.info("Starting training...")
```
Args:
log_path: (string) where to log
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
coloredlogs.install(level='INFO', logger=logger, fmt='%(asctime)s %(name)s %(message)s')
file_handler = logging.FileHandler(log_path)
log_formatter = logging.Formatter('%(asctime)s - %(message)s')
file_handler.setFormatter(log_formatter)
logger.addHandler(file_handler)
logger.info('Output and logs will be saved to {}'.format(log_path))
return logger
def save_dict_to_json(d, json_path):
"""Saves dict of floats in json file
Args:
d: (dict) of float-castable values (np.float, int, float, etc.)
json_path: (string) path to json file
"""
save_dict = {}
with open(json_path, "w") as f:
# We need to convert the values to float for json (it doesn't accept np.array, np.float, etc.)
for k, v in d.items():
if isinstance(v, AverageMeter):
save_dict[k] = float(v.avg)
else:
save_dict[k] = float(v)
json.dump(save_dict, f, indent=4)
def upsample2d_flow_as(inputs, target_as, mode="bilinear", if_rate=False):
_, _, h, w = target_as.shape
res = F.vision.interpolate(inputs, [h, w], mode=mode, align_corners=True)
_, _, h_, w_ = inputs.shape
if if_rate:
u_scale = (w / w_)
v_scale = (h / h_)
res[:, 0] *= u_scale
res[:, 1] *= v_scale
return res
def mesh_grid(B, H, W):
# mesh grid
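# x_base holds column indices tiled to (B, H, W); y_base holds row indices, typically stacked with x_base into a (B, 2, H, W) pixel-coordinate grid for warping.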
x_base = F.arange(0, W)
x_base = | F.tile(x_base, (B, H, 1)) | megengine.functional.tile |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import json
import logging
import megengine as mge
import coloredlogs
import numpy as np
import megengine.functional as F
class Params():
"""Class that loads hyperparameters from a json file.
Example:
```
params = Params(json_path)
print(params.learning_rate)
params.learning_rate = 0.5 # change the value of learning_rate in params
```
"""
def __init__(self, json_path):
with open(json_path) as f:
params = json.load(f)
self.update(params)
def save(self, json_path):
with open(json_path, 'w') as f:
json.dump(self.__dict__, f, indent=4)
def update(self, dict):
"""Loads parameters from json file"""
self.__dict__.update(dict)
@property
def dict(self):
"""Gives dict-like access to Params instance by `params.dict['learning_rate']"""
return self.__dict__
class RunningAverage():
"""A simple class that maintains the running average of a quantity
Example:
```
loss_avg = RunningAverage()
loss_avg.update(2)
loss_avg.update(4)
loss_avg()  # returns 3.0
```
"""
def __init__(self):
self.steps = 0
self.total = 0
def update(self, val):
self.total += val
self.steps += 1
def __call__(self):
return self.total / float(self.steps)
class AverageMeter():
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.val_previous = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, num):
self.val_previous = self.val
self.val = val
self.sum += val * num
self.count += num
self.avg = self.sum / self.count
def loss_meter_manager_intial(loss_meter_names):
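# Builds one AverageMeter per name via exec; a plain list comprehension ([AverageMeter() for _ in loss_meter_names]) would do the same without exec.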
loss_meters = []
for name in loss_meter_names:
exec("%s = %s" % (name, 'AverageMeter()'))
exec("loss_meters.append(%s)" % name)
return loss_meters
def tensor_mge(batch, check_on=True):
if check_on:
for k, v in batch.items():
if isinstance(v, np.ndarray):
batch[k] = mge.Tensor(v)
else:
for k, v in batch.items():
batch[k] = v.numpy()
return batch
def set_logger(log_path):
"""Set the logger to log info in terminal and file `log_path`.
In general, it is useful to have a logger so that every output to the terminal is saved
in a permanent file. Here we save it to `model_dir/train.log`.
Example:
```
logging.info("Starting training...")
```
Args:
log_path: (string) where to log
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
coloredlogs.install(level='INFO', logger=logger, fmt='%(asctime)s %(name)s %(message)s')
file_handler = logging.FileHandler(log_path)
log_formatter = logging.Formatter('%(asctime)s - %(message)s')
file_handler.setFormatter(log_formatter)
logger.addHandler(file_handler)
logger.info('Output and logs will be saved to {}'.format(log_path))
return logger
def save_dict_to_json(d, json_path):
"""Saves dict of floats in json file
Args:
d: (dict) of float-castable values (np.float, int, float, etc.)
json_path: (string) path to json file
"""
save_dict = {}
with open(json_path, "w") as f:
# We need to convert the values to float for json (it doesn't accept np.array, np.float, etc.)
for k, v in d.items():
if isinstance(v, AverageMeter):
save_dict[k] = float(v.avg)
else:
save_dict[k] = float(v)
json.dump(save_dict, f, indent=4)
def upsample2d_flow_as(inputs, target_as, mode="bilinear", if_rate=False):
_, _, h, w = target_as.shape
res = F.vision.interpolate(inputs, [h, w], mode=mode, align_corners=True)
_, _, h_, w_ = inputs.shape
if if_rate:
u_scale = (w / w_)
v_scale = (h / h_)
res[:, 0] *= u_scale
res[:, 1] *= v_scale
return res
def mesh_grid(B, H, W):
# mesh grid
x_base = F.arange(0, W)
x_base = F.tile(x_base, (B, H, 1))
y_base = | F.arange(0, H) | megengine.functional.arange |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import json
import logging
import megengine as mge
import coloredlogs
import numpy as np
import megengine.functional as F
class Params():
"""Class that loads hyperparameters from a json file.
Example:
```
params = Params(json_path)
print(params.learning_rate)
params.learning_rate = 0.5 # change the value of learning_rate in params
```
"""
def __init__(self, json_path):
with open(json_path) as f:
params = json.load(f)
self.update(params)
def save(self, json_path):
with open(json_path, 'w') as f:
json.dump(self.__dict__, f, indent=4)
def update(self, dict):
"""Loads parameters from json file"""
self.__dict__.update(dict)
@property
def dict(self):
"""Gives dict-like access to Params instance by `params.dict['learning_rate']"""
return self.__dict__
class RunningAverage():
"""A simple class that maintains the running average of a quantity
Example:
```
loss_avg = RunningAverage()
loss_avg.update(2)
loss_avg.update(4)
loss_avg() = 3
```
"""
def __init__(self):
self.steps = 0
self.total = 0
def update(self, val):
self.total += val
self.steps += 1
def __call__(self):
return self.total / float(self.steps)
class AverageMeter():
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.val_previous = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, num):
self.val_previous = self.val
self.val = val
self.sum += val * num
self.count += num
self.avg = self.sum / self.count
def loss_meter_manager_intial(loss_meter_names):
loss_meters = []
for name in loss_meter_names:
exec("%s = %s" % (name, 'AverageMeter()'))
exec("loss_meters.append(%s)" % name)
return loss_meters
def tensor_mge(batch, check_on=True):
if check_on:
for k, v in batch.items():
if isinstance(v, np.ndarray):
batch[k] = mge.Tensor(v)
else:
for k, v in batch.items():
batch[k] = v.numpy()
return batch
def set_logger(log_path):
"""Set the logger to log info in terminal and file `log_path`.
In general, it is useful to have a logger so that every output to the terminal is saved
in a permanent file. Here we save it to `model_dir/train.log`.
Example:
```
logging.info("Starting training...")
```
Args:
log_path: (string) where to log
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
coloredlogs.install(level='INFO', logger=logger, fmt='%(asctime)s %(name)s %(message)s')
file_handler = logging.FileHandler(log_path)
log_formatter = logging.Formatter('%(asctime)s - %(message)s')
file_handler.setFormatter(log_formatter)
logger.addHandler(file_handler)
logger.info('Output and logs will be saved to {}'.format(log_path))
return logger
def save_dict_to_json(d, json_path):
"""Saves dict of floats in json file
Args:
d: (dict) of float-castable values (np.float, int, float, etc.)
json_path: (string) path to json file
"""
save_dict = {}
with open(json_path, "w") as f:
# We need to convert the values to float for json (it doesn't accept np.array, np.float, etc.)
for k, v in d.items():
if isinstance(v, AverageMeter):
save_dict[k] = float(v.avg)
else:
save_dict[k] = float(v)
json.dump(save_dict, f, indent=4)
def upsample2d_flow_as(inputs, target_as, mode="bilinear", if_rate=False):
_, _, h, w = target_as.shape
res = F.vision.interpolate(inputs, [h, w], mode=mode, align_corners=True)
_, _, h_, w_ = inputs.shape
if if_rate:
u_scale = (w / w_)
v_scale = (h / h_)
res[:, 0] *= u_scale
res[:, 1] *= v_scale
return res
def mesh_grid(B, H, W):
# mesh grid
x_base = F.arange(0, W)
x_base = F.tile(x_base, (B, H, 1))
y_base = F.arange(0, H) # BHW
y_base = F.tile(y_base, (B, W, 1)).transpose(0, 2, 1)
base_grid = | F.stack([x_base, y_base], 1) | megengine.functional.stack |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import json
import logging
import megengine as mge
import coloredlogs
import numpy as np
import megengine.functional as F
class Params():
"""Class that loads hyperparameters from a json file.
Example:
```
params = Params(json_path)
print(params.learning_rate)
params.learning_rate = 0.5 # change the value of learning_rate in params
```
"""
def __init__(self, json_path):
with open(json_path) as f:
params = json.load(f)
self.update(params)
def save(self, json_path):
with open(json_path, 'w') as f:
json.dump(self.__dict__, f, indent=4)
def update(self, dict):
"""Loads parameters from json file"""
self.__dict__.update(dict)
@property
def dict(self):
"""Gives dict-like access to Params instance by `params.dict['learning_rate']"""
return self.__dict__
class RunningAverage():
"""A simple class that maintains the running average of a quantity
Example:
```
loss_avg = RunningAverage()
loss_avg.update(2)
loss_avg.update(4)
loss_avg() = 3
```
"""
def __init__(self):
self.steps = 0
self.total = 0
def update(self, val):
self.total += val
self.steps += 1
def __call__(self):
return self.total / float(self.steps)
class AverageMeter():
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.val_previous = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, num):
self.val_previous = self.val
self.val = val
self.sum += val * num
self.count += num
self.avg = self.sum / self.count
def loss_meter_manager_intial(loss_meter_names):
loss_meters = []
for name in loss_meter_names:
exec("%s = %s" % (name, 'AverageMeter()'))
exec("loss_meters.append(%s)" % name)
return loss_meters
def tensor_mge(batch, check_on=True):
if check_on:
for k, v in batch.items():
if isinstance(v, np.ndarray):
batch[k] = mge.Tensor(v)
else:
for k, v in batch.items():
batch[k] = v.numpy()
return batch
def set_logger(log_path):
"""Set the logger to log info in terminal and file `log_path`.
In general, it is useful to have a logger so that every output to the terminal is saved
in a permanent file. Here we save it to `model_dir/train.log`.
Example:
```
logging.info("Starting training...")
```
Args:
log_path: (string) where to log
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
coloredlogs.install(level='INFO', logger=logger, fmt='%(asctime)s %(name)s %(message)s')
file_handler = logging.FileHandler(log_path)
log_formatter = logging.Formatter('%(asctime)s - %(message)s')
file_handler.setFormatter(log_formatter)
logger.addHandler(file_handler)
logger.info('Output and logs will be saved to {}'.format(log_path))
return logger
def save_dict_to_json(d, json_path):
"""Saves dict of floats in json file
Args:
d: (dict) of float-castable values (np.float, int, float, etc.)
json_path: (string) path to json file
"""
save_dict = {}
with open(json_path, "w") as f:
# We need to convert the values to float for json (it doesn't accept np.array, np.float, etc.)
for k, v in d.items():
if isinstance(v, AverageMeter):
save_dict[k] = float(v.avg)
else:
save_dict[k] = float(v)
json.dump(save_dict, f, indent=4)
def upsample2d_flow_as(inputs, target_as, mode="bilinear", if_rate=False):
_, _, h, w = target_as.shape
res = F.vision.interpolate(inputs, [h, w], mode=mode, align_corners=True)
_, _, h_, w_ = inputs.shape
if if_rate:
u_scale = (w / w_)
v_scale = (h / h_)
res[:, 0] *= u_scale
res[:, 1] *= v_scale
return res
def mesh_grid(B, H, W):
# mesh grid
x_base = F.arange(0, W)
x_base = F.tile(x_base, (B, H, 1))
y_base = F.arange(0, H) # BHW
y_base = F.tile(y_base, (B, W, 1)).transpose(0, 2, 1)
base_grid = F.stack([x_base, y_base], 1) # B2HW
return base_grid
def flow_warp(x, flow12):
B, _, H, W = x.shape
base_grid = mesh_grid(B, H, W).astype(x) # B2HW
grid_warp = base_grid + flow12
grid_warp = | F.transpose(grid_warp, (0, 2, 3, 1)) | megengine.functional.transpose |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import json
import logging
import megengine as mge
import coloredlogs
import numpy as np
import megengine.functional as F
class Params():
"""Class that loads hyperparameters from a json file.
Example:
```
params = Params(json_path)
print(params.learning_rate)
params.learning_rate = 0.5 # change the value of learning_rate in params
```
"""
def __init__(self, json_path):
with open(json_path) as f:
params = json.load(f)
self.update(params)
def save(self, json_path):
with open(json_path, 'w') as f:
json.dump(self.__dict__, f, indent=4)
def update(self, dict):
"""Loads parameters from json file"""
self.__dict__.update(dict)
@property
def dict(self):
"""Gives dict-like access to Params instance by `params.dict['learning_rate']"""
return self.__dict__
class RunningAverage():
"""A simple class that maintains the running average of a quantity
Example:
```
loss_avg = RunningAverage()
loss_avg.update(2)
loss_avg.update(4)
loss_avg() = 3
```
"""
def __init__(self):
self.steps = 0
self.total = 0
def update(self, val):
self.total += val
self.steps += 1
def __call__(self):
return self.total / float(self.steps)
class AverageMeter():
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.val_previous = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, num):
self.val_previous = self.val
self.val = val
self.sum += val * num
self.count += num
self.avg = self.sum / self.count
def loss_meter_manager_intial(loss_meter_names):
loss_meters = []
for name in loss_meter_names:
exec("%s = %s" % (name, 'AverageMeter()'))
exec("loss_meters.append(%s)" % name)
return loss_meters
def tensor_mge(batch, check_on=True):
if check_on:
for k, v in batch.items():
if isinstance(v, np.ndarray):
batch[k] = mge.Tensor(v)
else:
for k, v in batch.items():
batch[k] = v.numpy()
return batch
def set_logger(log_path):
"""Set the logger to log info in terminal and file `log_path`.
In general, it is useful to have a logger so that every output to the terminal is saved
in a permanent file. Here we save it to `model_dir/train.log`.
Example:
```
logging.info("Starting training...")
```
Args:
log_path: (string) where to log
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
coloredlogs.install(level='INFO', logger=logger, fmt='%(asctime)s %(name)s %(message)s')
file_handler = logging.FileHandler(log_path)
log_formatter = logging.Formatter('%(asctime)s - %(message)s')
file_handler.setFormatter(log_formatter)
logger.addHandler(file_handler)
logger.info('Output and logs will be saved to {}'.format(log_path))
return logger
def save_dict_to_json(d, json_path):
"""Saves dict of floats in json file
Args:
d: (dict) of float-castable values (np.float, int, float, etc.)
json_path: (string) path to json file
"""
save_dict = {}
with open(json_path, "w") as f:
# We need to convert the values to float for json (it doesn't accept np.array, np.float, etc.)
for k, v in d.items():
if isinstance(v, AverageMeter):
save_dict[k] = float(v.avg)
else:
save_dict[k] = float(v)
json.dump(save_dict, f, indent=4)
def upsample2d_flow_as(inputs, target_as, mode="bilinear", if_rate=False):
_, _, h, w = target_as.shape
res = F.vision.interpolate(inputs, [h, w], mode=mode, align_corners=True)
_, _, h_, w_ = inputs.shape
if if_rate:
u_scale = (w / w_)
v_scale = (h / h_)
res[:, 0] *= u_scale
res[:, 1] *= v_scale
return res
def mesh_grid(B, H, W):
# mesh grid
x_base = F.arange(0, W)
x_base = F.tile(x_base, (B, H, 1))
y_base = F.arange(0, H) # BHW
y_base = F.tile(y_base, (B, W, 1)).transpose(0, 2, 1)
base_grid = F.stack([x_base, y_base], 1) # B2HW
return base_grid
def flow_warp(x, flow12):
B, _, H, W = x.shape
base_grid = mesh_grid(B, H, W).astype(x) # B2HW
grid_warp = base_grid + flow12
grid_warp = F.transpose(grid_warp, (0, 2, 3, 1))
warp_imgs = | F.vision.remap(x, grid_warp) | megengine.functional.vision.remap |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import json
import logging
import megengine as mge
import coloredlogs
import numpy as np
import megengine.functional as F
class Params():
"""Class that loads hyperparameters from a json file.
Example:
```
params = Params(json_path)
print(params.learning_rate)
params.learning_rate = 0.5 # change the value of learning_rate in params
```
"""
def __init__(self, json_path):
with open(json_path) as f:
params = json.load(f)
self.update(params)
def save(self, json_path):
with open(json_path, 'w') as f:
json.dump(self.__dict__, f, indent=4)
def update(self, dict):
"""Loads parameters from json file"""
self.__dict__.update(dict)
@property
def dict(self):
"""Gives dict-like access to Params instance by `params.dict['learning_rate']"""
return self.__dict__
class RunningAverage():
"""A simple class that maintains the running average of a quantity
Example:
```
loss_avg = RunningAverage()
loss_avg.update(2)
loss_avg.update(4)
loss_avg() = 3
```
"""
def __init__(self):
self.steps = 0
self.total = 0
def update(self, val):
self.total += val
self.steps += 1
def __call__(self):
return self.total / float(self.steps)
class AverageMeter():
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.val_previous = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, num):
self.val_previous = self.val
self.val = val
self.sum += val * num
self.count += num
self.avg = self.sum / self.count
def loss_meter_manager_intial(loss_meter_names):
loss_meters = []
for name in loss_meter_names:
exec("%s = %s" % (name, 'AverageMeter()'))
exec("loss_meters.append(%s)" % name)
return loss_meters
def tensor_mge(batch, check_on=True):
if check_on:
for k, v in batch.items():
if isinstance(v, np.ndarray):
batch[k] = mge.Tensor(v)
else:
for k, v in batch.items():
batch[k] = v.numpy()
return batch
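# --- Editor's sketch (hypothetical usage, not part of the original dataset row) ---
# tensor_mge converts every value of a batch dict from numpy arrays to megengine
# Tensors (check_on=True) or back to numpy arrays (check_on=False, which assumes every
# value is already a Tensor). A minimal round trip, reusing the np/mge imports above:
#   batch = {"img": np.zeros((1, 3, 4, 4), dtype="float32")}
#   batch = tensor_mge(batch)                   # values become mge.Tensor
#   batch = tensor_mge(batch, check_on=False)   # values back to numpy arrays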
def set_logger(log_path):
"""Set the logger to log info in terminal and file `log_path`.
In general, it is useful to have a logger so that every output to the terminal is saved
in a permanent file. Here we save it to `model_dir/train.log`.
Example:
```
logging.info("Starting training...")
```
Args:
log_path: (string) where to log
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
coloredlogs.install(level='INFO', logger=logger, fmt='%(asctime)s %(name)s %(message)s')
file_handler = logging.FileHandler(log_path)
log_formatter = logging.Formatter('%(asctime)s - %(message)s')
file_handler.setFormatter(log_formatter)
logger.addHandler(file_handler)
logger.info('Output and logs will be saved to {}'.format(log_path))
return logger
def save_dict_to_json(d, json_path):
"""Saves dict of floats in json file
Args:
d: (dict) of float-castable values (np.float, int, float, etc.)
json_path: (string) path to json file
"""
save_dict = {}
with open(json_path, "w") as f:
# We need to convert the values to float for json (it doesn"t accept np.array, np.float, )
for k, v in d.items():
if isinstance(v, AverageMeter):
save_dict[k] = float(v.avg)
else:
save_dict[k] = float(v)
json.dump(save_dict, f, indent=4)
def upsample2d_flow_as(inputs, target_as, mode="bilinear", if_rate=False):
_, _, h, w = target_as.shape
res = F.vision.interpolate(inputs, [h, w], mode=mode, align_corners=True)
_, _, h_, w_ = inputs.shape
if if_rate:
u_scale = (w / w_)
v_scale = (h / h_)
res[:, 0] *= u_scale
res[:, 1] *= v_scale
return res
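# --- Editor's sketch (hypothetical check, not part of the original dataset row) ---
# upsample2d_flow_as resizes a flow field to the spatial size of `target_as`; with
# if_rate=True the u/v channels are also rescaled so the flow stays expressed in
# pixels of the new resolution (doubling H and W roughly doubles the flow values).
def _editor_check_upsample_flow():
    flow = mge.Tensor(np.ones((1, 2, 32, 32), dtype="float32"))
    target = mge.Tensor(np.zeros((1, 3, 64, 64), dtype="float32"))
    up = upsample2d_flow_as(flow, target, if_rate=True)
    return up.shape, up.numpy().mean()  # expected: (1, 2, 64, 64) and ~2.0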
def mesh_grid(B, H, W):
# mesh grid
x_base = F.arange(0, W)
x_base = F.tile(x_base, (B, H, 1))
y_base = F.arange(0, H) # BHW
y_base = F.tile(y_base, (B, W, 1)).transpose(0, 2, 1)
base_grid = F.stack([x_base, y_base], 1) # B2HW
return base_grid
def flow_warp(x, flow12):
B, _, H, W = x.shape
base_grid = mesh_grid(B, H, W).astype(x) # B2HW
grid_warp = base_grid + flow12
grid_warp = F.transpose(grid_warp, (0, 2, 3, 1))
warp_imgs = F.vision.remap(x, grid_warp)
return warp_imgs
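# --- Editor's sketch (hypothetical check, not part of the original dataset row) ---
# flow_warp builds an absolute sampling grid (mesh_grid + flow) and samples the source
# image with F.vision.remap, so a zero flow should reproduce the input up to
# interpolation error at the borders.
def _editor_check_zero_flow_warp():
    x = mge.Tensor(np.random.rand(1, 3, 8, 8).astype("float32"))
    zero_flow = mge.Tensor(np.zeros((1, 2, 8, 8), dtype="float32"))
    y = flow_warp(x, zero_flow)
    return np.allclose(y.numpy(), x.numpy(), atol=1e-5)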
def euclidean(t):
return F.sqrt(F.sum(t**2, axis=(1, ), keepdims=True))
def flow_error_avg(pred_flow, gt_flow):
_, _, H, W = gt_flow.shape
_, _, h, w = pred_flow.shape
assert (H == h) and (W == w), "inps shape is not the same: {} - {}".format((H, W), (h, w))
diff = euclidean(pred_flow - gt_flow)
diff_s = | F.mean(diff) | megengine.functional.mean |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import json
import logging
import megengine as mge
import coloredlogs
import numpy as np
import megengine.functional as F
class Params():
"""Class that loads hyperparameters from a json file.
Example:
```
params = Params(json_path)
print(params.learning_rate)
params.learning_rate = 0.5 # change the value of learning_rate in params
```
"""
def __init__(self, json_path):
with open(json_path) as f:
params = json.load(f)
self.update(params)
def save(self, json_path):
with open(json_path, 'w') as f:
json.dump(self.__dict__, f, indent=4)
def update(self, dict):
"""Loads parameters from json file"""
self.__dict__.update(dict)
@property
def dict(self):
"""Gives dict-like access to Params instance by `params.dict['learning_rate']"""
return self.__dict__
class RunningAverage():
"""A simple class that maintains the running average of a quantity
Example:
```
loss_avg = RunningAverage()
loss_avg.update(2)
loss_avg.update(4)
loss_avg() = 3
```
"""
def __init__(self):
self.steps = 0
self.total = 0
def update(self, val):
self.total += val
self.steps += 1
def __call__(self):
return self.total / float(self.steps)
class AverageMeter():
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.val_previous = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, num):
self.val_previous = self.val
self.val = val
self.sum += val * num
self.count += num
self.avg = self.sum / self.count
def loss_meter_manager_intial(loss_meter_names):
loss_meters = []
for name in loss_meter_names:
exec("%s = %s" % (name, 'AverageMeter()'))
exec("loss_meters.append(%s)" % name)
return loss_meters
def tensor_mge(batch, check_on=True):
if check_on:
for k, v in batch.items():
if isinstance(v, np.ndarray):
batch[k] = mge.Tensor(v)
else:
for k, v in batch.items():
batch[k] = v.numpy()
return batch
def set_logger(log_path):
"""Set the logger to log info in terminal and file `log_path`.
In general, it is useful to have a logger so that every output to the terminal is saved
in a permanent file. Here we save it to `model_dir/train.log`.
Example:
```
logging.info("Starting training...")
```
Args:
log_path: (string) where to log
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
coloredlogs.install(level='INFO', logger=logger, fmt='%(asctime)s %(name)s %(message)s')
file_handler = logging.FileHandler(log_path)
log_formatter = logging.Formatter('%(asctime)s - %(message)s')
file_handler.setFormatter(log_formatter)
logger.addHandler(file_handler)
logger.info('Output and logs will be saved to {}'.format(log_path))
return logger
def save_dict_to_json(d, json_path):
"""Saves dict of floats in json file
Args:
d: (dict) of float-castable values (np.float, int, float, etc.)
json_path: (string) path to json file
"""
save_dict = {}
with open(json_path, "w") as f:
# We need to convert the values to float for json (it doesn't accept np.array, np.float, etc.)
for k, v in d.items():
if isinstance(v, AverageMeter):
save_dict[k] = float(v.avg)
else:
save_dict[k] = float(v)
json.dump(save_dict, f, indent=4)
def upsample2d_flow_as(inputs, target_as, mode="bilinear", if_rate=False):
_, _, h, w = target_as.shape
res = F.vision.interpolate(inputs, [h, w], mode=mode, align_corners=True)
_, _, h_, w_ = inputs.shape
if if_rate:
u_scale = (w / w_)
v_scale = (h / h_)
res[:, 0] *= u_scale
res[:, 1] *= v_scale
return res
def mesh_grid(B, H, W):
# mesh grid
x_base = F.arange(0, W)
x_base = F.tile(x_base, (B, H, 1))
y_base = F.arange(0, H) # BHW
y_base = | F.tile(y_base, (B, W, 1)) | megengine.functional.tile |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import json
import logging
import megengine as mge
import coloredlogs
import numpy as np
import megengine.functional as F
class Params():
"""Class that loads hyperparameters from a json file.
Example:
```
params = Params(json_path)
print(params.learning_rate)
params.learning_rate = 0.5 # change the value of learning_rate in params
```
"""
def __init__(self, json_path):
with open(json_path) as f:
params = json.load(f)
self.update(params)
def save(self, json_path):
with open(json_path, 'w') as f:
json.dump(self.__dict__, f, indent=4)
def update(self, dict):
"""Loads parameters from json file"""
self.__dict__.update(dict)
@property
def dict(self):
"""Gives dict-like access to Params instance by `params.dict['learning_rate']"""
return self.__dict__
class RunningAverage():
"""A simple class that maintains the running average of a quantity
Example:
```
loss_avg = RunningAverage()
loss_avg.update(2)
loss_avg.update(4)
loss_avg() = 3
```
"""
def __init__(self):
self.steps = 0
self.total = 0
def update(self, val):
self.total += val
self.steps += 1
def __call__(self):
return self.total / float(self.steps)
class AverageMeter():
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.val_previous = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, num):
self.val_previous = self.val
self.val = val
self.sum += val * num
self.count += num
self.avg = self.sum / self.count
def loss_meter_manager_intial(loss_meter_names):
loss_meters = []
for name in loss_meter_names:
exec("%s = %s" % (name, 'AverageMeter()'))
exec("loss_meters.append(%s)" % name)
return loss_meters
def tensor_mge(batch, check_on=True):
if check_on:
for k, v in batch.items():
if isinstance(v, np.ndarray):
batch[k] = | mge.Tensor(v) | megengine.Tensor |
import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
@subgraph_fn(
"BatchNormNd",
dtype=dtype,
device=device,
nr_inputs=4,
interpret=interpret,
gopt_level=gopt_level,
)
def batch_norm_nd(inputs, f, c):
input, eps, weight, bias = inputs[0:4]
reduce_shape = c(
(1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
)
input_shape = f(GetVarShape(), input)
input_elems = f(Reduce(mode="product", axis=0), input_shape)
reduce_elems = f(Reduce(mode="product", axis=0), reduce_shape)
reduce_size = f("//", input_elems, reduce_elems)
reduce_size = f(TypeCvt(dtype=dtype), reduce_size)
channel_x1s = f(Reduce(mode="sum"), input, reduce_shape)
channel_x2s = f(Reduce(mode="sum_sqr"), input, reduce_shape)
channel_mean = f("/", channel_x1s, reduce_size)
channel_var = f(
"-", f("/", channel_x2s, reduce_size), f("*", channel_mean, channel_mean),
)
invsqrt_channel_var = f("**", f("+", channel_var, eps), c(-0.5))
inv_var_wt = f("*", invsqrt_channel_var, weight)
neg_channel_mean = f("-", channel_mean)
outvar = f(
"fma3", input, inv_var_wt, f("fma3", neg_channel_mean, inv_var_wt, bias),
)
return (outvar,), (True,)
return batch_norm_nd
@pytest.mark.parametrize("device", [get_default_device(), "cpux"])
@pytest.mark.parametrize("batch_size", [1, 8])
@pytest.mark.parametrize("channels", [3])
@pytest.mark.parametrize(
"use_trace, symbolic", [(False, None), (True, False), (True, True)]
)
@pytest.mark.parametrize("gopt_level", [None, 1, 2])
@pytest.mark.parametrize("dtype", ["float32"])
def test_subgraph(device, batch_size, channels, use_trace, symbolic, gopt_level, dtype):
device = | CompNode(device) | megengine.device.CompNode |
import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
@subgraph_fn(
"BatchNormNd",
dtype=dtype,
device=device,
nr_inputs=4,
interpret=interpret,
gopt_level=gopt_level,
)
def batch_norm_nd(inputs, f, c):
input, eps, weight, bias = inputs[0:4]
reduce_shape = c(
(1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
)
input_shape = f(GetVarShape(), input)
input_elems = f(Reduce(mode="product", axis=0), input_shape)
reduce_elems = f(Reduce(mode="product", axis=0), reduce_shape)
reduce_size = f("//", input_elems, reduce_elems)
reduce_size = f(TypeCvt(dtype=dtype), reduce_size)
channel_x1s = f(Reduce(mode="sum"), input, reduce_shape)
channel_x2s = f(Reduce(mode="sum_sqr"), input, reduce_shape)
channel_mean = f("/", channel_x1s, reduce_size)
channel_var = f(
"-", f("/", channel_x2s, reduce_size), f("*", channel_mean, channel_mean),
)
invsqrt_channel_var = f("**", f("+", channel_var, eps), c(-0.5))
inv_var_wt = f("*", invsqrt_channel_var, weight)
neg_channel_mean = f("-", channel_mean)
outvar = f(
"fma3", input, inv_var_wt, f("fma3", neg_channel_mean, inv_var_wt, bias),
)
return (outvar,), (True,)
return batch_norm_nd
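# --- Editor's sketch (hypothetical reference, not part of the original dataset row) ---
# The subgraph above computes a training-mode batch norm: per-channel mean/variance are
# reduced over every axis except the channel axis, then
#     out = (x - mean) / sqrt(var + eps) * weight + bias.
# A NumPy reference for an NCHW input with (1, C, 1, 1)-shaped weight/bias could be:
def _editor_batch_norm_reference(x, weight, bias, eps=1e-5):
    axes = (0, 2, 3)  # reduce over batch and spatial dims, keep the channel dim
    mean = x.mean(axis=axes, keepdims=True)
    var = (x ** 2).mean(axis=axes, keepdims=True) - mean ** 2
    return (x - mean) / np.sqrt(var + eps) * weight + bias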
@pytest.mark.parametrize("device", [get_default_device(), "cpux"])
@pytest.mark.parametrize("batch_size", [1, 8])
@pytest.mark.parametrize("channels", [3])
@pytest.mark.parametrize(
"use_trace, symbolic", [(False, None), (True, False), (True, True)]
)
@pytest.mark.parametrize("gopt_level", [None, 1, 2])
@pytest.mark.parametrize("dtype", ["float32"])
def test_subgraph(device, batch_size, channels, use_trace, symbolic, gopt_level, dtype):
device = CompNode(device)
def subgraph_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=False, gopt_level=gopt_level
)
out, *_ = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
def primitive_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=True, gopt_level=gopt_level
)
(out,) = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
if use_trace:
subgraph_batch_norm = trace(symbolic=symbolic)(subgraph_batch_norm)
primitive_batch_norm = trace(symbolic=symbolic)(primitive_batch_norm)
def rand_tensor(shape, dtype=dtype, device=device):
return megengine.tensor(np.random.random(shape), dtype=dtype, device=device)
# test shape change
for image_shape in [(223, 223), (10, 20)]:
ndim = len(image_shape) + 2
input_shape = (batch_size, channels) + image_shape
param_shape = (1, channels) + (1,) * len(image_shape)
inp = rand_tensor(input_shape) * 1e3 + 1e3
weight = rand_tensor(param_shape)
bias = rand_tensor(param_shape)
eps = | megengine.tensor(1e-5, dtype=dtype, device=device) | megengine.tensor |
import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
@subgraph_fn(
"BatchNormNd",
dtype=dtype,
device=device,
nr_inputs=4,
interpret=interpret,
gopt_level=gopt_level,
)
def batch_norm_nd(inputs, f, c):
input, eps, weight, bias = inputs[0:4]
reduce_shape = c(
(1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
)
input_shape = f(GetVarShape(), input)
input_elems = f(Reduce(mode="product", axis=0), input_shape)
reduce_elems = f(Reduce(mode="product", axis=0), reduce_shape)
reduce_size = f("//", input_elems, reduce_elems)
reduce_size = f(TypeCvt(dtype=dtype), reduce_size)
channel_x1s = f(Reduce(mode="sum"), input, reduce_shape)
channel_x2s = f(Reduce(mode="sum_sqr"), input, reduce_shape)
channel_mean = f("/", channel_x1s, reduce_size)
channel_var = f(
"-", f("/", channel_x2s, reduce_size), f("*", channel_mean, channel_mean),
)
invsqrt_channel_var = f("**", f("+", channel_var, eps), c(-0.5))
inv_var_wt = f("*", invsqrt_channel_var, weight)
neg_channel_mean = f("-", channel_mean)
outvar = f(
"fma3", input, inv_var_wt, f("fma3", neg_channel_mean, inv_var_wt, bias),
)
return (outvar,), (True,)
return batch_norm_nd
@pytest.mark.parametrize("device", [ | get_default_device() | megengine.device.get_default_device |
import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
@subgraph_fn(
"BatchNormNd",
dtype=dtype,
device=device,
nr_inputs=4,
interpret=interpret,
gopt_level=gopt_level,
)
def batch_norm_nd(inputs, f, c):
input, eps, weight, bias = inputs[0:4]
reduce_shape = c(
(1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
)
input_shape = f( | GetVarShape() | megengine.core.ops.builtin.GetVarShape |
import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
@subgraph_fn(
"BatchNormNd",
dtype=dtype,
device=device,
nr_inputs=4,
interpret=interpret,
gopt_level=gopt_level,
)
def batch_norm_nd(inputs, f, c):
input, eps, weight, bias = inputs[0:4]
reduce_shape = c(
(1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
)
input_shape = f(GetVarShape(), input)
input_elems = f( | Reduce(mode="product", axis=0) | megengine.core.ops.builtin.Reduce |
import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
@subgraph_fn(
"BatchNormNd",
dtype=dtype,
device=device,
nr_inputs=4,
interpret=interpret,
gopt_level=gopt_level,
)
def batch_norm_nd(inputs, f, c):
input, eps, weight, bias = inputs[0:4]
reduce_shape = c(
(1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
)
input_shape = f(GetVarShape(), input)
input_elems = f(Reduce(mode="product", axis=0), input_shape)
reduce_elems = f( | Reduce(mode="product", axis=0) | megengine.core.ops.builtin.Reduce |
import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
@subgraph_fn(
"BatchNormNd",
dtype=dtype,
device=device,
nr_inputs=4,
interpret=interpret,
gopt_level=gopt_level,
)
def batch_norm_nd(inputs, f, c):
input, eps, weight, bias = inputs[0:4]
reduce_shape = c(
(1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
)
input_shape = f(GetVarShape(), input)
input_elems = f(Reduce(mode="product", axis=0), input_shape)
reduce_elems = f(Reduce(mode="product", axis=0), reduce_shape)
reduce_size = f("//", input_elems, reduce_elems)
reduce_size = f( | TypeCvt(dtype=dtype) | megengine.core.ops.builtin.TypeCvt |
import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
@subgraph_fn(
"BatchNormNd",
dtype=dtype,
device=device,
nr_inputs=4,
interpret=interpret,
gopt_level=gopt_level,
)
def batch_norm_nd(inputs, f, c):
input, eps, weight, bias = inputs[0:4]
reduce_shape = c(
(1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
)
input_shape = f(GetVarShape(), input)
input_elems = f(Reduce(mode="product", axis=0), input_shape)
reduce_elems = f(Reduce(mode="product", axis=0), reduce_shape)
reduce_size = f("//", input_elems, reduce_elems)
reduce_size = f(TypeCvt(dtype=dtype), reduce_size)
channel_x1s = f( | Reduce(mode="sum") | megengine.core.ops.builtin.Reduce |
import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
@subgraph_fn(
"BatchNormNd",
dtype=dtype,
device=device,
nr_inputs=4,
interpret=interpret,
gopt_level=gopt_level,
)
def batch_norm_nd(inputs, f, c):
input, eps, weight, bias = inputs[0:4]
reduce_shape = c(
(1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
)
input_shape = f(GetVarShape(), input)
input_elems = f(Reduce(mode="product", axis=0), input_shape)
reduce_elems = f(Reduce(mode="product", axis=0), reduce_shape)
reduce_size = f("//", input_elems, reduce_elems)
reduce_size = f(TypeCvt(dtype=dtype), reduce_size)
channel_x1s = f(Reduce(mode="sum"), input, reduce_shape)
channel_x2s = f( | Reduce(mode="sum_sqr") | megengine.core.ops.builtin.Reduce |
import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
@subgraph_fn(
"BatchNormNd",
dtype=dtype,
device=device,
nr_inputs=4,
interpret=interpret,
gopt_level=gopt_level,
)
def batch_norm_nd(inputs, f, c):
input, eps, weight, bias = inputs[0:4]
reduce_shape = c(
(1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
)
input_shape = f(GetVarShape(), input)
input_elems = f(Reduce(mode="product", axis=0), input_shape)
reduce_elems = f(Reduce(mode="product", axis=0), reduce_shape)
reduce_size = f("//", input_elems, reduce_elems)
reduce_size = f(TypeCvt(dtype=dtype), reduce_size)
channel_x1s = f(Reduce(mode="sum"), input, reduce_shape)
channel_x2s = f(Reduce(mode="sum_sqr"), input, reduce_shape)
channel_mean = f("/", channel_x1s, reduce_size)
channel_var = f(
"-", f("/", channel_x2s, reduce_size), f("*", channel_mean, channel_mean),
)
invsqrt_channel_var = f("**", f("+", channel_var, eps), c(-0.5))
inv_var_wt = f("*", invsqrt_channel_var, weight)
neg_channel_mean = f("-", channel_mean)
outvar = f(
"fma3", input, inv_var_wt, f("fma3", neg_channel_mean, inv_var_wt, bias),
)
return (outvar,), (True,)
return batch_norm_nd
@pytest.mark.parametrize("device", [get_default_device(), "cpux"])
@pytest.mark.parametrize("batch_size", [1, 8])
@pytest.mark.parametrize("channels", [3])
@pytest.mark.parametrize(
"use_trace, symbolic", [(False, None), (True, False), (True, True)]
)
@pytest.mark.parametrize("gopt_level", [None, 1, 2])
@pytest.mark.parametrize("dtype", ["float32"])
def test_subgraph(device, batch_size, channels, use_trace, symbolic, gopt_level, dtype):
device = CompNode(device)
def subgraph_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=False, gopt_level=gopt_level
)
out, *_ = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
def primitive_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=True, gopt_level=gopt_level
)
(out,) = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
if use_trace:
subgraph_batch_norm = | trace(symbolic=symbolic) | megengine.jit.trace |
import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
@subgraph_fn(
"BatchNormNd",
dtype=dtype,
device=device,
nr_inputs=4,
interpret=interpret,
gopt_level=gopt_level,
)
def batch_norm_nd(inputs, f, c):
input, eps, weight, bias = inputs[0:4]
reduce_shape = c(
(1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
)
input_shape = f(GetVarShape(), input)
input_elems = f(Reduce(mode="product", axis=0), input_shape)
reduce_elems = f(Reduce(mode="product", axis=0), reduce_shape)
reduce_size = f("//", input_elems, reduce_elems)
reduce_size = f(TypeCvt(dtype=dtype), reduce_size)
channel_x1s = f(Reduce(mode="sum"), input, reduce_shape)
channel_x2s = f(Reduce(mode="sum_sqr"), input, reduce_shape)
channel_mean = f("/", channel_x1s, reduce_size)
channel_var = f(
"-", f("/", channel_x2s, reduce_size), f("*", channel_mean, channel_mean),
)
invsqrt_channel_var = f("**", f("+", channel_var, eps), c(-0.5))
inv_var_wt = f("*", invsqrt_channel_var, weight)
neg_channel_mean = f("-", channel_mean)
outvar = f(
"fma3", input, inv_var_wt, f("fma3", neg_channel_mean, inv_var_wt, bias),
)
return (outvar,), (True,)
return batch_norm_nd
@pytest.mark.parametrize("device", [get_default_device(), "cpux"])
@pytest.mark.parametrize("batch_size", [1, 8])
@pytest.mark.parametrize("channels", [3])
@pytest.mark.parametrize(
"use_trace, symbolic", [(False, None), (True, False), (True, True)]
)
@pytest.mark.parametrize("gopt_level", [None, 1, 2])
@pytest.mark.parametrize("dtype", ["float32"])
def test_subgraph(device, batch_size, channels, use_trace, symbolic, gopt_level, dtype):
device = CompNode(device)
def subgraph_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=False, gopt_level=gopt_level
)
out, *_ = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
def primitive_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=True, gopt_level=gopt_level
)
(out,) = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
if use_trace:
subgraph_batch_norm = trace(symbolic=symbolic)(subgraph_batch_norm)
primitive_batch_norm = | trace(symbolic=symbolic) | megengine.jit.trace |
import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
@subgraph_fn(
"BatchNormNd",
dtype=dtype,
device=device,
nr_inputs=4,
interpret=interpret,
gopt_level=gopt_level,
)
def batch_norm_nd(inputs, f, c):
input, eps, weight, bias = inputs[0:4]
reduce_shape = c(
(1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
)
input_shape = f(GetVarShape(), input)
input_elems = f(Reduce(mode="product", axis=0), input_shape)
reduce_elems = f(Reduce(mode="product", axis=0), reduce_shape)
reduce_size = f("//", input_elems, reduce_elems)
reduce_size = f(TypeCvt(dtype=dtype), reduce_size)
channel_x1s = f(Reduce(mode="sum"), input, reduce_shape)
channel_x2s = f(Reduce(mode="sum_sqr"), input, reduce_shape)
channel_mean = f("/", channel_x1s, reduce_size)
channel_var = f(
"-", f("/", channel_x2s, reduce_size), f("*", channel_mean, channel_mean),
)
invsqrt_channel_var = f("**", f("+", channel_var, eps), c(-0.5))
inv_var_wt = f("*", invsqrt_channel_var, weight)
neg_channel_mean = f("-", channel_mean)
outvar = f(
"fma3", input, inv_var_wt, f("fma3", neg_channel_mean, inv_var_wt, bias),
)
return (outvar,), (True,)
return batch_norm_nd
@pytest.mark.parametrize("device", [get_default_device(), "cpux"])
@pytest.mark.parametrize("batch_size", [1, 8])
@pytest.mark.parametrize("channels", [3])
@pytest.mark.parametrize(
"use_trace, symbolic", [(False, None), (True, False), (True, True)]
)
@pytest.mark.parametrize("gopt_level", [None, 1, 2])
@pytest.mark.parametrize("dtype", ["float32"])
def test_subgraph(device, batch_size, channels, use_trace, symbolic, gopt_level, dtype):
device = CompNode(device)
def subgraph_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with | GradManager() | megengine.autodiff.grad_manager.GradManager |
import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
@subgraph_fn(
"BatchNormNd",
dtype=dtype,
device=device,
nr_inputs=4,
interpret=interpret,
gopt_level=gopt_level,
)
def batch_norm_nd(inputs, f, c):
input, eps, weight, bias = inputs[0:4]
reduce_shape = c(
(1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
)
input_shape = f(GetVarShape(), input)
input_elems = f(Reduce(mode="product", axis=0), input_shape)
reduce_elems = f(Reduce(mode="product", axis=0), reduce_shape)
reduce_size = f("//", input_elems, reduce_elems)
reduce_size = f(TypeCvt(dtype=dtype), reduce_size)
channel_x1s = f(Reduce(mode="sum"), input, reduce_shape)
channel_x2s = f(Reduce(mode="sum_sqr"), input, reduce_shape)
channel_mean = f("/", channel_x1s, reduce_size)
channel_var = f(
"-", f("/", channel_x2s, reduce_size), f("*", channel_mean, channel_mean),
)
invsqrt_channel_var = f("**", f("+", channel_var, eps), c(-0.5))
inv_var_wt = f("*", invsqrt_channel_var, weight)
neg_channel_mean = f("-", channel_mean)
outvar = f(
"fma3", input, inv_var_wt, f("fma3", neg_channel_mean, inv_var_wt, bias),
)
return (outvar,), (True,)
return batch_norm_nd
@pytest.mark.parametrize("device", [get_default_device(), "cpux"])
@pytest.mark.parametrize("batch_size", [1, 8])
@pytest.mark.parametrize("channels", [3])
@pytest.mark.parametrize(
"use_trace, symbolic", [(False, None), (True, False), (True, True)]
)
@pytest.mark.parametrize("gopt_level", [None, 1, 2])
@pytest.mark.parametrize("dtype", ["float32"])
def test_subgraph(device, batch_size, channels, use_trace, symbolic, gopt_level, dtype):
device = CompNode(device)
def subgraph_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=False, gopt_level=gopt_level
)
out, *_ = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
def primitive_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with | GradManager() | megengine.autodiff.grad_manager.GradManager |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
class AdaptiveAvgPool2d(M.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return F.mean(F.mean(x, axis=-2, keepdims=True), axis=-1, keepdims=True)
class AdaptiveMaxPool2d(M.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return F.max(F.max(x, axis=-2, keepdims=True), axis=-1, keepdims=True)
class ChannelAttention(M.Module):
def __init__(self, in_planes, ratio=16):
super().__init__()
self.avg_pool = AdaptiveAvgPool2d()
self.max_pool = AdaptiveMaxPool2d()
self.sharedMLP = M.Sequential(
M.Conv2d(in_planes, in_planes // ratio, 1, bias=False), M.ReLU(),
M.Conv2d(in_planes // ratio, in_planes, 1, bias=False))
self.sigmoid = | M.Sigmoid() | megengine.module.Sigmoid |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
class AdaptiveAvgPool2d(M.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return F.mean(F.mean(x, axis=-2, keepdims=True), axis=-1, keepdims=True)
class AdaptiveMaxPool2d(M.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return F.max(F.max(x, axis=-2, keepdims=True), axis=-1, keepdims=True)
class ChannelAttention(M.Module):
def __init__(self, in_planes, ratio=16):
super().__init__()
self.avg_pool = AdaptiveAvgPool2d()
self.max_pool = AdaptiveMaxPool2d()
self.sharedMLP = M.Sequential(
M.Conv2d(in_planes, in_planes // ratio, 1, bias=False), M.ReLU(),
M.Conv2d(in_planes // ratio, in_planes, 1, bias=False))
self.sigmoid = M.Sigmoid()
def forward(self, x):
avgout = self.sharedMLP(self.avg_pool(x))
maxout = self.sharedMLP(self.max_pool(x))
return self.sigmoid(avgout + maxout)
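# --- Editor's sketch (hypothetical usage, not part of the original dataset row) ---
# This is the channel branch of a CBAM-style block: average- and max-pooled descriptors
# share one bottleneck MLP, and the sigmoid of their sum is a per-channel gate in (0, 1)
# that is normally multiplied back onto the feature map:
#   ca = ChannelAttention(64)
#   feat = mge.Tensor(np.random.rand(2, 64, 16, 16).astype("float32"))
#   gated = feat * ca(feat)   # the gate has shape (2, 64, 1, 1) and broadcasts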
class SpatialAttention(M.Module):
def __init__(self, kernel_size=3):
super().__init__()
self.conv = | M.Conv2d(2,1,kernel_size, padding=1, bias=False) | megengine.module.Conv2d |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
class AdaptiveAvgPool2d(M.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return F.mean(F.mean(x, axis=-2, keepdims=True), axis=-1, keepdims=True)
class AdaptiveMaxPool2d(M.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return F.max(F.max(x, axis=-2, keepdims=True), axis=-1, keepdims=True)
class ChannelAttention(M.Module):
def __init__(self, in_planes, ratio=16):
super().__init__()
self.avg_pool = AdaptiveAvgPool2d()
self.max_pool = AdaptiveMaxPool2d()
self.sharedMLP = M.Sequential(
M.Conv2d(in_planes, in_planes // ratio, 1, bias=False), M.ReLU(),
M.Conv2d(in_planes // ratio, in_planes, 1, bias=False))
self.sigmoid = M.Sigmoid()
def forward(self, x):
avgout = self.sharedMLP(self.avg_pool(x))
maxout = self.sharedMLP(self.max_pool(x))
return self.sigmoid(avgout + maxout)
class SpatialAttention(M.Module):
def __init__(self, kernel_size=3):
super().__init__()
self.conv = M.Conv2d(2, 1, kernel_size, padding=1, bias=False)
self.sigmoid = | M.Sigmoid() | megengine.module.Sigmoid |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
class AdaptiveAvgPool2d(M.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return F.mean( | F.mean(x, axis=-2, keepdims=True) | megengine.functional.mean |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
class AdaptiveAvgPool2d(M.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return F.mean(F.mean(x, axis=-2, keepdims=True), axis=-1, keepdims=True)
class AdaptiveMaxPool2d(M.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return F.max( | F.max(x, axis=-2, keepdims=True) | megengine.functional.max |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
class AdaptiveAvgPool2d(M.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return F.mean(F.mean(x, axis=-2, keepdims=True), axis=-1, keepdims=True)
class AdaptiveMaxPool2d(M.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return F.max(F.max(x, axis=-2, keepdims=True), axis=-1, keepdims=True)
class ChannelAttention(M.Module):
def __init__(self, in_planes, ratio=16):
super().__init__()
self.avg_pool = AdaptiveAvgPool2d()
self.max_pool = AdaptiveMaxPool2d()
self.sharedMLP = M.Sequential(
| M.Conv2d(in_planes, in_planes // ratio, 1, bias=False) | megengine.module.Conv2d |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
class AdaptiveAvgPool2d(M.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return F.mean(F.mean(x, axis=-2, keepdims=True), axis=-1, keepdims=True)
class AdaptiveMaxPool2d(M.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return F.max(F.max(x, axis=-2, keepdims=True), axis=-1, keepdims=True)
class ChannelAttention(M.Module):
def __init__(self, in_planes, ratio=16):
super().__init__()
self.avg_pool = AdaptiveAvgPool2d()
self.max_pool = AdaptiveMaxPool2d()
self.sharedMLP = M.Sequential(
M.Conv2d(in_planes, in_planes // ratio, 1, bias=False), | M.ReLU() | megengine.module.ReLU |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
class AdaptiveAvgPool2d(M.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return F.mean(F.mean(x, axis=-2, keepdims=True), axis=-1, keepdims=True)
class AdaptiveMaxPool2d(M.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return F.max(F.max(x, axis=-2, keepdims=True), axis=-1, keepdims=True)
class ChannelAttention(M.Module):
def __init__(self, in_planes, ratio=16):
super().__init__()
self.avg_pool = AdaptiveAvgPool2d()
self.max_pool = AdaptiveMaxPool2d()
self.sharedMLP = M.Sequential(
M.Conv2d(in_planes, in_planes // ratio, 1, bias=False), M.ReLU(),
| M.Conv2d(in_planes // ratio, in_planes, 1, bias=False) | megengine.module.Conv2d |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from typing import Callable, Union
import megengine as mge
import megengine.functional as F
import megengine.module as M
from .activations import activation
__all__ = ["conv2d", "norm2d", "pool2d", "gap2d", "linear", "SE", "DropPath"]
def conv2d(
w_in: int,
w_out: int,
k: int,
*,
stride: int = 1,
dilation: int = 1,
groups: int = 1,
bias: bool = False,
) -> M.Conv2d:
"""Helper for building a conv2d layer.
It will calculate padding automatically.
Args:
w_in: input width.
w_out: output width.
k: kernel size.
stride: stride. Default: ``1``
dilation: dilation. Default: ``1``
groups: groups. Default: ``1``
bias: enable bias or not. Default: ``False``
Returns:
A conv2d module.
"""
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
s, p, d, g, b = stride, (k - 1) * dilation // 2, dilation, groups, bias
return | M.Conv2d(w_in, w_out, k, stride=s, padding=p, dilation=d, groups=g, bias=b) | megengine.module.Conv2d |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from typing import Callable, Union
import megengine as mge
import megengine.functional as F
import megengine.module as M
from .activations import activation
__all__ = ["conv2d", "norm2d", "pool2d", "gap2d", "linear", "SE", "DropPath"]
def conv2d(
w_in: int,
w_out: int,
k: int,
*,
stride: int = 1,
dilation: int = 1,
groups: int = 1,
bias: bool = False,
) -> M.Conv2d:
"""Helper for building a conv2d layer.
It will calculate padding automatically.
Args:
w_in: input width.
w_out: output width.
k: kernel size.
stride: stride. Default: ``1``
dilation: dilation. Default: ``1``
groups: groups. Default: ``1``
bias: enable bias or not. Default: ``False``
Returns:
A conv2d module.
"""
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
s, p, d, g, b = stride, (k - 1) * dilation // 2, dilation, groups, bias
return M.Conv2d(w_in, w_out, k, stride=s, padding=p, dilation=d, groups=g, bias=b)
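# --- Editor's sketch (hypothetical usage, not part of the original dataset row) ---
# The padding p = (k - 1) * dilation // 2 keeps the spatial size unchanged for odd
# kernels whenever stride == 1, e.g. k=3 with dilation=2 gives p=2:
#   conv = conv2d(16, 32, 3, dilation=2)
#   y = conv(F.zeros((1, 16, 8, 8)))   # y.shape == (1, 32, 8, 8)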
def norm2d(name: Union[str, Callable], w_in: int, **kwargs) -> M.Module:
"""Helper for building a norm2d layer.
Args:
name: normalization name, supports ``None``, ``"BN"``, ``"GN"``, ``"IN"``, ``"LN"``
and ``"SyncBN"``.
w_in: input width.
Returns:
A norm2d module.
"""
if name is None:
return M.Identity()
if callable(name):
return name(w_in, **kwargs)
if isinstance(name, str):
norm_funcs = {
"BN": M.BatchNorm2d,
"GN": M.GroupNorm,
"IN": M.InstanceNorm,
"LN": M.LayerNorm,
"SyncBN": M.SyncBatchNorm,
}
if name in norm_funcs.keys():
return norm_funcs[name](w_in, **kwargs)
raise ValueError(f"Norm name '{name}' not supported")
def pool2d(k: int, *, stride: int = 1, name: str = "max") -> M.Module:
"""Helper for building a pool2d layer.
Args:
k: kernel size.
stride: stride. Default: ``1``
name: pooling name, supports ``"avg"`` and ``"max"``.
Returns:
A pool2d module.
"""
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
pool_funcs = {
"avg": M.AvgPool2d,
"max": M.MaxPool2d,
}
if name not in pool_funcs.keys():
raise ValueError(f"Pool name '{name}' not supported")
return pool_funcs[name](k, stride=stride, padding=(k - 1) // 2)
def gap2d(shape=1) -> M.AdaptiveAvgPool2d:
"""Helper for building a gap2d layer.
Args:
shape: output shape. Default: ``1``
Returns:
A gap2d module.
"""
return | M.AdaptiveAvgPool2d(shape) | megengine.module.AdaptiveAvgPool2d |
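# Usage sketch for the factory helpers above (assumes this snippet's definitions are in scope).
bn = norm2d("BN", 64)        # -> M.BatchNorm2d(64)
no_norm = norm2d(None, 64)   # -> M.Identity(), i.e. normalization disabled
mp = pool2d(3, stride=2)     # -> M.MaxPool2d(3, stride=2, padding=1)
gap = gap2d()                # -> M.AdaptiveAvgPool2d(1)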
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from typing import Callable, Union
import megengine as mge
import megengine.functional as F
import megengine.module as M
from .activations import activation
__all__ = ["conv2d", "norm2d", "pool2d", "gap2d", "linear", "SE", "DropPath"]
def conv2d(
w_in: int,
w_out: int,
k: int,
*,
stride: int = 1,
dilation: int = 1,
groups: int = 1,
bias: bool = False,
) -> M.Conv2d:
"""Helper for building a conv2d layer.
It will calculate padding automatically.
Args:
w_in: input width.
w_out: output width.
k: kernel size.
stride: stride. Default: ``1``
dilation: dilation. Default: ``1``
groups: groups. Default: ``1``
bias: enable bias or not. Default: ``False``
Returns:
A conv2d module.
"""
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
s, p, d, g, b = stride, (k - 1) * dilation // 2, dilation, groups, bias
return M.Conv2d(w_in, w_out, k, stride=s, padding=p, dilation=d, groups=g, bias=b)
def norm2d(name: Union[str, Callable], w_in: int, **kwargs) -> M.Module:
"""Helper for building a norm2d layer.
Args:
        name: normalization name, supports ``None``, ``"BN"``, ``"GN"``, ``"IN"``, ``"LN"``
and ``"SyncBN"``.
w_in: input width.
Returns:
A norm2d module.
"""
if name is None:
return M.Identity()
if callable(name):
return name(w_in, **kwargs)
if isinstance(name, str):
norm_funcs = {
"BN": M.BatchNorm2d,
"GN": M.GroupNorm,
"IN": M.InstanceNorm,
"LN": M.LayerNorm,
"SyncBN": M.SyncBatchNorm,
}
if name in norm_funcs.keys():
return norm_funcs[name](w_in, **kwargs)
raise ValueError(f"Norm name '{name}' not supported")
def pool2d(k: int, *, stride: int = 1, name: str = "max") -> M.Module:
"""Helper for building a pool2d layer.
Args:
k: kernel size.
stride: stride. Default: ``1``
name: pooling name, supports ``"avg"`` and ``"max"``.
Returns:
A pool2d module.
"""
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
pool_funcs = {
"avg": M.AvgPool2d,
"max": M.MaxPool2d,
}
if name not in pool_funcs.keys():
raise ValueError(f"Pool name '{name}' not supported")
return pool_funcs[name](k, stride=stride, padding=(k - 1) // 2)
def gap2d(shape=1) -> M.AdaptiveAvgPool2d:
"""Helper for building a gap2d layer.
Args:
shape: output shape. Default: ``1``
Returns:
A gap2d module.
"""
return M.AdaptiveAvgPool2d(shape)
def linear(w_in: int, w_out: int, *, bias: bool = False) -> M.Linear:
"""Helper for building a linear layer.
Args:
w_in: input width.
w_out: output width.
bias: enable bias or not. Default: ``False``
Returns:
A linear module.
"""
return | M.Linear(w_in, w_out, bias=bias) | megengine.module.Linear |
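# Minimal classifier-head sketch chaining gap2d and linear from above; the flatten step is an
# assumption about typical usage, not code from this snippet.
import megengine.functional as F
import megengine.module as M

class HeadSketch(M.Module):
    def __init__(self, w_in: int, num_classes: int):
        super().__init__()
        self.gap = gap2d()                              # N x C x H x W -> N x C x 1 x 1
        self.fc = linear(w_in, num_classes, bias=True)

    def forward(self, x):
        return self.fc(F.flatten(self.gap(x), 1))       # N x C -> N x num_classes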
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from typing import Callable, Union
import megengine as mge
import megengine.functional as F
import megengine.module as M
from .activations import activation
__all__ = ["conv2d", "norm2d", "pool2d", "gap2d", "linear", "SE", "DropPath"]
def conv2d(
w_in: int,
w_out: int,
k: int,
*,
stride: int = 1,
dilation: int = 1,
groups: int = 1,
bias: bool = False,
) -> M.Conv2d:
"""Helper for building a conv2d layer.
It will calculate padding automatically.
Args:
w_in: input width.
w_out: output width.
k: kernel size.
stride: stride. Default: ``1``
dilation: dilation. Default: ``1``
groups: groups. Default: ``1``
bias: enable bias or not. Default: ``False``
Returns:
A conv2d module.
"""
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
s, p, d, g, b = stride, (k - 1) * dilation // 2, dilation, groups, bias
return M.Conv2d(w_in, w_out, k, stride=s, padding=p, dilation=d, groups=g, bias=b)
def norm2d(name: Union[str, Callable], w_in: int, **kwargs) -> M.Module:
"""Helper for building a norm2d layer.
Args:
        name: normalization name, supports ``None``, ``"BN"``, ``"GN"``, ``"IN"``, ``"LN"``
and ``"SyncBN"``.
w_in: input width.
Returns:
A norm2d module.
"""
if name is None:
return | M.Identity() | megengine.module.Identity |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from typing import Callable, Union
import megengine as mge
import megengine.functional as F
import megengine.module as M
from .activations import activation
__all__ = ["conv2d", "norm2d", "pool2d", "gap2d", "linear", "SE", "DropPath"]
def conv2d(
w_in: int,
w_out: int,
k: int,
*,
stride: int = 1,
dilation: int = 1,
groups: int = 1,
bias: bool = False,
) -> M.Conv2d:
"""Helper for building a conv2d layer.
It will calculate padding automatically.
Args:
w_in: input width.
w_out: output width.
k: kernel size.
stride: stride. Default: ``1``
dilation: dilation. Default: ``1``
groups: groups. Default: ``1``
bias: enable bias or not. Default: ``False``
Returns:
A conv2d module.
"""
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
s, p, d, g, b = stride, (k - 1) * dilation // 2, dilation, groups, bias
return M.Conv2d(w_in, w_out, k, stride=s, padding=p, dilation=d, groups=g, bias=b)
def norm2d(name: Union[str, Callable], w_in: int, **kwargs) -> M.Module:
"""Helper for building a norm2d layer.
Args:
        name: normalization name, supports ``None``, ``"BN"``, ``"GN"``, ``"IN"``, ``"LN"``
and ``"SyncBN"``.
w_in: input width.
Returns:
A norm2d module.
"""
if name is None:
return M.Identity()
if callable(name):
return name(w_in, **kwargs)
if isinstance(name, str):
norm_funcs = {
"BN": M.BatchNorm2d,
"GN": M.GroupNorm,
"IN": M.InstanceNorm,
"LN": M.LayerNorm,
"SyncBN": M.SyncBatchNorm,
}
if name in norm_funcs.keys():
return norm_funcs[name](w_in, **kwargs)
raise ValueError(f"Norm name '{name}' not supported")
def pool2d(k: int, *, stride: int = 1, name: str = "max") -> M.Module:
"""Helper for building a pool2d layer.
Args:
k: kernel size.
stride: stride. Default: ``1``
name: pooling name, supports ``"avg"`` and ``"max"``.
Returns:
A pool2d module.
"""
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
pool_funcs = {
"avg": M.AvgPool2d,
"max": M.MaxPool2d,
}
if name not in pool_funcs.keys():
raise ValueError(f"Pool name '{name}' not supported")
return pool_funcs[name](k, stride=stride, padding=(k - 1) // 2)
def gap2d(shape=1) -> M.AdaptiveAvgPool2d:
"""Helper for building a gap2d layer.
Args:
shape: output shape. Default: ``1``
Returns:
A gap2d module.
"""
return M.AdaptiveAvgPool2d(shape)
def linear(w_in: int, w_out: int, *, bias: bool = False) -> M.Linear:
"""Helper for building a linear layer.
Args:
w_in: input width.
w_out: output width.
bias: enable bias or not. Default: ``False``
Returns:
A linear module.
"""
return M.Linear(w_in, w_out, bias=bias)
class SE(M.Module):
"""Squeeze-and-Excitation (SE) block: AvgPool, FC, Act, FC, Sigmoid.
Args:
w_in: input width.
w_se: se width.
act_name: activation name.
approx_sigmoid: approximated sigmoid function.
Attributes:
        avg_pool: gap2d layer.
        f_ex: sequential block combining conv2d -> act -> conv2d -> sigmoid.
"""
def __init__(self, w_in: int, w_se: int, act_name: str, approx_sigmoid: bool = False):
super().__init__()
self.avg_pool = gap2d()
self.f_ex = M.Sequential(
conv2d(w_in, w_se, 1, bias=True),
activation(act_name),
conv2d(w_se, w_in, 1, bias=True),
activation("hsigmoid") if approx_sigmoid else M.Sigmoid(),
)
def forward(self, x: mge.Tensor) -> mge.Tensor:
return x * self.f_ex(self.avg_pool(x))
class DropPath(M.Dropout):
"""DropPath block.
Args:
drop_prob: the probability to drop (set to zero) each path.
"""
def forward(self, x: mge.Tensor):
if not self.training or self.drop_prob == 0.0:
return x
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
mask = | F.ones(shape) | megengine.functional.ones |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from typing import Callable, Union
import megengine as mge
import megengine.functional as F
import megengine.module as M
from .activations import activation
__all__ = ["conv2d", "norm2d", "pool2d", "gap2d", "linear", "SE", "DropPath"]
def conv2d(
w_in: int,
w_out: int,
k: int,
*,
stride: int = 1,
dilation: int = 1,
groups: int = 1,
bias: bool = False,
) -> M.Conv2d:
"""Helper for building a conv2d layer.
It will calculate padding automatically.
Args:
w_in: input width.
w_out: output width.
k: kernel size.
stride: stride. Default: ``1``
dilation: dilation. Default: ``1``
groups: groups. Default: ``1``
bias: enable bias or not. Default: ``False``
Returns:
A conv2d module.
"""
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
s, p, d, g, b = stride, (k - 1) * dilation // 2, dilation, groups, bias
return M.Conv2d(w_in, w_out, k, stride=s, padding=p, dilation=d, groups=g, bias=b)
def norm2d(name: Union[str, Callable], w_in: int, **kwargs) -> M.Module:
"""Helper for building a norm2d layer.
Args:
        name: normalization name, supports ``None``, ``"BN"``, ``"GN"``, ``"IN"``, ``"LN"``
and ``"SyncBN"``.
w_in: input width.
Returns:
A norm2d module.
"""
if name is None:
return M.Identity()
if callable(name):
return name(w_in, **kwargs)
if isinstance(name, str):
norm_funcs = {
"BN": M.BatchNorm2d,
"GN": M.GroupNorm,
"IN": M.InstanceNorm,
"LN": M.LayerNorm,
"SyncBN": M.SyncBatchNorm,
}
if name in norm_funcs.keys():
return norm_funcs[name](w_in, **kwargs)
raise ValueError(f"Norm name '{name}' not supported")
def pool2d(k: int, *, stride: int = 1, name: str = "max") -> M.Module:
"""Helper for building a pool2d layer.
Args:
k: kernel size.
stride: stride. Default: ``1``
name: pooling name, supports ``"avg"`` and ``"max"``.
Returns:
A pool2d module.
"""
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
pool_funcs = {
"avg": M.AvgPool2d,
"max": M.MaxPool2d,
}
if name not in pool_funcs.keys():
raise ValueError(f"Pool name '{name}' not supported")
return pool_funcs[name](k, stride=stride, padding=(k - 1) // 2)
def gap2d(shape=1) -> M.AdaptiveAvgPool2d:
"""Helper for building a gap2d layer.
Args:
shape: output shape. Default: ``1``
Returns:
A gap2d module.
"""
return M.AdaptiveAvgPool2d(shape)
def linear(w_in: int, w_out: int, *, bias: bool = False) -> M.Linear:
"""Helper for building a linear layer.
Args:
w_in: input width.
w_out: output width.
bias: enable bias or not. Default: ``False``
Returns:
A linear module.
"""
return M.Linear(w_in, w_out, bias=bias)
class SE(M.Module):
"""Squeeze-and-Excitation (SE) block: AvgPool, FC, Act, FC, Sigmoid.
Args:
w_in: input width.
w_se: se width.
act_name: activation name.
approx_sigmoid: approximated sigmoid function.
Attributes:
        avg_pool: gap2d layer.
        f_ex: sequential block combining conv2d -> act -> conv2d -> sigmoid.
"""
def __init__(self, w_in: int, w_se: int, act_name: str, approx_sigmoid: bool = False):
super().__init__()
self.avg_pool = gap2d()
self.f_ex = M.Sequential(
conv2d(w_in, w_se, 1, bias=True),
activation(act_name),
conv2d(w_se, w_in, 1, bias=True),
activation("hsigmoid") if approx_sigmoid else M.Sigmoid(),
)
def forward(self, x: mge.Tensor) -> mge.Tensor:
return x * self.f_ex(self.avg_pool(x))
class DropPath(M.Dropout):
"""DropPath block.
Args:
drop_prob: the probability to drop (set to zero) each path.
"""
def forward(self, x: mge.Tensor):
if not self.training or self.drop_prob == 0.0:
return x
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
mask = F.ones(shape)
mask = | F.dropout(mask, self.drop_prob, training=self.training) | megengine.functional.dropout |
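# Behavior sketch for DropPath above. F.dropout zeroes each per-sample mask entry with
# probability drop_prob and rescales the survivors by 1 / (1 - drop_prob), so the output keeps
# the input's expectation (stochastic depth). Shapes and numbers below are illustrative.
import numpy as np
import megengine as mge
import megengine.functional as F

x = mge.tensor(np.ones((4, 8, 2, 2), dtype=np.float32))
mask = F.dropout(F.ones((4, 1, 1, 1)), 0.25, training=True)  # one scalar gate per sample
out = x * mask  # dropped samples are all-zero, kept samples are scaled by 1 / 0.75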
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from typing import Callable, Union
import megengine as mge
import megengine.functional as F
import megengine.module as M
from .activations import activation
__all__ = ["conv2d", "norm2d", "pool2d", "gap2d", "linear", "SE", "DropPath"]
def conv2d(
w_in: int,
w_out: int,
k: int,
*,
stride: int = 1,
dilation: int = 1,
groups: int = 1,
bias: bool = False,
) -> M.Conv2d:
"""Helper for building a conv2d layer.
It will calculate padding automatically.
Args:
w_in: input width.
w_out: output width.
k: kernel size.
stride: stride. Default: ``1``
dilation: dilation. Default: ``1``
groups: groups. Default: ``1``
bias: enable bias or not. Default: ``False``
Returns:
A conv2d module.
"""
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
s, p, d, g, b = stride, (k - 1) * dilation // 2, dilation, groups, bias
return M.Conv2d(w_in, w_out, k, stride=s, padding=p, dilation=d, groups=g, bias=b)
def norm2d(name: Union[str, Callable], w_in: int, **kwargs) -> M.Module:
"""Helper for building a norm2d layer.
Args:
        name: normalization name, supports ``None``, ``"BN"``, ``"GN"``, ``"IN"``, ``"LN"``
and ``"SyncBN"``.
w_in: input width.
Returns:
A norm2d module.
"""
if name is None:
return M.Identity()
if callable(name):
return name(w_in, **kwargs)
if isinstance(name, str):
norm_funcs = {
"BN": M.BatchNorm2d,
"GN": M.GroupNorm,
"IN": M.InstanceNorm,
"LN": M.LayerNorm,
"SyncBN": M.SyncBatchNorm,
}
if name in norm_funcs.keys():
return norm_funcs[name](w_in, **kwargs)
raise ValueError(f"Norm name '{name}' not supported")
def pool2d(k: int, *, stride: int = 1, name: str = "max") -> M.Module:
"""Helper for building a pool2d layer.
Args:
k: kernel size.
stride: stride. Default: ``1``
name: pooling name, supports ``"avg"`` and ``"max"``.
Returns:
A pool2d module.
"""
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
pool_funcs = {
"avg": M.AvgPool2d,
"max": M.MaxPool2d,
}
if name not in pool_funcs.keys():
raise ValueError(f"Pool name '{name}' not supported")
return pool_funcs[name](k, stride=stride, padding=(k - 1) // 2)
def gap2d(shape=1) -> M.AdaptiveAvgPool2d:
"""Helper for building a gap2d layer.
Args:
shape: output shape. Default: ``1``
Returns:
A gap2d module.
"""
return M.AdaptiveAvgPool2d(shape)
def linear(w_in: int, w_out: int, *, bias: bool = False) -> M.Linear:
"""Helper for building a linear layer.
Args:
w_in: input width.
w_out: output width.
bias: enable bias or not. Default: ``False``
Returns:
A linear module.
"""
return M.Linear(w_in, w_out, bias=bias)
class SE(M.Module):
"""Squeeze-and-Excitation (SE) block: AvgPool, FC, Act, FC, Sigmoid.
Args:
w_in: input width.
w_se: se width.
act_name: activation name.
approx_sigmoid: approximated sigmoid function.
Attributes:
        avg_pool: gap2d layer.
        f_ex: sequential block combining conv2d -> act -> conv2d -> sigmoid.
"""
def __init__(self, w_in: int, w_se: int, act_name: str, approx_sigmoid: bool = False):
super().__init__()
self.avg_pool = gap2d()
self.f_ex = M.Sequential(
conv2d(w_in, w_se, 1, bias=True),
activation(act_name),
conv2d(w_se, w_in, 1, bias=True),
activation("hsigmoid") if approx_sigmoid else | M.Sigmoid() | megengine.module.Sigmoid |
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import json
import math
import os
from megengine.utils.module_stats import sizeof_fmt
from megengine.utils.tensorboard import SummaryWriterExtend
def load_single_graph(fpath):
with open(fpath) as fin:
data = json.load(fin)
for t in ["operator", "var"]:
data[t] = {int(i): j for i, j in data[t].items()}
gvars = data["var"]
for oid, i in data["operator"].items():
i["input"] = list(map(int, i["input"]))
out = i["output"] = list(map(int, i["output"]))
for j in out:
gvars[j]["owner_opr"] = oid
for var in data["var"].values():
mp = var.get("mem_plan", None)
if mp:
var["shape"] = "{" + ",".join(map(str, mp["layout"]["shape"])) + "}"
else:
var["shape"] = "<?>"
return data
def comp_graph_plotter(input, writer):
jgraph = load_single_graph(input)
all_oprs = jgraph["operator"]
all_vars = jgraph["var"]
for i in all_oprs:
opr = all_oprs[i]
if opr["type"] == "ImmutableTensor":
continue
inputlist = []
for var in opr["input"]:
inpopr = all_oprs[all_vars[var]["owner_opr"]]
if inpopr["type"] == "ImmutableTensor":
continue
inputlist.append(all_oprs[all_vars[var]["owner_opr"]]["name"])
writer.add_node_raw(opr["name"], opr["type"], inputlist)
writer.add_graph_by_node_raw_list()
def load_mem_info(fpath):
with open(fpath) as fin:
data = json.load(fin)
oprs = data["opr"]
for oid, i in oprs.items():
i["size"] = 0
for oid, i in data["chunk"].items():
i["size"] = int(i["logic_addr_end"]) - int(i["logic_addr_begin"])
data["peak_memory"] = 0
data["weight_memory"] = 0
for oid, i in data["chunk"].items():
if i["type"] == "static_mem":
i["owner_opr"] = oprs[i["time_begin"]]["name"]
life_begin = int(i["time_begin"])
life_end = int(i["time_end"])
if i["overwrite_dest_id"] != "-1":
life_begin = life_begin + 1
if data["peak_memory"] < int(i["logic_addr_end"]):
data["peak_memory"] = int(i["logic_addr_end"])
for j in range(life_begin, life_end):
oprs[str(j)]["size"] = oprs[str(j)]["size"] + i["size"]
elif i["type"] == "weight_mem":
data["weight_memory"] += int(i["logic_addr_end"]) - int(
i["logic_addr_begin"]
)
return data
def peak_mem_regist(input, writer):
jmem = load_mem_info(input)
writer.add_text(
"PEAK_MEMORY_SIZE",
[sizeof_fmt(jmem["peak_memory"]) + "(" + str(jmem["peak_memory"]) + " B)"],
)
writer.add_text(
"WEIGHT_MEMORY_SIZE",
[sizeof_fmt(jmem["weight_memory"]) + "(" + str(jmem["weight_memory"]) + " B)"],
)
all_oprs = jmem["opr"]
all_chunks = jmem["chunk"]
max_size = 0
max_size_oprs = []
# get oprs that reach the max memory
for oid, i in all_oprs.items():
if i["size"] == max_size:
max_size_oprs.append(int(i["id"]))
elif i["size"] > max_size:
max_size = i["size"]
max_size_oprs.clear()
max_size_oprs.append(int(i["id"]))
# get component of chunks
max_size_oprs.sort()
opr2chunks = []
num = len(max_size_oprs)
for i in range(num):
opr2chunks.append([])
for oid, i in all_chunks.items():
if i["type"] == "static_mem":
life_begin = int(i["time_begin"])
life_end = int(i["time_end"])
if i["overwrite_dest_id"] != "-1":
life_begin = life_begin + 1
if max_size_oprs[0] >= life_end or max_size_oprs[-1] < life_begin:
continue
for j in range(num):
if max_size_oprs[j] >= life_end:
break
elif max_size_oprs[j] >= life_begin:
opr2chunks[j].append(i["id"])
peak_num = 0
for i in range(num):
suffix_1 = "PEAK" + str(peak_num)
if i - 1 > 0 and opr2chunks[i - 1] == opr2chunks[i]:
continue
max_num = 0
opr2chunks[i] = sorted(
opr2chunks[i],
key=lambda chunk_id: all_chunks[chunk_id]["size"],
reverse=True,
)
writer.add_text(
suffix_1 + "/" + "<SUMMARY_INFO>",
["reached_max_opr_name: " + all_oprs[str(max_size_oprs[i])]["name"]],
0,
)
writer.add_text(
suffix_1 + "/" + "<SUMMARY_INFO>",
["max_used_size: " + sizeof_fmt(max_size)],
1,
)
for j in opr2chunks[i]:
suffix_2 = "MAX" + str(max_num)
j_size = sizeof_fmt(all_chunks[j]["size"])
j_percent = round(all_chunks[j]["size"] / max_size * 100, 3)
writer.add_text(
suffix_1 + "/" + suffix_2 + "_OPR",
["percent: " + str(j_percent) + "%"],
0,
)
writer.add_text(
suffix_1 + "/" + suffix_2 + "_OPR", ["memory_size: " + j_size], 1,
)
writer.add_text(
suffix_1 + "/" + suffix_2 + "_OPR",
["owner_opr: " + all_chunks[j]["owner_opr"]],
2,
)
writer.add_node_raw_attributes(
all_chunks[j]["owner_opr"],
{
"memory_" + all_chunks[j]["id"]: j_size,
"memory_percent": str(j_percent) + "%",
"summary_memory_" + str(peak_num): sizeof_fmt(max_size),
},
)
writer.add_node_raw_name_suffix(
all_chunks[j]["owner_opr"], "_" + suffix_1 + "_" + suffix_2
)
max_num += 1
peak_num += 1
writer.add_graph_by_node_raw_list()
def convert(args):
file_process_order = {
"graph.json": comp_graph_plotter,
"StaticMemoryInfo.json": peak_mem_regist,
}
g = os.walk(args.input)
for path, dir_list, file_list in g:
out_path = path.replace(args.input, args.output)
writer = | SummaryWriterExtend(out_path) | megengine.utils.tensorboard.SummaryWriterExtend |
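# Usage sketch for the converter above; directory names are invented for illustration.
# From the visible code, convert() appears to walk args.input, mirror the tree under args.output,
# and hand each known json (graph.json -> comp_graph_plotter, StaticMemoryInfo.json ->
# peak_mem_regist) to a SummaryWriterExtend so the result can be inspected in TensorBoard.
from argparse import Namespace

convert(Namespace(input="./profile_dump", output="./tb_events"))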
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import json
import math
import os
from megengine.utils.module_stats import sizeof_fmt
from megengine.utils.tensorboard import SummaryWriterExtend
def load_single_graph(fpath):
with open(fpath) as fin:
data = json.load(fin)
for t in ["operator", "var"]:
data[t] = {int(i): j for i, j in data[t].items()}
gvars = data["var"]
for oid, i in data["operator"].items():
i["input"] = list(map(int, i["input"]))
out = i["output"] = list(map(int, i["output"]))
for j in out:
gvars[j]["owner_opr"] = oid
for var in data["var"].values():
mp = var.get("mem_plan", None)
if mp:
var["shape"] = "{" + ",".join(map(str, mp["layout"]["shape"])) + "}"
else:
var["shape"] = "<?>"
return data
def comp_graph_plotter(input, writer):
jgraph = load_single_graph(input)
all_oprs = jgraph["operator"]
all_vars = jgraph["var"]
for i in all_oprs:
opr = all_oprs[i]
if opr["type"] == "ImmutableTensor":
continue
inputlist = []
for var in opr["input"]:
inpopr = all_oprs[all_vars[var]["owner_opr"]]
if inpopr["type"] == "ImmutableTensor":
continue
inputlist.append(all_oprs[all_vars[var]["owner_opr"]]["name"])
writer.add_node_raw(opr["name"], opr["type"], inputlist)
writer.add_graph_by_node_raw_list()
def load_mem_info(fpath):
with open(fpath) as fin:
data = json.load(fin)
oprs = data["opr"]
for oid, i in oprs.items():
i["size"] = 0
for oid, i in data["chunk"].items():
i["size"] = int(i["logic_addr_end"]) - int(i["logic_addr_begin"])
data["peak_memory"] = 0
data["weight_memory"] = 0
for oid, i in data["chunk"].items():
if i["type"] == "static_mem":
i["owner_opr"] = oprs[i["time_begin"]]["name"]
life_begin = int(i["time_begin"])
life_end = int(i["time_end"])
if i["overwrite_dest_id"] != "-1":
life_begin = life_begin + 1
if data["peak_memory"] < int(i["logic_addr_end"]):
data["peak_memory"] = int(i["logic_addr_end"])
for j in range(life_begin, life_end):
oprs[str(j)]["size"] = oprs[str(j)]["size"] + i["size"]
elif i["type"] == "weight_mem":
data["weight_memory"] += int(i["logic_addr_end"]) - int(
i["logic_addr_begin"]
)
return data
def peak_mem_regist(input, writer):
jmem = load_mem_info(input)
writer.add_text(
"PEAK_MEMORY_SIZE",
[sizeof_fmt(jmem["peak_memory"]) + "(" + str(jmem["peak_memory"]) + " B)"],
)
writer.add_text(
"WEIGHT_MEMORY_SIZE",
[sizeof_fmt(jmem["weight_memory"]) + "(" + str(jmem["weight_memory"]) + " B)"],
)
all_oprs = jmem["opr"]
all_chunks = jmem["chunk"]
max_size = 0
max_size_oprs = []
# get oprs that reach the max memory
for oid, i in all_oprs.items():
if i["size"] == max_size:
max_size_oprs.append(int(i["id"]))
elif i["size"] > max_size:
max_size = i["size"]
max_size_oprs.clear()
max_size_oprs.append(int(i["id"]))
# get component of chunks
max_size_oprs.sort()
opr2chunks = []
num = len(max_size_oprs)
for i in range(num):
opr2chunks.append([])
for oid, i in all_chunks.items():
if i["type"] == "static_mem":
life_begin = int(i["time_begin"])
life_end = int(i["time_end"])
if i["overwrite_dest_id"] != "-1":
life_begin = life_begin + 1
if max_size_oprs[0] >= life_end or max_size_oprs[-1] < life_begin:
continue
for j in range(num):
if max_size_oprs[j] >= life_end:
break
elif max_size_oprs[j] >= life_begin:
opr2chunks[j].append(i["id"])
peak_num = 0
for i in range(num):
suffix_1 = "PEAK" + str(peak_num)
if i - 1 > 0 and opr2chunks[i - 1] == opr2chunks[i]:
continue
max_num = 0
opr2chunks[i] = sorted(
opr2chunks[i],
key=lambda chunk_id: all_chunks[chunk_id]["size"],
reverse=True,
)
writer.add_text(
suffix_1 + "/" + "<SUMMARY_INFO>",
["reached_max_opr_name: " + all_oprs[str(max_size_oprs[i])]["name"]],
0,
)
writer.add_text(
suffix_1 + "/" + "<SUMMARY_INFO>",
["max_used_size: " + sizeof_fmt(max_size)],
1,
)
for j in opr2chunks[i]:
suffix_2 = "MAX" + str(max_num)
j_size = | sizeof_fmt(all_chunks[j]["size"]) | megengine.utils.module_stats.sizeof_fmt |
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import json
import math
import os
from megengine.utils.module_stats import sizeof_fmt
from megengine.utils.tensorboard import SummaryWriterExtend
def load_single_graph(fpath):
with open(fpath) as fin:
data = json.load(fin)
for t in ["operator", "var"]:
data[t] = {int(i): j for i, j in data[t].items()}
gvars = data["var"]
for oid, i in data["operator"].items():
i["input"] = list(map(int, i["input"]))
out = i["output"] = list(map(int, i["output"]))
for j in out:
gvars[j]["owner_opr"] = oid
for var in data["var"].values():
mp = var.get("mem_plan", None)
if mp:
var["shape"] = "{" + ",".join(map(str, mp["layout"]["shape"])) + "}"
else:
var["shape"] = "<?>"
return data
def comp_graph_plotter(input, writer):
jgraph = load_single_graph(input)
all_oprs = jgraph["operator"]
all_vars = jgraph["var"]
for i in all_oprs:
opr = all_oprs[i]
if opr["type"] == "ImmutableTensor":
continue
inputlist = []
for var in opr["input"]:
inpopr = all_oprs[all_vars[var]["owner_opr"]]
if inpopr["type"] == "ImmutableTensor":
continue
inputlist.append(all_oprs[all_vars[var]["owner_opr"]]["name"])
writer.add_node_raw(opr["name"], opr["type"], inputlist)
writer.add_graph_by_node_raw_list()
def load_mem_info(fpath):
with open(fpath) as fin:
data = json.load(fin)
oprs = data["opr"]
for oid, i in oprs.items():
i["size"] = 0
for oid, i in data["chunk"].items():
i["size"] = int(i["logic_addr_end"]) - int(i["logic_addr_begin"])
data["peak_memory"] = 0
data["weight_memory"] = 0
for oid, i in data["chunk"].items():
if i["type"] == "static_mem":
i["owner_opr"] = oprs[i["time_begin"]]["name"]
life_begin = int(i["time_begin"])
life_end = int(i["time_end"])
if i["overwrite_dest_id"] != "-1":
life_begin = life_begin + 1
if data["peak_memory"] < int(i["logic_addr_end"]):
data["peak_memory"] = int(i["logic_addr_end"])
for j in range(life_begin, life_end):
oprs[str(j)]["size"] = oprs[str(j)]["size"] + i["size"]
elif i["type"] == "weight_mem":
data["weight_memory"] += int(i["logic_addr_end"]) - int(
i["logic_addr_begin"]
)
return data
def peak_mem_regist(input, writer):
jmem = load_mem_info(input)
writer.add_text(
"PEAK_MEMORY_SIZE",
[sizeof_fmt(jmem["peak_memory"]) + "(" + str(jmem["peak_memory"]) + " B)"],
)
writer.add_text(
"WEIGHT_MEMORY_SIZE",
[sizeof_fmt(jmem["weight_memory"]) + "(" + str(jmem["weight_memory"]) + " B)"],
)
all_oprs = jmem["opr"]
all_chunks = jmem["chunk"]
max_size = 0
max_size_oprs = []
# get oprs that reach the max memory
for oid, i in all_oprs.items():
if i["size"] == max_size:
max_size_oprs.append(int(i["id"]))
elif i["size"] > max_size:
max_size = i["size"]
max_size_oprs.clear()
max_size_oprs.append(int(i["id"]))
# get component of chunks
max_size_oprs.sort()
opr2chunks = []
num = len(max_size_oprs)
for i in range(num):
opr2chunks.append([])
for oid, i in all_chunks.items():
if i["type"] == "static_mem":
life_begin = int(i["time_begin"])
life_end = int(i["time_end"])
if i["overwrite_dest_id"] != "-1":
life_begin = life_begin + 1
if max_size_oprs[0] >= life_end or max_size_oprs[-1] < life_begin:
continue
for j in range(num):
if max_size_oprs[j] >= life_end:
break
elif max_size_oprs[j] >= life_begin:
opr2chunks[j].append(i["id"])
peak_num = 0
for i in range(num):
suffix_1 = "PEAK" + str(peak_num)
if i - 1 > 0 and opr2chunks[i - 1] == opr2chunks[i]:
continue
max_num = 0
opr2chunks[i] = sorted(
opr2chunks[i],
key=lambda chunk_id: all_chunks[chunk_id]["size"],
reverse=True,
)
writer.add_text(
suffix_1 + "/" + "<SUMMARY_INFO>",
["reached_max_opr_name: " + all_oprs[str(max_size_oprs[i])]["name"]],
0,
)
writer.add_text(
suffix_1 + "/" + "<SUMMARY_INFO>",
["max_used_size: " + | sizeof_fmt(max_size) | megengine.utils.module_stats.sizeof_fmt |
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import json
import math
import os
from megengine.utils.module_stats import sizeof_fmt
from megengine.utils.tensorboard import SummaryWriterExtend
def load_single_graph(fpath):
with open(fpath) as fin:
data = json.load(fin)
for t in ["operator", "var"]:
data[t] = {int(i): j for i, j in data[t].items()}
gvars = data["var"]
for oid, i in data["operator"].items():
i["input"] = list(map(int, i["input"]))
out = i["output"] = list(map(int, i["output"]))
for j in out:
gvars[j]["owner_opr"] = oid
for var in data["var"].values():
mp = var.get("mem_plan", None)
if mp:
var["shape"] = "{" + ",".join(map(str, mp["layout"]["shape"])) + "}"
else:
var["shape"] = "<?>"
return data
def comp_graph_plotter(input, writer):
jgraph = load_single_graph(input)
all_oprs = jgraph["operator"]
all_vars = jgraph["var"]
for i in all_oprs:
opr = all_oprs[i]
if opr["type"] == "ImmutableTensor":
continue
inputlist = []
for var in opr["input"]:
inpopr = all_oprs[all_vars[var]["owner_opr"]]
if inpopr["type"] == "ImmutableTensor":
continue
inputlist.append(all_oprs[all_vars[var]["owner_opr"]]["name"])
writer.add_node_raw(opr["name"], opr["type"], inputlist)
writer.add_graph_by_node_raw_list()
def load_mem_info(fpath):
with open(fpath) as fin:
data = json.load(fin)
oprs = data["opr"]
for oid, i in oprs.items():
i["size"] = 0
for oid, i in data["chunk"].items():
i["size"] = int(i["logic_addr_end"]) - int(i["logic_addr_begin"])
data["peak_memory"] = 0
data["weight_memory"] = 0
for oid, i in data["chunk"].items():
if i["type"] == "static_mem":
i["owner_opr"] = oprs[i["time_begin"]]["name"]
life_begin = int(i["time_begin"])
life_end = int(i["time_end"])
if i["overwrite_dest_id"] != "-1":
life_begin = life_begin + 1
if data["peak_memory"] < int(i["logic_addr_end"]):
data["peak_memory"] = int(i["logic_addr_end"])
for j in range(life_begin, life_end):
oprs[str(j)]["size"] = oprs[str(j)]["size"] + i["size"]
elif i["type"] == "weight_mem":
data["weight_memory"] += int(i["logic_addr_end"]) - int(
i["logic_addr_begin"]
)
return data
def peak_mem_regist(input, writer):
jmem = load_mem_info(input)
writer.add_text(
"PEAK_MEMORY_SIZE",
[sizeof_fmt(jmem["peak_memory"]) + "(" + str(jmem["peak_memory"]) + " B)"],
)
writer.add_text(
"WEIGHT_MEMORY_SIZE",
[sizeof_fmt(jmem["weight_memory"]) + "(" + str(jmem["weight_memory"]) + " B)"],
)
all_oprs = jmem["opr"]
all_chunks = jmem["chunk"]
max_size = 0
max_size_oprs = []
# get oprs that reach the max memory
for oid, i in all_oprs.items():
if i["size"] == max_size:
max_size_oprs.append(int(i["id"]))
elif i["size"] > max_size:
max_size = i["size"]
max_size_oprs.clear()
max_size_oprs.append(int(i["id"]))
# get component of chunks
max_size_oprs.sort()
opr2chunks = []
num = len(max_size_oprs)
for i in range(num):
opr2chunks.append([])
for oid, i in all_chunks.items():
if i["type"] == "static_mem":
life_begin = int(i["time_begin"])
life_end = int(i["time_end"])
if i["overwrite_dest_id"] != "-1":
life_begin = life_begin + 1
if max_size_oprs[0] >= life_end or max_size_oprs[-1] < life_begin:
continue
for j in range(num):
if max_size_oprs[j] >= life_end:
break
elif max_size_oprs[j] >= life_begin:
opr2chunks[j].append(i["id"])
peak_num = 0
for i in range(num):
suffix_1 = "PEAK" + str(peak_num)
if i - 1 > 0 and opr2chunks[i - 1] == opr2chunks[i]:
continue
max_num = 0
opr2chunks[i] = sorted(
opr2chunks[i],
key=lambda chunk_id: all_chunks[chunk_id]["size"],
reverse=True,
)
writer.add_text(
suffix_1 + "/" + "<SUMMARY_INFO>",
["reached_max_opr_name: " + all_oprs[str(max_size_oprs[i])]["name"]],
0,
)
writer.add_text(
suffix_1 + "/" + "<SUMMARY_INFO>",
["max_used_size: " + sizeof_fmt(max_size)],
1,
)
for j in opr2chunks[i]:
suffix_2 = "MAX" + str(max_num)
j_size = sizeof_fmt(all_chunks[j]["size"])
j_percent = round(all_chunks[j]["size"] / max_size * 100, 3)
writer.add_text(
suffix_1 + "/" + suffix_2 + "_OPR",
["percent: " + str(j_percent) + "%"],
0,
)
writer.add_text(
suffix_1 + "/" + suffix_2 + "_OPR", ["memory_size: " + j_size], 1,
)
writer.add_text(
suffix_1 + "/" + suffix_2 + "_OPR",
["owner_opr: " + all_chunks[j]["owner_opr"]],
2,
)
writer.add_node_raw_attributes(
all_chunks[j]["owner_opr"],
{
"memory_" + all_chunks[j]["id"]: j_size,
"memory_percent": str(j_percent) + "%",
"summary_memory_" + str(peak_num): | sizeof_fmt(max_size) | megengine.utils.module_stats.sizeof_fmt |
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import json
import math
import os
from megengine.utils.module_stats import sizeof_fmt
from megengine.utils.tensorboard import SummaryWriterExtend
def load_single_graph(fpath):
with open(fpath) as fin:
data = json.load(fin)
for t in ["operator", "var"]:
data[t] = {int(i): j for i, j in data[t].items()}
gvars = data["var"]
for oid, i in data["operator"].items():
i["input"] = list(map(int, i["input"]))
out = i["output"] = list(map(int, i["output"]))
for j in out:
gvars[j]["owner_opr"] = oid
for var in data["var"].values():
mp = var.get("mem_plan", None)
if mp:
var["shape"] = "{" + ",".join(map(str, mp["layout"]["shape"])) + "}"
else:
var["shape"] = "<?>"
return data
def comp_graph_plotter(input, writer):
jgraph = load_single_graph(input)
all_oprs = jgraph["operator"]
all_vars = jgraph["var"]
for i in all_oprs:
opr = all_oprs[i]
if opr["type"] == "ImmutableTensor":
continue
inputlist = []
for var in opr["input"]:
inpopr = all_oprs[all_vars[var]["owner_opr"]]
if inpopr["type"] == "ImmutableTensor":
continue
inputlist.append(all_oprs[all_vars[var]["owner_opr"]]["name"])
writer.add_node_raw(opr["name"], opr["type"], inputlist)
writer.add_graph_by_node_raw_list()
def load_mem_info(fpath):
with open(fpath) as fin:
data = json.load(fin)
oprs = data["opr"]
for oid, i in oprs.items():
i["size"] = 0
for oid, i in data["chunk"].items():
i["size"] = int(i["logic_addr_end"]) - int(i["logic_addr_begin"])
data["peak_memory"] = 0
data["weight_memory"] = 0
for oid, i in data["chunk"].items():
if i["type"] == "static_mem":
i["owner_opr"] = oprs[i["time_begin"]]["name"]
life_begin = int(i["time_begin"])
life_end = int(i["time_end"])
if i["overwrite_dest_id"] != "-1":
life_begin = life_begin + 1
if data["peak_memory"] < int(i["logic_addr_end"]):
data["peak_memory"] = int(i["logic_addr_end"])
for j in range(life_begin, life_end):
oprs[str(j)]["size"] = oprs[str(j)]["size"] + i["size"]
elif i["type"] == "weight_mem":
data["weight_memory"] += int(i["logic_addr_end"]) - int(
i["logic_addr_begin"]
)
return data
def peak_mem_regist(input, writer):
jmem = load_mem_info(input)
writer.add_text(
"PEAK_MEMORY_SIZE",
[ | sizeof_fmt(jmem["peak_memory"]) | megengine.utils.module_stats.sizeof_fmt |
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import json
import math
import os
from megengine.utils.module_stats import sizeof_fmt
from megengine.utils.tensorboard import SummaryWriterExtend
def load_single_graph(fpath):
with open(fpath) as fin:
data = json.load(fin)
for t in ["operator", "var"]:
data[t] = {int(i): j for i, j in data[t].items()}
gvars = data["var"]
for oid, i in data["operator"].items():
i["input"] = list(map(int, i["input"]))
out = i["output"] = list(map(int, i["output"]))
for j in out:
gvars[j]["owner_opr"] = oid
for var in data["var"].values():
mp = var.get("mem_plan", None)
if mp:
var["shape"] = "{" + ",".join(map(str, mp["layout"]["shape"])) + "}"
else:
var["shape"] = "<?>"
return data
def comp_graph_plotter(input, writer):
jgraph = load_single_graph(input)
all_oprs = jgraph["operator"]
all_vars = jgraph["var"]
for i in all_oprs:
opr = all_oprs[i]
if opr["type"] == "ImmutableTensor":
continue
inputlist = []
for var in opr["input"]:
inpopr = all_oprs[all_vars[var]["owner_opr"]]
if inpopr["type"] == "ImmutableTensor":
continue
inputlist.append(all_oprs[all_vars[var]["owner_opr"]]["name"])
writer.add_node_raw(opr["name"], opr["type"], inputlist)
writer.add_graph_by_node_raw_list()
def load_mem_info(fpath):
with open(fpath) as fin:
data = json.load(fin)
oprs = data["opr"]
for oid, i in oprs.items():
i["size"] = 0
for oid, i in data["chunk"].items():
i["size"] = int(i["logic_addr_end"]) - int(i["logic_addr_begin"])
data["peak_memory"] = 0
data["weight_memory"] = 0
for oid, i in data["chunk"].items():
if i["type"] == "static_mem":
i["owner_opr"] = oprs[i["time_begin"]]["name"]
life_begin = int(i["time_begin"])
life_end = int(i["time_end"])
if i["overwrite_dest_id"] != "-1":
life_begin = life_begin + 1
if data["peak_memory"] < int(i["logic_addr_end"]):
data["peak_memory"] = int(i["logic_addr_end"])
for j in range(life_begin, life_end):
oprs[str(j)]["size"] = oprs[str(j)]["size"] + i["size"]
elif i["type"] == "weight_mem":
data["weight_memory"] += int(i["logic_addr_end"]) - int(
i["logic_addr_begin"]
)
return data
def peak_mem_regist(input, writer):
jmem = load_mem_info(input)
writer.add_text(
"PEAK_MEMORY_SIZE",
[sizeof_fmt(jmem["peak_memory"]) + "(" + str(jmem["peak_memory"]) + " B)"],
)
writer.add_text(
"WEIGHT_MEMORY_SIZE",
[ | sizeof_fmt(jmem["weight_memory"]) | megengine.utils.module_stats.sizeof_fmt |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing as mp
import os
import megengine as mge
import megengine.data as data
import megengine.data.dataset as dataset
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.jit as jit
import megengine.optimizer as optim
import numpy as np
from official.vision.segmentation.deeplabv3plus import (
DeepLabV3Plus,
softmax_cross_entropy,
)
from official.vision.segmentation.utils import import_config_from_file
logger = | mge.get_logger(__name__) | megengine.get_logger |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing as mp
import os
import megengine as mge
import megengine.data as data
import megengine.data.dataset as dataset
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.jit as jit
import megengine.optimizer as optim
import numpy as np
from official.vision.segmentation.deeplabv3plus import (
DeepLabV3Plus,
softmax_cross_entropy,
)
from official.vision.segmentation.utils import import_config_from_file
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-c", "--config", type=str, required=True, help="configuration file"
)
parser.add_argument(
"-d", "--dataset_dir", type=str, default="/data/datasets/VOC2012",
)
parser.add_argument(
"-w", "--weight_file", type=str, default=None, help="pre-train weights file",
)
parser.add_argument(
"-n", "--ngpus", type=int, default=8, help="batchsize for training"
)
parser.add_argument(
"-r", "--resume", type=str, default=None, help="resume model file"
)
args = parser.parse_args()
world_size = args.ngpus
logger.info("Device Count = %d", world_size)
if world_size > 1:
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def worker(rank, world_size, args):
cfg = import_config_from_file(args.config)
if world_size > 1:
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
logger.info("Init process group done")
logger.info("Prepare dataset")
train_loader, epoch_size = build_dataloader(cfg.BATCH_SIZE, args.dataset_dir, cfg)
batch_iter = epoch_size // (cfg.BATCH_SIZE * world_size)
net = DeepLabV3Plus(class_num=cfg.NUM_CLASSES, pretrained=args.weight_file)
base_lr = cfg.LEARNING_RATE * world_size
optimizer = optim.SGD(
net.parameters(requires_grad=True),
lr=base_lr,
momentum=0.9,
weight_decay=0.00004,
)
@ | jit.trace(symbolic=True, opt_level=2) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing as mp
import os
import megengine as mge
import megengine.data as data
import megengine.data.dataset as dataset
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.jit as jit
import megengine.optimizer as optim
import numpy as np
from official.vision.segmentation.deeplabv3plus import (
DeepLabV3Plus,
softmax_cross_entropy,
)
from official.vision.segmentation.utils import import_config_from_file
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-c", "--config", type=str, required=True, help="configuration file"
)
parser.add_argument(
"-d", "--dataset_dir", type=str, default="/data/datasets/VOC2012",
)
parser.add_argument(
"-w", "--weight_file", type=str, default=None, help="pre-train weights file",
)
parser.add_argument(
"-n", "--ngpus", type=int, default=8, help="batchsize for training"
)
parser.add_argument(
"-r", "--resume", type=str, default=None, help="resume model file"
)
args = parser.parse_args()
world_size = args.ngpus
logger.info("Device Count = %d", world_size)
if world_size > 1:
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def worker(rank, world_size, args):
cfg = import_config_from_file(args.config)
if world_size > 1:
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
logger.info("Init process group done")
logger.info("Prepare dataset")
train_loader, epoch_size = build_dataloader(cfg.BATCH_SIZE, args.dataset_dir, cfg)
batch_iter = epoch_size // (cfg.BATCH_SIZE * world_size)
net = DeepLabV3Plus(class_num=cfg.NUM_CLASSES, pretrained=args.weight_file)
base_lr = cfg.LEARNING_RATE * world_size
optimizer = optim.SGD(
net.parameters(requires_grad=True),
lr=base_lr,
momentum=0.9,
weight_decay=0.00004,
)
@jit.trace(symbolic=True, opt_level=2)
def train_func(data, label, net=None, optimizer=None):
net.train()
pred = net(data)
loss = softmax_cross_entropy(pred, label, ignore_index=cfg.IGNORE_INDEX)
optimizer.backward(loss)
return pred, loss
begin_epoch = 0
end_epoch = cfg.EPOCHS
if args.resume is not None:
pretrained = mge.load(args.resume)
begin_epoch = pretrained["epoch"] + 1
net.load_state_dict(pretrained["state_dict"])
logger.info("load success: epoch %d", begin_epoch)
itr = begin_epoch * batch_iter
max_itr = end_epoch * batch_iter
image = mge.tensor(
np.zeros([cfg.BATCH_SIZE, 3, cfg.IMG_HEIGHT, cfg.IMG_WIDTH]).astype(np.float32),
dtype="float32",
)
label = mge.tensor(
np.zeros([cfg.BATCH_SIZE, cfg.IMG_HEIGHT, cfg.IMG_WIDTH]).astype(np.int32),
dtype="int32",
)
exp_name = os.path.abspath(os.path.dirname(__file__)).split("/")[-1]
for epoch in range(begin_epoch, end_epoch):
for i_batch, sample_batched in enumerate(train_loader):
def adjust_lr(optimizer, itr, max_itr):
now_lr = base_lr * (1 - itr / (max_itr + 1)) ** 0.9
for param_group in optimizer.param_groups:
param_group["lr"] = now_lr
return now_lr
now_lr = adjust_lr(optimizer, itr, max_itr)
inputs_batched, labels_batched = sample_batched
labels_batched = np.squeeze(labels_batched, axis=1).astype(np.int32)
image.set_value(inputs_batched)
label.set_value(labels_batched)
optimizer.zero_grad()
_, loss = train_func(image, label, net=net, optimizer=optimizer)
optimizer.step()
running_loss = loss.numpy()[0]
if rank == 0:
logger.info(
"%s epoch:%d/%d\tbatch:%d/%d\titr:%d\tlr:%g\tloss:%g",
exp_name,
epoch,
end_epoch,
i_batch,
batch_iter,
itr + 1,
now_lr,
running_loss,
)
itr += 1
if rank == 0:
save_path = os.path.join(cfg.MODEL_SAVE_DIR, "epoch%d.pkl" % (epoch))
mge.save({"epoch": epoch, "state_dict": net.state_dict()}, save_path)
logger.info("save epoch%d", epoch)
def build_dataloader(batch_size, dataset_dir, cfg):
if cfg.DATASET == "VOC2012":
train_dataset = dataset.PascalVOC(
dataset_dir,
cfg.DATA_TYPE,
order=["image", "mask"]
)
elif cfg.DATASET == "Cityscapes":
train_dataset = dataset.Cityscapes(
dataset_dir,
"train",
mode='gtFine',
order=["image", "mask"]
)
else:
raise ValueError("Unsupported dataset {}".format(cfg.DATASET))
train_sampler = | data.RandomSampler(train_dataset, batch_size, drop_last=True) | megengine.data.RandomSampler |
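# The adjust_lr closure above is a "poly" schedule: lr decays from base_lr towards zero as
# itr approaches max_itr, with exponent 0.9. Standalone sketch; the numbers are illustrative.
def poly_lr(base_lr, itr, max_itr, power=0.9):
    return base_lr * (1 - itr / (max_itr + 1)) ** power

# base_lr=0.01, max_itr=1000: itr=0 -> 0.01, itr=500 -> ~0.0054, itr=1000 -> ~2e-5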
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing as mp
import os
import megengine as mge
import megengine.data as data
import megengine.data.dataset as dataset
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.jit as jit
import megengine.optimizer as optim
import numpy as np
from official.vision.segmentation.deeplabv3plus import (
DeepLabV3Plus,
softmax_cross_entropy,
)
from official.vision.segmentation.utils import import_config_from_file
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-c", "--config", type=str, required=True, help="configuration file"
)
parser.add_argument(
"-d", "--dataset_dir", type=str, default="/data/datasets/VOC2012",
)
parser.add_argument(
"-w", "--weight_file", type=str, default=None, help="pre-train weights file",
)
parser.add_argument(
"-n", "--ngpus", type=int, default=8, help="batchsize for training"
)
parser.add_argument(
"-r", "--resume", type=str, default=None, help="resume model file"
)
args = parser.parse_args()
world_size = args.ngpus
logger.info("Device Count = %d", world_size)
if world_size > 1:
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def worker(rank, world_size, args):
cfg = import_config_from_file(args.config)
if world_size > 1:
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
logger.info("Init process group done")
logger.info("Prepare dataset")
train_loader, epoch_size = build_dataloader(cfg.BATCH_SIZE, args.dataset_dir, cfg)
batch_iter = epoch_size // (cfg.BATCH_SIZE * world_size)
net = DeepLabV3Plus(class_num=cfg.NUM_CLASSES, pretrained=args.weight_file)
base_lr = cfg.LEARNING_RATE * world_size
optimizer = optim.SGD(
net.parameters(requires_grad=True),
lr=base_lr,
momentum=0.9,
weight_decay=0.00004,
)
@jit.trace(symbolic=True, opt_level=2)
def train_func(data, label, net=None, optimizer=None):
net.train()
pred = net(data)
loss = softmax_cross_entropy(pred, label, ignore_index=cfg.IGNORE_INDEX)
optimizer.backward(loss)
return pred, loss
begin_epoch = 0
end_epoch = cfg.EPOCHS
if args.resume is not None:
pretrained = | mge.load(args.resume) | megengine.load |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing as mp
import os
import megengine as mge
import megengine.data as data
import megengine.data.dataset as dataset
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.jit as jit
import megengine.optimizer as optim
import numpy as np
from official.vision.segmentation.deeplabv3plus import (
DeepLabV3Plus,
softmax_cross_entropy,
)
from official.vision.segmentation.utils import import_config_from_file
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-c", "--config", type=str, required=True, help="configuration file"
)
parser.add_argument(
"-d", "--dataset_dir", type=str, default="/data/datasets/VOC2012",
)
parser.add_argument(
"-w", "--weight_file", type=str, default=None, help="pre-train weights file",
)
parser.add_argument(
"-n", "--ngpus", type=int, default=8, help="batchsize for training"
)
parser.add_argument(
"-r", "--resume", type=str, default=None, help="resume model file"
)
args = parser.parse_args()
world_size = args.ngpus
logger.info("Device Count = %d", world_size)
if world_size > 1:
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def worker(rank, world_size, args):
cfg = import_config_from_file(args.config)
if world_size > 1:
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
logger.info("Init process group done")
logger.info("Prepare dataset")
train_loader, epoch_size = build_dataloader(cfg.BATCH_SIZE, args.dataset_dir, cfg)
batch_iter = epoch_size // (cfg.BATCH_SIZE * world_size)
net = DeepLabV3Plus(class_num=cfg.NUM_CLASSES, pretrained=args.weight_file)
base_lr = cfg.LEARNING_RATE * world_size
optimizer = optim.SGD(
net.parameters(requires_grad=True),
lr=base_lr,
momentum=0.9,
weight_decay=0.00004,
)
@jit.trace(symbolic=True, opt_level=2)
def train_func(data, label, net=None, optimizer=None):
net.train()
pred = net(data)
loss = softmax_cross_entropy(pred, label, ignore_index=cfg.IGNORE_INDEX)
optimizer.backward(loss)
return pred, loss
begin_epoch = 0
end_epoch = cfg.EPOCHS
if args.resume is not None:
pretrained = mge.load(args.resume)
begin_epoch = pretrained["epoch"] + 1
net.load_state_dict(pretrained["state_dict"])
logger.info("load success: epoch %d", begin_epoch)
itr = begin_epoch * batch_iter
max_itr = end_epoch * batch_iter
image = mge.tensor(
np.zeros([cfg.BATCH_SIZE, 3, cfg.IMG_HEIGHT, cfg.IMG_WIDTH]).astype(np.float32),
dtype="float32",
)
label = mge.tensor(
np.zeros([cfg.BATCH_SIZE, cfg.IMG_HEIGHT, cfg.IMG_WIDTH]).astype(np.int32),
dtype="int32",
)
exp_name = os.path.abspath(os.path.dirname(__file__)).split("/")[-1]
for epoch in range(begin_epoch, end_epoch):
for i_batch, sample_batched in enumerate(train_loader):
def adjust_lr(optimizer, itr, max_itr):
now_lr = base_lr * (1 - itr / (max_itr + 1)) ** 0.9
for param_group in optimizer.param_groups:
param_group["lr"] = now_lr
return now_lr
now_lr = adjust_lr(optimizer, itr, max_itr)
inputs_batched, labels_batched = sample_batched
labels_batched = np.squeeze(labels_batched, axis=1).astype(np.int32)
image.set_value(inputs_batched)
label.set_value(labels_batched)
optimizer.zero_grad()
_, loss = train_func(image, label, net=net, optimizer=optimizer)
optimizer.step()
running_loss = loss.numpy()[0]
if rank == 0:
logger.info(
"%s epoch:%d/%d\tbatch:%d/%d\titr:%d\tlr:%g\tloss:%g",
exp_name,
epoch,
end_epoch,
i_batch,
batch_iter,
itr + 1,
now_lr,
running_loss,
)
itr += 1
if rank == 0:
save_path = os.path.join(cfg.MODEL_SAVE_DIR, "epoch%d.pkl" % (epoch))
mge.save({"epoch": epoch, "state_dict": net.state_dict()}, save_path)
logger.info("save epoch%d", epoch)
def build_dataloader(batch_size, dataset_dir, cfg):
if cfg.DATASET == "VOC2012":
train_dataset = dataset.PascalVOC(
dataset_dir,
cfg.DATA_TYPE,
order=["image", "mask"]
)
elif cfg.DATASET == "Cityscapes":
train_dataset = dataset.Cityscapes(
dataset_dir,
"train",
mode='gtFine',
order=["image", "mask"]
)
else:
raise ValueError("Unsupported dataset {}".format(cfg.DATASET))
train_sampler = data.RandomSampler(train_dataset, batch_size, drop_last=True)
train_dataloader = data.DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
transforms=[
| T.RandomHorizontalFlip(0.5) | megengine.data.transform.RandomHorizontalFlip |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing as mp
import os
import megengine as mge
import megengine.data as data
import megengine.data.dataset as dataset
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.jit as jit
import megengine.optimizer as optim
import numpy as np
from official.vision.segmentation.deeplabv3plus import (
DeepLabV3Plus,
softmax_cross_entropy,
)
from official.vision.segmentation.utils import import_config_from_file
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-c", "--config", type=str, required=True, help="configuration file"
)
parser.add_argument(
"-d", "--dataset_dir", type=str, default="/data/datasets/VOC2012",
)
parser.add_argument(
"-w", "--weight_file", type=str, default=None, help="pre-train weights file",
)
parser.add_argument(
"-n", "--ngpus", type=int, default=8, help="batchsize for training"
)
parser.add_argument(
"-r", "--resume", type=str, default=None, help="resume model file"
)
args = parser.parse_args()
world_size = args.ngpus
logger.info("Device Count = %d", world_size)
if world_size > 1:
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def worker(rank, world_size, args):
cfg = import_config_from_file(args.config)
if world_size > 1:
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
logger.info("Init process group done")
logger.info("Prepare dataset")
train_loader, epoch_size = build_dataloader(cfg.BATCH_SIZE, args.dataset_dir, cfg)
batch_iter = epoch_size // (cfg.BATCH_SIZE * world_size)
net = DeepLabV3Plus(class_num=cfg.NUM_CLASSES, pretrained=args.weight_file)
base_lr = cfg.LEARNING_RATE * world_size
optimizer = optim.SGD(
net.parameters(requires_grad=True),
lr=base_lr,
momentum=0.9,
weight_decay=0.00004,
)
@jit.trace(symbolic=True, opt_level=2)
def train_func(data, label, net=None, optimizer=None):
net.train()
pred = net(data)
loss = softmax_cross_entropy(pred, label, ignore_index=cfg.IGNORE_INDEX)
optimizer.backward(loss)
return pred, loss
begin_epoch = 0
end_epoch = cfg.EPOCHS
if args.resume is not None:
pretrained = mge.load(args.resume)
begin_epoch = pretrained["epoch"] + 1
net.load_state_dict(pretrained["state_dict"])
logger.info("load success: epoch %d", begin_epoch)
itr = begin_epoch * batch_iter
max_itr = end_epoch * batch_iter
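# pre-allocate static input tensors for the traced train_func; their values are overwritten in-place with set_value every iteration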
image = mge.tensor(
np.zeros([cfg.BATCH_SIZE, 3, cfg.IMG_HEIGHT, cfg.IMG_WIDTH]).astype(np.float32),
dtype="float32",
)
label = mge.tensor(
np.zeros([cfg.BATCH_SIZE, cfg.IMG_HEIGHT, cfg.IMG_WIDTH]).astype(np.int32),
dtype="int32",
)
exp_name = os.path.abspath(os.path.dirname(__file__)).split("/")[-1]
for epoch in range(begin_epoch, end_epoch):
for i_batch, sample_batched in enumerate(train_loader):
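# poly learning rate schedule: base_lr * (1 - itr / (max_itr + 1)) ** 0.9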
def adjust_lr(optimizer, itr, max_itr):
now_lr = base_lr * (1 - itr / (max_itr + 1)) ** 0.9
for param_group in optimizer.param_groups:
param_group["lr"] = now_lr
return now_lr
now_lr = adjust_lr(optimizer, itr, max_itr)
inputs_batched, labels_batched = sample_batched
labels_batched = np.squeeze(labels_batched, axis=1).astype(np.int32)
image.set_value(inputs_batched)
label.set_value(labels_batched)
optimizer.zero_grad()
_, loss = train_func(image, label, net=net, optimizer=optimizer)
optimizer.step()
running_loss = loss.numpy()[0]
if rank == 0:
logger.info(
"%s epoch:%d/%d\tbatch:%d/%d\titr:%d\tlr:%g\tloss:%g",
exp_name,
epoch,
end_epoch,
i_batch,
batch_iter,
itr + 1,
now_lr,
running_loss,
)
itr += 1
if rank == 0:
save_path = os.path.join(cfg.MODEL_SAVE_DIR, "epoch%d.pkl" % (epoch))
mge.save({"epoch": epoch, "state_dict": net.state_dict()}, save_path)
logger.info("save epoch%d", epoch)
def build_dataloader(batch_size, dataset_dir, cfg):
if cfg.DATASET == "VOC2012":
train_dataset = dataset.PascalVOC(
dataset_dir,
cfg.DATA_TYPE,
order=["image", "mask"]
)
elif cfg.DATASET == "Cityscapes":
train_dataset = dataset.Cityscapes(
dataset_dir,
"train",
mode='gtFine',
order=["image", "mask"]
)
else:
raise ValueError("Unsupported dataset {}".format(cfg.DATASET))
train_sampler = data.RandomSampler(train_dataset, batch_size, drop_last=True)
train_dataloader = data.DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
transforms=[
T.RandomHorizontalFlip(0.5),
| T.RandomResize(scale_range=(0.5, 2)) | megengine.data.transform.RandomResize |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing as mp
import os
import megengine as mge
import megengine.data as data
import megengine.data.dataset as dataset
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.jit as jit
import megengine.optimizer as optim
import numpy as np
from official.vision.segmentation.deeplabv3plus import (
DeepLabV3Plus,
softmax_cross_entropy,
)
from official.vision.segmentation.utils import import_config_from_file
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-c", "--config", type=str, required=True, help="configuration file"
)
parser.add_argument(
"-d", "--dataset_dir", type=str, default="/data/datasets/VOC2012",
)
parser.add_argument(
"-w", "--weight_file", type=str, default=None, help="pre-train weights file",
)
parser.add_argument(
"-n", "--ngpus", type=int, default=8, help="batchsize for training"
)
parser.add_argument(
"-r", "--resume", type=str, default=None, help="resume model file"
)
args = parser.parse_args()
world_size = args.ngpus
logger.info("Device Count = %d", world_size)
if world_size > 1:
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def worker(rank, world_size, args):
cfg = import_config_from_file(args.config)
if world_size > 1:
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
logger.info("Init process group done")
logger.info("Prepare dataset")
train_loader, epoch_size = build_dataloader(cfg.BATCH_SIZE, args.dataset_dir, cfg)
batch_iter = epoch_size // (cfg.BATCH_SIZE * world_size)
net = DeepLabV3Plus(class_num=cfg.NUM_CLASSES, pretrained=args.weight_file)
base_lr = cfg.LEARNING_RATE * world_size
optimizer = optim.SGD(
net.parameters(requires_grad=True),
lr=base_lr,
momentum=0.9,
weight_decay=0.00004,
)
@jit.trace(symbolic=True, opt_level=2)
def train_func(data, label, net=None, optimizer=None):
net.train()
pred = net(data)
loss = softmax_cross_entropy(pred, label, ignore_index=cfg.IGNORE_INDEX)
optimizer.backward(loss)
return pred, loss
begin_epoch = 0
end_epoch = cfg.EPOCHS
if args.resume is not None:
pretrained = mge.load(args.resume)
begin_epoch = pretrained["epoch"] + 1
net.load_state_dict(pretrained["state_dict"])
logger.info("load success: epoch %d", begin_epoch)
itr = begin_epoch * batch_iter
max_itr = end_epoch * batch_iter
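# pre-allocate static input tensors for the traced train_func; their values are overwritten in-place with set_value every iteration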
image = mge.tensor(
np.zeros([cfg.BATCH_SIZE, 3, cfg.IMG_HEIGHT, cfg.IMG_WIDTH]).astype(np.float32),
dtype="float32",
)
label = mge.tensor(
np.zeros([cfg.BATCH_SIZE, cfg.IMG_HEIGHT, cfg.IMG_WIDTH]).astype(np.int32),
dtype="int32",
)
exp_name = os.path.abspath(os.path.dirname(__file__)).split("/")[-1]
for epoch in range(begin_epoch, end_epoch):
for i_batch, sample_batched in enumerate(train_loader):
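# poly learning rate schedule: base_lr * (1 - itr / (max_itr + 1)) ** 0.9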
def adjust_lr(optimizer, itr, max_itr):
now_lr = base_lr * (1 - itr / (max_itr + 1)) ** 0.9
for param_group in optimizer.param_groups:
param_group["lr"] = now_lr
return now_lr
now_lr = adjust_lr(optimizer, itr, max_itr)
inputs_batched, labels_batched = sample_batched
labels_batched = np.squeeze(labels_batched, axis=1).astype(np.int32)
image.set_value(inputs_batched)
label.set_value(labels_batched)
optimizer.zero_grad()
_, loss = train_func(image, label, net=net, optimizer=optimizer)
optimizer.step()
running_loss = loss.numpy()[0]
if rank == 0:
logger.info(
"%s epoch:%d/%d\tbatch:%d/%d\titr:%d\tlr:%g\tloss:%g",
exp_name,
epoch,
end_epoch,
i_batch,
batch_iter,
itr + 1,
now_lr,
running_loss,
)
itr += 1
if rank == 0:
save_path = os.path.join(cfg.MODEL_SAVE_DIR, "epoch%d.pkl" % (epoch))
mge.save({"epoch": epoch, "state_dict": net.state_dict()}, save_path)
logger.info("save epoch%d", epoch)
def build_dataloader(batch_size, dataset_dir, cfg):
if cfg.DATASET == "VOC2012":
train_dataset = dataset.PascalVOC(
dataset_dir,
cfg.DATA_TYPE,
order=["image", "mask"]
)
elif cfg.DATASET == "Cityscapes":
train_dataset = dataset.Cityscapes(
dataset_dir,
"train",
mode='gtFine',
order=["image", "mask"]
)
else:
raise ValueError("Unsupported dataset {}".format(cfg.DATASET))
train_sampler = data.RandomSampler(train_dataset, batch_size, drop_last=True)
train_dataloader = data.DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
transforms=[
T.RandomHorizontalFlip(0.5),
T.RandomResize(scale_range=(0.5, 2)),
T.RandomCrop(
output_size=(cfg.IMG_HEIGHT, cfg.IMG_WIDTH),
padding_value=[0, 0, 0],
padding_maskvalue=255,
),
| T.Normalize(mean=cfg.IMG_MEAN, std=cfg.IMG_STD) | megengine.data.transform.Normalize |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing as mp
import os
import megengine as mge
import megengine.data as data
import megengine.data.dataset as dataset
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.jit as jit
import megengine.optimizer as optim
import numpy as np
from official.vision.segmentation.deeplabv3plus import (
DeepLabV3Plus,
softmax_cross_entropy,
)
from official.vision.segmentation.utils import import_config_from_file
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-c", "--config", type=str, required=True, help="configuration file"
)
parser.add_argument(
"-d", "--dataset_dir", type=str, default="/data/datasets/VOC2012",
)
parser.add_argument(
"-w", "--weight_file", type=str, default=None, help="pre-train weights file",
)
parser.add_argument(
"-n", "--ngpus", type=int, default=8, help="batchsize for training"
)
parser.add_argument(
"-r", "--resume", type=str, default=None, help="resume model file"
)
args = parser.parse_args()
world_size = args.ngpus
logger.info("Device Count = %d", world_size)
if world_size > 1:
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def worker(rank, world_size, args):
cfg = import_config_from_file(args.config)
if world_size > 1:
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
logger.info("Init process group done")
logger.info("Prepare dataset")
train_loader, epoch_size = build_dataloader(cfg.BATCH_SIZE, args.dataset_dir, cfg)
batch_iter = epoch_size // (cfg.BATCH_SIZE * world_size)
net = DeepLabV3Plus(class_num=cfg.NUM_CLASSES, pretrained=args.weight_file)
base_lr = cfg.LEARNING_RATE * world_size
optimizer = optim.SGD(
net.parameters(requires_grad=True),
lr=base_lr,
momentum=0.9,
weight_decay=0.00004,
)
@jit.trace(symbolic=True, opt_level=2)
def train_func(data, label, net=None, optimizer=None):
net.train()
pred = net(data)
loss = softmax_cross_entropy(pred, label, ignore_index=cfg.IGNORE_INDEX)
optimizer.backward(loss)
return pred, loss
begin_epoch = 0
end_epoch = cfg.EPOCHS
if args.resume is not None:
pretrained = mge.load(args.resume)
begin_epoch = pretrained["epoch"] + 1
net.load_state_dict(pretrained["state_dict"])
logger.info("load success: epoch %d", begin_epoch)
itr = begin_epoch * batch_iter
max_itr = end_epoch * batch_iter
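# pre-allocate static input tensors for the traced train_func; their values are overwritten in-place with set_value every iteration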
image = mge.tensor(
np.zeros([cfg.BATCH_SIZE, 3, cfg.IMG_HEIGHT, cfg.IMG_WIDTH]).astype(np.float32),
dtype="float32",
)
label = mge.tensor(
np.zeros([cfg.BATCH_SIZE, cfg.IMG_HEIGHT, cfg.IMG_WIDTH]).astype(np.int32),
dtype="int32",
)
exp_name = os.path.abspath(os.path.dirname(__file__)).split("/")[-1]
for epoch in range(begin_epoch, end_epoch):
for i_batch, sample_batched in enumerate(train_loader):
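# poly learning rate schedule: base_lr * (1 - itr / (max_itr + 1)) ** 0.9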
def adjust_lr(optimizer, itr, max_itr):
now_lr = base_lr * (1 - itr / (max_itr + 1)) ** 0.9
for param_group in optimizer.param_groups:
param_group["lr"] = now_lr
return now_lr
now_lr = adjust_lr(optimizer, itr, max_itr)
inputs_batched, labels_batched = sample_batched
labels_batched = np.squeeze(labels_batched, axis=1).astype(np.int32)
image.set_value(inputs_batched)
label.set_value(labels_batched)
optimizer.zero_grad()
_, loss = train_func(image, label, net=net, optimizer=optimizer)
optimizer.step()
running_loss = loss.numpy()[0]
if rank == 0:
logger.info(
"%s epoch:%d/%d\tbatch:%d/%d\titr:%d\tlr:%g\tloss:%g",
exp_name,
epoch,
end_epoch,
i_batch,
batch_iter,
itr + 1,
now_lr,
running_loss,
)
itr += 1
if rank == 0:
save_path = os.path.join(cfg.MODEL_SAVE_DIR, "epoch%d.pkl" % (epoch))
mge.save({"epoch": epoch, "state_dict": net.state_dict()}, save_path)
logger.info("save epoch%d", epoch)
def build_dataloader(batch_size, dataset_dir, cfg):
if cfg.DATASET == "VOC2012":
train_dataset = dataset.PascalVOC(
dataset_dir,
cfg.DATA_TYPE,
order=["image", "mask"]
)
elif cfg.DATASET == "Cityscapes":
train_dataset = dataset.Cityscapes(
dataset_dir,
"train",
mode='gtFine',
order=["image", "mask"]
)
else:
raise ValueError("Unsupported dataset {}".format(cfg.DATASET))
train_sampler = data.RandomSampler(train_dataset, batch_size, drop_last=True)
train_dataloader = data.DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
transforms=[
T.RandomHorizontalFlip(0.5),
T.RandomResize(scale_range=(0.5, 2)),
T.RandomCrop(
output_size=(cfg.IMG_HEIGHT, cfg.IMG_WIDTH),
padding_value=[0, 0, 0],
padding_maskvalue=255,
),
T.Normalize(mean=cfg.IMG_MEAN, std=cfg.IMG_STD),
| T.ToMode() | megengine.data.transform.ToMode |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.optimizer import SGD, Adam
from megengine.test import assertTensorClose
def get_input():
batch_size = 2
input_dim = 28
data_shape = (batch_size, input_dim)
label_shape = (batch_size,)
data = | tensor() | megengine.core.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.optimizer import SGD, Adam
from megengine.test import assertTensorClose
def get_input():
batch_size = 2
input_dim = 28
data_shape = (batch_size, input_dim)
label_shape = (batch_size,)
data = tensor()
label = | tensor(dtype=np.int32) | megengine.core.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.optimizer import SGD, Adam
from megengine.test import assertTensorClose
def get_input():
batch_size = 2
input_dim = 28
data_shape = (batch_size, input_dim)
label_shape = (batch_size,)
data = tensor()
label = tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
def test_sgd_simple():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, weight_decay=0.1)
for idx in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
if idx % 2:
opt.zero_grad()
else:
mlp.zero_grad()
opt.backward(loss)
grads = TensorDict()
orig_params = TensorDict()
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
grads[param] = np.copy(grad.numpy())
orig_params[param] = np.copy(param.numpy())
opt.step()
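# expected SGD update with L2 weight decay: param * (1 - lr * weight_decay) - lr * grad = param * 0.999 - grad * 0.01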
for param in mlp.parameters():
assertTensorClose(
param.numpy(), orig_params[param] * 0.999 - grads[param] * 0.01
)
def test_sgd_momentum():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = | TensorDict() | megengine.core.TensorDict |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.optimizer import SGD, Adam
from megengine.test import assertTensorClose
def get_input():
batch_size = 2
input_dim = 28
data_shape = (batch_size, input_dim)
label_shape = (batch_size,)
data = tensor()
label = tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
def test_sgd_simple():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, weight_decay=0.1)
for idx in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
if idx % 2:
opt.zero_grad()
else:
mlp.zero_grad()
opt.backward(loss)
grads = TensorDict()
orig_params = TensorDict()
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
grads[param] = np.copy(grad.numpy())
orig_params[param] = np.copy(param.numpy())
opt.step()
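# expected SGD update with L2 weight decay: param * (1 - lr * weight_decay) - lr * grad = param * 0.999 - grad * 0.01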
for param in mlp.parameters():
assertTensorClose(
param.numpy(), orig_params[param] * 0.999 - grads[param] * 0.01
)
def test_sgd_momentum():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
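# reference momentum update: slot = momentum * slot - lr * grad, then param += slot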
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
# TODO: put opt.step() inside trace
def test_sgd_momentum_static():
_, data_shape, _, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
@trace
def f(data, label):
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
slots = | TensorDict() | megengine.core.TensorDict |