| prompt (stringlengths 94-42.6k) | completion (stringlengths 6-120) | api (stringlengths 14-68) |
---|---|---|
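Each row below pairs a Python source prefix (`prompt`) with the next framework call to predict (`completion`) and its fully qualified name (`api`). A minimal sketch of how such rows could be iterated, assuming the data is published as a Hugging Face dataset; the dataset path used here is a placeholder, not the real identifier:

```python
# Minimal sketch, assuming the rows are stored as a Hugging Face dataset.
# "your-org/megengine-api-completion" is a placeholder path, not the actual dataset name.
from datasets import load_dataset

ds = load_dataset("your-org/megengine-api-completion", split="train")

for row in ds.select(range(3)):
    # "prompt" is a (possibly truncated) Python source prefix,
    # "completion" is the next call expression, e.g. "F.ones_like(y)",
    # "api" is its fully qualified name, e.g. "megengine.functional.ones_like".
    print(len(row["prompt"]), row["completion"], row["api"])
```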
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
| sync() | megengine.core._imperative_rt.imperative.sync |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.reshape(5, 2)
grad(y, | F.ones_like(y) | megengine.functional.ones_like |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.reshape(5, 2)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((2, 5), dtype=np.float32), x.grad.numpy())
def test_subtensor():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[1:-1, :2]
grad(y, | F.ones_like(y) | megengine.functional.ones_like |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.reshape(5, 2)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((2, 5), dtype=np.float32), x.grad.numpy())
def test_subtensor():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[1:-1, :2]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[0, 0, 0], [1, 1, 0], [0, 0, 0]], dtype=np.float32), x.grad.numpy()
)
def test_IndexingMultiAxisVec():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[[0, 2], [0, 2]]
grad(y, | F.ones_like(y) | megengine.functional.ones_like |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.reshape(5, 2)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((2, 5), dtype=np.float32), x.grad.numpy())
def test_subtensor():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[1:-1, :2]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[0, 0, 0], [1, 1, 0], [0, 0, 0]], dtype=np.float32), x.grad.numpy()
)
def test_IndexingMultiAxisVec():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[[0, 2], [0, 2]]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 0, 0], [0, 0, 0], [0, 0, 1]], dtype=np.float32), x.grad.numpy()
)
def test_AxisAddRemove():
x_np = np.random.rand(1, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = F.squeeze( | F.expand_dims(x, 2) | megengine.functional.expand_dims |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.reshape(5, 2)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((2, 5), dtype=np.float32), x.grad.numpy())
def test_subtensor():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[1:-1, :2]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[0, 0, 0], [1, 1, 0], [0, 0, 0]], dtype=np.float32), x.grad.numpy()
)
def test_IndexingMultiAxisVec():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[[0, 2], [0, 2]]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 0, 0], [0, 0, 0], [0, 0, 1]], dtype=np.float32), x.grad.numpy()
)
def test_AxisAddRemove():
x_np = np.random.rand(1, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = F.squeeze(F.expand_dims(x, 2), 0)
grad(y, | F.ones_like(y) | megengine.functional.ones_like |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.reshape(5, 2)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((2, 5), dtype=np.float32), x.grad.numpy())
def test_subtensor():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[1:-1, :2]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[0, 0, 0], [1, 1, 0], [0, 0, 0]], dtype=np.float32), x.grad.numpy()
)
def test_IndexingMultiAxisVec():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[[0, 2], [0, 2]]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 0, 0], [0, 0, 0], [0, 0, 1]], dtype=np.float32), x.grad.numpy()
)
def test_AxisAddRemove():
x_np = np.random.rand(1, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = F.squeeze(F.expand_dims(x, 2), 0)
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 1, 1, 1, 1]], dtype=np.float32), x.grad.numpy()
)
def test_Broadcast():
x_np = np.random.rand(3, 3, 1).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = F.broadcast_to(x, (3, 3, 10))
grad(y, | F.ones_like(y) | megengine.functional.ones_like |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.reshape(5, 2)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((2, 5), dtype=np.float32), x.grad.numpy())
def test_subtensor():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[1:-1, :2]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[0, 0, 0], [1, 1, 0], [0, 0, 0]], dtype=np.float32), x.grad.numpy()
)
def test_IndexingMultiAxisVec():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[[0, 2], [0, 2]]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 0, 0], [0, 0, 0], [0, 0, 1]], dtype=np.float32), x.grad.numpy()
)
def test_AxisAddRemove():
x_np = np.random.rand(1, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = F.squeeze(F.expand_dims(x, 2), 0)
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 1, 1, 1, 1]], dtype=np.float32), x.grad.numpy()
)
def test_Broadcast():
x_np = np.random.rand(3, 3, 1).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = F.broadcast_to(x, (3, 3, 10))
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((3, 3, 1), dtype=np.float32) * 10, x.grad.numpy())
def test_Reduce_sum():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.sum(axis=0)
grad(y, | F.ones_like(y) | megengine.functional.ones_like |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.reshape(5, 2)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((2, 5), dtype=np.float32), x.grad.numpy())
def test_subtensor():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[1:-1, :2]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[0, 0, 0], [1, 1, 0], [0, 0, 0]], dtype=np.float32), x.grad.numpy()
)
def test_IndexingMultiAxisVec():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[[0, 2], [0, 2]]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 0, 0], [0, 0, 0], [0, 0, 1]], dtype=np.float32), x.grad.numpy()
)
def test_AxisAddRemove():
x_np = np.random.rand(1, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = F.squeeze(F.expand_dims(x, 2), 0)
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 1, 1, 1, 1]], dtype=np.float32), x.grad.numpy()
)
def test_Broadcast():
x_np = np.random.rand(3, 3, 1).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = F.broadcast_to(x, (3, 3, 10))
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((3, 3, 1), dtype=np.float32) * 10, x.grad.numpy())
def test_Reduce_sum():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.sum(axis=0)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((3, 3), dtype=np.float32), x.grad.numpy())


def test_Reduce_mean():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.mean(axis=0)
    grad(y, F.ones_like(y))
    # mean over the 3 rows spreads the incoming gradient as 1/3 per element
    np.testing.assert_almost_equal(
        np.ones((3, 3), dtype=np.float32) / 3, x.grad.numpy(), decimal=6
    )
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = TensorWrapper(x_np)
grad = | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.reshape(5, 2)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((2, 5), dtype=np.float32), x.grad.numpy())
def test_subtensor():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.reshape(5, 2)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((2, 5), dtype=np.float32), x.grad.numpy())
def test_subtensor():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[1:-1, :2]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[0, 0, 0], [1, 1, 0], [0, 0, 0]], dtype=np.float32), x.grad.numpy()
)
def test_IndexingMultiAxisVec():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.reshape(5, 2)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((2, 5), dtype=np.float32), x.grad.numpy())
def test_subtensor():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[1:-1, :2]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[0, 0, 0], [1, 1, 0], [0, 0, 0]], dtype=np.float32), x.grad.numpy()
)
def test_IndexingMultiAxisVec():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[[0, 2], [0, 2]]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 0, 0], [0, 0, 0], [0, 0, 1]], dtype=np.float32), x.grad.numpy()
)
def test_AxisAddRemove():
x_np = np.random.rand(1, 5).astype("float32")
x = TensorWrapper(x_np)
grad = | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
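# Editor's note (added): the `(result,) = apply(op, *args)` unpacking in _elwise relies on
# apply() returning a tuple of outputs even for single-output ops; each wrapper above binds
# one Elemwise mode. A tiny editorial usage sketch (not one of the original tests):
def _example_elemwise_wrappers():
    a = TensorWrapper(np.full(4, 2.0, dtype="float32"))
    b = TensorWrapper(np.full(4, 3.0, dtype="float32"))
    return add(a, b), mul(a, b)  # element-wise 5.0s and 6.0s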
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
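# Editor's note (added) on test_dist_grad above: worker0 sends x to rank 1 and receives it
# back before computing y, while worker1 just forwards what it receives; keeping the value
# returned by remote_send (the "placeholder" in the worker0 comment) appears to be what
# keeps the send operator in the traced graph so the gradient can be routed back across
# processes. A schematic editorial sketch of the per-rank pairing; it assumes the same
# process-group setup and device placement as the test and cannot run on its own:
def _example_send_recv_roundtrip(rank, x, shape, dtype):
    if rank == 0:
        placeholder = remote_send(x, 1)                  # keep the send op reachable
        return placeholder, remote_recv(1, shape, dtype, "gpu0")
    else:
        received = remote_recv(0, shape, dtype, "gpu1")
        return remote_send(received, 0), received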
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.reshape(5, 2)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((2, 5), dtype=np.float32), x.grad.numpy())
def test_subtensor():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[1:-1, :2]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[0, 0, 0], [1, 1, 0], [0, 0, 0]], dtype=np.float32), x.grad.numpy()
)
def test_IndexingMultiAxisVec():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[[0, 2], [0, 2]]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 0, 0], [0, 0, 0], [0, 0, 1]], dtype=np.float32), x.grad.numpy()
)
def test_AxisAddRemove():
x_np = np.random.rand(1, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = F.squeeze(F.expand_dims(x, 2), 0)
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 1, 1, 1, 1]], dtype=np.float32), x.grad.numpy()
)
def test_Broadcast():
x_np = np.random.rand(3, 3, 1).astype("float32")
x = TensorWrapper(x_np)
grad = | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.reshape(5, 2)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((2, 5), dtype=np.float32), x.grad.numpy())
def test_subtensor():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[1:-1, :2]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[0, 0, 0], [1, 1, 0], [0, 0, 0]], dtype=np.float32), x.grad.numpy()
)
def test_IndexingMultiAxisVec():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[[0, 2], [0, 2]]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 0, 0], [0, 0, 0], [0, 0, 1]], dtype=np.float32), x.grad.numpy()
)
def test_AxisAddRemove():
x_np = np.random.rand(1, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = F.squeeze(F.expand_dims(x, 2), 0)
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 1, 1, 1, 1]], dtype=np.float32), x.grad.numpy()
)
def test_Broadcast():
x_np = np.random.rand(3, 3, 1).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = F.broadcast_to(x, (3, 3, 10))
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((3, 3, 1), dtype=np.float32) * 10, x.grad.numpy())
def test_Reduce_sum():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.reshape(5, 2)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((2, 5), dtype=np.float32), x.grad.numpy())
def test_subtensor():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[1:-1, :2]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[0, 0, 0], [1, 1, 0], [0, 0, 0]], dtype=np.float32), x.grad.numpy()
)
def test_IndexingMultiAxisVec():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[[0, 2], [0, 2]]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 0, 0], [0, 0, 0], [0, 0, 1]], dtype=np.float32), x.grad.numpy()
)
def test_AxisAddRemove():
x_np = np.random.rand(1, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = F.squeeze(F.expand_dims(x, 2), 0)
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 1, 1, 1, 1]], dtype=np.float32), x.grad.numpy()
)
def test_Broadcast():
x_np = np.random.rand(3, 3, 1).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = F.broadcast_to(x, (3, 3, 10))
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((3, 3, 1), dtype=np.float32) * 10, x.grad.numpy())
def test_Reduce_sum():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.sum(axis=0)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((3, 3), dtype=np.float32), x.grad.numpy())
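# Editor's note (added): the two tests above are duals. broadcast_to's gradient sums the
# incoming gradient over the broadcast axis (hence the 10s of shape (3, 3, 1)), and sum's
# gradient broadcasts the incoming gradient back to the input shape (hence the (3, 3) of
# ones). A NumPy-only editorial sketch of those expected values:
def _example_broadcast_and_reduce_grads():
    grad_of_broadcast = np.ones((3, 3, 10), dtype=np.float32).sum(axis=2, keepdims=True)
    grad_of_sum = np.broadcast_to(np.ones((3,), dtype=np.float32), (3, 3))
    return grad_of_broadcast, grad_of_sum  # (3, 3, 1) of 10s and (3, 3) of 1s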
def test_Reduce_mean():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device= | mge.device.get_default_device() | megengine.device.get_default_device |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with | Grad() | megengine.core.autodiff.grad.Grad |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
from time import sleep
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.distributed as dist
_LOCALHOST = "127.0.0.1"
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
| dist.init_process_group(_LOCALHOST, 0, world_size, rank, dev, backend) | megengine.distributed.init_process_group |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
from time import sleep
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.distributed as dist
_LOCALHOST = "127.0.0.1"
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group(_LOCALHOST, 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
| dist.init_process_group(_LOCALHOST, port, world_size, rank, dev, backend) | megengine.distributed.init_process_group |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
from time import sleep
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.distributed as dist
_LOCALHOST = "127.0.0.1"
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group(_LOCALHOST, 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group(_LOCALHOST, port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_create_mm_server():
def worker():
if not mge.is_cuda_available():
return
port = | mgb.config.create_mm_server("0.0.0.0", 0) | megengine._internal.config.create_mm_server |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
from time import sleep
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.distributed as dist
_LOCALHOST = "127.0.0.1"
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group(_LOCALHOST, 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group(_LOCALHOST, port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_create_mm_server():
def worker():
if not mge.is_cuda_available():
return
port = mgb.config.create_mm_server("0.0.0.0", 0)
assert port > 0
res = | mgb.config.create_mm_server("0.0.0.0", port) | megengine._internal.config.create_mm_server |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
from time import sleep
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.distributed as dist
_LOCALHOST = "127.0.0.1"
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group(_LOCALHOST, 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group(_LOCALHOST, port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_create_mm_server():
def worker():
if not mge.is_cuda_available():
return
port = mgb.config.create_mm_server("0.0.0.0", 0)
assert port > 0
res = mgb.config.create_mm_server("0.0.0.0", port)
assert res == -1
p = mp.Process(target=worker)
p.start()
p.join(10)
assert p.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_init_process_group():
world_size = 2
def worker(rank, backend, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
assert dist.is_distributed() == True
assert dist.get_master_ip() == _LOCALHOST
assert dist.get_master_port() > 0
assert dist.get_world_size() == world_size
assert dist.get_rank() == rank
assert dist.get_backend() == backend
def check(backend):
Q = mp.Queue()
p0 = mp.Process(target=worker, args=(0, backend, Q))
p1 = mp.Process(target=worker, args=(1, backend, Q))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
check("nccl")
check("ucx")
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_group_barrier():
world_size = 2
ip = "127.0.0.1"
backend = "nccl"
def worker(rank, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
| dist.group_barrier() | megengine.distributed.group_barrier |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
from time import sleep
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.distributed as dist
_LOCALHOST = "127.0.0.1"
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group(_LOCALHOST, 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group(_LOCALHOST, port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_create_mm_server():
def worker():
if not mge.is_cuda_available():
return
port = mgb.config.create_mm_server("0.0.0.0", 0)
assert port > 0
res = mgb.config.create_mm_server("0.0.0.0", port)
assert res == -1
p = mp.Process(target=worker)
p.start()
p.join(10)
assert p.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_init_process_group():
world_size = 2
def worker(rank, backend, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
assert dist.is_distributed() == True
assert dist.get_master_ip() == _LOCALHOST
assert dist.get_master_port() > 0
assert dist.get_world_size() == world_size
assert dist.get_rank() == rank
assert dist.get_backend() == backend
def check(backend):
Q = mp.Queue()
p0 = mp.Process(target=worker, args=(0, backend, Q))
p1 = mp.Process(target=worker, args=(1, backend, Q))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
check("nccl")
check("ucx")
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_group_barrier():
world_size = 2
ip = "127.0.0.1"
backend = "nccl"
def worker(rank, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
dist.group_barrier()
if rank == 0:
dist.group_barrier()
q.put(0) # to be observed in rank 1
else:
_assert_q_empty(q) # q.put(0) is not executed in rank 0
dist.group_barrier()
_assert_q_val(q, 0) # q.put(0) executed in rank 0
Q = mp.Queue()
p0 = mp.Process(target=worker, args=(0, Q))
p1 = mp.Process(target=worker, args=(1, Q))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
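# Editor's note (added): the queue assertions above are what actually exercise the barrier.
# Rank 0 enters a second barrier before q.put(0), so rank 1 must see an empty queue until
# it reaches that same barrier, and must see the value afterwards. An annotated editorial
# restatement of the ordering being checked (same helpers and dist API as the test; it has
# to run inside an initialized process group):
def _example_barrier_ordering(rank, q):
    dist.group_barrier()          # all ranks line up once
    if rank == 0:
        dist.group_barrier()      # rank 0 waits here until rank 1 arrives below
        q.put(0)                  # cannot happen before rank 1 has checked the queue
    else:
        _assert_q_empty(q)        # rank 0 has not put anything yet
        dist.group_barrier()      # releases rank 0
        _assert_q_val(q, 0)       # now the value must arrive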
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_synchronized():
world_size = 2
backend = "nccl"
@dist.synchronized
def func(rank, q):
q.put(rank)
def worker(rank, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
| dist.group_barrier() | megengine.distributed.group_barrier |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
from time import sleep
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.distributed as dist
_LOCALHOST = "127.0.0.1"
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group(_LOCALHOST, 0, world_size, rank, dev, backend)
q.put( | dist.get_master_port() | megengine.distributed.get_master_port |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
from time import sleep
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.distributed as dist
_LOCALHOST = "127.0.0.1"
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group(_LOCALHOST, 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group(_LOCALHOST, port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_create_mm_server():
def worker():
if not | mge.is_cuda_available() | megengine.is_cuda_available |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
from time import sleep
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.distributed as dist
_LOCALHOST = "127.0.0.1"
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group(_LOCALHOST, 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group(_LOCALHOST, port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_create_mm_server():
def worker():
if not mge.is_cuda_available():
return
port = mgb.config.create_mm_server("0.0.0.0", 0)
assert port > 0
res = mgb.config.create_mm_server("0.0.0.0", port)
assert res == -1
p = mp.Process(target=worker)
p.start()
p.join(10)
assert p.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_init_process_group():
world_size = 2
def worker(rank, backend, q):
if not | mge.is_cuda_available() | megengine.is_cuda_available |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
from time import sleep
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.distributed as dist
_LOCALHOST = "127.0.0.1"
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group(_LOCALHOST, 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group(_LOCALHOST, port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_create_mm_server():
def worker():
if not mge.is_cuda_available():
return
port = mgb.config.create_mm_server("0.0.0.0", 0)
assert port > 0
res = mgb.config.create_mm_server("0.0.0.0", port)
assert res == -1
p = mp.Process(target=worker)
p.start()
p.join(10)
assert p.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_init_process_group():
world_size = 2
def worker(rank, backend, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
assert | dist.is_distributed() | megengine.distributed.is_distributed |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
from time import sleep
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.distributed as dist
_LOCALHOST = "127.0.0.1"
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group(_LOCALHOST, 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group(_LOCALHOST, port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_create_mm_server():
def worker():
if not mge.is_cuda_available():
return
port = mgb.config.create_mm_server("0.0.0.0", 0)
assert port > 0
res = mgb.config.create_mm_server("0.0.0.0", port)
assert res == -1
p = mp.Process(target=worker)
p.start()
p.join(10)
assert p.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_init_process_group():
world_size = 2
def worker(rank, backend, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
assert dist.is_distributed() == True
assert | dist.get_master_ip() | megengine.distributed.get_master_ip |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
from time import sleep
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.distributed as dist
_LOCALHOST = "127.0.0.1"
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group(_LOCALHOST, 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group(_LOCALHOST, port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_create_mm_server():
def worker():
if not mge.is_cuda_available():
return
port = mgb.config.create_mm_server("0.0.0.0", 0)
assert port > 0
res = mgb.config.create_mm_server("0.0.0.0", port)
assert res == -1
p = mp.Process(target=worker)
p.start()
p.join(10)
assert p.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_init_process_group():
world_size = 2
def worker(rank, backend, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
assert dist.is_distributed() == True
assert dist.get_master_ip() == _LOCALHOST
assert | dist.get_master_port() | megengine.distributed.get_master_port |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
from time import sleep
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.distributed as dist
_LOCALHOST = "127.0.0.1"
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group(_LOCALHOST, 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group(_LOCALHOST, port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_create_mm_server():
def worker():
if not mge.is_cuda_available():
return
port = mgb.config.create_mm_server("0.0.0.0", 0)
assert port > 0
res = mgb.config.create_mm_server("0.0.0.0", port)
assert res == -1
p = mp.Process(target=worker)
p.start()
p.join(10)
assert p.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_init_process_group():
world_size = 2
def worker(rank, backend, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
assert dist.is_distributed() == True
assert dist.get_master_ip() == _LOCALHOST
assert dist.get_master_port() > 0
assert | dist.get_world_size() | megengine.distributed.get_world_size |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
from time import sleep
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.distributed as dist
_LOCALHOST = "127.0.0.1"
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group(_LOCALHOST, 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group(_LOCALHOST, port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_create_mm_server():
def worker():
if not mge.is_cuda_available():
return
port = mgb.config.create_mm_server("0.0.0.0", 0)
assert port > 0
res = mgb.config.create_mm_server("0.0.0.0", port)
assert res == -1
p = mp.Process(target=worker)
p.start()
p.join(10)
assert p.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_init_process_group():
world_size = 2
def worker(rank, backend, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
assert dist.is_distributed() == True
assert dist.get_master_ip() == _LOCALHOST
assert dist.get_master_port() > 0
assert dist.get_world_size() == world_size
assert | dist.get_rank() | megengine.distributed.get_rank |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
from time import sleep
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.distributed as dist
_LOCALHOST = "127.0.0.1"
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group(_LOCALHOST, 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group(_LOCALHOST, port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_create_mm_server():
def worker():
if not mge.is_cuda_available():
return
port = mgb.config.create_mm_server("0.0.0.0", 0)
assert port > 0
res = mgb.config.create_mm_server("0.0.0.0", port)
assert res == -1
p = mp.Process(target=worker)
p.start()
p.join(10)
assert p.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_init_process_group():
world_size = 2
def worker(rank, backend, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
assert dist.is_distributed() == True
assert dist.get_master_ip() == _LOCALHOST
assert dist.get_master_port() > 0
assert dist.get_world_size() == world_size
assert dist.get_rank() == rank
assert | dist.get_backend() | megengine.distributed.get_backend |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
from time import sleep
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.distributed as dist
_LOCALHOST = "127.0.0.1"
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group(_LOCALHOST, 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group(_LOCALHOST, port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_create_mm_server():
def worker():
if not mge.is_cuda_available():
return
port = mgb.config.create_mm_server("0.0.0.0", 0)
assert port > 0
res = mgb.config.create_mm_server("0.0.0.0", port)
assert res == -1
p = mp.Process(target=worker)
p.start()
p.join(10)
assert p.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_init_process_group():
world_size = 2
def worker(rank, backend, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
assert dist.is_distributed() == True
assert dist.get_master_ip() == _LOCALHOST
assert dist.get_master_port() > 0
assert dist.get_world_size() == world_size
assert dist.get_rank() == rank
assert dist.get_backend() == backend
def check(backend):
Q = mp.Queue()
p0 = mp.Process(target=worker, args=(0, backend, Q))
p1 = mp.Process(target=worker, args=(1, backend, Q))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
check("nccl")
check("ucx")
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_group_barrier():
world_size = 2
ip = "127.0.0.1"
backend = "nccl"
def worker(rank, q):
if not | mge.is_cuda_available() | megengine.is_cuda_available |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
from time import sleep
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.distributed as dist
_LOCALHOST = "127.0.0.1"
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group(_LOCALHOST, 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group(_LOCALHOST, port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_create_mm_server():
def worker():
if not mge.is_cuda_available():
return
port = mgb.config.create_mm_server("0.0.0.0", 0)
assert port > 0
res = mgb.config.create_mm_server("0.0.0.0", port)
assert res == -1
p = mp.Process(target=worker)
p.start()
p.join(10)
assert p.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_init_process_group():
world_size = 2
def worker(rank, backend, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
assert dist.is_distributed() == True
assert dist.get_master_ip() == _LOCALHOST
assert dist.get_master_port() > 0
assert dist.get_world_size() == world_size
assert dist.get_rank() == rank
assert dist.get_backend() == backend
def check(backend):
Q = mp.Queue()
p0 = mp.Process(target=worker, args=(0, backend, Q))
p1 = mp.Process(target=worker, args=(1, backend, Q))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
check("nccl")
check("ucx")
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_group_barrier():
world_size = 2
ip = "127.0.0.1"
backend = "nccl"
def worker(rank, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
dist.group_barrier()
if rank == 0:
| dist.group_barrier() | megengine.distributed.group_barrier |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
from time import sleep
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.distributed as dist
_LOCALHOST = "127.0.0.1"
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group(_LOCALHOST, 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group(_LOCALHOST, port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_create_mm_server():
def worker():
if not mge.is_cuda_available():
return
port = mgb.config.create_mm_server("0.0.0.0", 0)
assert port > 0
res = mgb.config.create_mm_server("0.0.0.0", port)
assert res == -1
p = mp.Process(target=worker)
p.start()
p.join(10)
assert p.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_init_process_group():
world_size = 2
def worker(rank, backend, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
assert dist.is_distributed() == True
assert dist.get_master_ip() == _LOCALHOST
assert dist.get_master_port() > 0
assert dist.get_world_size() == world_size
assert dist.get_rank() == rank
assert dist.get_backend() == backend
def check(backend):
Q = mp.Queue()
p0 = mp.Process(target=worker, args=(0, backend, Q))
p1 = mp.Process(target=worker, args=(1, backend, Q))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
check("nccl")
check("ucx")
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_group_barrier():
world_size = 2
ip = "127.0.0.1"
backend = "nccl"
def worker(rank, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
dist.group_barrier()
if rank == 0:
dist.group_barrier()
q.put(0) # to be observed in rank 1
else:
_assert_q_empty(q) # q.put(0) is not executed in rank 0
| dist.group_barrier() | megengine.distributed.group_barrier |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
from time import sleep
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.distributed as dist
_LOCALHOST = "127.0.0.1"
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group(_LOCALHOST, 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group(_LOCALHOST, port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_create_mm_server():
def worker():
if not mge.is_cuda_available():
return
port = mgb.config.create_mm_server("0.0.0.0", 0)
assert port > 0
res = mgb.config.create_mm_server("0.0.0.0", port)
assert res == -1
p = mp.Process(target=worker)
p.start()
p.join(10)
assert p.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_init_process_group():
world_size = 2
def worker(rank, backend, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
assert dist.is_distributed() == True
assert dist.get_master_ip() == _LOCALHOST
assert dist.get_master_port() > 0
assert dist.get_world_size() == world_size
assert dist.get_rank() == rank
assert dist.get_backend() == backend
def check(backend):
Q = mp.Queue()
p0 = mp.Process(target=worker, args=(0, backend, Q))
p1 = mp.Process(target=worker, args=(1, backend, Q))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
check("nccl")
check("ucx")
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_group_barrier():
world_size = 2
ip = "127.0.0.1"
backend = "nccl"
def worker(rank, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
dist.group_barrier()
if rank == 0:
dist.group_barrier()
q.put(0) # to be observed in rank 1
else:
_assert_q_empty(q) # q.put(0) is not executed in rank 0
dist.group_barrier()
_assert_q_val(q, 0) # q.put(0) executed in rank 0
Q = mp.Queue()
p0 = mp.Process(target=worker, args=(0, Q))
p1 = mp.Process(target=worker, args=(1, Q))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_synchronized():
world_size = 2
backend = "nccl"
@dist.synchronized
def func(rank, q):
q.put(rank)
def worker(rank, q):
if not | mge.is_cuda_available() | megengine.is_cuda_available |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@ | trace(symbolic=False) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@ | trace(symbolic=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@ | trace(symbolic=True, capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = | tensor([2]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = | tensor([4]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = | cgtools.load_and_inference(file, [a, b]) | megengine.utils.comp_graph_tools.load_and_inference |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = | tensor([2]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@ | trace(symbolic=True, capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = | tensor([3]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = | cgtools.load_and_inference(file, [x]) | megengine.utils.comp_graph_tools.load_and_inference |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = | tensor([2]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@ | trace(symbolic=True, capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = | tensor([3]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = | G.load_graph(file) | megengine.core.tensor.megbrain_graph.load_graph |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@ | trace(symbolic=True, opt_level=0, capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@ | trace(symbolic=True, opt_level=1, capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = | tensor(0.0) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@ | trace(symbolic=True, opt_level=0, capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@ | trace(symbolic=True, opt_level=1, capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = | tensor(val) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = | tensor(0.0) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@ | trace(symbolic=True, opt_level=0, capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@ | trace(symbolic=True, opt_level=1, capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = | G.load_graph(out) | megengine.core.tensor.megbrain_graph.load_graph |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = | cgtools.get_oprs_seq(outputs) | megengine.utils.comp_graph_tools.get_oprs_seq |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
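# The first call to a trace-decorated function records the computation (and in
# symbolic mode builds a graph); later calls replay the compiled version, and
# the loop above checks that every replay matches the original eager result.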
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# returning x / x directly will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = | G.load_graph(out) | megengine.core.tensor.megbrain_graph.load_graph |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# returning x / x directly will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = | cgtools.get_oprs_seq(outputs) | megengine.utils.comp_graph_tools.get_oprs_seq |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# returning x / x directly will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@ | trace(symbolic=True, capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# returning x / x directly will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = | G.load_graph(out) | megengine.core.tensor.megbrain_graph.load_graph |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# returning x / x directly will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
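# enable_io16xc32 asks the inference optimizer to use float16 for I/O while
# keeping float32 for computation, so the variable feeding the output operator
# is expected to carry a float16 dtype after conversion.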
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@ | trace(capture_as_const=True, symbolic_shape=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# returning x / x directly will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = | tensor([0], dtype=np.int32) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# returning x / x directly will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@ | trace(symbolic=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# returning x / x directly will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
def test_trace_reshape():
for symbolic in [False, True]:
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@trace(symbolic=symbolic, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
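# Because the reshape target uses x.shape[0], which is symbolic under tracing,
# the same captured graph is expected to accept inputs with different leading
# dimensions (2, 4 and 8 above) without retracing.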
def test_trace_topk():
x = | tensor([5, 2, 7, 1, 0, 3, 2]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# returning x / x directly will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
def test_trace_reshape():
for symbolic in [False, True]:
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@trace(symbolic=symbolic, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
def test_trace_topk():
x = tensor([5, 2, 7, 1, 0, 3, 2])
@ | trace(symbolic=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly return x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
def test_trace_reshape():
for symbolic in [False, True]:
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@trace(symbolic=symbolic, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
def test_trace_topk():
x = tensor([5, 2, 7, 1, 0, 3, 2])
@trace(symbolic=True)
def f(x):
y = F.topk(x, 3)
np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
return y
for i in range(3):
f(x)
def test_trace_warp_perspective():
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
M_shape = (1, 3, 3)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
@ | trace(symbolic=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly return x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
def test_trace_reshape():
for symbolic in [False, True]:
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@trace(symbolic=symbolic, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
def test_trace_topk():
x = tensor([5, 2, 7, 1, 0, 3, 2])
@trace(symbolic=True)
def f(x):
y = F.topk(x, 3)
np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
return y
for i in range(3):
f(x)
def test_trace_warp_perspective():
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
M_shape = (1, 3, 3)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
@trace(symbolic=True)
def f(x, M):
out = F.warp_perspective(x, M, (2, 2))
np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2]))
return out
for i in range(1):
f(x, M)
def test_raise_on_trace():
step_count = 0
catch_count = 0
bad_step = 10
class CatchMe(Exception):
pass
a = | tensor([1, 2, 3, 4]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly return x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
def test_trace_reshape():
for symbolic in [False, True]:
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@trace(symbolic=symbolic, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
def test_trace_topk():
x = tensor([5, 2, 7, 1, 0, 3, 2])
@trace(symbolic=True)
def f(x):
y = F.topk(x, 3)
np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
return y
for i in range(3):
f(x)
def test_trace_warp_perspective():
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
M_shape = (1, 3, 3)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
@trace(symbolic=True)
def f(x, M):
out = F.warp_perspective(x, M, (2, 2))
np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2]))
return out
for i in range(1):
f(x, M)
def test_raise_on_trace():
step_count = 0
catch_count = 0
bad_step = 10
class CatchMe(Exception):
pass
a = tensor([1, 2, 3, 4])
b = | tensor([5, 6, 7, 8]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly return x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
def test_trace_reshape():
for symbolic in [False, True]:
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@trace(symbolic=symbolic, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
def test_trace_topk():
x = tensor([5, 2, 7, 1, 0, 3, 2])
@trace(symbolic=True)
def f(x):
y = F.topk(x, 3)
np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
return y
for i in range(3):
f(x)
def test_trace_warp_perspective():
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
M_shape = (1, 3, 3)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
@trace(symbolic=True)
def f(x, M):
out = F.warp_perspective(x, M, (2, 2))
np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2]))
return out
for i in range(1):
f(x, M)
def test_raise_on_trace():
step_count = 0
catch_count = 0
bad_step = 10
class CatchMe(Exception):
pass
a = tensor([1, 2, 3, 4])
b = tensor([5, 6, 7, 8])
c = | tensor([9, 0, 1, 2]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly return x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
def test_trace_reshape():
for symbolic in [False, True]:
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@trace(symbolic=symbolic, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
def test_trace_topk():
x = tensor([5, 2, 7, 1, 0, 3, 2])
@trace(symbolic=True)
def f(x):
y = F.topk(x, 3)
np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
return y
for i in range(3):
f(x)
def test_trace_warp_perspective():
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
M_shape = (1, 3, 3)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
@trace(symbolic=True)
def f(x, M):
out = F.warp_perspective(x, M, (2, 2))
np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2]))
return out
for i in range(1):
f(x, M)
def test_raise_on_trace():
step_count = 0
catch_count = 0
bad_step = 10
class CatchMe(Exception):
pass
a = tensor([1, 2, 3, 4])
b = tensor([5, 6, 7, 8])
c = tensor([9, 0, 1, 2])
@trace
def add_abc(a, b, c):
ps = a + b
result = ps + c
if step_count == bad_step:
raise CatchMe("catch me")
return result
for i in range(100):
try:
d = add_abc(a, b, c)
except CatchMe as e:
catch_count += 1
else:
np.testing.assert_equal(d.numpy(), (a + b + c).numpy())
step_count += 1
assert catch_count == 1
def test_trace_broadcast():
for symbolic in [False, True]:
x1 = tensor(np.random.randn(3, 1, 1))
x2 = tensor(np.random.randn(1, 4, 1))
x3 = tensor(np.random.randn(1, 1, 5))
@trace(symbolic=symbolic, capture_as_const=True)
def f(x):
y = F.broadcast_to(x, (3, 4, 5))
return y
f(x1)
f(x2)
f(x3)
def test_trace_nms():
def make_inputs(n):
boxes = np.zeros((n, 4))
boxes[:, :2] = np.random.rand(n, 2) * 100
boxes[:, 2:] = np.random.rand(n, 2) * 100 + 100
scores = np.random.rand(n)
return tensor(boxes), tensor(scores)
@ | trace(symbolic=False) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly return x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
def test_trace_reshape():
for symbolic in [False, True]:
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@trace(symbolic=symbolic, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
def test_trace_topk():
x = tensor([5, 2, 7, 1, 0, 3, 2])
@trace(symbolic=True)
def f(x):
y = F.topk(x, 3)
np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
return y
for i in range(3):
f(x)
def test_trace_warp_perspective():
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
M_shape = (1, 3, 3)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
@trace(symbolic=True)
def f(x, M):
out = F.warp_perspective(x, M, (2, 2))
np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2]))
return out
for i in range(1):
f(x, M)
def test_raise_on_trace():
step_count = 0
catch_count = 0
bad_step = 10
class CatchMe(Exception):
pass
a = tensor([1, 2, 3, 4])
b = tensor([5, 6, 7, 8])
c = tensor([9, 0, 1, 2])
@trace
def add_abc(a, b, c):
ps = a + b
result = ps + c
if step_count == bad_step:
raise CatchMe("catch me")
return result
for i in range(100):
try:
d = add_abc(a, b, c)
except CatchMe as e:
catch_count += 1
else:
np.testing.assert_equal(d.numpy(), (a + b + c).numpy())
step_count += 1
assert catch_count == 1
def test_trace_broadcast():
for symbolic in [False, True]:
x1 = tensor(np.random.randn(3, 1, 1))
x2 = tensor(np.random.randn(1, 4, 1))
x3 = tensor(np.random.randn(1, 1, 5))
@trace(symbolic=symbolic, capture_as_const=True)
def f(x):
y = F.broadcast_to(x, (3, 4, 5))
return y
f(x1)
f(x2)
f(x3)
def test_trace_nms():
def make_inputs(n):
boxes = np.zeros((n, 4))
boxes[:, :2] = np.random.rand(n, 2) * 100
boxes[:, 2:] = np.random.rand(n, 2) * 100 + 100
scores = np.random.rand(n)
return tensor(boxes), tensor(scores)
@trace(symbolic=False)
def f(boxes, scores):
# with tracing, max_output must be specified
results = F.nn.nms(boxes, scores=scores, iou_thresh=0.5, max_output=20)
# without tracing, max_output can be inferred inside nms
with exclude_from_trace():
_ = F.nn.nms(boxes, scores=scores, iou_thresh=0.5)
return results
f(*make_inputs(10))
f(*make_inputs(20))
f(*make_inputs(30))
def test_trace_valid_broadcast():
x1 = tensor(np.random.randn(1, 1))
x2 = tensor(np.random.randn(1, 2))
shape = (tensor([2]), tensor([2]))
@ | trace(symbolic=False) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly return x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
def test_trace_reshape():
for symbolic in [False, True]:
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@trace(symbolic=symbolic, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
def test_trace_topk():
x = tensor([5, 2, 7, 1, 0, 3, 2])
@trace(symbolic=True)
def f(x):
y = F.topk(x, 3)
np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
return y
for i in range(3):
f(x)
def test_trace_warp_perspective():
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
M_shape = (1, 3, 3)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
@trace(symbolic=True)
def f(x, M):
out = F.warp_perspective(x, M, (2, 2))
np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2]))
return out
for i in range(1):
f(x, M)
def test_raise_on_trace():
step_count = 0
catch_count = 0
bad_step = 10
class CatchMe(Exception):
pass
a = tensor([1, 2, 3, 4])
b = tensor([5, 6, 7, 8])
c = tensor([9, 0, 1, 2])
@trace
def add_abc(a, b, c):
ps = a + b
result = ps + c
if step_count == bad_step:
raise CatchMe("catch me")
return result
for i in range(100):
try:
d = add_abc(a, b, c)
except CatchMe as e:
catch_count += 1
else:
np.testing.assert_equal(d.numpy(), (a + b + c).numpy())
step_count += 1
assert catch_count == 1
def test_trace_broadcast():
for symbolic in [False, True]:
x1 = tensor(np.random.randn(3, 1, 1))
x2 = tensor(np.random.randn(1, 4, 1))
x3 = tensor(np.random.randn(1, 1, 5))
@trace(symbolic=symbolic, capture_as_const=True)
def f(x):
y = F.broadcast_to(x, (3, 4, 5))
return y
f(x1)
f(x2)
f(x3)
def test_trace_nms():
def make_inputs(n):
boxes = np.zeros((n, 4))
boxes[:, :2] = np.random.rand(n, 2) * 100
boxes[:, 2:] = np.random.rand(n, 2) * 100 + 100
scores = np.random.rand(n)
return tensor(boxes), tensor(scores)
@trace(symbolic=False)
def f(boxes, scores):
# with tracing, max_output must be specified
results = F.nn.nms(boxes, scores=scores, iou_thresh=0.5, max_output=20)
# without tracing, max_output can be inferred inside nms
with exclude_from_trace():
_ = F.nn.nms(boxes, scores=scores, iou_thresh=0.5)
return results
f(*make_inputs(10))
f(*make_inputs(20))
f(*make_inputs(30))
def test_trace_valid_broadcast():
x1 = tensor(np.random.randn(1, 1))
x2 = tensor(np.random.randn(1, 2))
shape = (tensor([2]), tensor([2]))
@trace(symbolic=False)
def f(x, shape):
y = F.broadcast_to(x, shape)
return y
f(x1, shape)
f(x2, shape)
def test_clip():
x = tensor(np.random.randn(10, 10))
@ | trace(symbolic=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@ | trace(symbolic=symbolic) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = | tensor([1]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
            # will result in a copy of the output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@ | trace(symbolic=symbolic) | megengine.jit.trace |
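# Illustrative sketch of the traced training-step pattern exercised by test_output_copy_trace
# above: a GradManager plus an SGD optimizer driven inside a trace-decorated function. Only
# calls that already appear in this file are used; Simple mirrors the toy module from the test.
import numpy as np
import megengine.functional as F
import megengine.optimizer as optim
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.jit import trace
from megengine.module import Module

class Simple(Module):
    def __init__(self):
        super().__init__()
        self.a = Parameter([1.0], dtype=np.float32)

    def forward(self, x):
        return F.exp(x * self.a)

net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)

@trace(symbolic=True)
def train_step(d):
    with gm:
        loss = net(d)
        gm.backward(loss)        # accumulate gradients into the attached parameters
        opt.step().clear_grad()  # apply the SGD update, then reset the gradients
    return loss

loss = train_step(tensor(np.arange(4).reshape(2, 2), dtype="float32"))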
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
            # will result in a copy of the output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = | tensor([1]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
            # will result in a copy of the output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@ | trace(symbolic=symbolic) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
            # will result in a copy of the output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = | tensor([1]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
            # will result in a copy of the output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@ | trace(symbolic=symbolic, profiling=True) | megengine.jit.trace |
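# Illustrative sketch for the completion recorded above: trace(..., profiling=True) collects
# kernel timing data that can be read back with get_profile() once the traced function has run
# (the original test, shown further below, notes it has to run twice).
from megengine import tensor
from megengine.jit import trace

@trace(symbolic=True, profiling=True)
def neg(x):
    return -x

x = tensor([1])
neg(x)
neg(x)                       # run twice, as the original test does
profile = neg.get_profile()  # the test only asserts that profile.get("profiler") is truthy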
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
            # will result in a copy of the output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = | tensor([1]) | megengine.tensor |
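# Illustrative sketch of the dump/reload round trip exercised by the test_dump* functions above:
# a trace built with capture_as_const=True can be serialized to a file-like object and re-run
# through comp_graph_tools. Only calls that already appear in this file are used.
import io
import numpy as np
import megengine.utils.comp_graph_tools as cgtools
from megengine import tensor
from megengine.jit import trace

@trace(symbolic=True, capture_as_const=True)
def add(a, b):
    return a + b

a, b = tensor([2]), tensor([4])
y = add(a, b).numpy()          # run once so there is a captured graph to dump

buf = io.BytesIO()
add.dump(buf)                  # serialize the captured graph
buf.seek(0)
result = cgtools.load_and_inference(buf, [a, b])
np.testing.assert_equal(result[0], y)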
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
            # will result in a copy of the output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
        # directly returning x / x will not trigger gopt
        # since there is no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f( | tensor(1.0) | megengine.tensor |
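# The two skipped tests above document the intent of opt_level: with opt_level=0 the traced
# graph is kept exactly as written, so log(exp(1e4) + exp(0.0)) overflows to inf, while
# opt_level=1 is expected to let the graph optimizer rewrite it into a numerically stable form.
# A minimal sketch of the same contrast; note the skip reasons say opt_level is currently forced
# to 0 while building the graph, so the two traces may not actually behave differently.
from megengine import tensor
from megengine.functional import exp, log
from megengine.jit import trace

@trace(symbolic=True, opt_level=0, capture_as_const=True)
def naive(x, y):
    return log(exp(x) + exp(y))

@trace(symbolic=True, opt_level=1, capture_as_const=True)
def optimized(x, y):
    return log(exp(x) + exp(y))

big, zero = tensor(1.0e4), tensor(0.0)
print(naive(big, zero).numpy(), optimized(big, zero).numpy())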
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
            # will result in a copy of the output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
        # directly returning x / x will not trigger gopt
        # since there is no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g( | tensor(1.0) | megengine.tensor |
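# Illustrative sketch of the graph-inspection idiom used directly above: dump a traced function
# to a temporary file, load it back with megbrain_graph, and list its operators through
# comp_graph_tools (the original test compares the operator counts of two such dumps). Only
# calls that already appear in this file are used.
from tempfile import mkstemp
import megengine.core.tensor.megbrain_graph as G
import megengine.utils.comp_graph_tools as cgtools
from megengine import tensor
from megengine.functional import exp, log
from megengine.jit import trace

@trace(symbolic=True, capture_as_const=True)
def f(x):
    return log(exp(x))

f(tensor(1.0))                             # run once so there is a graph to dump
_, path = mkstemp()
f.dump(path, optimize_for_inference=False)
*_, outputs = G.load_graph(path)
print(len(cgtools.get_oprs_seq(outputs)))  # number of operators in the dumped graph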
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
            # will result in a copy of the output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
        # directly returning x / x will not trigger gopt
        # since there is no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return | exp(x) | megengine.functional.exp |
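# Illustrative sketch for the completion recorded above (megengine.functional.exp): functional
# ops are plain functions on tensors and compose freely, inside or outside a trace. Assumes
# megengine and numpy are installed.
import numpy as np
from megengine import tensor
from megengine.functional import exp, log

x = tensor(np.arange(4).reshape(2, 2), dtype="float32")
y = exp(x)   # elementwise exponential
z = log(y)   # elementwise natural log, approximately x again
np.testing.assert_almost_equal(z.numpy(), x.numpy(), decimal=4)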
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
def test_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
            # will result in a copy of the output in grad
x = F.exp(x)
return x
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=False)
def train_f1(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
@trace(symbolic=True)
def train_f2(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(2):
y1 = train_f1(data).numpy()
y2 = train_f2(data).numpy()
np.testing.assert_equal(y1, y2)
def test_exclude_from_trace():
for symbolic in [False, True]:
@trace(symbolic=symbolic)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
file.seek(0)
result = cgtools.load_and_inference(file, [a, b])
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
result = cgtools.load_and_inference(file, [x])
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
def test_trace_profiler():
for symbolic in [False, True]:
@trace(symbolic=symbolic, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
        # directly returning x / x will not trigger gopt
        # since there is no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f( | tensor(5.0) | megengine.tensor |