Columns:
- prompt: string, length 94 to 42.6k characters
- completion: string, length 6 to 120 characters
- api: string, length 14 to 68 characters
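Each row below pairs a truncated source file (prompt) with the expression that completes it (completion) and the fully qualified name of the call that expression makes (api). Judging from adjacent rows, appending a row's completion to its prompt restores the original line of code; a minimal plain-Python sketch of that relationship, hand-built from the first row below (not itself part of the dataset):

# One row of the dataset, abbreviated by hand; "..." stands for the elided file prefix.
row = {
    "prompt": "... assert isinstance(m, M.Module) m(",      # source cut off mid-call
    "completion": "mge.random.normal(size=(2, 32, 8, 8))",  # expression that fills the gap
    "api": "megengine.random.normal",                       # fully qualified API it exercises
}

# Splicing the completion back into the prompt recovers the call site;
# the original file then continues with the closing parenthesis.
restored = row["prompt"] + row["completion"]
assert restored.endswith("m(mge.random.normal(size=(2, 32, 8, 8))")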
#!/usr/bin/env python3 # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import megengine as mge import megengine.module as M import pytest from basecls.models.regnet import RegBottleneckBlock from basecls.models.resnet import ( AnyStage, ResBasicBlock, ResBottleneckBlock, ResDeepStem, ResStem, SimpleStem, ) @pytest.mark.parametrize("Block", [RegBottleneckBlock, ResBasicBlock, ResBottleneckBlock]) @pytest.mark.parametrize("w_in", [32]) @pytest.mark.parametrize("w_out", [32, 64]) @pytest.mark.parametrize("stride", [1, 2]) @pytest.mark.parametrize("bot_mul", [1.0, 0.25]) @pytest.mark.parametrize("group_w", [8]) @pytest.mark.parametrize("se_r", [0.0, 0.25]) @pytest.mark.parametrize("avg_down", [True, False]) @pytest.mark.parametrize("drop_path_prob", [0.05, 0.1]) @pytest.mark.parametrize("norm_name", ["BN"]) @pytest.mark.parametrize("act_name", ["relu"]) def test_block( Block, w_in, w_out, stride, bot_mul, group_w, se_r, avg_down, drop_path_prob, norm_name, act_name, ): m = Block( w_in, w_out, stride, bot_mul=bot_mul, group_w=group_w, se_r=se_r, avg_down=avg_down, drop_path_prob=drop_path_prob, norm_name=norm_name, act_name=act_name, ) assert isinstance(m, M.Module) m(
mge.random.normal(size=(2, 32, 8, 8))
megengine.random.normal
#!/usr/bin/env python3 # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import megengine as mge import megengine.module as M import pytest from basecls.models.regnet import RegBottleneckBlock from basecls.models.resnet import ( AnyStage, ResBasicBlock, ResBottleneckBlock, ResDeepStem, ResStem, SimpleStem, ) @pytest.mark.parametrize("Block", [RegBottleneckBlock, ResBasicBlock, ResBottleneckBlock]) @pytest.mark.parametrize("w_in", [32]) @pytest.mark.parametrize("w_out", [32, 64]) @pytest.mark.parametrize("stride", [1, 2]) @pytest.mark.parametrize("bot_mul", [1.0, 0.25]) @pytest.mark.parametrize("group_w", [8]) @pytest.mark.parametrize("se_r", [0.0, 0.25]) @pytest.mark.parametrize("avg_down", [True, False]) @pytest.mark.parametrize("drop_path_prob", [0.05, 0.1]) @pytest.mark.parametrize("norm_name", ["BN"]) @pytest.mark.parametrize("act_name", ["relu"]) def test_block( Block, w_in, w_out, stride, bot_mul, group_w, se_r, avg_down, drop_path_prob, norm_name, act_name, ): m = Block( w_in, w_out, stride, bot_mul=bot_mul, group_w=group_w, se_r=se_r, avg_down=avg_down, drop_path_prob=drop_path_prob, norm_name=norm_name, act_name=act_name, ) assert isinstance(m, M.Module) m(mge.random.normal(size=(2, 32, 8, 8))) @pytest.mark.parametrize("Stem", [ResDeepStem, ResStem, SimpleStem]) @pytest.mark.parametrize("w_in", [3]) @pytest.mark.parametrize("w_out", [8, 16]) @pytest.mark.parametrize("norm_name", ["BN"]) @pytest.mark.parametrize("act_name", ["relu"]) def test_stem(Stem, w_in, w_out, norm_name, act_name): m = Stem(w_in, w_out, norm_name=norm_name, act_name=act_name) assert isinstance(m, M.Module) m(
mge.random.normal(size=(2, 3, 8, 8))
megengine.random.normal
#!/usr/bin/env python3 # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import megengine as mge import megengine.module as M import pytest from basecls.models.regnet import RegBottleneckBlock from basecls.models.resnet import ( AnyStage, ResBasicBlock, ResBottleneckBlock, ResDeepStem, ResStem, SimpleStem, ) @pytest.mark.parametrize("Block", [RegBottleneckBlock, ResBasicBlock, ResBottleneckBlock]) @pytest.mark.parametrize("w_in", [32]) @pytest.mark.parametrize("w_out", [32, 64]) @pytest.mark.parametrize("stride", [1, 2]) @pytest.mark.parametrize("bot_mul", [1.0, 0.25]) @pytest.mark.parametrize("group_w", [8]) @pytest.mark.parametrize("se_r", [0.0, 0.25]) @pytest.mark.parametrize("avg_down", [True, False]) @pytest.mark.parametrize("drop_path_prob", [0.05, 0.1]) @pytest.mark.parametrize("norm_name", ["BN"]) @pytest.mark.parametrize("act_name", ["relu"]) def test_block( Block, w_in, w_out, stride, bot_mul, group_w, se_r, avg_down, drop_path_prob, norm_name, act_name, ): m = Block( w_in, w_out, stride, bot_mul=bot_mul, group_w=group_w, se_r=se_r, avg_down=avg_down, drop_path_prob=drop_path_prob, norm_name=norm_name, act_name=act_name, ) assert isinstance(m, M.Module) m(mge.random.normal(size=(2, 32, 8, 8))) @pytest.mark.parametrize("Stem", [ResDeepStem, ResStem, SimpleStem]) @pytest.mark.parametrize("w_in", [3]) @pytest.mark.parametrize("w_out", [8, 16]) @pytest.mark.parametrize("norm_name", ["BN"]) @pytest.mark.parametrize("act_name", ["relu"]) def test_stem(Stem, w_in, w_out, norm_name, act_name): m = Stem(w_in, w_out, norm_name=norm_name, act_name=act_name) assert isinstance(m, M.Module) m(mge.random.normal(size=(2, 3, 8, 8))) @pytest.mark.parametrize("w_in", [4]) @pytest.mark.parametrize("w_out", [4, 8]) @pytest.mark.parametrize("stride", [1, 2]) @pytest.mark.parametrize("depth", [2]) @pytest.mark.parametrize("block_func", [RegBottleneckBlock, ResBasicBlock, ResBottleneckBlock]) @pytest.mark.parametrize("drop_path_prob", [[0.05, 0.1]]) def test_any_stage(w_in, w_out, stride, depth, block_func, drop_path_prob): m = AnyStage( w_in, w_out, stride, depth, block_func, drop_path_prob, bot_mul=1.0, group_w=4, se_r=0.0, avg_down=False, norm_name="BN", act_name="relu", ) assert isinstance(m, M.Module) assert len(m) == depth m(
mge.random.normal(size=(2, 4, 8, 8))
megengine.random.normal
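The three rows above truncate the same basecls unit-test file at successive points where a megengine.random.normal tensor is fed to the module under test. For readability, here is the stem test from that flattened prompt, re-indented with nothing added beyond comments (running it requires the basecls package):

import megengine as mge
import megengine.module as M
import pytest

from basecls.models.resnet import ResDeepStem, ResStem, SimpleStem


@pytest.mark.parametrize("Stem", [ResDeepStem, ResStem, SimpleStem])
@pytest.mark.parametrize("w_in", [3])
@pytest.mark.parametrize("w_out", [8, 16])
@pytest.mark.parametrize("norm_name", ["BN"])
@pytest.mark.parametrize("act_name", ["relu"])
def test_stem(Stem, w_in, w_out, norm_name, act_name):
    # Build the stem, check it is a Module, then run a forward pass on a dummy NCHW batch.
    m = Stem(w_in, w_out, norm_name=norm_name, act_name=act_name)
    assert isinstance(m, M.Module)
    m(mge.random.normal(size=(2, 3, 8, 8)))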
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import copy import numpy as np import megengine.autodiff as ad import megengine.functional as F import megengine.optimizer as optimizer from megengine import Parameter from megengine import Tensor as tensor from megengine import tensor from megengine.core.tensor.function import Function from megengine.module import Module def test_single_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a): self.a = a return a * 10 def backward(self, grad_o): return grad_o * 10 class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a) return x net = Simple(av) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * 10)) np.testing.assert_almost_equal(net.a.numpy(), (av - 10)) def test_multi_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b def backward(self, grad_o): return grad_o * self.b * 2, grad_o * self.a * 3 class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv)) np.testing.assert_almost_equal(net.a.numpy(), (av - 2 * bv)) np.testing.assert_almost_equal(net.b.numpy(), (bv - 3 * av)) def test_multi_output(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b, a + b def backward(self, grad_1, grad_2): return grad_1 * (self.b + 1), grad_2 * (self.a + 1) class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x, y = self.layer1(self.a, self.b) return x + y net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv + av + bv), decimal=6) np.testing.assert_almost_equal(net.a.numpy(), (av - bv - 1), decimal=6) np.testing.assert_almost_equal(net.b.numpy(), (bv - av - 1), decimal=6) def test_skip_invalid_grad(): data_shape = (1, 9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) c = np.random.random(data_shape).astype(np.float32) cookie =
tensor(c)
megengine.tensor
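The completion here wraps a NumPy array as a MegEngine tensor, which the surrounding test later returns as a constant "gradient". The relevant two lines from the prompt, re-indented:

import numpy as np
from megengine import tensor

# Constant payload that the test's fake backward pass will return for both inputs.
c = np.random.random((1, 9, 2, 6)).astype(np.float32)
cookie = tensor(c)  # megengine.tensor: build a Tensor from NumPy data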
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import copy import numpy as np import megengine.autodiff as ad import megengine.functional as F import megengine.optimizer as optimizer from megengine import Parameter from megengine import Tensor as tensor from megengine import tensor from megengine.core.tensor.function import Function from megengine.module import Module def test_single_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a): self.a = a return a * 10 def backward(self, grad_o): return grad_o * 10 class Simple(Module): def __init__(self, a): super().__init__() self.a =
Parameter(a, dtype=np.float32)
megengine.Parameter
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import copy import numpy as np import megengine.autodiff as ad import megengine.functional as F import megengine.optimizer as optimizer from megengine import Parameter from megengine import Tensor as tensor from megengine import tensor from megengine.core.tensor.function import Function from megengine.module import Module def test_single_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a): self.a = a return a * 10 def backward(self, grad_o): return grad_o * 10 class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a) return x net = Simple(av) gm =
ad.GradManager()
megengine.autodiff.GradManager
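This row's prompt stops right where a megengine.autodiff.GradManager is about to be created. Re-indented from that prompt, the single-input custom Function test it belongs to reads as follows (the Function import path is copied verbatim from the prompt and may differ in newer MegEngine releases):

import numpy as np

import megengine.autodiff as ad
import megengine.optimizer as optimizer
from megengine import Parameter
from megengine.core.tensor.function import Function  # import path as in the prompt
from megengine.module import Module


def test_single_input():
    av = np.random.random((9, 2, 6)).astype(np.float32)

    class MulFunc(Function):
        def forward(self, a):
            self.a = a
            return a * 10

        def backward(self, grad_o):
            return grad_o * 10

    class Simple(Module):
        def __init__(self, a):
            super().__init__()
            self.a = Parameter(a, dtype=np.float32)
            self.layer1 = MulFunc()

        def forward(self):
            return self.layer1(self.a)

    net = Simple(av)
    gm = ad.GradManager().attach(net.parameters())  # the expected completion
    opt = optimizer.SGD(net.parameters(), lr=1.0)
    opt.clear_grad()
    with gm:
        loss = net()
        gm.backward(loss.sum())
    opt.step()

    # d(sum(a * 10)) / da == 10, so one SGD step with lr=1.0 moves a from av to av - 10.
    np.testing.assert_almost_equal(loss.numpy(), av * 10)
    np.testing.assert_almost_equal(net.a.numpy(), av - 10)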
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import copy import numpy as np import megengine.autodiff as ad import megengine.functional as F import megengine.optimizer as optimizer from megengine import Parameter from megengine import Tensor as tensor from megengine import tensor from megengine.core.tensor.function import Function from megengine.module import Module def test_single_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a): self.a = a return a * 10 def backward(self, grad_o): return grad_o * 10 class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a) return x net = Simple(av) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * 10)) np.testing.assert_almost_equal(net.a.numpy(), (av - 10)) def test_multi_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b def backward(self, grad_o): return grad_o * self.b * 2, grad_o * self.a * 3 class Simple(Module): def __init__(self, a, b): super().__init__() self.a =
Parameter(a, dtype=np.float32)
megengine.Parameter
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import copy import numpy as np import megengine.autodiff as ad import megengine.functional as F import megengine.optimizer as optimizer from megengine import Parameter from megengine import Tensor as tensor from megengine import tensor from megengine.core.tensor.function import Function from megengine.module import Module def test_single_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a): self.a = a return a * 10 def backward(self, grad_o): return grad_o * 10 class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a) return x net = Simple(av) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * 10)) np.testing.assert_almost_equal(net.a.numpy(), (av - 10)) def test_multi_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b def backward(self, grad_o): return grad_o * self.b * 2, grad_o * self.a * 3 class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b =
Parameter(b, dtype=np.float32)
megengine.Parameter
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import copy import numpy as np import megengine.autodiff as ad import megengine.functional as F import megengine.optimizer as optimizer from megengine import Parameter from megengine import Tensor as tensor from megengine import tensor from megengine.core.tensor.function import Function from megengine.module import Module def test_single_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a): self.a = a return a * 10 def backward(self, grad_o): return grad_o * 10 class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a) return x net = Simple(av) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * 10)) np.testing.assert_almost_equal(net.a.numpy(), (av - 10)) def test_multi_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b def backward(self, grad_o): return grad_o * self.b * 2, grad_o * self.a * 3 class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) gm =
ad.GradManager()
megengine.autodiff.GradManager
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import copy import numpy as np import megengine.autodiff as ad import megengine.functional as F import megengine.optimizer as optimizer from megengine import Parameter from megengine import Tensor as tensor from megengine import tensor from megengine.core.tensor.function import Function from megengine.module import Module def test_single_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a): self.a = a return a * 10 def backward(self, grad_o): return grad_o * 10 class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a) return x net = Simple(av) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * 10)) np.testing.assert_almost_equal(net.a.numpy(), (av - 10)) def test_multi_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b def backward(self, grad_o): return grad_o * self.b * 2, grad_o * self.a * 3 class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv)) np.testing.assert_almost_equal(net.a.numpy(), (av - 2 * bv)) np.testing.assert_almost_equal(net.b.numpy(), (bv - 3 * av)) def test_multi_output(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b, a + b def backward(self, grad_1, grad_2): return grad_1 * (self.b + 1), grad_2 * (self.a + 1) class Simple(Module): def __init__(self, a, b): super().__init__() self.a =
Parameter(a, dtype=np.float32)
megengine.Parameter
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import copy import numpy as np import megengine.autodiff as ad import megengine.functional as F import megengine.optimizer as optimizer from megengine import Parameter from megengine import Tensor as tensor from megengine import tensor from megengine.core.tensor.function import Function from megengine.module import Module def test_single_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a): self.a = a return a * 10 def backward(self, grad_o): return grad_o * 10 class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a) return x net = Simple(av) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * 10)) np.testing.assert_almost_equal(net.a.numpy(), (av - 10)) def test_multi_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b def backward(self, grad_o): return grad_o * self.b * 2, grad_o * self.a * 3 class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv)) np.testing.assert_almost_equal(net.a.numpy(), (av - 2 * bv)) np.testing.assert_almost_equal(net.b.numpy(), (bv - 3 * av)) def test_multi_output(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b, a + b def backward(self, grad_1, grad_2): return grad_1 * (self.b + 1), grad_2 * (self.a + 1) class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b =
Parameter(b, dtype=np.float32)
megengine.Parameter
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import copy import numpy as np import megengine.autodiff as ad import megengine.functional as F import megengine.optimizer as optimizer from megengine import Parameter from megengine import Tensor as tensor from megengine import tensor from megengine.core.tensor.function import Function from megengine.module import Module def test_single_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a): self.a = a return a * 10 def backward(self, grad_o): return grad_o * 10 class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a) return x net = Simple(av) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * 10)) np.testing.assert_almost_equal(net.a.numpy(), (av - 10)) def test_multi_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b def backward(self, grad_o): return grad_o * self.b * 2, grad_o * self.a * 3 class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv)) np.testing.assert_almost_equal(net.a.numpy(), (av - 2 * bv)) np.testing.assert_almost_equal(net.b.numpy(), (bv - 3 * av)) def test_multi_output(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b, a + b def backward(self, grad_1, grad_2): return grad_1 * (self.b + 1), grad_2 * (self.a + 1) class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x, y = self.layer1(self.a, self.b) return x + y net = Simple(av, bv) gm =
ad.GradManager()
megengine.autodiff.GradManager
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import copy import numpy as np import megengine.autodiff as ad import megengine.functional as F import megengine.optimizer as optimizer from megengine import Parameter from megengine import Tensor as tensor from megengine import tensor from megengine.core.tensor.function import Function from megengine.module import Module def test_single_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a): self.a = a return a * 10 def backward(self, grad_o): return grad_o * 10 class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a) return x net = Simple(av) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * 10)) np.testing.assert_almost_equal(net.a.numpy(), (av - 10)) def test_multi_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b def backward(self, grad_o): return grad_o * self.b * 2, grad_o * self.a * 3 class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv)) np.testing.assert_almost_equal(net.a.numpy(), (av - 2 * bv)) np.testing.assert_almost_equal(net.b.numpy(), (bv - 3 * av)) def test_multi_output(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b, a + b def backward(self, grad_1, grad_2): return grad_1 * (self.b + 1), grad_2 * (self.a + 1) class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x, y = self.layer1(self.a, self.b) return x + y net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv + av + bv), decimal=6) np.testing.assert_almost_equal(net.a.numpy(), (av - bv - 1), decimal=6) np.testing.assert_almost_equal(net.b.numpy(), (bv - av - 1), decimal=6) def test_skip_invalid_grad(): data_shape = (1, 9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) c = np.random.random(data_shape).astype(np.float32) cookie = tensor(c) class EqWithFakeGrad(Function): def forward(self, a, 
b): return a + b def backward(self, grad_o): _ = grad_o return cookie, cookie class Simple(Module): def __init__(self, a, b): super().__init__() self.a =
Parameter(a, dtype=np.float32)
megengine.Parameter
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import copy import numpy as np import megengine.autodiff as ad import megengine.functional as F import megengine.optimizer as optimizer from megengine import Parameter from megengine import Tensor as tensor from megengine import tensor from megengine.core.tensor.function import Function from megengine.module import Module def test_single_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a): self.a = a return a * 10 def backward(self, grad_o): return grad_o * 10 class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a) return x net = Simple(av) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * 10)) np.testing.assert_almost_equal(net.a.numpy(), (av - 10)) def test_multi_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b def backward(self, grad_o): return grad_o * self.b * 2, grad_o * self.a * 3 class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv)) np.testing.assert_almost_equal(net.a.numpy(), (av - 2 * bv)) np.testing.assert_almost_equal(net.b.numpy(), (bv - 3 * av)) def test_multi_output(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b, a + b def backward(self, grad_1, grad_2): return grad_1 * (self.b + 1), grad_2 * (self.a + 1) class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x, y = self.layer1(self.a, self.b) return x + y net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv + av + bv), decimal=6) np.testing.assert_almost_equal(net.a.numpy(), (av - bv - 1), decimal=6) np.testing.assert_almost_equal(net.b.numpy(), (bv - av - 1), decimal=6) def test_skip_invalid_grad(): data_shape = (1, 9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) c = np.random.random(data_shape).astype(np.float32) cookie = tensor(c) class EqWithFakeGrad(Function): def forward(self, a, 
b): return a + b def backward(self, grad_o): _ = grad_o return cookie, cookie class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b =
Parameter(b, dtype=np.float32)
megengine.Parameter
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import copy import numpy as np import megengine.autodiff as ad import megengine.functional as F import megengine.optimizer as optimizer from megengine import Parameter from megengine import Tensor as tensor from megengine import tensor from megengine.core.tensor.function import Function from megengine.module import Module def test_single_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a): self.a = a return a * 10 def backward(self, grad_o): return grad_o * 10 class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a) return x net = Simple(av) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * 10)) np.testing.assert_almost_equal(net.a.numpy(), (av - 10)) def test_multi_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b def backward(self, grad_o): return grad_o * self.b * 2, grad_o * self.a * 3 class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv)) np.testing.assert_almost_equal(net.a.numpy(), (av - 2 * bv)) np.testing.assert_almost_equal(net.b.numpy(), (bv - 3 * av)) def test_multi_output(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b, a + b def backward(self, grad_1, grad_2): return grad_1 * (self.b + 1), grad_2 * (self.a + 1) class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x, y = self.layer1(self.a, self.b) return x + y net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv + av + bv), decimal=6) np.testing.assert_almost_equal(net.a.numpy(), (av - bv - 1), decimal=6) np.testing.assert_almost_equal(net.b.numpy(), (bv - av - 1), decimal=6) def test_skip_invalid_grad(): data_shape = (1, 9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) c = np.random.random(data_shape).astype(np.float32) cookie = tensor(c) class EqWithFakeGrad(Function): def forward(self, a, 
b): return a + b def backward(self, grad_o): _ = grad_o return cookie, cookie class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = EqWithFakeGrad() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) optim = optimizer.SGD(net.parameters(), lr=1.0) gm =
ad.GradManager()
megengine.autodiff.GradManager
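The prompt for this row also defines a Function whose backward discards the incoming gradient and returns a fixed tensor for both inputs, which the test then uses to check that such "fake" gradients still drive the SGD update. Re-indented from the prompt:

import numpy as np

from megengine import tensor
from megengine.core.tensor.function import Function  # import path as in the prompt

data_shape = (1, 9, 2, 6)
c = np.random.random(data_shape).astype(np.float32)
cookie = tensor(c)


class EqWithFakeGrad(Function):
    def forward(self, a, b):
        return a + b

    def backward(self, grad_o):
        _ = grad_o              # real gradient deliberately ignored
        return cookie, cookie   # both inputs receive the constant tensor as their gradient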
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import copy import numpy as np import megengine.autodiff as ad import megengine.functional as F import megengine.optimizer as optimizer from megengine import Parameter from megengine import Tensor as tensor from megengine import tensor from megengine.core.tensor.function import Function from megengine.module import Module def test_single_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a): self.a = a return a * 10 def backward(self, grad_o): return grad_o * 10 class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a) return x net = Simple(av) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * 10)) np.testing.assert_almost_equal(net.a.numpy(), (av - 10)) def test_multi_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b def backward(self, grad_o): return grad_o * self.b * 2, grad_o * self.a * 3 class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv)) np.testing.assert_almost_equal(net.a.numpy(), (av - 2 * bv)) np.testing.assert_almost_equal(net.b.numpy(), (bv - 3 * av)) def test_multi_output(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b, a + b def backward(self, grad_1, grad_2): return grad_1 * (self.b + 1), grad_2 * (self.a + 1) class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x, y = self.layer1(self.a, self.b) return x + y net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv + av + bv), decimal=6) np.testing.assert_almost_equal(net.a.numpy(), (av - bv - 1), decimal=6) np.testing.assert_almost_equal(net.b.numpy(), (bv - av - 1), decimal=6) def test_skip_invalid_grad(): data_shape = (1, 9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) c = np.random.random(data_shape).astype(np.float32) cookie = tensor(c) class EqWithFakeGrad(Function): def forward(self, a, 
b): return a + b def backward(self, grad_o): _ = grad_o return cookie, cookie class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = EqWithFakeGrad() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) optim = optimizer.SGD(net.parameters(), lr=1.0) gm = ad.GradManager().attach(net.parameters()) optim.clear_grad() with gm: loss = net().sum() gm.backward(loss) optim.step() np.testing.assert_almost_equal(net.a.numpy(), av - c) np.testing.assert_almost_equal(net.b.numpy(), bv - c) def test_ste(): class STE(Function): def forward(self, x): maxv, minv = x.max(), x.min() scale = F.maximum(maxv, -minv) / 127 return F.round(x / scale) * scale def backward(self, grad_y): return grad_y class Simple(Module): def __init__(self, a): super().__init__() self.a =
Parameter(a, dtype=np.float32)
megengine.Parameter
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import copy import numpy as np import megengine.autodiff as ad import megengine.functional as F import megengine.optimizer as optimizer from megengine import Parameter from megengine import Tensor as tensor from megengine import tensor from megengine.core.tensor.function import Function from megengine.module import Module def test_single_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a): self.a = a return a * 10 def backward(self, grad_o): return grad_o * 10 class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a) return x net = Simple(av) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * 10)) np.testing.assert_almost_equal(net.a.numpy(), (av - 10)) def test_multi_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b def backward(self, grad_o): return grad_o * self.b * 2, grad_o * self.a * 3 class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv)) np.testing.assert_almost_equal(net.a.numpy(), (av - 2 * bv)) np.testing.assert_almost_equal(net.b.numpy(), (bv - 3 * av)) def test_multi_output(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b, a + b def backward(self, grad_1, grad_2): return grad_1 * (self.b + 1), grad_2 * (self.a + 1) class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x, y = self.layer1(self.a, self.b) return x + y net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv + av + bv), decimal=6) np.testing.assert_almost_equal(net.a.numpy(), (av - bv - 1), decimal=6) np.testing.assert_almost_equal(net.b.numpy(), (bv - av - 1), decimal=6) def test_skip_invalid_grad(): data_shape = (1, 9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) c = np.random.random(data_shape).astype(np.float32) cookie = tensor(c) class EqWithFakeGrad(Function): def forward(self, a, 
b): return a + b def backward(self, grad_o): _ = grad_o return cookie, cookie class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = EqWithFakeGrad() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) optim = optimizer.SGD(net.parameters(), lr=1.0) gm = ad.GradManager().attach(net.parameters()) optim.clear_grad() with gm: loss = net().sum() gm.backward(loss) optim.step() np.testing.assert_almost_equal(net.a.numpy(), av - c) np.testing.assert_almost_equal(net.b.numpy(), bv - c) def test_ste(): class STE(Function): def forward(self, x): maxv, minv = x.max(), x.min() scale = F.maximum(maxv, -minv) / 127 return F.round(x / scale) * scale def backward(self, grad_y): return grad_y class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = STE() def forward(self): x = self.layer1(self.a) x = (x * 2.0).sum() return x data_shape = (1, 9, 2, 6) av = np.random.random(data_shape).astype(np.float32) net = Simple(av) optim = optimizer.SGD(net.parameters(), lr=1.0) gm =
ad.GradManager()
megengine.autodiff.GradManager
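In addition to the earlier tests, this row's prompt defines a straight-through estimator: the forward pass fake-quantizes the input onto a symmetric int8-style grid, while the backward pass lets the gradient through unchanged. Re-indented from the prompt:

import megengine.functional as F
from megengine.core.tensor.function import Function  # import path as in the prompt


class STE(Function):
    def forward(self, x):
        # Symmetric fake quantization: scale by max(|x|) / 127, round, then rescale.
        maxv, minv = x.max(), x.min()
        scale = F.maximum(maxv, -minv) / 127
        return F.round(x / scale) * scale

    def backward(self, grad_y):
        # Straight-through estimator: the gradient bypasses the rounding op.
        return grad_y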
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import copy import numpy as np import megengine.autodiff as ad import megengine.functional as F import megengine.optimizer as optimizer from megengine import Parameter from megengine import Tensor as tensor from megengine import tensor from megengine.core.tensor.function import Function from megengine.module import Module def test_single_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a): self.a = a return a * 10 def backward(self, grad_o): return grad_o * 10 class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a) return x net = Simple(av) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * 10)) np.testing.assert_almost_equal(net.a.numpy(), (av - 10)) def test_multi_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b def backward(self, grad_o): return grad_o * self.b * 2, grad_o * self.a * 3 class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv)) np.testing.assert_almost_equal(net.a.numpy(), (av - 2 * bv)) np.testing.assert_almost_equal(net.b.numpy(), (bv - 3 * av)) def test_multi_output(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b, a + b def backward(self, grad_1, grad_2): return grad_1 * (self.b + 1), grad_2 * (self.a + 1) class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x, y = self.layer1(self.a, self.b) return x + y net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv + av + bv), decimal=6) np.testing.assert_almost_equal(net.a.numpy(), (av - bv - 1), decimal=6) np.testing.assert_almost_equal(net.b.numpy(), (bv - av - 1), decimal=6) def test_skip_invalid_grad(): data_shape = (1, 9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) c = np.random.random(data_shape).astype(np.float32) cookie = tensor(c) class EqWithFakeGrad(Function): def forward(self, a, 
b): return a + b def backward(self, grad_o): _ = grad_o return cookie, cookie class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = EqWithFakeGrad() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) optim = optimizer.SGD(net.parameters(), lr=1.0) gm = ad.GradManager().attach(net.parameters()) optim.clear_grad() with gm: loss = net().sum() gm.backward(loss) optim.step() np.testing.assert_almost_equal(net.a.numpy(), av - c) np.testing.assert_almost_equal(net.b.numpy(), bv - c) def test_ste(): class STE(Function): def forward(self, x): maxv, minv = x.max(), x.min() scale = F.maximum(maxv, -minv) / 127 return F.round(x / scale) * scale def backward(self, grad_y): return grad_y class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = STE() def forward(self): x = self.layer1(self.a) x = (x * 2.0).sum() return x data_shape = (1, 9, 2, 6) av = np.random.random(data_shape).astype(np.float32) net = Simple(av) optim = optimizer.SGD(net.parameters(), lr=1.0) gm = ad.GradManager().attach(net.parameters()) optim.clear_grad() with gm: loss = net() gm.backward(loss.sum()) optim.step() np.testing.assert_almost_equal( net.a.numpy(), av - np.broadcast_to(np.array([2.0], dtype=np.float32), data_shape), ) def test_deepcopy(): class Sigmoid(Function): def __init__(self, param): super().__init__() self.param = param def forward(self, x): y = 1 / (1 + F.exp(-x)) self.save_for_backward(y) return y def backward(self, grad_y): (y,) = self.saved_tensors return grad_y * y * (1 - y) origin = Sigmoid(0) new = copy.deepcopy(Sigmoid(0)) assert new.param == origin.param def test_none_in_out_grad(): class Test(Function): def forward(self, a, b): return a, b def backward(self, grad_a, grad_b): assert grad_b is None return (grad_a, 0.0) class Simple(Module): def __init__(self, a, b): super().__init__() self.a =
Parameter(a, dtype=np.float32)
megengine.Parameter
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import copy import numpy as np import megengine.autodiff as ad import megengine.functional as F import megengine.optimizer as optimizer from megengine import Parameter from megengine import Tensor as tensor from megengine import tensor from megengine.core.tensor.function import Function from megengine.module import Module def test_single_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a): self.a = a return a * 10 def backward(self, grad_o): return grad_o * 10 class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a) return x net = Simple(av) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * 10)) np.testing.assert_almost_equal(net.a.numpy(), (av - 10)) def test_multi_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b def backward(self, grad_o): return grad_o * self.b * 2, grad_o * self.a * 3 class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv)) np.testing.assert_almost_equal(net.a.numpy(), (av - 2 * bv)) np.testing.assert_almost_equal(net.b.numpy(), (bv - 3 * av)) def test_multi_output(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b, a + b def backward(self, grad_1, grad_2): return grad_1 * (self.b + 1), grad_2 * (self.a + 1) class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x, y = self.layer1(self.a, self.b) return x + y net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv + av + bv), decimal=6) np.testing.assert_almost_equal(net.a.numpy(), (av - bv - 1), decimal=6) np.testing.assert_almost_equal(net.b.numpy(), (bv - av - 1), decimal=6) def test_skip_invalid_grad(): data_shape = (1, 9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) c = np.random.random(data_shape).astype(np.float32) cookie = tensor(c) class EqWithFakeGrad(Function): def forward(self, a, 
b): return a + b def backward(self, grad_o): _ = grad_o return cookie, cookie class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = EqWithFakeGrad() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) optim = optimizer.SGD(net.parameters(), lr=1.0) gm = ad.GradManager().attach(net.parameters()) optim.clear_grad() with gm: loss = net().sum() gm.backward(loss) optim.step() np.testing.assert_almost_equal(net.a.numpy(), av - c) np.testing.assert_almost_equal(net.b.numpy(), bv - c) def test_ste(): class STE(Function): def forward(self, x): maxv, minv = x.max(), x.min() scale = F.maximum(maxv, -minv) / 127 return F.round(x / scale) * scale def backward(self, grad_y): return grad_y class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = STE() def forward(self): x = self.layer1(self.a) x = (x * 2.0).sum() return x data_shape = (1, 9, 2, 6) av = np.random.random(data_shape).astype(np.float32) net = Simple(av) optim = optimizer.SGD(net.parameters(), lr=1.0) gm = ad.GradManager().attach(net.parameters()) optim.clear_grad() with gm: loss = net() gm.backward(loss.sum()) optim.step() np.testing.assert_almost_equal( net.a.numpy(), av - np.broadcast_to(np.array([2.0], dtype=np.float32), data_shape), ) def test_deepcopy(): class Sigmoid(Function): def __init__(self, param): super().__init__() self.param = param def forward(self, x): y = 1 / (1 + F.exp(-x)) self.save_for_backward(y) return y def backward(self, grad_y): (y,) = self.saved_tensors return grad_y * y * (1 - y) origin = Sigmoid(0) new = copy.deepcopy(Sigmoid(0)) assert new.param == origin.param def test_none_in_out_grad(): class Test(Function): def forward(self, a, b): return a, b def backward(self, grad_a, grad_b): assert grad_b is None return (grad_a, 0.0) class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b =
Parameter(b, dtype=np.float32)
megengine.Parameter
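A minimal usage sketch for the API completed in this row (megengine.Parameter), assuming MegEngine and NumPy are installed; the array shape is illustrative, not part of the dataset.

import numpy as np
import megengine as mge

# Wrap a NumPy array as a trainable Parameter, as the Simple modules above do.
w = mge.Parameter(np.random.random((2, 3)), dtype=np.float32)
print(w.shape)  # (2, 3)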
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import copy import numpy as np import megengine.autodiff as ad import megengine.functional as F import megengine.optimizer as optimizer from megengine import Parameter from megengine import Tensor as tensor from megengine import tensor from megengine.core.tensor.function import Function from megengine.module import Module def test_single_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a): self.a = a return a * 10 def backward(self, grad_o): return grad_o * 10 class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a) return x net = Simple(av) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * 10)) np.testing.assert_almost_equal(net.a.numpy(), (av - 10)) def test_multi_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b def backward(self, grad_o): return grad_o * self.b * 2, grad_o * self.a * 3 class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv)) np.testing.assert_almost_equal(net.a.numpy(), (av - 2 * bv)) np.testing.assert_almost_equal(net.b.numpy(), (bv - 3 * av)) def test_multi_output(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b, a + b def backward(self, grad_1, grad_2): return grad_1 * (self.b + 1), grad_2 * (self.a + 1) class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x, y = self.layer1(self.a, self.b) return x + y net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv + av + bv), decimal=6) np.testing.assert_almost_equal(net.a.numpy(), (av - bv - 1), decimal=6) np.testing.assert_almost_equal(net.b.numpy(), (bv - av - 1), decimal=6) def test_skip_invalid_grad(): data_shape = (1, 9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) c = np.random.random(data_shape).astype(np.float32) cookie = tensor(c) class EqWithFakeGrad(Function): def forward(self, a, 
b): return a + b def backward(self, grad_o): _ = grad_o return cookie, cookie class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = EqWithFakeGrad() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) optim = optimizer.SGD(net.parameters(), lr=1.0) gm = ad.GradManager().attach(net.parameters()) optim.clear_grad() with gm: loss = net().sum() gm.backward(loss) optim.step() np.testing.assert_almost_equal(net.a.numpy(), av - c) np.testing.assert_almost_equal(net.b.numpy(), bv - c) def test_ste(): class STE(Function): def forward(self, x): maxv, minv = x.max(), x.min() scale = F.maximum(maxv, -minv) / 127 return F.round(x / scale) * scale def backward(self, grad_y): return grad_y class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = STE() def forward(self): x = self.layer1(self.a) x = (x * 2.0).sum() return x data_shape = (1, 9, 2, 6) av = np.random.random(data_shape).astype(np.float32) net = Simple(av) optim = optimizer.SGD(net.parameters(), lr=1.0) gm = ad.GradManager().attach(net.parameters()) optim.clear_grad() with gm: loss = net() gm.backward(loss.sum()) optim.step() np.testing.assert_almost_equal( net.a.numpy(), av - np.broadcast_to(np.array([2.0], dtype=np.float32), data_shape), ) def test_deepcopy(): class Sigmoid(Function): def __init__(self, param): super().__init__() self.param = param def forward(self, x): y = 1 / (1 + F.exp(-x)) self.save_for_backward(y) return y def backward(self, grad_y): (y,) = self.saved_tensors return grad_y * y * (1 - y) origin = Sigmoid(0) new = copy.deepcopy(Sigmoid(0)) assert new.param == origin.param def test_none_in_out_grad(): class Test(Function): def forward(self, a, b): return a, b def backward(self, grad_a, grad_b): assert grad_b is None return (grad_a, 0.0) class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer = Test() def forward(self): aa, bb = self.layer(self.a, self.b) return aa, bb a = tensor(np.array([1.0], dtype=np.float32)) b = tensor(np.array([2.0], dtype=np.float32)) net = Simple(a, b) optim = optimizer.SGD(net.parameters(), lr=1.0) gm =
ad.GradManager()
megengine.autodiff.GradManager
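A minimal sketch of the training pattern these rows complete (megengine.autodiff.GradManager recording gradients for an SGD step); the single-parameter setup is illustrative, assuming MegEngine is installed.

import numpy as np
import megengine as mge
import megengine.autodiff as ad
import megengine.optimizer as optimizer

w = mge.Parameter(np.ones((3,)), dtype=np.float32)
gm = ad.GradManager().attach([w])      # record gradients for w
opt = optimizer.SGD([w], lr=0.1)
opt.clear_grad()
with gm:
    loss = (w * w).sum()
    gm.backward(loss)                  # fills w.grad
opt.step()                             # w <- w - lr * grad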
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import copy import numpy as np import megengine.autodiff as ad import megengine.functional as F import megengine.optimizer as optimizer from megengine import Parameter from megengine import Tensor as tensor from megengine import tensor from megengine.core.tensor.function import Function from megengine.module import Module def test_single_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a): self.a = a return a * 10 def backward(self, grad_o): return grad_o * 10 class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a) return x net = Simple(av) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * 10)) np.testing.assert_almost_equal(net.a.numpy(), (av - 10)) def test_multi_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b def backward(self, grad_o): return grad_o * self.b * 2, grad_o * self.a * 3 class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv)) np.testing.assert_almost_equal(net.a.numpy(), (av - 2 * bv)) np.testing.assert_almost_equal(net.b.numpy(), (bv - 3 * av)) def test_multi_output(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b, a + b def backward(self, grad_1, grad_2): return grad_1 * (self.b + 1), grad_2 * (self.a + 1) class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x, y = self.layer1(self.a, self.b) return x + y net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv + av + bv), decimal=6) np.testing.assert_almost_equal(net.a.numpy(), (av - bv - 1), decimal=6) np.testing.assert_almost_equal(net.b.numpy(), (bv - av - 1), decimal=6) def test_skip_invalid_grad(): data_shape = (1, 9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) c = np.random.random(data_shape).astype(np.float32) cookie = tensor(c) class EqWithFakeGrad(Function): def forward(self, a, 
b): return a + b def backward(self, grad_o): _ = grad_o return cookie, cookie class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = EqWithFakeGrad() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) optim = optimizer.SGD(net.parameters(), lr=1.0) gm = ad.GradManager().attach(net.parameters()) optim.clear_grad() with gm: loss = net().sum() gm.backward(loss) optim.step() np.testing.assert_almost_equal(net.a.numpy(), av - c) np.testing.assert_almost_equal(net.b.numpy(), bv - c) def test_ste(): class STE(Function): def forward(self, x): maxv, minv = x.max(), x.min() scale = F.maximum(maxv, -minv) / 127 return F.round(x / scale) * scale def backward(self, grad_y): return grad_y class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = STE() def forward(self): x = self.layer1(self.a) x = (x * 2.0).sum() return x data_shape = (1, 9, 2, 6) av = np.random.random(data_shape).astype(np.float32) net = Simple(av) optim = optimizer.SGD(net.parameters(), lr=1.0) gm = ad.GradManager().attach(net.parameters()) optim.clear_grad() with gm: loss = net() gm.backward(loss.sum()) optim.step() np.testing.assert_almost_equal( net.a.numpy(), av - np.broadcast_to(np.array([2.0], dtype=np.float32), data_shape), ) def test_deepcopy(): class Sigmoid(Function): def __init__(self, param): super().__init__() self.param = param def forward(self, x): y = 1 / (1 + F.exp(-x)) self.save_for_backward(y) return y def backward(self, grad_y): (y,) = self.saved_tensors return grad_y * y * (1 - y) origin = Sigmoid(0) new = copy.deepcopy(Sigmoid(0)) assert new.param == origin.param def test_none_in_out_grad(): class Test(Function): def forward(self, a, b): return a, b def backward(self, grad_a, grad_b): assert grad_b is None return (grad_a, 0.0) class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer = Test() def forward(self): aa, bb = self.layer(self.a, self.b) return aa, bb a = tensor(np.array([1.0], dtype=np.float32)) b = tensor(np.array([2.0], dtype=np.float32)) net = Simple(a, b) optim = optimizer.SGD(net.parameters(), lr=1.0) gm = ad.GradManager().attach(net.parameters()) optim.clear_grad() with gm: loss, _ = net() gm.backward(loss) optim.step() np.testing.assert_almost_equal( net.a.numpy(), np.array([1.0 - 1.0], dtype=np.float32) ) np.testing.assert_almost_equal( net.b.numpy(), np.array([2.0 - 0.0], dtype=np.float32) ) def test_zero_grad(): class StopGradient(Function): def forward(self, a): return a def backward(self, *_): return None class Simple(Module): def __init__(self, a): super().__init__() self.a =
Parameter(a, dtype=np.float32)
megengine.Parameter
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import copy import numpy as np import megengine.autodiff as ad import megengine.functional as F import megengine.optimizer as optimizer from megengine import Parameter from megengine import Tensor as tensor from megengine import tensor from megengine.core.tensor.function import Function from megengine.module import Module def test_single_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a): self.a = a return a * 10 def backward(self, grad_o): return grad_o * 10 class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a) return x net = Simple(av) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * 10)) np.testing.assert_almost_equal(net.a.numpy(), (av - 10)) def test_multi_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b def backward(self, grad_o): return grad_o * self.b * 2, grad_o * self.a * 3 class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv)) np.testing.assert_almost_equal(net.a.numpy(), (av - 2 * bv)) np.testing.assert_almost_equal(net.b.numpy(), (bv - 3 * av)) def test_multi_output(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b, a + b def backward(self, grad_1, grad_2): return grad_1 * (self.b + 1), grad_2 * (self.a + 1) class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x, y = self.layer1(self.a, self.b) return x + y net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv + av + bv), decimal=6) np.testing.assert_almost_equal(net.a.numpy(), (av - bv - 1), decimal=6) np.testing.assert_almost_equal(net.b.numpy(), (bv - av - 1), decimal=6) def test_skip_invalid_grad(): data_shape = (1, 9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) c = np.random.random(data_shape).astype(np.float32) cookie = tensor(c) class EqWithFakeGrad(Function): def forward(self, a, 
b): return a + b def backward(self, grad_o): _ = grad_o return cookie, cookie class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = EqWithFakeGrad() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) optim = optimizer.SGD(net.parameters(), lr=1.0) gm = ad.GradManager().attach(net.parameters()) optim.clear_grad() with gm: loss = net().sum() gm.backward(loss) optim.step() np.testing.assert_almost_equal(net.a.numpy(), av - c) np.testing.assert_almost_equal(net.b.numpy(), bv - c) def test_ste(): class STE(Function): def forward(self, x): maxv, minv = x.max(), x.min() scale = F.maximum(maxv, -minv) / 127 return F.round(x / scale) * scale def backward(self, grad_y): return grad_y class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = STE() def forward(self): x = self.layer1(self.a) x = (x * 2.0).sum() return x data_shape = (1, 9, 2, 6) av = np.random.random(data_shape).astype(np.float32) net = Simple(av) optim = optimizer.SGD(net.parameters(), lr=1.0) gm = ad.GradManager().attach(net.parameters()) optim.clear_grad() with gm: loss = net() gm.backward(loss.sum()) optim.step() np.testing.assert_almost_equal( net.a.numpy(), av - np.broadcast_to(np.array([2.0], dtype=np.float32), data_shape), ) def test_deepcopy(): class Sigmoid(Function): def __init__(self, param): super().__init__() self.param = param def forward(self, x): y = 1 / (1 + F.exp(-x)) self.save_for_backward(y) return y def backward(self, grad_y): (y,) = self.saved_tensors return grad_y * y * (1 - y) origin = Sigmoid(0) new = copy.deepcopy(Sigmoid(0)) assert new.param == origin.param def test_none_in_out_grad(): class Test(Function): def forward(self, a, b): return a, b def backward(self, grad_a, grad_b): assert grad_b is None return (grad_a, 0.0) class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer = Test() def forward(self): aa, bb = self.layer(self.a, self.b) return aa, bb a = tensor(np.array([1.0], dtype=np.float32)) b = tensor(np.array([2.0], dtype=np.float32)) net = Simple(a, b) optim = optimizer.SGD(net.parameters(), lr=1.0) gm = ad.GradManager().attach(net.parameters()) optim.clear_grad() with gm: loss, _ = net() gm.backward(loss) optim.step() np.testing.assert_almost_equal( net.a.numpy(), np.array([1.0 - 1.0], dtype=np.float32) ) np.testing.assert_almost_equal( net.b.numpy(), np.array([2.0 - 0.0], dtype=np.float32) ) def test_zero_grad(): class StopGradient(Function): def forward(self, a): return a def backward(self, *_): return None class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer = StopGradient() def forward(self): b = self.a * 3.0 c = self.a * 4.0 return self.layer(b) + c a = tensor(np.array([1.0], dtype=np.float32)) net = Simple(a) optim = optimizer.SGD(net.parameters(), lr=1.0) gm =
ad.GradManager()
megengine.autodiff.GradManager
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import copy import numpy as np import megengine.autodiff as ad import megengine.functional as F import megengine.optimizer as optimizer from megengine import Parameter from megengine import Tensor as tensor from megengine import tensor from megengine.core.tensor.function import Function from megengine.module import Module def test_single_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a): self.a = a return a * 10 def backward(self, grad_o): return grad_o * 10 class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a) return x net = Simple(av) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * 10)) np.testing.assert_almost_equal(net.a.numpy(), (av - 10)) def test_multi_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b def backward(self, grad_o): return grad_o * self.b * 2, grad_o * self.a * 3 class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv)) np.testing.assert_almost_equal(net.a.numpy(), (av - 2 * bv)) np.testing.assert_almost_equal(net.b.numpy(), (bv - 3 * av)) def test_multi_output(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b, a + b def backward(self, grad_1, grad_2): return grad_1 * (self.b + 1), grad_2 * (self.a + 1) class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x, y = self.layer1(self.a, self.b) return x + y net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv + av + bv), decimal=6) np.testing.assert_almost_equal(net.a.numpy(), (av - bv - 1), decimal=6) np.testing.assert_almost_equal(net.b.numpy(), (bv - av - 1), decimal=6) def test_skip_invalid_grad(): data_shape = (1, 9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) c = np.random.random(data_shape).astype(np.float32) cookie = tensor(c) class EqWithFakeGrad(Function): def forward(self, a, 
b): return a + b def backward(self, grad_o): _ = grad_o return cookie, cookie class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = EqWithFakeGrad() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) optim = optimizer.SGD(net.parameters(), lr=1.0) gm = ad.GradManager().attach(net.parameters()) optim.clear_grad() with gm: loss = net().sum() gm.backward(loss) optim.step() np.testing.assert_almost_equal(net.a.numpy(), av - c) np.testing.assert_almost_equal(net.b.numpy(), bv - c) def test_ste(): class STE(Function): def forward(self, x): maxv, minv = x.max(), x.min() scale =
F.maximum(maxv, -minv)
megengine.functional.maximum
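A small sketch of the elementwise maximum used in the STE row above (megengine.functional.maximum) to build a symmetric max-abs scale; the values are illustrative.

import megengine.functional as F
from megengine import tensor

x = tensor([-3.0, 2.0, 5.0])
scale = F.maximum(x.max(), -x.min()) / 127   # max(|x|) / 127, as in STE.forward
print(scale.numpy())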
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import copy import numpy as np import megengine.autodiff as ad import megengine.functional as F import megengine.optimizer as optimizer from megengine import Parameter from megengine import Tensor as tensor from megengine import tensor from megengine.core.tensor.function import Function from megengine.module import Module def test_single_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a): self.a = a return a * 10 def backward(self, grad_o): return grad_o * 10 class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a) return x net = Simple(av) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * 10)) np.testing.assert_almost_equal(net.a.numpy(), (av - 10)) def test_multi_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b def backward(self, grad_o): return grad_o * self.b * 2, grad_o * self.a * 3 class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv)) np.testing.assert_almost_equal(net.a.numpy(), (av - 2 * bv)) np.testing.assert_almost_equal(net.b.numpy(), (bv - 3 * av)) def test_multi_output(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b, a + b def backward(self, grad_1, grad_2): return grad_1 * (self.b + 1), grad_2 * (self.a + 1) class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x, y = self.layer1(self.a, self.b) return x + y net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv + av + bv), decimal=6) np.testing.assert_almost_equal(net.a.numpy(), (av - bv - 1), decimal=6) np.testing.assert_almost_equal(net.b.numpy(), (bv - av - 1), decimal=6) def test_skip_invalid_grad(): data_shape = (1, 9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) c = np.random.random(data_shape).astype(np.float32) cookie = tensor(c) class EqWithFakeGrad(Function): def forward(self, a, 
b): return a + b def backward(self, grad_o): _ = grad_o return cookie, cookie class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = EqWithFakeGrad() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) optim = optimizer.SGD(net.parameters(), lr=1.0) gm = ad.GradManager().attach(net.parameters()) optim.clear_grad() with gm: loss = net().sum() gm.backward(loss) optim.step() np.testing.assert_almost_equal(net.a.numpy(), av - c) np.testing.assert_almost_equal(net.b.numpy(), bv - c) def test_ste(): class STE(Function): def forward(self, x): maxv, minv = x.max(), x.min() scale = F.maximum(maxv, -minv) / 127 return
F.round(x / scale)
megengine.functional.round
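A sketch of the fake-quantization rounding this completion performs (megengine.functional.round); the scale and inputs are illustrative.

import megengine.functional as F
from megengine import tensor

x = tensor([0.4, 1.6, -2.3])
scale = 0.5
xq = F.round(x / scale) * scale   # snap each value to a multiple of the scale
print(xq.numpy())                 # [ 0.5  1.5 -2.5]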
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import copy import numpy as np import megengine.autodiff as ad import megengine.functional as F import megengine.optimizer as optimizer from megengine import Parameter from megengine import Tensor as tensor from megengine import tensor from megengine.core.tensor.function import Function from megengine.module import Module def test_single_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a): self.a = a return a * 10 def backward(self, grad_o): return grad_o * 10 class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a) return x net = Simple(av) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * 10)) np.testing.assert_almost_equal(net.a.numpy(), (av - 10)) def test_multi_input(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b def backward(self, grad_o): return grad_o * self.b * 2, grad_o * self.a * 3 class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv)) np.testing.assert_almost_equal(net.a.numpy(), (av - 2 * bv)) np.testing.assert_almost_equal(net.b.numpy(), (bv - 3 * av)) def test_multi_output(): data_shape = (9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) class MulFunc(Function): def forward(self, a, b): self.a = a self.b = b return a * b, a + b def backward(self, grad_1, grad_2): return grad_1 * (self.b + 1), grad_2 * (self.a + 1) class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = MulFunc() def forward(self): x, y = self.layer1(self.a, self.b) return x + y net = Simple(av, bv) gm = ad.GradManager().attach(net.parameters()) opt = optimizer.SGD(net.parameters(), lr=1.0) opt.clear_grad() with gm: loss = net() gm.backward(loss.sum()) opt.step() np.testing.assert_almost_equal(loss.numpy(), (av * bv + av + bv), decimal=6) np.testing.assert_almost_equal(net.a.numpy(), (av - bv - 1), decimal=6) np.testing.assert_almost_equal(net.b.numpy(), (bv - av - 1), decimal=6) def test_skip_invalid_grad(): data_shape = (1, 9, 2, 6) av = np.random.random(data_shape).astype(np.float32) bv = np.random.random(data_shape).astype(np.float32) c = np.random.random(data_shape).astype(np.float32) cookie = tensor(c) class EqWithFakeGrad(Function): def forward(self, a, 
b): return a + b def backward(self, grad_o): _ = grad_o return cookie, cookie class Simple(Module): def __init__(self, a, b): super().__init__() self.a = Parameter(a, dtype=np.float32) self.b = Parameter(b, dtype=np.float32) self.layer1 = EqWithFakeGrad() def forward(self): x = self.layer1(self.a, self.b) return x net = Simple(av, bv) optim = optimizer.SGD(net.parameters(), lr=1.0) gm = ad.GradManager().attach(net.parameters()) optim.clear_grad() with gm: loss = net().sum() gm.backward(loss) optim.step() np.testing.assert_almost_equal(net.a.numpy(), av - c) np.testing.assert_almost_equal(net.b.numpy(), bv - c) def test_ste(): class STE(Function): def forward(self, x): maxv, minv = x.max(), x.min() scale = F.maximum(maxv, -minv) / 127 return F.round(x / scale) * scale def backward(self, grad_y): return grad_y class Simple(Module): def __init__(self, a): super().__init__() self.a = Parameter(a, dtype=np.float32) self.layer1 = STE() def forward(self): x = self.layer1(self.a) x = (x * 2.0).sum() return x data_shape = (1, 9, 2, 6) av = np.random.random(data_shape).astype(np.float32) net = Simple(av) optim = optimizer.SGD(net.parameters(), lr=1.0) gm = ad.GradManager().attach(net.parameters()) optim.clear_grad() with gm: loss = net() gm.backward(loss.sum()) optim.step() np.testing.assert_almost_equal( net.a.numpy(), av - np.broadcast_to(np.array([2.0], dtype=np.float32), data_shape), ) def test_deepcopy(): class Sigmoid(Function): def __init__(self, param): super().__init__() self.param = param def forward(self, x): y = 1 / (1 +
F.exp(-x)
megengine.functional.exp
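A sketch of the sigmoid written with megengine.functional.exp, matching Sigmoid.forward in the deepcopy test above; the input values are illustrative.

import megengine.functional as F
from megengine import tensor

x = tensor([0.0, 2.0, -2.0])
y = 1 / (1 + F.exp(-x))   # elementwise sigmoid
print(y.numpy())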
import megengine as mge import megengine.functional as F from megengine import tensor import numpy as np from megengine.functional.nn import nms from config import config from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, \ filter_boxes_opr, box_overlap_opr # from bbox_opr import box_overlap_opr import pdb def find_top_rpn_proposals(is_train, rpn_bbox_offsets_list, rpn_cls_prob_list, all_anchors_list, im_info): prev_nms_top_n = config.train_prev_nms_top_n \ if is_train else config.test_prev_nms_top_n post_nms_top_n = config.train_post_nms_top_n \ if is_train else config.test_post_nms_top_n batch_per_gpu = config.batch_per_gpu if is_train else 1 nms_threshold = config.rpn_nms_threshold box_min_size = config.rpn_min_box_size bbox_normalize_targets = config.rpn_bbox_normalize_targets bbox_normalize_means = config.bbox_normalize_means bbox_normalize_stds = config.bbox_normalize_stds list_size = len(rpn_bbox_offsets_list) return_rois, return_probs = [], [] batch_per_gpu = rpn_cls_prob_list[0].shape[0] for bid in range(batch_per_gpu): batch_proposals_list = [] batch_probs_list = [] for l in range(list_size): # get proposals and probs offsets = rpn_bbox_offsets_list[l][bid] \ .transpose(1, 2, 0).reshape(-1, 4) if bbox_normalize_targets: std_opr = tensor(config.bbox_normalize_stds[None, :]) mean_opr = tensor(config.bbox_normalize_means[None, :]) pred_offsets = pred_offsets * std_opr pred_offsets = pred_offsets + mean_opr all_anchors = all_anchors_list[l] proposals = bbox_transform_inv_opr(all_anchors, offsets) if config.anchor_within_border: proposals = clip_boxes_opr(proposals, im_info[bid, :]) probs = rpn_cls_prob_list[l][bid] \ .transpose(1,2,0).reshape(-1, 2) probs = F.softmax(probs)[:, 1] # gather the proposals and probs batch_proposals_list.append(proposals) batch_probs_list.append(probs) batch_proposals =
F.concat(batch_proposals_list, axis=0)
megengine.functional.concat
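A minimal sketch of megengine.functional.concat, which the RPN code above uses to merge per-level proposals and scores; the shapes are illustrative.

import megengine.functional as F
from megengine import tensor

a = tensor([[1.0, 2.0], [3.0, 4.0]])
b = tensor([[5.0, 6.0]])
c = F.concat([a, b], axis=0)   # stack rows from both feature levels
print(c.shape)                 # (3, 2)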
import megengine as mge import megengine.functional as F from megengine import tensor import numpy as np from megengine.functional.nn import nms from config import config from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, \ filter_boxes_opr, box_overlap_opr # from bbox_opr import box_overlap_opr import pdb def find_top_rpn_proposals(is_train, rpn_bbox_offsets_list, rpn_cls_prob_list, all_anchors_list, im_info): prev_nms_top_n = config.train_prev_nms_top_n \ if is_train else config.test_prev_nms_top_n post_nms_top_n = config.train_post_nms_top_n \ if is_train else config.test_post_nms_top_n batch_per_gpu = config.batch_per_gpu if is_train else 1 nms_threshold = config.rpn_nms_threshold box_min_size = config.rpn_min_box_size bbox_normalize_targets = config.rpn_bbox_normalize_targets bbox_normalize_means = config.bbox_normalize_means bbox_normalize_stds = config.bbox_normalize_stds list_size = len(rpn_bbox_offsets_list) return_rois, return_probs = [], [] batch_per_gpu = rpn_cls_prob_list[0].shape[0] for bid in range(batch_per_gpu): batch_proposals_list = [] batch_probs_list = [] for l in range(list_size): # get proposals and probs offsets = rpn_bbox_offsets_list[l][bid] \ .transpose(1, 2, 0).reshape(-1, 4) if bbox_normalize_targets: std_opr = tensor(config.bbox_normalize_stds[None, :]) mean_opr = tensor(config.bbox_normalize_means[None, :]) pred_offsets = pred_offsets * std_opr pred_offsets = pred_offsets + mean_opr all_anchors = all_anchors_list[l] proposals = bbox_transform_inv_opr(all_anchors, offsets) if config.anchor_within_border: proposals = clip_boxes_opr(proposals, im_info[bid, :]) probs = rpn_cls_prob_list[l][bid] \ .transpose(1,2,0).reshape(-1, 2) probs = F.softmax(probs)[:, 1] # gather the proposals and probs batch_proposals_list.append(proposals) batch_probs_list.append(probs) batch_proposals = F.concat(batch_proposals_list, axis=0) batch_probs =
F.concat(batch_probs_list, axis=0)
megengine.functional.concat
import megengine as mge import megengine.functional as F from megengine import tensor import numpy as np from megengine.functional.nn import nms from config import config from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, \ filter_boxes_opr, box_overlap_opr # from bbox_opr import box_overlap_opr import pdb def find_top_rpn_proposals(is_train, rpn_bbox_offsets_list, rpn_cls_prob_list, all_anchors_list, im_info): prev_nms_top_n = config.train_prev_nms_top_n \ if is_train else config.test_prev_nms_top_n post_nms_top_n = config.train_post_nms_top_n \ if is_train else config.test_post_nms_top_n batch_per_gpu = config.batch_per_gpu if is_train else 1 nms_threshold = config.rpn_nms_threshold box_min_size = config.rpn_min_box_size bbox_normalize_targets = config.rpn_bbox_normalize_targets bbox_normalize_means = config.bbox_normalize_means bbox_normalize_stds = config.bbox_normalize_stds list_size = len(rpn_bbox_offsets_list) return_rois, return_probs = [], [] batch_per_gpu = rpn_cls_prob_list[0].shape[0] for bid in range(batch_per_gpu): batch_proposals_list = [] batch_probs_list = [] for l in range(list_size): # get proposals and probs offsets = rpn_bbox_offsets_list[l][bid] \ .transpose(1, 2, 0).reshape(-1, 4) if bbox_normalize_targets: std_opr = tensor(config.bbox_normalize_stds[None, :]) mean_opr = tensor(config.bbox_normalize_means[None, :]) pred_offsets = pred_offsets * std_opr pred_offsets = pred_offsets + mean_opr all_anchors = all_anchors_list[l] proposals = bbox_transform_inv_opr(all_anchors, offsets) if config.anchor_within_border: proposals = clip_boxes_opr(proposals, im_info[bid, :]) probs = rpn_cls_prob_list[l][bid] \ .transpose(1,2,0).reshape(-1, 2) probs = F.softmax(probs)[:, 1] # gather the proposals and probs batch_proposals_list.append(proposals) batch_probs_list.append(probs) batch_proposals = F.concat(batch_proposals_list, axis=0) batch_probs = F.concat(batch_probs_list, axis=0) # filter the boxes with small size. wh = batch_proposals[:, 2:4] - batch_proposals[:, :2] + 1 thresh = box_min_size * im_info[bid, 2] keep_mask = F.prod((wh >= thresh), axis=1) keep_mask = keep_mask + F.equal(keep_mask.sum(), 0) keep_mask, inds =
F.cond_take(keep_mask > 0, keep_mask)
megengine.functional.cond_take
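A small sketch of megengine.functional.cond_take, which the code above uses to keep only the proposals whose mask is nonzero; the mask here is illustrative.

import megengine.functional as F
from megengine import tensor

x = tensor([0.1, 0.8, 0.3, 0.9])
vals, inds = F.cond_take(x > 0.5, x)   # values and flat indices where the mask holds
print(vals.numpy(), inds.numpy())      # [0.8 0.9] [1 3]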
import megengine as mge import megengine.functional as F from megengine import tensor import numpy as np from megengine.functional.nn import nms from config import config from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, \ filter_boxes_opr, box_overlap_opr # from bbox_opr import box_overlap_opr import pdb def find_top_rpn_proposals(is_train, rpn_bbox_offsets_list, rpn_cls_prob_list, all_anchors_list, im_info): prev_nms_top_n = config.train_prev_nms_top_n \ if is_train else config.test_prev_nms_top_n post_nms_top_n = config.train_post_nms_top_n \ if is_train else config.test_post_nms_top_n batch_per_gpu = config.batch_per_gpu if is_train else 1 nms_threshold = config.rpn_nms_threshold box_min_size = config.rpn_min_box_size bbox_normalize_targets = config.rpn_bbox_normalize_targets bbox_normalize_means = config.bbox_normalize_means bbox_normalize_stds = config.bbox_normalize_stds list_size = len(rpn_bbox_offsets_list) return_rois, return_probs = [], [] batch_per_gpu = rpn_cls_prob_list[0].shape[0] for bid in range(batch_per_gpu): batch_proposals_list = [] batch_probs_list = [] for l in range(list_size): # get proposals and probs offsets = rpn_bbox_offsets_list[l][bid] \ .transpose(1, 2, 0).reshape(-1, 4) if bbox_normalize_targets: std_opr = tensor(config.bbox_normalize_stds[None, :]) mean_opr = tensor(config.bbox_normalize_means[None, :]) pred_offsets = pred_offsets * std_opr pred_offsets = pred_offsets + mean_opr all_anchors = all_anchors_list[l] proposals = bbox_transform_inv_opr(all_anchors, offsets) if config.anchor_within_border: proposals = clip_boxes_opr(proposals, im_info[bid, :]) probs = rpn_cls_prob_list[l][bid] \ .transpose(1,2,0).reshape(-1, 2) probs = F.softmax(probs)[:, 1] # gather the proposals and probs batch_proposals_list.append(proposals) batch_probs_list.append(probs) batch_proposals = F.concat(batch_proposals_list, axis=0) batch_probs = F.concat(batch_probs_list, axis=0) # filter the boxes with small size. wh = batch_proposals[:, 2:4] - batch_proposals[:, :2] + 1 thresh = box_min_size * im_info[bid, 2] keep_mask = F.prod((wh >= thresh), axis=1) keep_mask = keep_mask + F.equal(keep_mask.sum(), 0) keep_mask, inds = F.cond_take(keep_mask > 0, keep_mask) inds = inds.astype(np.int32) # batch_proposals = F.nn.indexing_one_hot(batch_proposals, inds, 0) # batch_probs = F.nn.indexing_one_hot(batch_probs, inds, 0) batch_proposals, batch_probs = batch_proposals[inds], batch_probs[inds] # prev_nms_top_n num_proposals =
F.minimum(prev_nms_top_n, batch_proposals.shape[0])
megengine.functional.minimum
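A sketch of the elementwise minimum used above to cap the number of kept proposals at prev_nms_top_n (megengine.functional.minimum); the operands are illustrative.

import megengine.functional as F
from megengine import tensor

num = F.minimum(tensor(2000), tensor(12))   # cannot keep more boxes than exist
print(num.numpy())                          # 12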
import megengine as mge import megengine.functional as F from megengine import tensor import numpy as np from megengine.functional.nn import nms from config import config from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, \ filter_boxes_opr, box_overlap_opr # from bbox_opr import box_overlap_opr import pdb def find_top_rpn_proposals(is_train, rpn_bbox_offsets_list, rpn_cls_prob_list, all_anchors_list, im_info): prev_nms_top_n = config.train_prev_nms_top_n \ if is_train else config.test_prev_nms_top_n post_nms_top_n = config.train_post_nms_top_n \ if is_train else config.test_post_nms_top_n batch_per_gpu = config.batch_per_gpu if is_train else 1 nms_threshold = config.rpn_nms_threshold box_min_size = config.rpn_min_box_size bbox_normalize_targets = config.rpn_bbox_normalize_targets bbox_normalize_means = config.bbox_normalize_means bbox_normalize_stds = config.bbox_normalize_stds list_size = len(rpn_bbox_offsets_list) return_rois, return_probs = [], [] batch_per_gpu = rpn_cls_prob_list[0].shape[0] for bid in range(batch_per_gpu): batch_proposals_list = [] batch_probs_list = [] for l in range(list_size): # get proposals and probs offsets = rpn_bbox_offsets_list[l][bid] \ .transpose(1, 2, 0).reshape(-1, 4) if bbox_normalize_targets: std_opr = tensor(config.bbox_normalize_stds[None, :]) mean_opr = tensor(config.bbox_normalize_means[None, :]) pred_offsets = pred_offsets * std_opr pred_offsets = pred_offsets + mean_opr all_anchors = all_anchors_list[l] proposals = bbox_transform_inv_opr(all_anchors, offsets) if config.anchor_within_border: proposals = clip_boxes_opr(proposals, im_info[bid, :]) probs = rpn_cls_prob_list[l][bid] \ .transpose(1,2,0).reshape(-1, 2) probs = F.softmax(probs)[:, 1] # gather the proposals and probs batch_proposals_list.append(proposals) batch_probs_list.append(probs) batch_proposals = F.concat(batch_proposals_list, axis=0) batch_probs = F.concat(batch_probs_list, axis=0) # filter the boxes with small size. wh = batch_proposals[:, 2:4] - batch_proposals[:, :2] + 1 thresh = box_min_size * im_info[bid, 2] keep_mask = F.prod((wh >= thresh), axis=1) keep_mask = keep_mask + F.equal(keep_mask.sum(), 0) keep_mask, inds = F.cond_take(keep_mask > 0, keep_mask) inds = inds.astype(np.int32) # batch_proposals = F.nn.indexing_one_hot(batch_proposals, inds, 0) # batch_probs = F.nn.indexing_one_hot(batch_probs, inds, 0) batch_proposals, batch_probs = batch_proposals[inds], batch_probs[inds] # prev_nms_top_n num_proposals = F.minimum(prev_nms_top_n, batch_proposals.shape[0]) idx =
F.argsort(batch_probs, descending=True)
megengine.functional.argsort
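A sketch of score sorting and top-k selection with megengine.functional.argsort, mirroring the descending sort in the row above; the scores are illustrative.

import megengine.functional as F
from megengine import tensor

probs = tensor([0.2, 0.9, 0.5])
idx = F.argsort(probs, descending=True)   # indices of scores, highest first
topk = probs[idx[:2]]
print(idx.numpy(), topk.numpy())          # [1 2 0] [0.9 0.5]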
import megengine as mge import megengine.functional as F from megengine import tensor import numpy as np from megengine.functional.nn import nms from config import config from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, \ filter_boxes_opr, box_overlap_opr # from bbox_opr import box_overlap_opr import pdb def find_top_rpn_proposals(is_train, rpn_bbox_offsets_list, rpn_cls_prob_list, all_anchors_list, im_info): prev_nms_top_n = config.train_prev_nms_top_n \ if is_train else config.test_prev_nms_top_n post_nms_top_n = config.train_post_nms_top_n \ if is_train else config.test_post_nms_top_n batch_per_gpu = config.batch_per_gpu if is_train else 1 nms_threshold = config.rpn_nms_threshold box_min_size = config.rpn_min_box_size bbox_normalize_targets = config.rpn_bbox_normalize_targets bbox_normalize_means = config.bbox_normalize_means bbox_normalize_stds = config.bbox_normalize_stds list_size = len(rpn_bbox_offsets_list) return_rois, return_probs = [], [] batch_per_gpu = rpn_cls_prob_list[0].shape[0] for bid in range(batch_per_gpu): batch_proposals_list = [] batch_probs_list = [] for l in range(list_size): # get proposals and probs offsets = rpn_bbox_offsets_list[l][bid] \ .transpose(1, 2, 0).reshape(-1, 4) if bbox_normalize_targets: std_opr = tensor(config.bbox_normalize_stds[None, :]) mean_opr = tensor(config.bbox_normalize_means[None, :]) pred_offsets = pred_offsets * std_opr pred_offsets = pred_offsets + mean_opr all_anchors = all_anchors_list[l] proposals = bbox_transform_inv_opr(all_anchors, offsets) if config.anchor_within_border: proposals = clip_boxes_opr(proposals, im_info[bid, :]) probs = rpn_cls_prob_list[l][bid] \ .transpose(1,2,0).reshape(-1, 2) probs = F.softmax(probs)[:, 1] # gather the proposals and probs batch_proposals_list.append(proposals) batch_probs_list.append(probs) batch_proposals = F.concat(batch_proposals_list, axis=0) batch_probs = F.concat(batch_probs_list, axis=0) # filter the boxes with small size. wh = batch_proposals[:, 2:4] - batch_proposals[:, :2] + 1 thresh = box_min_size * im_info[bid, 2] keep_mask = F.prod((wh >= thresh), axis=1) keep_mask = keep_mask + F.equal(keep_mask.sum(), 0) keep_mask, inds = F.cond_take(keep_mask > 0, keep_mask) inds = inds.astype(np.int32) # batch_proposals = F.nn.indexing_one_hot(batch_proposals, inds, 0) # batch_probs = F.nn.indexing_one_hot(batch_probs, inds, 0) batch_proposals, batch_probs = batch_proposals[inds], batch_probs[inds] # prev_nms_top_n num_proposals = F.minimum(prev_nms_top_n, batch_proposals.shape[0]) idx = F.argsort(batch_probs, descending=True) topk_idx = idx[:num_proposals].reshape(-1) batch_proposals = batch_proposals[topk_idx].detach() batch_probs = batch_probs[topk_idx].detach() # For each image, run a total-level NMS, and choose topk results. keep_inds = nms(batch_proposals, batch_probs, nms_threshold, max_output = 2000) # num = F.minimum(post_nms_top_n, keep_inds.shape[0]) # keep_inds = keep_inds[:num] batch_rois, batch_probs = batch_proposals[keep_inds], batch_probs[keep_inds] # cons the rois batch_inds = F.ones((batch_rois.shape[0], 1)) * bid batch_rois =
F.concat([batch_inds, batch_rois[:, :4]], axis=1)
megengine.functional.concat
import megengine as mge import megengine.functional as F from megengine import tensor import numpy as np from megengine.functional.nn import nms from config import config from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, \ filter_boxes_opr, box_overlap_opr # from bbox_opr import box_overlap_opr import pdb def find_top_rpn_proposals(is_train, rpn_bbox_offsets_list, rpn_cls_prob_list, all_anchors_list, im_info): prev_nms_top_n = config.train_prev_nms_top_n \ if is_train else config.test_prev_nms_top_n post_nms_top_n = config.train_post_nms_top_n \ if is_train else config.test_post_nms_top_n batch_per_gpu = config.batch_per_gpu if is_train else 1 nms_threshold = config.rpn_nms_threshold box_min_size = config.rpn_min_box_size bbox_normalize_targets = config.rpn_bbox_normalize_targets bbox_normalize_means = config.bbox_normalize_means bbox_normalize_stds = config.bbox_normalize_stds list_size = len(rpn_bbox_offsets_list) return_rois, return_probs = [], [] batch_per_gpu = rpn_cls_prob_list[0].shape[0] for bid in range(batch_per_gpu): batch_proposals_list = [] batch_probs_list = [] for l in range(list_size): # get proposals and probs offsets = rpn_bbox_offsets_list[l][bid] \ .transpose(1, 2, 0).reshape(-1, 4) if bbox_normalize_targets: std_opr = tensor(config.bbox_normalize_stds[None, :]) mean_opr = tensor(config.bbox_normalize_means[None, :]) pred_offsets = pred_offsets * std_opr pred_offsets = pred_offsets + mean_opr all_anchors = all_anchors_list[l] proposals = bbox_transform_inv_opr(all_anchors, offsets) if config.anchor_within_border: proposals = clip_boxes_opr(proposals, im_info[bid, :]) probs = rpn_cls_prob_list[l][bid] \ .transpose(1,2,0).reshape(-1, 2) probs = F.softmax(probs)[:, 1] # gather the proposals and probs batch_proposals_list.append(proposals) batch_probs_list.append(probs) batch_proposals = F.concat(batch_proposals_list, axis=0) batch_probs = F.concat(batch_probs_list, axis=0) # filter the boxes with small size. wh = batch_proposals[:, 2:4] - batch_proposals[:, :2] + 1 thresh = box_min_size * im_info[bid, 2] keep_mask = F.prod((wh >= thresh), axis=1) keep_mask = keep_mask + F.equal(keep_mask.sum(), 0) keep_mask, inds = F.cond_take(keep_mask > 0, keep_mask) inds = inds.astype(np.int32) # batch_proposals = F.nn.indexing_one_hot(batch_proposals, inds, 0) # batch_probs = F.nn.indexing_one_hot(batch_probs, inds, 0) batch_proposals, batch_probs = batch_proposals[inds], batch_probs[inds] # prev_nms_top_n num_proposals = F.minimum(prev_nms_top_n, batch_proposals.shape[0]) idx = F.argsort(batch_probs, descending=True) topk_idx = idx[:num_proposals].reshape(-1) batch_proposals = batch_proposals[topk_idx].detach() batch_probs = batch_probs[topk_idx].detach() # For each image, run a total-level NMS, and choose topk results. keep_inds = nms(batch_proposals, batch_probs, nms_threshold, max_output = 2000) # num = F.minimum(post_nms_top_n, keep_inds.shape[0]) # keep_inds = keep_inds[:num] batch_rois, batch_probs = batch_proposals[keep_inds], batch_probs[keep_inds] # cons the rois batch_inds = F.ones((batch_rois.shape[0], 1)) * bid batch_rois = F.concat([batch_inds, batch_rois[:, :4]], axis=1) return_rois.append(batch_rois) return_probs.append(batch_probs) if batch_per_gpu == 1: return batch_rois, batch_probs else: concated_rois =
F.concat(return_rois, axis=0)
megengine.functional.concat
import megengine as mge import megengine.functional as F from megengine import tensor import numpy as np from megengine.functional.nn import nms from config import config from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, \ filter_boxes_opr, box_overlap_opr # from bbox_opr import box_overlap_opr import pdb def find_top_rpn_proposals(is_train, rpn_bbox_offsets_list, rpn_cls_prob_list, all_anchors_list, im_info): prev_nms_top_n = config.train_prev_nms_top_n \ if is_train else config.test_prev_nms_top_n post_nms_top_n = config.train_post_nms_top_n \ if is_train else config.test_post_nms_top_n batch_per_gpu = config.batch_per_gpu if is_train else 1 nms_threshold = config.rpn_nms_threshold box_min_size = config.rpn_min_box_size bbox_normalize_targets = config.rpn_bbox_normalize_targets bbox_normalize_means = config.bbox_normalize_means bbox_normalize_stds = config.bbox_normalize_stds list_size = len(rpn_bbox_offsets_list) return_rois, return_probs = [], [] batch_per_gpu = rpn_cls_prob_list[0].shape[0] for bid in range(batch_per_gpu): batch_proposals_list = [] batch_probs_list = [] for l in range(list_size): # get proposals and probs offsets = rpn_bbox_offsets_list[l][bid] \ .transpose(1, 2, 0).reshape(-1, 4) if bbox_normalize_targets: std_opr = tensor(config.bbox_normalize_stds[None, :]) mean_opr = tensor(config.bbox_normalize_means[None, :]) pred_offsets = pred_offsets * std_opr pred_offsets = pred_offsets + mean_opr all_anchors = all_anchors_list[l] proposals = bbox_transform_inv_opr(all_anchors, offsets) if config.anchor_within_border: proposals = clip_boxes_opr(proposals, im_info[bid, :]) probs = rpn_cls_prob_list[l][bid] \ .transpose(1,2,0).reshape(-1, 2) probs = F.softmax(probs)[:, 1] # gather the proposals and probs batch_proposals_list.append(proposals) batch_probs_list.append(probs) batch_proposals = F.concat(batch_proposals_list, axis=0) batch_probs = F.concat(batch_probs_list, axis=0) # filter the boxes with small size. wh = batch_proposals[:, 2:4] - batch_proposals[:, :2] + 1 thresh = box_min_size * im_info[bid, 2] keep_mask = F.prod((wh >= thresh), axis=1) keep_mask = keep_mask + F.equal(keep_mask.sum(), 0) keep_mask, inds = F.cond_take(keep_mask > 0, keep_mask) inds = inds.astype(np.int32) # batch_proposals = F.nn.indexing_one_hot(batch_proposals, inds, 0) # batch_probs = F.nn.indexing_one_hot(batch_probs, inds, 0) batch_proposals, batch_probs = batch_proposals[inds], batch_probs[inds] # prev_nms_top_n num_proposals = F.minimum(prev_nms_top_n, batch_proposals.shape[0]) idx = F.argsort(batch_probs, descending=True) topk_idx = idx[:num_proposals].reshape(-1) batch_proposals = batch_proposals[topk_idx].detach() batch_probs = batch_probs[topk_idx].detach() # For each image, run a total-level NMS, and choose topk results. keep_inds = nms(batch_proposals, batch_probs, nms_threshold, max_output = 2000) # num = F.minimum(post_nms_top_n, keep_inds.shape[0]) # keep_inds = keep_inds[:num] batch_rois, batch_probs = batch_proposals[keep_inds], batch_probs[keep_inds] # cons the rois batch_inds = F.ones((batch_rois.shape[0], 1)) * bid batch_rois = F.concat([batch_inds, batch_rois[:, :4]], axis=1) return_rois.append(batch_rois) return_probs.append(batch_probs) if batch_per_gpu == 1: return batch_rois, batch_probs else: concated_rois = F.concat(return_rois, axis=0) concated_probs =
F.concat(return_probs, axis=0)
megengine.functional.concat
import megengine as mge import megengine.functional as F from megengine import tensor import numpy as np from megengine.functional.nn import nms from config import config from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, \ filter_boxes_opr, box_overlap_opr # from bbox_opr import box_overlap_opr import pdb def find_top_rpn_proposals(is_train, rpn_bbox_offsets_list, rpn_cls_prob_list, all_anchors_list, im_info): prev_nms_top_n = config.train_prev_nms_top_n \ if is_train else config.test_prev_nms_top_n post_nms_top_n = config.train_post_nms_top_n \ if is_train else config.test_post_nms_top_n batch_per_gpu = config.batch_per_gpu if is_train else 1 nms_threshold = config.rpn_nms_threshold box_min_size = config.rpn_min_box_size bbox_normalize_targets = config.rpn_bbox_normalize_targets bbox_normalize_means = config.bbox_normalize_means bbox_normalize_stds = config.bbox_normalize_stds list_size = len(rpn_bbox_offsets_list) return_rois, return_probs = [], [] batch_per_gpu = rpn_cls_prob_list[0].shape[0] for bid in range(batch_per_gpu): batch_proposals_list = [] batch_probs_list = [] for l in range(list_size): # get proposals and probs offsets = rpn_bbox_offsets_list[l][bid] \ .transpose(1, 2, 0).reshape(-1, 4) if bbox_normalize_targets: std_opr = tensor(config.bbox_normalize_stds[None, :]) mean_opr = tensor(config.bbox_normalize_means[None, :]) pred_offsets = pred_offsets * std_opr pred_offsets = pred_offsets + mean_opr all_anchors = all_anchors_list[l] proposals = bbox_transform_inv_opr(all_anchors, offsets) if config.anchor_within_border: proposals = clip_boxes_opr(proposals, im_info[bid, :]) probs = rpn_cls_prob_list[l][bid] \ .transpose(1,2,0).reshape(-1, 2) probs = F.softmax(probs)[:, 1] # gather the proposals and probs batch_proposals_list.append(proposals) batch_probs_list.append(probs) batch_proposals = F.concat(batch_proposals_list, axis=0) batch_probs = F.concat(batch_probs_list, axis=0) # filter the boxes with small size. wh = batch_proposals[:, 2:4] - batch_proposals[:, :2] + 1 thresh = box_min_size * im_info[bid, 2] keep_mask = F.prod((wh >= thresh), axis=1) keep_mask = keep_mask + F.equal(keep_mask.sum(), 0) keep_mask, inds = F.cond_take(keep_mask > 0, keep_mask) inds = inds.astype(np.int32) # batch_proposals = F.nn.indexing_one_hot(batch_proposals, inds, 0) # batch_probs = F.nn.indexing_one_hot(batch_probs, inds, 0) batch_proposals, batch_probs = batch_proposals[inds], batch_probs[inds] # prev_nms_top_n num_proposals = F.minimum(prev_nms_top_n, batch_proposals.shape[0]) idx = F.argsort(batch_probs, descending=True) topk_idx = idx[:num_proposals].reshape(-1) batch_proposals = batch_proposals[topk_idx].detach() batch_probs = batch_probs[topk_idx].detach() # For each image, run a total-level NMS, and choose topk results. keep_inds = nms(batch_proposals, batch_probs, nms_threshold, max_output = 2000) # num = F.minimum(post_nms_top_n, keep_inds.shape[0]) # keep_inds = keep_inds[:num] batch_rois, batch_probs = batch_proposals[keep_inds], batch_probs[keep_inds] # cons the rois batch_inds =
F.ones((batch_rois.shape[0], 1))
megengine.functional.ones
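Similarly, a small sketch of how F.ones is used above to prepend a per-roi batch-index column before the rois are gathered; the box values and the batch index below are made-up stand-ins:

import megengine as mge
import megengine.functional as F

# Four proposals in (x1, y1, x2, y2) form, standing in for batch_rois.
batch_rois = mge.tensor([[0., 0., 10., 10.],
                         [2., 2., 8., 8.],
                         [1., 3., 6., 9.],
                         [4., 4., 12., 12.]])
bid = 1  # image index within the batch

# F.ones builds the batch-index column that gets concatenated in front.
batch_inds = F.ones((batch_rois.shape[0], 1)) * bid
rois_with_inds = F.concat([batch_inds, batch_rois], axis=1)
print(rois_with_inds.shape)  # (4, 5)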
import megengine as mge import megengine.functional as F from megengine import tensor import numpy as np from megengine.functional.nn import nms from config import config from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, \ filter_boxes_opr, box_overlap_opr # from bbox_opr import box_overlap_opr import pdb def find_top_rpn_proposals(is_train, rpn_bbox_offsets_list, rpn_cls_prob_list, all_anchors_list, im_info): prev_nms_top_n = config.train_prev_nms_top_n \ if is_train else config.test_prev_nms_top_n post_nms_top_n = config.train_post_nms_top_n \ if is_train else config.test_post_nms_top_n batch_per_gpu = config.batch_per_gpu if is_train else 1 nms_threshold = config.rpn_nms_threshold box_min_size = config.rpn_min_box_size bbox_normalize_targets = config.rpn_bbox_normalize_targets bbox_normalize_means = config.bbox_normalize_means bbox_normalize_stds = config.bbox_normalize_stds list_size = len(rpn_bbox_offsets_list) return_rois, return_probs = [], [] batch_per_gpu = rpn_cls_prob_list[0].shape[0] for bid in range(batch_per_gpu): batch_proposals_list = [] batch_probs_list = [] for l in range(list_size): # get proposals and probs offsets = rpn_bbox_offsets_list[l][bid] \ .transpose(1, 2, 0).reshape(-1, 4) if bbox_normalize_targets: std_opr =
tensor(config.bbox_normalize_stds[None, :])
megengine.tensor
import megengine as mge import megengine.functional as F from megengine import tensor import numpy as np from megengine.functional.nn import nms from config import config from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, \ filter_boxes_opr, box_overlap_opr # from bbox_opr import box_overlap_opr import pdb def find_top_rpn_proposals(is_train, rpn_bbox_offsets_list, rpn_cls_prob_list, all_anchors_list, im_info): prev_nms_top_n = config.train_prev_nms_top_n \ if is_train else config.test_prev_nms_top_n post_nms_top_n = config.train_post_nms_top_n \ if is_train else config.test_post_nms_top_n batch_per_gpu = config.batch_per_gpu if is_train else 1 nms_threshold = config.rpn_nms_threshold box_min_size = config.rpn_min_box_size bbox_normalize_targets = config.rpn_bbox_normalize_targets bbox_normalize_means = config.bbox_normalize_means bbox_normalize_stds = config.bbox_normalize_stds list_size = len(rpn_bbox_offsets_list) return_rois, return_probs = [], [] batch_per_gpu = rpn_cls_prob_list[0].shape[0] for bid in range(batch_per_gpu): batch_proposals_list = [] batch_probs_list = [] for l in range(list_size): # get proposals and probs offsets = rpn_bbox_offsets_list[l][bid] \ .transpose(1, 2, 0).reshape(-1, 4) if bbox_normalize_targets: std_opr = tensor(config.bbox_normalize_stds[None, :]) mean_opr =
tensor(config.bbox_normalize_means[None, :])
megengine.tensor
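The two rows above wrap the configured normalization constants with tensor(...) so they broadcast against the (N, 4) offset tensor; a minimal sketch with assumed std/mean values in place of the real config entries:

import numpy as np
from megengine import tensor

# Stand-ins for config.bbox_normalize_stds / config.bbox_normalize_means.
bbox_normalize_stds = np.array([0.1, 0.1, 0.2, 0.2], dtype=np.float32)
bbox_normalize_means = np.array([0.0, 0.0, 0.0, 0.0], dtype=np.float32)

# tensor(...) turns the (1, 4) numpy rows into MegEngine tensors so they
# broadcast against the (N, 4) offsets during denormalization.
std_opr = tensor(bbox_normalize_stds[None, :])
mean_opr = tensor(bbox_normalize_means[None, :])

offsets = tensor(np.random.rand(8, 4).astype(np.float32))
offsets = offsets * std_opr + mean_opr
print(offsets.shape)  # (8, 4)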
import megengine as mge import megengine.functional as F from megengine import tensor import numpy as np from megengine.functional.nn import nms from config import config from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, \ filter_boxes_opr, box_overlap_opr # from bbox_opr import box_overlap_opr import pdb def find_top_rpn_proposals(is_train, rpn_bbox_offsets_list, rpn_cls_prob_list, all_anchors_list, im_info): prev_nms_top_n = config.train_prev_nms_top_n \ if is_train else config.test_prev_nms_top_n post_nms_top_n = config.train_post_nms_top_n \ if is_train else config.test_post_nms_top_n batch_per_gpu = config.batch_per_gpu if is_train else 1 nms_threshold = config.rpn_nms_threshold box_min_size = config.rpn_min_box_size bbox_normalize_targets = config.rpn_bbox_normalize_targets bbox_normalize_means = config.bbox_normalize_means bbox_normalize_stds = config.bbox_normalize_stds list_size = len(rpn_bbox_offsets_list) return_rois, return_probs = [], [] batch_per_gpu = rpn_cls_prob_list[0].shape[0] for bid in range(batch_per_gpu): batch_proposals_list = [] batch_probs_list = [] for l in range(list_size): # get proposals and probs offsets = rpn_bbox_offsets_list[l][bid] \ .transpose(1, 2, 0).reshape(-1, 4) if bbox_normalize_targets: std_opr = tensor(config.bbox_normalize_stds[None, :]) mean_opr = tensor(config.bbox_normalize_means[None, :]) pred_offsets = pred_offsets * std_opr pred_offsets = pred_offsets + mean_opr all_anchors = all_anchors_list[l] proposals = bbox_transform_inv_opr(all_anchors, offsets) if config.anchor_within_border: proposals = clip_boxes_opr(proposals, im_info[bid, :]) probs = rpn_cls_prob_list[l][bid] \ .transpose(1,2,0).reshape(-1, 2) probs =
F.softmax(probs)
megengine.functional.softmax
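A short sketch of the F.softmax call completed above: softmax over the two-class axis of the reshaped RPN scores, keeping the foreground column (the logits below are invented for illustration):

import megengine as mge
import megengine.functional as F

# (N, 2) background/foreground logits standing in for the reshaped RPN scores.
logits = mge.tensor([[1.0, 2.0],
                     [0.5, -0.5],
                     [3.0, 1.0]])

# Softmax over the class axis, then keep the foreground probability.
fg_probs = F.softmax(logits, axis=1)[:, 1]
print(fg_probs.shape)  # (3,)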
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. from test.utils import ( ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu, ) import caffe # pylint: disable=import-error import megengine as mge import megengine.hub import numpy as np import pytest from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe from .tm_utils import get_traced_module max_error = 1e-6 tmp_file = "test_module" def _test_convert_result( inputs, trace_module, mge_results, max_err, input_data_type=None, input_scales=None, input_zero_points=None, require_quantize=False, param_fake_quant=False, split_conv_relu=False, fuse_bn=False, input_name="x", convert_backend=1, ): tracedmodule_to_caffe( trace_module, prototxt=tmp_file + ".txt", caffemodel=tmp_file + ".caffemodel", input_data_type=input_data_type, input_scales=input_scales, input_zero_points=input_zero_points, require_quantize=require_quantize, param_fake_quant=param_fake_quant, split_conv_relu=split_conv_relu, fuse_bn=fuse_bn, convert_backend=convert_backend, ) caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST) for i in caffe_net.blobs.keys(): if isinstance(input_name, list): for idx, name in enumerate(input_name): if name.strip() == i.strip(): caffe_net.blobs[i].data[...] = inputs[idx] break else: if input_name in i: caffe_net.blobs[i].data[...] = inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
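The remaining rows in this file all complete mge.tensor(...), which wraps a numpy array so it can be fed to a module (and, in these tests, handed to the project's get_traced_module helper for tracing and conversion). A minimal stand-alone sketch with an assumed toy module in place of the test's ConvOpr:

import numpy as np
import megengine as mge
import megengine.module as M

# A toy stand-in for net / net.data from the test: any Module plus a numpy input.
conv = M.Conv2d(3, 8, kernel_size=3, padding=1)
data = np.random.random((1, 3, 16, 16)).astype(np.float32)

# mge.tensor wraps the numpy array so the module can consume it; the tests
# pass the same wrapped tensor to get_traced_module for conversion.
out = conv(mge.tensor(data))
print(out.shape)  # (1, 8, 16, 16)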
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. from test.utils import ( ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu, ) import caffe # pylint: disable=import-error import megengine as mge import megengine.hub import numpy as np import pytest from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe from .tm_utils import get_traced_module max_error = 1e-6 tmp_file = "test_module" def _test_convert_result( inputs, trace_module, mge_results, max_err, input_data_type=None, input_scales=None, input_zero_points=None, require_quantize=False, param_fake_quant=False, split_conv_relu=False, fuse_bn=False, input_name="x", convert_backend=1, ): tracedmodule_to_caffe( trace_module, prototxt=tmp_file + ".txt", caffemodel=tmp_file + ".caffemodel", input_data_type=input_data_type, input_scales=input_scales, input_zero_points=input_zero_points, require_quantize=require_quantize, param_fake_quant=param_fake_quant, split_conv_relu=split_conv_relu, fuse_bn=fuse_bn, convert_backend=convert_backend, ) caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST) for i in caffe_net.blobs.keys(): if isinstance(input_name, list): for idx, name in enumerate(input_name): if name.strip() == i.strip(): caffe_net.blobs[i].data[...] = inputs[idx] break else: if input_name in i: caffe_net.blobs[i].data[...] = inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_convrelu(): net = ConvRelu2dOpr() traced_module, tm_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. from test.utils import ( ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu, ) import caffe # pylint: disable=import-error import megengine as mge import megengine.hub import numpy as np import pytest from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe from .tm_utils import get_traced_module max_error = 1e-6 tmp_file = "test_module" def _test_convert_result( inputs, trace_module, mge_results, max_err, input_data_type=None, input_scales=None, input_zero_points=None, require_quantize=False, param_fake_quant=False, split_conv_relu=False, fuse_bn=False, input_name="x", convert_backend=1, ): tracedmodule_to_caffe( trace_module, prototxt=tmp_file + ".txt", caffemodel=tmp_file + ".caffemodel", input_data_type=input_data_type, input_scales=input_scales, input_zero_points=input_zero_points, require_quantize=require_quantize, param_fake_quant=param_fake_quant, split_conv_relu=split_conv_relu, fuse_bn=fuse_bn, convert_backend=convert_backend, ) caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST) for i in caffe_net.blobs.keys(): if isinstance(input_name, list): for idx, name in enumerate(input_name): if name.strip() == i.strip(): caffe_net.blobs[i].data[...] = inputs[idx] break else: if input_name in i: caffe_net.blobs[i].data[...] = inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_convrelu(): net = ConvRelu2dOpr() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbn(): net = ConvBn2dOpr() net.eval() traced_module, tm_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. from test.utils import ( ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu, ) import caffe # pylint: disable=import-error import megengine as mge import megengine.hub import numpy as np import pytest from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe from .tm_utils import get_traced_module max_error = 1e-6 tmp_file = "test_module" def _test_convert_result( inputs, trace_module, mge_results, max_err, input_data_type=None, input_scales=None, input_zero_points=None, require_quantize=False, param_fake_quant=False, split_conv_relu=False, fuse_bn=False, input_name="x", convert_backend=1, ): tracedmodule_to_caffe( trace_module, prototxt=tmp_file + ".txt", caffemodel=tmp_file + ".caffemodel", input_data_type=input_data_type, input_scales=input_scales, input_zero_points=input_zero_points, require_quantize=require_quantize, param_fake_quant=param_fake_quant, split_conv_relu=split_conv_relu, fuse_bn=fuse_bn, convert_backend=convert_backend, ) caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST) for i in caffe_net.blobs.keys(): if isinstance(input_name, list): for idx, name in enumerate(input_name): if name.strip() == i.strip(): caffe_net.blobs[i].data[...] = inputs[idx] break else: if input_name in i: caffe_net.blobs[i].data[...] = inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_convrelu(): net = ConvRelu2dOpr() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbn(): net = ConvBn2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbnrelu(): net = ConvBnRelu2dOpr() net.eval() traced_module, tm_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. from test.utils import ( ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu, ) import caffe # pylint: disable=import-error import megengine as mge import megengine.hub import numpy as np import pytest from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe from .tm_utils import get_traced_module max_error = 1e-6 tmp_file = "test_module" def _test_convert_result( inputs, trace_module, mge_results, max_err, input_data_type=None, input_scales=None, input_zero_points=None, require_quantize=False, param_fake_quant=False, split_conv_relu=False, fuse_bn=False, input_name="x", convert_backend=1, ): tracedmodule_to_caffe( trace_module, prototxt=tmp_file + ".txt", caffemodel=tmp_file + ".caffemodel", input_data_type=input_data_type, input_scales=input_scales, input_zero_points=input_zero_points, require_quantize=require_quantize, param_fake_quant=param_fake_quant, split_conv_relu=split_conv_relu, fuse_bn=fuse_bn, convert_backend=convert_backend, ) caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST) for i in caffe_net.blobs.keys(): if isinstance(input_name, list): for idx, name in enumerate(input_name): if name.strip() == i.strip(): caffe_net.blobs[i].data[...] = inputs[idx] break else: if input_name in i: caffe_net.blobs[i].data[...] = inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_convrelu(): net = ConvRelu2dOpr() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbn(): net = ConvBn2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbnrelu(): net = ConvBnRelu2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_linear(): net = LinearOpr() tm_module, mge_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. from test.utils import ( ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu, ) import caffe # pylint: disable=import-error import megengine as mge import megengine.hub import numpy as np import pytest from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe from .tm_utils import get_traced_module max_error = 1e-6 tmp_file = "test_module" def _test_convert_result( inputs, trace_module, mge_results, max_err, input_data_type=None, input_scales=None, input_zero_points=None, require_quantize=False, param_fake_quant=False, split_conv_relu=False, fuse_bn=False, input_name="x", convert_backend=1, ): tracedmodule_to_caffe( trace_module, prototxt=tmp_file + ".txt", caffemodel=tmp_file + ".caffemodel", input_data_type=input_data_type, input_scales=input_scales, input_zero_points=input_zero_points, require_quantize=require_quantize, param_fake_quant=param_fake_quant, split_conv_relu=split_conv_relu, fuse_bn=fuse_bn, convert_backend=convert_backend, ) caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST) for i in caffe_net.blobs.keys(): if isinstance(input_name, list): for idx, name in enumerate(input_name): if name.strip() == i.strip(): caffe_net.blobs[i].data[...] = inputs[idx] break else: if input_name in i: caffe_net.blobs[i].data[...] = inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_convrelu(): net = ConvRelu2dOpr() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbn(): net = ConvBn2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbnrelu(): net = ConvBnRelu2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_linear(): net = LinearOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten_linear(): net = LinearOpr("flatten") tm_module, mge_result = get_traced_module(net,
mge.tensor(net.data1)
megengine.tensor
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. from test.utils import ( ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu, ) import caffe # pylint: disable=import-error import megengine as mge import megengine.hub import numpy as np import pytest from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe from .tm_utils import get_traced_module max_error = 1e-6 tmp_file = "test_module" def _test_convert_result( inputs, trace_module, mge_results, max_err, input_data_type=None, input_scales=None, input_zero_points=None, require_quantize=False, param_fake_quant=False, split_conv_relu=False, fuse_bn=False, input_name="x", convert_backend=1, ): tracedmodule_to_caffe( trace_module, prototxt=tmp_file + ".txt", caffemodel=tmp_file + ".caffemodel", input_data_type=input_data_type, input_scales=input_scales, input_zero_points=input_zero_points, require_quantize=require_quantize, param_fake_quant=param_fake_quant, split_conv_relu=split_conv_relu, fuse_bn=fuse_bn, convert_backend=convert_backend, ) caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST) for i in caffe_net.blobs.keys(): if isinstance(input_name, list): for idx, name in enumerate(input_name): if name.strip() == i.strip(): caffe_net.blobs[i].data[...] = inputs[idx] break else: if input_name in i: caffe_net.blobs[i].data[...] 
= inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_convrelu(): net = ConvRelu2dOpr() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbn(): net = ConvBn2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbnrelu(): net = ConvBnRelu2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_linear(): net = LinearOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten_linear(): net = LinearOpr("flatten") tm_module, mge_result = get_traced_module(net, mge.tensor(net.data1)) _test_convert_result(net.data1, tm_module, mge_result, max_error, convert_backend=4) def test_linear_bn(): net = LinearBnOpr() for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
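The warm-up loop in the prompt above runs a few training-mode forwards so the BatchNorm running statistics are populated before net.eval(), which is what allows the converter to fold BN when fuse_bn=True. A sketch of that pattern with an assumed toy Linear+BatchNorm module:

import numpy as np
import megengine as mge
import megengine.module as M

# Toy stand-in for LinearBnOpr: a Linear followed by BatchNorm1d.
class LinearBn(M.Module):
    def __init__(self):
        super().__init__()
        self.linear = M.Linear(16, 32)
        self.bn = M.BatchNorm1d(32)

    def forward(self, x):
        return self.bn(self.linear(x))

net = LinearBn()
data = np.random.random((4, 16)).astype(np.float32)

# A few training-mode forwards populate the BN running statistics, then
# eval() freezes them so a converter can fuse BN into the preceding layer.
for _ in range(10):
    net(mge.tensor(data)).numpy()
net.eval()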
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. from test.utils import ( ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu, ) import caffe # pylint: disable=import-error import megengine as mge import megengine.hub import numpy as np import pytest from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe from .tm_utils import get_traced_module max_error = 1e-6 tmp_file = "test_module" def _test_convert_result( inputs, trace_module, mge_results, max_err, input_data_type=None, input_scales=None, input_zero_points=None, require_quantize=False, param_fake_quant=False, split_conv_relu=False, fuse_bn=False, input_name="x", convert_backend=1, ): tracedmodule_to_caffe( trace_module, prototxt=tmp_file + ".txt", caffemodel=tmp_file + ".caffemodel", input_data_type=input_data_type, input_scales=input_scales, input_zero_points=input_zero_points, require_quantize=require_quantize, param_fake_quant=param_fake_quant, split_conv_relu=split_conv_relu, fuse_bn=fuse_bn, convert_backend=convert_backend, ) caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST) for i in caffe_net.blobs.keys(): if isinstance(input_name, list): for idx, name in enumerate(input_name): if name.strip() == i.strip(): caffe_net.blobs[i].data[...] = inputs[idx] break else: if input_name in i: caffe_net.blobs[i].data[...] 
= inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_convrelu(): net = ConvRelu2dOpr() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbn(): net = ConvBn2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbnrelu(): net = ConvBnRelu2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_linear(): net = LinearOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten_linear(): net = LinearOpr("flatten") tm_module, mge_result = get_traced_module(net, mge.tensor(net.data1)) _test_convert_result(net.data1, tm_module, mge_result, max_error, convert_backend=4) def test_linear_bn(): net = LinearBnOpr() for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) @pytest.mark.parametrize("mode", [True, False]) def test_matmul_bn(mode): net = MatrixMulBnOpr(mode) for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. from test.utils import ( ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu, ) import caffe # pylint: disable=import-error import megengine as mge import megengine.hub import numpy as np import pytest from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe from .tm_utils import get_traced_module max_error = 1e-6 tmp_file = "test_module" def _test_convert_result( inputs, trace_module, mge_results, max_err, input_data_type=None, input_scales=None, input_zero_points=None, require_quantize=False, param_fake_quant=False, split_conv_relu=False, fuse_bn=False, input_name="x", convert_backend=1, ): tracedmodule_to_caffe( trace_module, prototxt=tmp_file + ".txt", caffemodel=tmp_file + ".caffemodel", input_data_type=input_data_type, input_scales=input_scales, input_zero_points=input_zero_points, require_quantize=require_quantize, param_fake_quant=param_fake_quant, split_conv_relu=split_conv_relu, fuse_bn=fuse_bn, convert_backend=convert_backend, ) caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST) for i in caffe_net.blobs.keys(): if isinstance(input_name, list): for idx, name in enumerate(input_name): if name.strip() == i.strip(): caffe_net.blobs[i].data[...] = inputs[idx] break else: if input_name in i: caffe_net.blobs[i].data[...] 
= inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_convrelu(): net = ConvRelu2dOpr() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbn(): net = ConvBn2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbnrelu(): net = ConvBnRelu2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_linear(): net = LinearOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten_linear(): net = LinearOpr("flatten") tm_module, mge_result = get_traced_module(net, mge.tensor(net.data1)) _test_convert_result(net.data1, tm_module, mge_result, max_error, convert_backend=4) def test_linear_bn(): net = LinearBnOpr() for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) @pytest.mark.parametrize("mode", [True, False]) def test_matmul_bn(mode): net = MatrixMulBnOpr(mode) for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) def test_squeeze(): net = SqueezeOpr() tm_module, mge_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. from test.utils import ( ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu, ) import caffe # pylint: disable=import-error import megengine as mge import megengine.hub import numpy as np import pytest from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe from .tm_utils import get_traced_module max_error = 1e-6 tmp_file = "test_module" def _test_convert_result( inputs, trace_module, mge_results, max_err, input_data_type=None, input_scales=None, input_zero_points=None, require_quantize=False, param_fake_quant=False, split_conv_relu=False, fuse_bn=False, input_name="x", convert_backend=1, ): tracedmodule_to_caffe( trace_module, prototxt=tmp_file + ".txt", caffemodel=tmp_file + ".caffemodel", input_data_type=input_data_type, input_scales=input_scales, input_zero_points=input_zero_points, require_quantize=require_quantize, param_fake_quant=param_fake_quant, split_conv_relu=split_conv_relu, fuse_bn=fuse_bn, convert_backend=convert_backend, ) caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST) for i in caffe_net.blobs.keys(): if isinstance(input_name, list): for idx, name in enumerate(input_name): if name.strip() == i.strip(): caffe_net.blobs[i].data[...] = inputs[idx] break else: if input_name in i: caffe_net.blobs[i].data[...] 
= inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_convrelu(): net = ConvRelu2dOpr() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbn(): net = ConvBn2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbnrelu(): net = ConvBnRelu2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_linear(): net = LinearOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten_linear(): net = LinearOpr("flatten") tm_module, mge_result = get_traced_module(net, mge.tensor(net.data1)) _test_convert_result(net.data1, tm_module, mge_result, max_error, convert_backend=4) def test_linear_bn(): net = LinearBnOpr() for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) @pytest.mark.parametrize("mode", [True, False]) def test_matmul_bn(mode): net = MatrixMulBnOpr(mode) for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) def test_squeeze(): net = SqueezeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize("mode", ["max", "avg"]) def test_pooling(mode): if megengine.__version__ > "0.6.0" and mode == "avg": return net = PoolOpr(mode) tm_module, mge_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
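The pooling test in the prompt above traces a max/avg pooling module (skipping "avg" on newer MegEngine versions); a tiny stand-alone sketch of the max-pool case with assumed input sizes:

import numpy as np
import megengine as mge
import megengine.module as M

# Toy stand-in for PoolOpr in "max" mode.
pool = M.MaxPool2d(kernel_size=2, stride=2)
data = np.random.random((1, 3, 8, 8)).astype(np.float32)
out = pool(mge.tensor(data))
print(out.shape)  # (1, 3, 4, 4)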
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. from test.utils import ( ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu, ) import caffe # pylint: disable=import-error import megengine as mge import megengine.hub import numpy as np import pytest from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe from .tm_utils import get_traced_module max_error = 1e-6 tmp_file = "test_module" def _test_convert_result( inputs, trace_module, mge_results, max_err, input_data_type=None, input_scales=None, input_zero_points=None, require_quantize=False, param_fake_quant=False, split_conv_relu=False, fuse_bn=False, input_name="x", convert_backend=1, ): tracedmodule_to_caffe( trace_module, prototxt=tmp_file + ".txt", caffemodel=tmp_file + ".caffemodel", input_data_type=input_data_type, input_scales=input_scales, input_zero_points=input_zero_points, require_quantize=require_quantize, param_fake_quant=param_fake_quant, split_conv_relu=split_conv_relu, fuse_bn=fuse_bn, convert_backend=convert_backend, ) caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST) for i in caffe_net.blobs.keys(): if isinstance(input_name, list): for idx, name in enumerate(input_name): if name.strip() == i.strip(): caffe_net.blobs[i].data[...] = inputs[idx] break else: if input_name in i: caffe_net.blobs[i].data[...] 
= inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_convrelu(): net = ConvRelu2dOpr() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbn(): net = ConvBn2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbnrelu(): net = ConvBnRelu2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_linear(): net = LinearOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten_linear(): net = LinearOpr("flatten") tm_module, mge_result = get_traced_module(net, mge.tensor(net.data1)) _test_convert_result(net.data1, tm_module, mge_result, max_error, convert_backend=4) def test_linear_bn(): net = LinearBnOpr() for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) @pytest.mark.parametrize("mode", [True, False]) def test_matmul_bn(mode): net = MatrixMulBnOpr(mode) for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) def test_squeeze(): net = SqueezeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize("mode", ["max", "avg"]) def test_pooling(mode): if megengine.__version__ > "0.6.0" and mode == "avg": return net = PoolOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize("mode", ["bn1d", "bn2d"]) def test_batchnorm(mode): net = BnOpr(mode) net.eval() data = net.data1 if mode == "bn1d" else net.data2 tm_module, mge_result = get_traced_module(net,
mge.tensor(data)
megengine.tensor
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. from test.utils import ( ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu, ) import caffe # pylint: disable=import-error import megengine as mge import megengine.hub import numpy as np import pytest from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe from .tm_utils import get_traced_module max_error = 1e-6 tmp_file = "test_module" def _test_convert_result( inputs, trace_module, mge_results, max_err, input_data_type=None, input_scales=None, input_zero_points=None, require_quantize=False, param_fake_quant=False, split_conv_relu=False, fuse_bn=False, input_name="x", convert_backend=1, ): tracedmodule_to_caffe( trace_module, prototxt=tmp_file + ".txt", caffemodel=tmp_file + ".caffemodel", input_data_type=input_data_type, input_scales=input_scales, input_zero_points=input_zero_points, require_quantize=require_quantize, param_fake_quant=param_fake_quant, split_conv_relu=split_conv_relu, fuse_bn=fuse_bn, convert_backend=convert_backend, ) caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST) for i in caffe_net.blobs.keys(): if isinstance(input_name, list): for idx, name in enumerate(input_name): if name.strip() == i.strip(): caffe_net.blobs[i].data[...] = inputs[idx] break else: if input_name in i: caffe_net.blobs[i].data[...] 
= inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_convrelu(): net = ConvRelu2dOpr() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbn(): net = ConvBn2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbnrelu(): net = ConvBnRelu2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_linear(): net = LinearOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten_linear(): net = LinearOpr("flatten") tm_module, mge_result = get_traced_module(net, mge.tensor(net.data1)) _test_convert_result(net.data1, tm_module, mge_result, max_error, convert_backend=4) def test_linear_bn(): net = LinearBnOpr() for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) @pytest.mark.parametrize("mode", [True, False]) def test_matmul_bn(mode): net = MatrixMulBnOpr(mode) for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) def test_squeeze(): net = SqueezeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize("mode", ["max", "avg"]) def test_pooling(mode): if megengine.__version__ > "0.6.0" and mode == "avg": return net = PoolOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize("mode", ["bn1d", "bn2d"]) def test_batchnorm(mode): net = BnOpr(mode) net.eval() data = net.data1 if mode == "bn1d" else net.data2 tm_module, mge_result = get_traced_module(net, mge.tensor(data)) _test_convert_result(data, tm_module, mge_result, max_error) def test_subtensor(): net = SubtensorOpr() tm_module, mge_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. from test.utils import ( ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu, ) import caffe # pylint: disable=import-error import megengine as mge import megengine.hub import numpy as np import pytest from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe from .tm_utils import get_traced_module max_error = 1e-6 tmp_file = "test_module" def _test_convert_result( inputs, trace_module, mge_results, max_err, input_data_type=None, input_scales=None, input_zero_points=None, require_quantize=False, param_fake_quant=False, split_conv_relu=False, fuse_bn=False, input_name="x", convert_backend=1, ): tracedmodule_to_caffe( trace_module, prototxt=tmp_file + ".txt", caffemodel=tmp_file + ".caffemodel", input_data_type=input_data_type, input_scales=input_scales, input_zero_points=input_zero_points, require_quantize=require_quantize, param_fake_quant=param_fake_quant, split_conv_relu=split_conv_relu, fuse_bn=fuse_bn, convert_backend=convert_backend, ) caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST) for i in caffe_net.blobs.keys(): if isinstance(input_name, list): for idx, name in enumerate(input_name): if name.strip() == i.strip(): caffe_net.blobs[i].data[...] = inputs[idx] break else: if input_name in i: caffe_net.blobs[i].data[...] 
= inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_convrelu(): net = ConvRelu2dOpr() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbn(): net = ConvBn2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbnrelu(): net = ConvBnRelu2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_linear(): net = LinearOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten_linear(): net = LinearOpr("flatten") tm_module, mge_result = get_traced_module(net, mge.tensor(net.data1)) _test_convert_result(net.data1, tm_module, mge_result, max_error, convert_backend=4) def test_linear_bn(): net = LinearBnOpr() for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) @pytest.mark.parametrize("mode", [True, False]) def test_matmul_bn(mode): net = MatrixMulBnOpr(mode) for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) def test_squeeze(): net = SqueezeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize("mode", ["max", "avg"]) def test_pooling(mode): if megengine.__version__ > "0.6.0" and mode == "avg": return net = PoolOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize("mode", ["bn1d", "bn2d"]) def test_batchnorm(mode): net = BnOpr(mode) net.eval() data = net.data1 if mode == "bn1d" else net.data2 tm_module, mge_result = get_traced_module(net, mge.tensor(data)) _test_convert_result(data, tm_module, mge_result, max_error) def test_subtensor(): net = SubtensorOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_transpose(): net = TransposeOpr() tm_module, mge_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from test.utils import (
    ActiveOpr,
    AdaptiveAvgPool2dOpr,
    BnOpr,
    BroadcastOpr,
    ConvBn2dOpr,
    ConvBnRelu2dOpr,
    ConvOpr,
    ConvRelu2dOpr,
    DropoutOpr,
    ElemwiseOpr,
    FConcatOpr,
    FlattenOpr,
    LinearBnOpr,
    LinearOpr,
    MatrixMulBnOpr,
    PoolOpr,
    ReduceOpr,
    RepeatOpr,
    ReshapeOpr,
    SqueezeOpr,
    SubtensorOpr,
    TransposeOpr,
    XORNet,
    XORNet_LeakyRelu,
)

import caffe  # pylint: disable=import-error
import megengine as mge
import megengine.hub
import numpy as np
import pytest

from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe

from .tm_utils import get_traced_module

max_error = 1e-6
tmp_file = "test_module"


def _test_convert_result(
    inputs,
    trace_module,
    mge_results,
    max_err,
    input_data_type=None,
    input_scales=None,
    input_zero_points=None,
    require_quantize=False,
    param_fake_quant=False,
    split_conv_relu=False,
    fuse_bn=False,
    input_name="x",
    convert_backend=1,
):
    tracedmodule_to_caffe(
        trace_module,
        prototxt=tmp_file + ".txt",
        caffemodel=tmp_file + ".caffemodel",
        input_data_type=input_data_type,
        input_scales=input_scales,
        input_zero_points=input_zero_points,
        require_quantize=require_quantize,
        param_fake_quant=param_fake_quant,
        split_conv_relu=split_conv_relu,
        fuse_bn=fuse_bn,
        convert_backend=convert_backend,
    )
    caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST)
    for i in caffe_net.blobs.keys():
        if isinstance(input_name, list):
            for idx, name in enumerate(input_name):
                if name.strip() == i.strip():
                    caffe_net.blobs[i].data[...] = inputs[idx]
                    break
        else:
            if input_name in i:
                caffe_net.blobs[i].data[...] = inputs
                break
    out_dict = caffe_net.forward()
    if isinstance(mge_results, dict):
        assert len(list(out_dict.keys())) == len(list(mge_results.keys()))
        for name in mge_results.keys():
            assert name._name in out_dict.keys()
            assert out_dict[name._name].shape == mge_results[name].shape
            np.testing.assert_allclose(
                out_dict[name._name], mge_results[name], atol=max_err
            )
    else:
        caffe_results = list(out_dict.values())[0]
        assert caffe_results.shape == mge_results.shape
        np.testing.assert_allclose(
            caffe_results, mge_results, rtol=max_err, atol=max_err
        )


@pytest.mark.parametrize("mode", ["normal", "group", "transpose"])
def test_conv2d(mode):
    net = ConvOpr(mode)
    tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
    _test_convert_result(net.data, tm_module, mge_result, max_error)


def test_convrelu():
    net = ConvRelu2dOpr()
    traced_module, tm_result = get_traced_module(net, mge.tensor(net.data))
    _test_convert_result(net.data, traced_module, tm_result, max_error)


def test_convbn():
    net = ConvBn2dOpr()
    net.eval()
    traced_module, tm_result = get_traced_module(net, mge.tensor(net.data))
    _test_convert_result(net.data, traced_module, tm_result, max_error)


def test_convbnrelu():
    net = ConvBnRelu2dOpr()
    net.eval()
    traced_module, tm_result = get_traced_module(net, mge.tensor(net.data))
    _test_convert_result(net.data, traced_module, tm_result, max_error)


def test_linear():
    net = LinearOpr()
    tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
    _test_convert_result(net.data, tm_module, mge_result, max_error)


def test_flatten_linear():
    net = LinearOpr("flatten")
    tm_module, mge_result = get_traced_module(net, mge.tensor(net.data1))
    _test_convert_result(net.data1, tm_module, mge_result, max_error, convert_backend=4)


def test_linear_bn():
    net = LinearBnOpr()
    for _ in range(10):
        net(mge.tensor(net.data)).numpy()
    net.eval()
    tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
    _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True)


@pytest.mark.parametrize("mode", [True, False])
def test_matmul_bn(mode):
    net = MatrixMulBnOpr(mode)
    for _ in range(10):
        net(mge.tensor(net.data)).numpy()
    net.eval()
    tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
    _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True)


def test_squeeze():
    net = SqueezeOpr()
    tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
    _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a")


@pytest.mark.parametrize("mode", ["max", "avg"])
def test_pooling(mode):
    if megengine.__version__ > "0.6.0" and mode == "avg":
        return
    net = PoolOpr(mode)
    tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
    _test_convert_result(net.data, tm_module, mge_result, max_error)


@pytest.mark.parametrize("mode", ["bn1d", "bn2d"])
def test_batchnorm(mode):
    net = BnOpr(mode)
    net.eval()
    data = net.data1 if mode == "bn1d" else net.data2
    tm_module, mge_result = get_traced_module(net, mge.tensor(data))
    _test_convert_result(data, tm_module, mge_result, max_error)


def test_subtensor():
    net = SubtensorOpr()
    tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
    _test_convert_result(net.data, tm_module, mge_result, max_error)


def test_transpose():
    net = TransposeOpr()
    tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
    _test_convert_result(net.data, tm_module, mge_result, max_error)


def test_concat():
    net = FConcatOpr()
    data = np.random.random((1, 2, 4, 5)).astype(np.float32)
    list_data = [
mge.tensor(data)
megengine.tensor
= inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_convrelu(): net = ConvRelu2dOpr() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbn(): net = ConvBn2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbnrelu(): net = ConvBnRelu2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_linear(): net = LinearOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten_linear(): net = LinearOpr("flatten") tm_module, mge_result = get_traced_module(net, mge.tensor(net.data1)) _test_convert_result(net.data1, tm_module, mge_result, max_error, convert_backend=4) def test_linear_bn(): net = LinearBnOpr() for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) @pytest.mark.parametrize("mode", [True, False]) def test_matmul_bn(mode): net = MatrixMulBnOpr(mode) for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) def test_squeeze(): net = SqueezeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize("mode", ["max", "avg"]) def test_pooling(mode): if megengine.__version__ > "0.6.0" and mode == "avg": return net = PoolOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize("mode", ["bn1d", "bn2d"]) def test_batchnorm(mode): net = BnOpr(mode) net.eval() data = net.data1 if mode == "bn1d" else net.data2 tm_module, mge_result = get_traced_module(net, mge.tensor(data)) _test_convert_result(data, tm_module, mge_result, max_error) def test_subtensor(): net = SubtensorOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_transpose(): net = TransposeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_concat(): net = FConcatOpr() data = np.random.random((1, 2, 4, 5)).astype(np.float32) list_data = [mge.tensor(data),
mge.tensor(data)
megengine.tensor
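This record and the one before it supply the two mge.tensor(data) elements that test_concat builds its input list from; reassembled from the surrounding prompts, the construction reads as follows (a sketch):

# both completions fill elements of the concat test's input list
def test_concat():
    net = FConcatOpr()
    data = np.random.random((1, 2, 4, 5)).astype(np.float32)
    list_data = [mge.tensor(data), mge.tensor(data)]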
= inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_convrelu(): net = ConvRelu2dOpr() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbn(): net = ConvBn2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbnrelu(): net = ConvBnRelu2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_linear(): net = LinearOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten_linear(): net = LinearOpr("flatten") tm_module, mge_result = get_traced_module(net, mge.tensor(net.data1)) _test_convert_result(net.data1, tm_module, mge_result, max_error, convert_backend=4) def test_linear_bn(): net = LinearBnOpr() for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) @pytest.mark.parametrize("mode", [True, False]) def test_matmul_bn(mode): net = MatrixMulBnOpr(mode) for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) def test_squeeze(): net = SqueezeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize("mode", ["max", "avg"]) def test_pooling(mode): if megengine.__version__ > "0.6.0" and mode == "avg": return net = PoolOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize("mode", ["bn1d", "bn2d"]) def test_batchnorm(mode): net = BnOpr(mode) net.eval() data = net.data1 if mode == "bn1d" else net.data2 tm_module, mge_result = get_traced_module(net, mge.tensor(data)) _test_convert_result(data, tm_module, mge_result, max_error) def test_subtensor(): net = SubtensorOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_transpose(): net = TransposeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_concat(): net = FConcatOpr() data = np.random.random((1, 2, 4, 5)).astype(np.float32) list_data = [mge.tensor(data), mge.tensor(data)] 
tm_module, mge_result = get_traced_module(net, list_data) _test_convert_result( [data, data], tm_module, mge_result, max_error, input_name=["inps_0", "inps_1"] ) def test_reshape(): net = ReshapeOpr() tm_module, mge_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
tm_module, mge_result = get_traced_module(net, list_data) _test_convert_result( [data, data], tm_module, mge_result, max_error, input_name=["inps_0", "inps_1"] ) def test_reshape(): net = ReshapeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "max", "pow"] ) def test_elemwise(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
tm_module, mge_result = get_traced_module(net, list_data) _test_convert_result( [data, data], tm_module, mge_result, max_error, input_name=["inps_0", "inps_1"] ) def test_reshape(): net = ReshapeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "max", "pow"] ) def test_elemwise(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "pow"] ) def test_elemwise_broadcast(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module( net, mge.tensor(np.array([2.0]).astype("float32")) ) _test_convert_result( np.array([2.0]), tm_module, mge_result, max_error, input_name="a" ) @pytest.mark.parametrize( "mode", [ "relu", "sigmoid", "tanh", "leaky_relu", "softmax", "silu", "relu6", "hsigmoid", "hswish", ], ) def test_active(mode): if megengine.__version__ < "1.5.0" and mode == "silu": return net = ActiveOpr(mode) tm_module, mge_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
tm_module, mge_result = get_traced_module(net, list_data) _test_convert_result( [data, data], tm_module, mge_result, max_error, input_name=["inps_0", "inps_1"] ) def test_reshape(): net = ReshapeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "max", "pow"] ) def test_elemwise(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "pow"] ) def test_elemwise_broadcast(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module( net, mge.tensor(np.array([2.0]).astype("float32")) ) _test_convert_result( np.array([2.0]), tm_module, mge_result, max_error, input_name="a" ) @pytest.mark.parametrize( "mode", [ "relu", "sigmoid", "tanh", "leaky_relu", "softmax", "silu", "relu6", "hsigmoid", "hswish", ], ) def test_active(mode): if megengine.__version__ < "1.5.0" and mode == "silu": return net = ActiveOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize("mode", ["relu",]) def test_active_inplace(mode): net = ActiveOpr(mode) tm_module, mge_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
tm_module, mge_result = get_traced_module(net, list_data) _test_convert_result( [data, data], tm_module, mge_result, max_error, input_name=["inps_0", "inps_1"] ) def test_reshape(): net = ReshapeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "max", "pow"] ) def test_elemwise(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "pow"] ) def test_elemwise_broadcast(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module( net, mge.tensor(np.array([2.0]).astype("float32")) ) _test_convert_result( np.array([2.0]), tm_module, mge_result, max_error, input_name="a" ) @pytest.mark.parametrize( "mode", [ "relu", "sigmoid", "tanh", "leaky_relu", "softmax", "silu", "relu6", "hsigmoid", "hswish", ], ) def test_active(mode): if megengine.__version__ < "1.5.0" and mode == "silu": return net = ActiveOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize("mode", ["relu",]) def test_active_inplace(mode): net = ActiveOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, convert_backend=4) @pytest.mark.parametrize("mode", ["max", "sum", "mean"]) def test_reduce(mode): net = ReduceOpr(mode) tm_module, mge_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
tm_module, mge_result = get_traced_module(net, list_data) _test_convert_result( [data, data], tm_module, mge_result, max_error, input_name=["inps_0", "inps_1"] ) def test_reshape(): net = ReshapeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "max", "pow"] ) def test_elemwise(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "pow"] ) def test_elemwise_broadcast(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module( net, mge.tensor(np.array([2.0]).astype("float32")) ) _test_convert_result( np.array([2.0]), tm_module, mge_result, max_error, input_name="a" ) @pytest.mark.parametrize( "mode", [ "relu", "sigmoid", "tanh", "leaky_relu", "softmax", "silu", "relu6", "hsigmoid", "hswish", ], ) def test_active(mode): if megengine.__version__ < "1.5.0" and mode == "silu": return net = ActiveOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize("mode", ["relu",]) def test_active_inplace(mode): net = ActiveOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, convert_backend=4) @pytest.mark.parametrize("mode", ["max", "sum", "mean"]) def test_reduce(mode): net = ReduceOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") def test_broadcast(): net = BroadcastOpr() tm_module, mge_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
tm_module, mge_result = get_traced_module(net, list_data) _test_convert_result( [data, data], tm_module, mge_result, max_error, input_name=["inps_0", "inps_1"] ) def test_reshape(): net = ReshapeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "max", "pow"] ) def test_elemwise(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "pow"] ) def test_elemwise_broadcast(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module( net, mge.tensor(np.array([2.0]).astype("float32")) ) _test_convert_result( np.array([2.0]), tm_module, mge_result, max_error, input_name="a" ) @pytest.mark.parametrize( "mode", [ "relu", "sigmoid", "tanh", "leaky_relu", "softmax", "silu", "relu6", "hsigmoid", "hswish", ], ) def test_active(mode): if megengine.__version__ < "1.5.0" and mode == "silu": return net = ActiveOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize("mode", ["relu",]) def test_active_inplace(mode): net = ActiveOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, convert_backend=4) @pytest.mark.parametrize("mode", ["max", "sum", "mean"]) def test_reduce(mode): net = ReduceOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") def test_broadcast(): net = BroadcastOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_repeat(): net = RepeatOpr() tm_module, mge_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
= inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_convrelu(): net = ConvRelu2dOpr() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbn(): net = ConvBn2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbnrelu(): net = ConvBnRelu2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_linear(): net = LinearOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten_linear(): net = LinearOpr("flatten") tm_module, mge_result = get_traced_module(net, mge.tensor(net.data1)) _test_convert_result(net.data1, tm_module, mge_result, max_error, convert_backend=4) def test_linear_bn(): net = LinearBnOpr() for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) @pytest.mark.parametrize("mode", [True, False]) def test_matmul_bn(mode): net = MatrixMulBnOpr(mode) for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) def test_squeeze(): net = SqueezeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize("mode", ["max", "avg"]) def test_pooling(mode): if megengine.__version__ > "0.6.0" and mode == "avg": return net = PoolOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize("mode", ["bn1d", "bn2d"]) def test_batchnorm(mode): net = BnOpr(mode) net.eval() data = net.data1 if mode == "bn1d" else net.data2 tm_module, mge_result = get_traced_module(net, mge.tensor(data)) _test_convert_result(data, tm_module, mge_result, max_error) def test_subtensor(): net = SubtensorOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_transpose(): net = TransposeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_concat(): net = FConcatOpr() data = np.random.random((1, 2, 4, 5)).astype(np.float32) list_data = [mge.tensor(data), mge.tensor(data)] 
tm_module, mge_result = get_traced_module(net, list_data) _test_convert_result( [data, data], tm_module, mge_result, max_error, input_name=["inps_0", "inps_1"] ) def test_reshape(): net = ReshapeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "max", "pow"] ) def test_elemwise(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "pow"] ) def test_elemwise_broadcast(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module( net, mge.tensor(np.array([2.0]).astype("float32")) ) _test_convert_result( np.array([2.0]), tm_module, mge_result, max_error, input_name="a" ) @pytest.mark.parametrize( "mode", [ "relu", "sigmoid", "tanh", "leaky_relu", "softmax", "silu", "relu6", "hsigmoid", "hswish", ], ) def test_active(mode): if megengine.__version__ < "1.5.0" and mode == "silu": return net = ActiveOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize("mode", ["relu",]) def test_active_inplace(mode): net = ActiveOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, convert_backend=4) @pytest.mark.parametrize("mode", ["max", "sum", "mean"]) def test_reduce(mode): net = ReduceOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") def test_broadcast(): net = BroadcastOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_repeat(): net = RepeatOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten(): net = FlattenOpr() tm_module, mge_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
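get_traced_module is a local helper from .tm_utils and is not shown in these prompts; a rough equivalent of the trace-then-convert flow it feeds, using MegEngine's public trace_module API together with the tracedmodule_to_caffe keyword arguments visible above, might look like the sketch below. TinyNet, the input shape, and the output file names are placeholders.

import numpy as np
import megengine as mge
import megengine.module as M
from megengine.traced_module import trace_module
from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe

# A toy module standing in for the ConvOpr/FlattenOpr-style test nets.
class TinyNet(M.Module):
    def __init__(self):
        super().__init__()
        self.conv = M.Conv2d(3, 8, 3, padding=1)

    def forward(self, x):
        return self.conv(x)

net = TinyNet()
net.eval()
data = mge.tensor(np.random.random((1, 3, 16, 16)).astype(np.float32))

# Trace the module into a TracedModule, then dump prototxt + caffemodel,
# mirroring what _test_convert_result does before reloading with caffe.Net.
tm = trace_module(net, data)
tracedmodule_to_caffe(tm, prototxt="tiny.txt", caffemodel="tiny.caffemodel")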
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. from test.utils import ( ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu, ) import caffe # pylint: disable=import-error import megengine as mge import megengine.hub import numpy as np import pytest from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe from .tm_utils import get_traced_module max_error = 1e-6 tmp_file = "test_module" def _test_convert_result( inputs, trace_module, mge_results, max_err, input_data_type=None, input_scales=None, input_zero_points=None, require_quantize=False, param_fake_quant=False, split_conv_relu=False, fuse_bn=False, input_name="x", convert_backend=1, ): tracedmodule_to_caffe( trace_module, prototxt=tmp_file + ".txt", caffemodel=tmp_file + ".caffemodel", input_data_type=input_data_type, input_scales=input_scales, input_zero_points=input_zero_points, require_quantize=require_quantize, param_fake_quant=param_fake_quant, split_conv_relu=split_conv_relu, fuse_bn=fuse_bn, convert_backend=convert_backend, ) caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST) for i in caffe_net.blobs.keys(): if isinstance(input_name, list): for idx, name in enumerate(input_name): if name.strip() == i.strip(): caffe_net.blobs[i].data[...] = inputs[idx] break else: if input_name in i: caffe_net.blobs[i].data[...] 
= inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_convrelu(): net = ConvRelu2dOpr() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbn(): net = ConvBn2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbnrelu(): net = ConvBnRelu2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_linear(): net = LinearOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten_linear(): net = LinearOpr("flatten") tm_module, mge_result = get_traced_module(net, mge.tensor(net.data1)) _test_convert_result(net.data1, tm_module, mge_result, max_error, convert_backend=4) def test_linear_bn(): net = LinearBnOpr() for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) @pytest.mark.parametrize("mode", [True, False]) def test_matmul_bn(mode): net = MatrixMulBnOpr(mode) for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) def test_squeeze(): net = SqueezeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize("mode", ["max", "avg"]) def test_pooling(mode): if megengine.__version__ > "0.6.0" and mode == "avg": return net = PoolOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize("mode", ["bn1d", "bn2d"]) def test_batchnorm(mode): net = BnOpr(mode) net.eval() data = net.data1 if mode == "bn1d" else net.data2 tm_module, mge_result = get_traced_module(net, mge.tensor(data)) _test_convert_result(data, tm_module, mge_result, max_error) def test_subtensor(): net = SubtensorOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_transpose(): net = TransposeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_concat(): net = FConcatOpr() data = np.random.random((1, 2, 4, 5)).astype(np.float32) list_data = [mge.tensor(data), mge.tensor(data)] 
tm_module, mge_result = get_traced_module(net, list_data) _test_convert_result( [data, data], tm_module, mge_result, max_error, input_name=["inps_0", "inps_1"] ) def test_reshape(): net = ReshapeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "max", "pow"] ) def test_elemwise(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "pow"] ) def test_elemwise_broadcast(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module( net, mge.tensor(np.array([2.0]).astype("float32")) ) _test_convert_result( np.array([2.0]), tm_module, mge_result, max_error, input_name="a" ) @pytest.mark.parametrize( "mode", [ "relu", "sigmoid", "tanh", "leaky_relu", "softmax", "silu", "relu6", "hsigmoid", "hswish", ], ) def test_active(mode): if megengine.__version__ < "1.5.0" and mode == "silu": return net = ActiveOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize("mode", ["relu",]) def test_active_inplace(mode): net = ActiveOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, convert_backend=4) @pytest.mark.parametrize("mode", ["max", "sum", "mean"]) def test_reduce(mode): net = ReduceOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") def test_broadcast(): net = BroadcastOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_repeat(): net = RepeatOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten(): net = FlattenOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="inps") def test_dropout(): net = DropoutOpr() tm_module, mge_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. from test.utils import ( ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu, ) import caffe # pylint: disable=import-error import megengine as mge import megengine.hub import numpy as np import pytest from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe from .tm_utils import get_traced_module max_error = 1e-6 tmp_file = "test_module" def _test_convert_result( inputs, trace_module, mge_results, max_err, input_data_type=None, input_scales=None, input_zero_points=None, require_quantize=False, param_fake_quant=False, split_conv_relu=False, fuse_bn=False, input_name="x", convert_backend=1, ): tracedmodule_to_caffe( trace_module, prototxt=tmp_file + ".txt", caffemodel=tmp_file + ".caffemodel", input_data_type=input_data_type, input_scales=input_scales, input_zero_points=input_zero_points, require_quantize=require_quantize, param_fake_quant=param_fake_quant, split_conv_relu=split_conv_relu, fuse_bn=fuse_bn, convert_backend=convert_backend, ) caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST) for i in caffe_net.blobs.keys(): if isinstance(input_name, list): for idx, name in enumerate(input_name): if name.strip() == i.strip(): caffe_net.blobs[i].data[...] = inputs[idx] break else: if input_name in i: caffe_net.blobs[i].data[...] 
= inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_convrelu(): net = ConvRelu2dOpr() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbn(): net = ConvBn2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbnrelu(): net = ConvBnRelu2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_linear(): net = LinearOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten_linear(): net = LinearOpr("flatten") tm_module, mge_result = get_traced_module(net, mge.tensor(net.data1)) _test_convert_result(net.data1, tm_module, mge_result, max_error, convert_backend=4) def test_linear_bn(): net = LinearBnOpr() for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) @pytest.mark.parametrize("mode", [True, False]) def test_matmul_bn(mode): net = MatrixMulBnOpr(mode) for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) def test_squeeze(): net = SqueezeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize("mode", ["max", "avg"]) def test_pooling(mode): if megengine.__version__ > "0.6.0" and mode == "avg": return net = PoolOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize("mode", ["bn1d", "bn2d"]) def test_batchnorm(mode): net = BnOpr(mode) net.eval() data = net.data1 if mode == "bn1d" else net.data2 tm_module, mge_result = get_traced_module(net, mge.tensor(data)) _test_convert_result(data, tm_module, mge_result, max_error) def test_subtensor(): net = SubtensorOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_transpose(): net = TransposeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_concat(): net = FConcatOpr() data = np.random.random((1, 2, 4, 5)).astype(np.float32) list_data = [mge.tensor(data), mge.tensor(data)] 
tm_module, mge_result = get_traced_module(net, list_data) _test_convert_result( [data, data], tm_module, mge_result, max_error, input_name=["inps_0", "inps_1"] ) def test_reshape(): net = ReshapeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "max", "pow"] ) def test_elemwise(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "pow"] ) def test_elemwise_broadcast(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module( net, mge.tensor(np.array([2.0]).astype("float32")) ) _test_convert_result( np.array([2.0]), tm_module, mge_result, max_error, input_name="a" ) @pytest.mark.parametrize( "mode", [ "relu", "sigmoid", "tanh", "leaky_relu", "softmax", "silu", "relu6", "hsigmoid", "hswish", ], ) def test_active(mode): if megengine.__version__ < "1.5.0" and mode == "silu": return net = ActiveOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize("mode", ["relu",]) def test_active_inplace(mode): net = ActiveOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, convert_backend=4) @pytest.mark.parametrize("mode", ["max", "sum", "mean"]) def test_reduce(mode): net = ReduceOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") def test_broadcast(): net = BroadcastOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_repeat(): net = RepeatOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten(): net = FlattenOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="inps") def test_dropout(): net = DropoutOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="inps") def test_adapetive_avg_pool(): net = AdaptiveAvgPool2dOpr() tm_module, mge_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. from test.utils import ( ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu, ) import caffe # pylint: disable=import-error import megengine as mge import megengine.hub import numpy as np import pytest from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe from .tm_utils import get_traced_module max_error = 1e-6 tmp_file = "test_module" def _test_convert_result( inputs, trace_module, mge_results, max_err, input_data_type=None, input_scales=None, input_zero_points=None, require_quantize=False, param_fake_quant=False, split_conv_relu=False, fuse_bn=False, input_name="x", convert_backend=1, ): tracedmodule_to_caffe( trace_module, prototxt=tmp_file + ".txt", caffemodel=tmp_file + ".caffemodel", input_data_type=input_data_type, input_scales=input_scales, input_zero_points=input_zero_points, require_quantize=require_quantize, param_fake_quant=param_fake_quant, split_conv_relu=split_conv_relu, fuse_bn=fuse_bn, convert_backend=convert_backend, ) caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST) for i in caffe_net.blobs.keys(): if isinstance(input_name, list): for idx, name in enumerate(input_name): if name.strip() == i.strip(): caffe_net.blobs[i].data[...] = inputs[idx] break else: if input_name in i: caffe_net.blobs[i].data[...] 
= inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_convrelu(): net = ConvRelu2dOpr() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbn(): net = ConvBn2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbnrelu(): net = ConvBnRelu2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_linear(): net = LinearOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten_linear(): net = LinearOpr("flatten") tm_module, mge_result = get_traced_module(net, mge.tensor(net.data1)) _test_convert_result(net.data1, tm_module, mge_result, max_error, convert_backend=4) def test_linear_bn(): net = LinearBnOpr() for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) @pytest.mark.parametrize("mode", [True, False]) def test_matmul_bn(mode): net = MatrixMulBnOpr(mode) for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) def test_squeeze(): net = SqueezeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize("mode", ["max", "avg"]) def test_pooling(mode): if megengine.__version__ > "0.6.0" and mode == "avg": return net = PoolOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize("mode", ["bn1d", "bn2d"]) def test_batchnorm(mode): net = BnOpr(mode) net.eval() data = net.data1 if mode == "bn1d" else net.data2 tm_module, mge_result = get_traced_module(net, mge.tensor(data)) _test_convert_result(data, tm_module, mge_result, max_error) def test_subtensor(): net = SubtensorOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_transpose(): net = TransposeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_concat(): net = FConcatOpr() data = np.random.random((1, 2, 4, 5)).astype(np.float32) list_data = [mge.tensor(data), mge.tensor(data)] 
tm_module, mge_result = get_traced_module(net, list_data) _test_convert_result( [data, data], tm_module, mge_result, max_error, input_name=["inps_0", "inps_1"] ) def test_reshape(): net = ReshapeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "max", "pow"] ) def test_elemwise(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "pow"] ) def test_elemwise_broadcast(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module( net, mge.tensor(np.array([2.0]).astype("float32")) ) _test_convert_result( np.array([2.0]), tm_module, mge_result, max_error, input_name="a" ) @pytest.mark.parametrize( "mode", [ "relu", "sigmoid", "tanh", "leaky_relu", "softmax", "silu", "relu6", "hsigmoid", "hswish", ], ) def test_active(mode): if megengine.__version__ < "1.5.0" and mode == "silu": return net = ActiveOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize("mode", ["relu",]) def test_active_inplace(mode): net = ActiveOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, convert_backend=4) @pytest.mark.parametrize("mode", ["max", "sum", "mean"]) def test_reduce(mode): net = ReduceOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") def test_broadcast(): net = BroadcastOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_repeat(): net = RepeatOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten(): net = FlattenOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="inps") def test_dropout(): net = DropoutOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="inps") def test_adapetive_avg_pool(): net = AdaptiveAvgPool2dOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="inps") @pytest.mark.parametrize( "model", [ "shufflenet_v2_x0_5", "shufflenet_v2_x1_0", "resnet18", "resnet50", "resnet101", "resnext50_32x4d", ], ) def test_model(model): data = ( np.random.randint(0, 255, 3 * 224 * 224) .reshape((1, 3, 224, 224)) .astype(np.float32) ) if megengine.__version__ < "1.1.0": commit_id = "dc2f2cfb228a135747d083517b98aea56e7aab92" else: commit_id = None net = megengine.hub.load( "megengine/models", model, use_cache=False, commit=commit_id, pretrained=True ) net.eval() tm_module, mge_result = get_traced_module(net,
mge.tensor(data)
megengine.tensor
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. from test.utils import ( ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu, ) import caffe # pylint: disable=import-error import megengine as mge import megengine.hub import numpy as np import pytest from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe from .tm_utils import get_traced_module max_error = 1e-6 tmp_file = "test_module" def _test_convert_result( inputs, trace_module, mge_results, max_err, input_data_type=None, input_scales=None, input_zero_points=None, require_quantize=False, param_fake_quant=False, split_conv_relu=False, fuse_bn=False, input_name="x", convert_backend=1, ): tracedmodule_to_caffe( trace_module, prototxt=tmp_file + ".txt", caffemodel=tmp_file + ".caffemodel", input_data_type=input_data_type, input_scales=input_scales, input_zero_points=input_zero_points, require_quantize=require_quantize, param_fake_quant=param_fake_quant, split_conv_relu=split_conv_relu, fuse_bn=fuse_bn, convert_backend=convert_backend, ) caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST) for i in caffe_net.blobs.keys(): if isinstance(input_name, list): for idx, name in enumerate(input_name): if name.strip() == i.strip(): caffe_net.blobs[i].data[...] = inputs[idx] break else: if input_name in i: caffe_net.blobs[i].data[...] 
= inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_convrelu(): net = ConvRelu2dOpr() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbn(): net = ConvBn2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbnrelu(): net = ConvBnRelu2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_linear(): net = LinearOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten_linear(): net = LinearOpr("flatten") tm_module, mge_result = get_traced_module(net, mge.tensor(net.data1)) _test_convert_result(net.data1, tm_module, mge_result, max_error, convert_backend=4) def test_linear_bn(): net = LinearBnOpr() for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) @pytest.mark.parametrize("mode", [True, False]) def test_matmul_bn(mode): net = MatrixMulBnOpr(mode) for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) def test_squeeze(): net = SqueezeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize("mode", ["max", "avg"]) def test_pooling(mode): if megengine.__version__ > "0.6.0" and mode == "avg": return net = PoolOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize("mode", ["bn1d", "bn2d"]) def test_batchnorm(mode): net = BnOpr(mode) net.eval() data = net.data1 if mode == "bn1d" else net.data2 tm_module, mge_result = get_traced_module(net, mge.tensor(data)) _test_convert_result(data, tm_module, mge_result, max_error) def test_subtensor(): net = SubtensorOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_transpose(): net = TransposeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_concat(): net = FConcatOpr() data = np.random.random((1, 2, 4, 5)).astype(np.float32) list_data = [mge.tensor(data), mge.tensor(data)] 
tm_module, mge_result = get_traced_module(net, list_data) _test_convert_result( [data, data], tm_module, mge_result, max_error, input_name=["inps_0", "inps_1"] ) def test_reshape(): net = ReshapeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "max", "pow"] ) def test_elemwise(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "pow"] ) def test_elemwise_broadcast(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module( net, mge.tensor(np.array([2.0]).astype("float32")) ) _test_convert_result( np.array([2.0]), tm_module, mge_result, max_error, input_name="a" ) @pytest.mark.parametrize( "mode", [ "relu", "sigmoid", "tanh", "leaky_relu", "softmax", "silu", "relu6", "hsigmoid", "hswish", ], ) def test_active(mode): if megengine.__version__ < "1.5.0" and mode == "silu": return net = ActiveOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize("mode", ["relu",]) def test_active_inplace(mode): net = ActiveOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, convert_backend=4) @pytest.mark.parametrize("mode", ["max", "sum", "mean"]) def test_reduce(mode): net = ReduceOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") def test_broadcast(): net = BroadcastOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_repeat(): net = RepeatOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten(): net = FlattenOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="inps") def test_dropout(): net = DropoutOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="inps") def test_adapetive_avg_pool(): net = AdaptiveAvgPool2dOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="inps") @pytest.mark.parametrize( "model", [ "shufflenet_v2_x0_5", "shufflenet_v2_x1_0", "resnet18", "resnet50", "resnet101", "resnext50_32x4d", ], ) def test_model(model): data = ( np.random.randint(0, 255, 3 * 224 * 224) .reshape((1, 3, 224, 224)) .astype(np.float32) ) if megengine.__version__ < "1.1.0": commit_id = "dc2f2cfb228a135747d083517b98aea56e7aab92" else: commit_id = None net = megengine.hub.load( "megengine/models", model, use_cache=False, commit=commit_id, pretrained=True ) net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(data)) _test_convert_result(data, tm_module, mge_result, 1e-2) def test_xornet(): if megengine.__version__ < "1.1.0": return net = XORNet() net.eval() tm_module, mge_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
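test_model in the prompt above pulls its pretrained backbones from the MegEngine model zoo. A condensed sketch of just that loading step, reusing the megengine.hub.load arguments that appear in the prompt; the fixed model name is an assumption for illustration.

import numpy as np
import megengine
import megengine.hub

# Fetch a pretrained classifier from the official model zoo, as test_model does.
net = megengine.hub.load(
    "megengine/models", "resnet18", use_cache=False, pretrained=True
)
net.eval()

data = (
    np.random.randint(0, 255, 3 * 224 * 224)
    .reshape((1, 3, 224, 224))
    .astype(np.float32)
)
pred = net(megengine.tensor(data))
print(pred.shape)  # logits over the ImageNet classes for this checkpoint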
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. from test.utils import ( ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu, ) import caffe # pylint: disable=import-error import megengine as mge import megengine.hub import numpy as np import pytest from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe from .tm_utils import get_traced_module max_error = 1e-6 tmp_file = "test_module" def _test_convert_result( inputs, trace_module, mge_results, max_err, input_data_type=None, input_scales=None, input_zero_points=None, require_quantize=False, param_fake_quant=False, split_conv_relu=False, fuse_bn=False, input_name="x", convert_backend=1, ): tracedmodule_to_caffe( trace_module, prototxt=tmp_file + ".txt", caffemodel=tmp_file + ".caffemodel", input_data_type=input_data_type, input_scales=input_scales, input_zero_points=input_zero_points, require_quantize=require_quantize, param_fake_quant=param_fake_quant, split_conv_relu=split_conv_relu, fuse_bn=fuse_bn, convert_backend=convert_backend, ) caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST) for i in caffe_net.blobs.keys(): if isinstance(input_name, list): for idx, name in enumerate(input_name): if name.strip() == i.strip(): caffe_net.blobs[i].data[...] = inputs[idx] break else: if input_name in i: caffe_net.blobs[i].data[...] 
= inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_convrelu(): net = ConvRelu2dOpr() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbn(): net = ConvBn2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbnrelu(): net = ConvBnRelu2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_linear(): net = LinearOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten_linear(): net = LinearOpr("flatten") tm_module, mge_result = get_traced_module(net, mge.tensor(net.data1)) _test_convert_result(net.data1, tm_module, mge_result, max_error, convert_backend=4) def test_linear_bn(): net = LinearBnOpr() for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) @pytest.mark.parametrize("mode", [True, False]) def test_matmul_bn(mode): net = MatrixMulBnOpr(mode) for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) def test_squeeze(): net = SqueezeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize("mode", ["max", "avg"]) def test_pooling(mode): if megengine.__version__ > "0.6.0" and mode == "avg": return net = PoolOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize("mode", ["bn1d", "bn2d"]) def test_batchnorm(mode): net = BnOpr(mode) net.eval() data = net.data1 if mode == "bn1d" else net.data2 tm_module, mge_result = get_traced_module(net, mge.tensor(data)) _test_convert_result(data, tm_module, mge_result, max_error) def test_subtensor(): net = SubtensorOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_transpose(): net = TransposeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_concat(): net = FConcatOpr() data = np.random.random((1, 2, 4, 5)).astype(np.float32) list_data = [mge.tensor(data), mge.tensor(data)] 
tm_module, mge_result = get_traced_module(net, list_data) _test_convert_result( [data, data], tm_module, mge_result, max_error, input_name=["inps_0", "inps_1"] ) def test_reshape(): net = ReshapeOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "max", "pow"] ) def test_elemwise(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") @pytest.mark.parametrize( "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "pow"] ) def test_elemwise_broadcast(mode): net = ElemwiseOpr(mode) tm_module, mge_result = get_traced_module( net, mge.tensor(np.array([2.0]).astype("float32")) ) _test_convert_result( np.array([2.0]), tm_module, mge_result, max_error, input_name="a" ) @pytest.mark.parametrize( "mode", [ "relu", "sigmoid", "tanh", "leaky_relu", "softmax", "silu", "relu6", "hsigmoid", "hswish", ], ) def test_active(mode): if megengine.__version__ < "1.5.0" and mode == "silu": return net = ActiveOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) @pytest.mark.parametrize("mode", ["relu",]) def test_active_inplace(mode): net = ActiveOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, convert_backend=4) @pytest.mark.parametrize("mode", ["max", "sum", "mean"]) def test_reduce(mode): net = ReduceOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a") def test_broadcast(): net = BroadcastOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_repeat(): net = RepeatOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten(): net = FlattenOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="inps") def test_dropout(): net = DropoutOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="inps") def test_adapetive_avg_pool(): net = AdaptiveAvgPool2dOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error, input_name="inps") @pytest.mark.parametrize( "model", [ "shufflenet_v2_x0_5", "shufflenet_v2_x1_0", "resnet18", "resnet50", "resnet101", "resnext50_32x4d", ], ) def test_model(model): data = ( np.random.randint(0, 255, 3 * 224 * 224) .reshape((1, 3, 224, 224)) .astype(np.float32) ) if megengine.__version__ < "1.1.0": commit_id = "dc2f2cfb228a135747d083517b98aea56e7aab92" else: commit_id = None net = megengine.hub.load( "megengine/models", model, use_cache=False, commit=commit_id, pretrained=True ) net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(data)) _test_convert_result(data, tm_module, mge_result, 1e-2) def test_xornet(): if megengine.__version__ < "1.1.0": return net = XORNet() net.eval() tm_module, mge_result = get_traced_module(net, 
mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_leakyrelu_model(): if megengine.__version__ < "1.1.0": return net = XORNet_LeakyRelu() net.eval() tm_module, mge_result = get_traced_module(net,
mge.tensor(net.data)
megengine.tensor
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. from test.utils import ( ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu, ) import caffe # pylint: disable=import-error import megengine as mge import megengine.hub import numpy as np import pytest from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe from .tm_utils import get_traced_module max_error = 1e-6 tmp_file = "test_module" def _test_convert_result( inputs, trace_module, mge_results, max_err, input_data_type=None, input_scales=None, input_zero_points=None, require_quantize=False, param_fake_quant=False, split_conv_relu=False, fuse_bn=False, input_name="x", convert_backend=1, ): tracedmodule_to_caffe( trace_module, prototxt=tmp_file + ".txt", caffemodel=tmp_file + ".caffemodel", input_data_type=input_data_type, input_scales=input_scales, input_zero_points=input_zero_points, require_quantize=require_quantize, param_fake_quant=param_fake_quant, split_conv_relu=split_conv_relu, fuse_bn=fuse_bn, convert_backend=convert_backend, ) caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST) for i in caffe_net.blobs.keys(): if isinstance(input_name, list): for idx, name in enumerate(input_name): if name.strip() == i.strip(): caffe_net.blobs[i].data[...] = inputs[idx] break else: if input_name in i: caffe_net.blobs[i].data[...] 
= inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_convrelu(): net = ConvRelu2dOpr() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbn(): net = ConvBn2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbnrelu(): net = ConvBnRelu2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_linear(): net = LinearOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten_linear(): net = LinearOpr("flatten") tm_module, mge_result = get_traced_module(net, mge.tensor(net.data1)) _test_convert_result(net.data1, tm_module, mge_result, max_error, convert_backend=4) def test_linear_bn(): net = LinearBnOpr() for _ in range(10): net(
mge.tensor(net.data)
megengine.tensor
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. from test.utils import ( ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu, ) import caffe # pylint: disable=import-error import megengine as mge import megengine.hub import numpy as np import pytest from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe from .tm_utils import get_traced_module max_error = 1e-6 tmp_file = "test_module" def _test_convert_result( inputs, trace_module, mge_results, max_err, input_data_type=None, input_scales=None, input_zero_points=None, require_quantize=False, param_fake_quant=False, split_conv_relu=False, fuse_bn=False, input_name="x", convert_backend=1, ): tracedmodule_to_caffe( trace_module, prototxt=tmp_file + ".txt", caffemodel=tmp_file + ".caffemodel", input_data_type=input_data_type, input_scales=input_scales, input_zero_points=input_zero_points, require_quantize=require_quantize, param_fake_quant=param_fake_quant, split_conv_relu=split_conv_relu, fuse_bn=fuse_bn, convert_backend=convert_backend, ) caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST) for i in caffe_net.blobs.keys(): if isinstance(input_name, list): for idx, name in enumerate(input_name): if name.strip() == i.strip(): caffe_net.blobs[i].data[...] = inputs[idx] break else: if input_name in i: caffe_net.blobs[i].data[...] 
= inputs break out_dict = caffe_net.forward() if isinstance(mge_results, dict): assert len(list(out_dict.keys())) == len(list(mge_results.keys())) for name in mge_results.keys(): assert name._name in out_dict.keys() assert out_dict[name._name].shape == mge_results[name].shape np.testing.assert_allclose( out_dict[name._name], mge_results[name], atol=max_err ) else: caffe_results = list(out_dict.values())[0] assert caffe_results.shape == mge_results.shape np.testing.assert_allclose( caffe_results, mge_results, rtol=max_err, atol=max_err ) @pytest.mark.parametrize("mode", ["normal", "group", "transpose"]) def test_conv2d(mode): net = ConvOpr(mode) tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_convrelu(): net = ConvRelu2dOpr() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbn(): net = ConvBn2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_convbnrelu(): net = ConvBnRelu2dOpr() net.eval() traced_module, tm_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, traced_module, tm_result, max_error) def test_linear(): net = LinearOpr() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, max_error) def test_flatten_linear(): net = LinearOpr("flatten") tm_module, mge_result = get_traced_module(net, mge.tensor(net.data1)) _test_convert_result(net.data1, tm_module, mge_result, max_error, convert_backend=4) def test_linear_bn(): net = LinearBnOpr() for _ in range(10): net(mge.tensor(net.data)).numpy() net.eval() tm_module, mge_result = get_traced_module(net, mge.tensor(net.data)) _test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True) @pytest.mark.parametrize("mode", [True, False]) def test_matmul_bn(mode): net = MatrixMulBnOpr(mode) for _ in range(10): net(
mge.tensor(net.data)
megengine.tensor
import numpy as np import pytest import megengine.functional as F from megengine import tensor from megengine.test import assertTensorClose def test_onehot_low_dimension(): inp = tensor(np.arange(1, 4, dtype=np.int32)) out =
F.one_hot(inp)
megengine.functional.one_hot
import numpy as np import pytest import megengine.functional as F from megengine import tensor from megengine.test import assertTensorClose def test_onehot_low_dimension(): inp = tensor(np.arange(1, 4, dtype=np.int32)) out = F.one_hot(inp) assertTensorClose( out.numpy(), np.eye(4, dtype=np.int32)[np.arange(1, 4, dtype=np.int32)] ) def test_onehot_high_dimension(): arr = np.array( [[3, 2, 4, 4, 2, 4, 0, 4, 4, 1], [4, 1, 1, 3, 2, 2, 4, 2, 4, 3]], dtype=np.int32 ) inp =
tensor(arr)
megengine.tensor
import numpy as np import pytest import megengine.functional as F from megengine import tensor from megengine.test import assertTensorClose def test_onehot_low_dimension(): inp = tensor(np.arange(1, 4, dtype=np.int32)) out = F.one_hot(inp) assertTensorClose( out.numpy(), np.eye(4, dtype=np.int32)[np.arange(1, 4, dtype=np.int32)] ) def test_onehot_high_dimension(): arr = np.array( [[3, 2, 4, 4, 2, 4, 0, 4, 4, 1], [4, 1, 1, 3, 2, 2, 4, 2, 4, 3]], dtype=np.int32 ) inp = tensor(arr) out =
F.one_hot(inp, 10)
megengine.functional.one_hot
import megengine as mge import megengine.functional as F import numpy as np def bilinear_sampler(img, coords, mode="bilinear", mask=False): """Wrapper for grid_sample, uses pixel coordinates""" H, W = img.shape[-2:] img =
F.remap(img, coords, border_mode="constant")
megengine.functional.remap
import megengine as mge import megengine.functional as F import numpy as np def bilinear_sampler(img, coords, mode="bilinear", mask=False): """Wrapper for grid_sample, uses pixel coordinates""" H, W = img.shape[-2:] img = F.remap(img, coords, border_mode="constant") if mask: mask = ( (coords[:, :, :, 0:1] < 0) | (coords[:, :, :, 0:1] > W - 1) | (coords[:, :, :, 1:2] < 0) | (coords[:, :, :, 1:2] > H - 1) ) mask = F.logical_not(mask) return img, mask.astype("float32") return img def coords_grid(batch, ht, wd): x_grid, y_grid = np.meshgrid(np.arange(wd), np.arange(ht)) y_grid, x_grid = mge.tensor(y_grid, dtype="float32"), mge.tensor( x_grid, dtype="float32" ) coords =
F.stack([x_grid, y_grid], axis=0)
megengine.functional.stack
import megengine as mge import megengine.functional as F import numpy as np def bilinear_sampler(img, coords, mode="bilinear", mask=False): """Wrapper for grid_sample, uses pixel coordinates""" H, W = img.shape[-2:] img = F.remap(img, coords, border_mode="constant") if mask: mask = ( (coords[:, :, :, 0:1] < 0) | (coords[:, :, :, 0:1] > W - 1) | (coords[:, :, :, 1:2] < 0) | (coords[:, :, :, 1:2] > H - 1) ) mask =
F.logical_not(mask)
megengine.functional.logical_not
import megengine as mge import megengine.functional as F import numpy as np def bilinear_sampler(img, coords, mode="bilinear", mask=False): """Wrapper for grid_sample, uses pixel coordinates""" H, W = img.shape[-2:] img = F.remap(img, coords, border_mode="constant") if mask: mask = ( (coords[:, :, :, 0:1] < 0) | (coords[:, :, :, 0:1] > W - 1) | (coords[:, :, :, 1:2] < 0) | (coords[:, :, :, 1:2] > H - 1) ) mask = F.logical_not(mask) return img, mask.astype("float32") return img def coords_grid(batch, ht, wd): x_grid, y_grid = np.meshgrid(np.arange(wd), np.arange(ht)) y_grid, x_grid =
mge.tensor(y_grid, dtype="float32")
megengine.tensor
import megengine as mge import megengine.functional as F import numpy as np def bilinear_sampler(img, coords, mode="bilinear", mask=False): """Wrapper for grid_sample, uses pixel coordinates""" H, W = img.shape[-2:] img = F.remap(img, coords, border_mode="constant") if mask: mask = ( (coords[:, :, :, 0:1] < 0) | (coords[:, :, :, 0:1] > W - 1) | (coords[:, :, :, 1:2] < 0) | (coords[:, :, :, 1:2] > H - 1) ) mask = F.logical_not(mask) return img, mask.astype("float32") return img def coords_grid(batch, ht, wd): x_grid, y_grid = np.meshgrid(np.arange(wd), np.arange(ht)) y_grid, x_grid = mge.tensor(y_grid, dtype="float32"), mge.tensor( x_grid, dtype="float32" ) coords = F.stack([x_grid, y_grid], axis=0) coords = F.repeat(
F.expand_dims(coords, axis=0)
megengine.functional.expand_dims
import megengine as mge import megengine.functional as F import numpy as np def bilinear_sampler(img, coords, mode="bilinear", mask=False): """Wrapper for grid_sample, uses pixel coordinates""" H, W = img.shape[-2:] img = F.remap(img, coords, border_mode="constant") if mask: mask = ( (coords[:, :, :, 0:1] < 0) | (coords[:, :, :, 0:1] > W - 1) | (coords[:, :, :, 1:2] < 0) | (coords[:, :, :, 1:2] > H - 1) ) mask = F.logical_not(mask) return img, mask.astype("float32") return img def coords_grid(batch, ht, wd): x_grid, y_grid = np.meshgrid(np.arange(wd), np.arange(ht)) y_grid, x_grid = mge.tensor(y_grid, dtype="float32"), mge.tensor( x_grid, dtype="float32" ) coords = F.stack([x_grid, y_grid], axis=0) coords = F.repeat(F.expand_dims(coords, axis=0), batch, axis=0) return coords def manual_pad(x, pady, padx): if pady > 0: u =
F.repeat(x[:, :, 0:1, :], pady, axis=2)
megengine.functional.repeat
import megengine as mge import megengine.functional as F import numpy as np def bilinear_sampler(img, coords, mode="bilinear", mask=False): """Wrapper for grid_sample, uses pixel coordinates""" H, W = img.shape[-2:] img = F.remap(img, coords, border_mode="constant") if mask: mask = ( (coords[:, :, :, 0:1] < 0) | (coords[:, :, :, 0:1] > W - 1) | (coords[:, :, :, 1:2] < 0) | (coords[:, :, :, 1:2] > H - 1) ) mask = F.logical_not(mask) return img, mask.astype("float32") return img def coords_grid(batch, ht, wd): x_grid, y_grid = np.meshgrid(np.arange(wd), np.arange(ht)) y_grid, x_grid = mge.tensor(y_grid, dtype="float32"), mge.tensor( x_grid, dtype="float32" ) coords = F.stack([x_grid, y_grid], axis=0) coords = F.repeat(F.expand_dims(coords, axis=0), batch, axis=0) return coords def manual_pad(x, pady, padx): if pady > 0: u = F.repeat(x[:, :, 0:1, :], pady, axis=2) d =
F.repeat(x[:, :, -1:, :], pady, axis=2)
megengine.functional.repeat
import megengine as mge import megengine.functional as F import numpy as np def bilinear_sampler(img, coords, mode="bilinear", mask=False): """Wrapper for grid_sample, uses pixel coordinates""" H, W = img.shape[-2:] img = F.remap(img, coords, border_mode="constant") if mask: mask = ( (coords[:, :, :, 0:1] < 0) | (coords[:, :, :, 0:1] > W - 1) | (coords[:, :, :, 1:2] < 0) | (coords[:, :, :, 1:2] > H - 1) ) mask = F.logical_not(mask) return img, mask.astype("float32") return img def coords_grid(batch, ht, wd): x_grid, y_grid = np.meshgrid(np.arange(wd), np.arange(ht)) y_grid, x_grid = mge.tensor(y_grid, dtype="float32"), mge.tensor( x_grid, dtype="float32" ) coords = F.stack([x_grid, y_grid], axis=0) coords = F.repeat(F.expand_dims(coords, axis=0), batch, axis=0) return coords def manual_pad(x, pady, padx): if pady > 0: u = F.repeat(x[:, :, 0:1, :], pady, axis=2) d = F.repeat(x[:, :, -1:, :], pady, axis=2) x =
F.concat([u, x, d], axis=2)
megengine.functional.concat
import megengine as mge import megengine.functional as F import numpy as np def bilinear_sampler(img, coords, mode="bilinear", mask=False): """Wrapper for grid_sample, uses pixel coordinates""" H, W = img.shape[-2:] img = F.remap(img, coords, border_mode="constant") if mask: mask = ( (coords[:, :, :, 0:1] < 0) | (coords[:, :, :, 0:1] > W - 1) | (coords[:, :, :, 1:2] < 0) | (coords[:, :, :, 1:2] > H - 1) ) mask = F.logical_not(mask) return img, mask.astype("float32") return img def coords_grid(batch, ht, wd): x_grid, y_grid = np.meshgrid(np.arange(wd), np.arange(ht)) y_grid, x_grid = mge.tensor(y_grid, dtype="float32"), mge.tensor( x_grid, dtype="float32" ) coords = F.stack([x_grid, y_grid], axis=0) coords = F.repeat(F.expand_dims(coords, axis=0), batch, axis=0) return coords def manual_pad(x, pady, padx): if pady > 0: u = F.repeat(x[:, :, 0:1, :], pady, axis=2) d = F.repeat(x[:, :, -1:, :], pady, axis=2) x = F.concat([u, x, d], axis=2) if padx > 0: l =
F.repeat(x[:, :, :, 0:1], padx, axis=3)
megengine.functional.repeat
import megengine as mge import megengine.functional as F import numpy as np def bilinear_sampler(img, coords, mode="bilinear", mask=False): """Wrapper for grid_sample, uses pixel coordinates""" H, W = img.shape[-2:] img = F.remap(img, coords, border_mode="constant") if mask: mask = ( (coords[:, :, :, 0:1] < 0) | (coords[:, :, :, 0:1] > W - 1) | (coords[:, :, :, 1:2] < 0) | (coords[:, :, :, 1:2] > H - 1) ) mask = F.logical_not(mask) return img, mask.astype("float32") return img def coords_grid(batch, ht, wd): x_grid, y_grid = np.meshgrid(np.arange(wd), np.arange(ht)) y_grid, x_grid = mge.tensor(y_grid, dtype="float32"), mge.tensor( x_grid, dtype="float32" ) coords = F.stack([x_grid, y_grid], axis=0) coords = F.repeat(F.expand_dims(coords, axis=0), batch, axis=0) return coords def manual_pad(x, pady, padx): if pady > 0: u = F.repeat(x[:, :, 0:1, :], pady, axis=2) d = F.repeat(x[:, :, -1:, :], pady, axis=2) x = F.concat([u, x, d], axis=2) if padx > 0: l = F.repeat(x[:, :, :, 0:1], padx, axis=3) r =
F.repeat(x[:, :, :, -1:], padx, axis=3)
megengine.functional.repeat
import megengine as mge import megengine.functional as F import numpy as np def bilinear_sampler(img, coords, mode="bilinear", mask=False): """Wrapper for grid_sample, uses pixel coordinates""" H, W = img.shape[-2:] img = F.remap(img, coords, border_mode="constant") if mask: mask = ( (coords[:, :, :, 0:1] < 0) | (coords[:, :, :, 0:1] > W - 1) | (coords[:, :, :, 1:2] < 0) | (coords[:, :, :, 1:2] > H - 1) ) mask = F.logical_not(mask) return img, mask.astype("float32") return img def coords_grid(batch, ht, wd): x_grid, y_grid = np.meshgrid(np.arange(wd), np.arange(ht)) y_grid, x_grid = mge.tensor(y_grid, dtype="float32"), mge.tensor( x_grid, dtype="float32" ) coords = F.stack([x_grid, y_grid], axis=0) coords = F.repeat(F.expand_dims(coords, axis=0), batch, axis=0) return coords def manual_pad(x, pady, padx): if pady > 0: u = F.repeat(x[:, :, 0:1, :], pady, axis=2) d = F.repeat(x[:, :, -1:, :], pady, axis=2) x = F.concat([u, x, d], axis=2) if padx > 0: l = F.repeat(x[:, :, :, 0:1], padx, axis=3) r = F.repeat(x[:, :, :, -1:], padx, axis=3) x =
F.concat([l, x, r], axis=3)
megengine.functional.concat
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import os import platform import time import numpy as np import pytest from megengine.data.collator import Collator from megengine.data.dataloader import DataLoader from megengine.data.dataset import ArrayDataset, StreamDataset from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler from megengine.data.transform import ( Compose, Normalize, PseudoTransform, ToMode, Transform, ) def init_dataset(): sample_num = 100 rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8) label = np.random.randint(0, 10, size=(sample_num,), dtype=int) dataset =
ArrayDataset(rand_data, label)
megengine.data.dataset.ArrayDataset
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import os import platform import time import numpy as np import pytest from megengine.data.collator import Collator from megengine.data.dataloader import DataLoader from megengine.data.dataset import ArrayDataset, StreamDataset from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler from megengine.data.transform import ( Compose, Normalize, PseudoTransform, ToMode, Transform, ) def init_dataset(): sample_num = 100 rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8) label = np.random.randint(0, 10, size=(sample_num,), dtype=int) dataset = ArrayDataset(rand_data, label) return dataset def test_dataloader_init(): dataset = init_dataset() with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=2, divide=True) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, timeout=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=0, divide=True) dataloader =
DataLoader(dataset)
megengine.data.dataloader.DataLoader
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import os import platform import time import numpy as np import pytest from megengine.data.collator import Collator from megengine.data.dataloader import DataLoader from megengine.data.dataset import ArrayDataset, StreamDataset from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler from megengine.data.transform import ( Compose, Normalize, PseudoTransform, ToMode, Transform, ) def init_dataset(): sample_num = 100 rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8) label = np.random.randint(0, 10, size=(sample_num,), dtype=int) dataset = ArrayDataset(rand_data, label) return dataset def test_dataloader_init(): dataset = init_dataset() with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=2, divide=True) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, timeout=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=0, divide=True) dataloader = DataLoader(dataset) assert isinstance(dataloader.sampler, SequentialSampler) assert isinstance(dataloader.transform, PseudoTransform) assert isinstance(dataloader.collator, Collator) dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=False) ) assert len(dataloader) == 17 dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=True) ) assert len(dataloader) == 16 class MyStream(StreamDataset): def __init__(self, number, batch=False, error_foramt=False, block=False): self.number = number self.batch = batch self.error_format = error_foramt self.block = block def __iter__(self): for cnt in range(self.number): if self.block: for _ in range(10): time.sleep(1) if self.batch: data = np.random.randint(0, 256, (2, 2, 2, 3), dtype="uint8") yield (True, (data, [cnt, cnt - self.number])) else: data = np.random.randint(0, 256, (2, 2, 3), dtype="uint8") if self.error_format: yield (data, cnt) else: yield (False, (data, cnt)) raise StopIteration @pytest.mark.parametrize("batch", [True, False]) @pytest.mark.parametrize("num_workers", [0, 2]) def test_stream_dataloader(batch, num_workers): dataset = MyStream(100, batch=batch) sampler =
StreamSampler(batch_size=4)
megengine.data.sampler.StreamSampler
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import os import platform import time import numpy as np import pytest from megengine.data.collator import Collator from megengine.data.dataloader import DataLoader from megengine.data.dataset import ArrayDataset, StreamDataset from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler from megengine.data.transform import ( Compose, Normalize, PseudoTransform, ToMode, Transform, ) def init_dataset(): sample_num = 100 rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8) label = np.random.randint(0, 10, size=(sample_num,), dtype=int) dataset = ArrayDataset(rand_data, label) return dataset def test_dataloader_init(): dataset = init_dataset() with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=2, divide=True) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, timeout=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=0, divide=True) dataloader = DataLoader(dataset) assert isinstance(dataloader.sampler, SequentialSampler) assert isinstance(dataloader.transform, PseudoTransform) assert isinstance(dataloader.collator, Collator) dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=False) ) assert len(dataloader) == 17 dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=True) ) assert len(dataloader) == 16 class MyStream(StreamDataset): def __init__(self, number, batch=False, error_foramt=False, block=False): self.number = number self.batch = batch self.error_format = error_foramt self.block = block def __iter__(self): for cnt in range(self.number): if self.block: for _ in range(10): time.sleep(1) if self.batch: data = np.random.randint(0, 256, (2, 2, 2, 3), dtype="uint8") yield (True, (data, [cnt, cnt - self.number])) else: data = np.random.randint(0, 256, (2, 2, 3), dtype="uint8") if self.error_format: yield (data, cnt) else: yield (False, (data, cnt)) raise StopIteration @pytest.mark.parametrize("batch", [True, False]) @pytest.mark.parametrize("num_workers", [0, 2]) def test_stream_dataloader(batch, num_workers): dataset = MyStream(100, batch=batch) sampler = StreamSampler(batch_size=4) dataloader = DataLoader( dataset, sampler, Compose([Normalize(mean=(103, 116, 123), std=(57, 57, 58)), ToMode("CHW")]), num_workers=num_workers, ) check_set = set() for step, data in enumerate(dataloader): if step == 10: break assert data[0].shape == (4, 3, 2, 2) assert data[1].shape == (4,) for i in data[1]: assert i not in check_set check_set.add(i) def test_stream_dataloader_error(): dataset = MyStream(100, error_foramt=True) sampler =
StreamSampler(batch_size=4)
megengine.data.sampler.StreamSampler
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import os import platform import time import numpy as np import pytest from megengine.data.collator import Collator from megengine.data.dataloader import DataLoader from megengine.data.dataset import ArrayDataset, StreamDataset from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler from megengine.data.transform import ( Compose, Normalize, PseudoTransform, ToMode, Transform, ) def init_dataset(): sample_num = 100 rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8) label = np.random.randint(0, 10, size=(sample_num,), dtype=int) dataset = ArrayDataset(rand_data, label) return dataset def test_dataloader_init(): dataset = init_dataset() with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=2, divide=True) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, timeout=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=0, divide=True) dataloader = DataLoader(dataset) assert isinstance(dataloader.sampler, SequentialSampler) assert isinstance(dataloader.transform, PseudoTransform) assert isinstance(dataloader.collator, Collator) dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=False) ) assert len(dataloader) == 17 dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=True) ) assert len(dataloader) == 16 class MyStream(StreamDataset): def __init__(self, number, batch=False, error_foramt=False, block=False): self.number = number self.batch = batch self.error_format = error_foramt self.block = block def __iter__(self): for cnt in range(self.number): if self.block: for _ in range(10): time.sleep(1) if self.batch: data = np.random.randint(0, 256, (2, 2, 2, 3), dtype="uint8") yield (True, (data, [cnt, cnt - self.number])) else: data = np.random.randint(0, 256, (2, 2, 3), dtype="uint8") if self.error_format: yield (data, cnt) else: yield (False, (data, cnt)) raise StopIteration @pytest.mark.parametrize("batch", [True, False]) @pytest.mark.parametrize("num_workers", [0, 2]) def test_stream_dataloader(batch, num_workers): dataset = MyStream(100, batch=batch) sampler = StreamSampler(batch_size=4) dataloader = DataLoader( dataset, sampler, Compose([Normalize(mean=(103, 116, 123), std=(57, 57, 58)), ToMode("CHW")]), num_workers=num_workers, ) check_set = set() for step, data in enumerate(dataloader): if step == 10: break assert data[0].shape == (4, 3, 2, 2) assert data[1].shape == (4,) for i in data[1]: assert i not in check_set check_set.add(i) def test_stream_dataloader_error(): dataset = MyStream(100, error_foramt=True) sampler = StreamSampler(batch_size=4) dataloader =
DataLoader(dataset, sampler)
megengine.data.dataloader.DataLoader
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import os import platform import time import numpy as np import pytest from megengine.data.collator import Collator from megengine.data.dataloader import DataLoader from megengine.data.dataset import ArrayDataset, StreamDataset from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler from megengine.data.transform import ( Compose, Normalize, PseudoTransform, ToMode, Transform, ) def init_dataset(): sample_num = 100 rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8) label = np.random.randint(0, 10, size=(sample_num,), dtype=int) dataset = ArrayDataset(rand_data, label) return dataset def test_dataloader_init(): dataset = init_dataset() with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=2, divide=True) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, timeout=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=0, divide=True) dataloader = DataLoader(dataset) assert isinstance(dataloader.sampler, SequentialSampler) assert isinstance(dataloader.transform, PseudoTransform) assert isinstance(dataloader.collator, Collator) dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=False) ) assert len(dataloader) == 17 dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=True) ) assert len(dataloader) == 16 class MyStream(StreamDataset): def __init__(self, number, batch=False, error_foramt=False, block=False): self.number = number self.batch = batch self.error_format = error_foramt self.block = block def __iter__(self): for cnt in range(self.number): if self.block: for _ in range(10): time.sleep(1) if self.batch: data = np.random.randint(0, 256, (2, 2, 2, 3), dtype="uint8") yield (True, (data, [cnt, cnt - self.number])) else: data = np.random.randint(0, 256, (2, 2, 3), dtype="uint8") if self.error_format: yield (data, cnt) else: yield (False, (data, cnt)) raise StopIteration @pytest.mark.parametrize("batch", [True, False]) @pytest.mark.parametrize("num_workers", [0, 2]) def test_stream_dataloader(batch, num_workers): dataset = MyStream(100, batch=batch) sampler = StreamSampler(batch_size=4) dataloader = DataLoader( dataset, sampler, Compose([Normalize(mean=(103, 116, 123), std=(57, 57, 58)), ToMode("CHW")]), num_workers=num_workers, ) check_set = set() for step, data in enumerate(dataloader): if step == 10: break assert data[0].shape == (4, 3, 2, 2) assert data[1].shape == (4,) for i in data[1]: assert i not in check_set check_set.add(i) def test_stream_dataloader_error(): dataset = MyStream(100, error_foramt=True) sampler = StreamSampler(batch_size=4) dataloader = DataLoader(dataset, sampler) with pytest.raises(AssertionError, match=r".*tuple.*"): data_iter = iter(dataloader) next(data_iter) @pytest.mark.parametrize("num_workers", [0, 2]) def test_stream_dataloader_timeout(num_workers): dataset = MyStream(100, False, block=True) sampler =
StreamSampler(batch_size=4)
megengine.data.sampler.StreamSampler
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import os import platform import time import numpy as np import pytest from megengine.data.collator import Collator from megengine.data.dataloader import DataLoader from megengine.data.dataset import ArrayDataset, StreamDataset from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler from megengine.data.transform import ( Compose, Normalize, PseudoTransform, ToMode, Transform, ) def init_dataset(): sample_num = 100 rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8) label = np.random.randint(0, 10, size=(sample_num,), dtype=int) dataset = ArrayDataset(rand_data, label) return dataset def test_dataloader_init(): dataset = init_dataset() with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=2, divide=True) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, timeout=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=0, divide=True) dataloader = DataLoader(dataset) assert isinstance(dataloader.sampler, SequentialSampler) assert isinstance(dataloader.transform, PseudoTransform) assert isinstance(dataloader.collator, Collator) dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=False) ) assert len(dataloader) == 17 dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=True) ) assert len(dataloader) == 16 class MyStream(StreamDataset): def __init__(self, number, batch=False, error_foramt=False, block=False): self.number = number self.batch = batch self.error_format = error_foramt self.block = block def __iter__(self): for cnt in range(self.number): if self.block: for _ in range(10): time.sleep(1) if self.batch: data = np.random.randint(0, 256, (2, 2, 2, 3), dtype="uint8") yield (True, (data, [cnt, cnt - self.number])) else: data = np.random.randint(0, 256, (2, 2, 3), dtype="uint8") if self.error_format: yield (data, cnt) else: yield (False, (data, cnt)) raise StopIteration @pytest.mark.parametrize("batch", [True, False]) @pytest.mark.parametrize("num_workers", [0, 2]) def test_stream_dataloader(batch, num_workers): dataset = MyStream(100, batch=batch) sampler = StreamSampler(batch_size=4) dataloader = DataLoader( dataset, sampler, Compose([Normalize(mean=(103, 116, 123), std=(57, 57, 58)), ToMode("CHW")]), num_workers=num_workers, ) check_set = set() for step, data in enumerate(dataloader): if step == 10: break assert data[0].shape == (4, 3, 2, 2) assert data[1].shape == (4,) for i in data[1]: assert i not in check_set check_set.add(i) def test_stream_dataloader_error(): dataset = MyStream(100, error_foramt=True) sampler = StreamSampler(batch_size=4) dataloader = DataLoader(dataset, sampler) with pytest.raises(AssertionError, match=r".*tuple.*"): data_iter = iter(dataloader) next(data_iter) @pytest.mark.parametrize("num_workers", [0, 2]) def test_stream_dataloader_timeout(num_workers): dataset = MyStream(100, False, block=True) sampler = StreamSampler(batch_size=4) dataloader =
DataLoader(dataset, sampler, num_workers=num_workers, timeout=2)
megengine.data.dataloader.DataLoader
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import os import platform import time import numpy as np import pytest from megengine.data.collator import Collator from megengine.data.dataloader import DataLoader from megengine.data.dataset import ArrayDataset, StreamDataset from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler from megengine.data.transform import ( Compose, Normalize, PseudoTransform, ToMode, Transform, ) def init_dataset(): sample_num = 100 rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8) label = np.random.randint(0, 10, size=(sample_num,), dtype=int) dataset = ArrayDataset(rand_data, label) return dataset def test_dataloader_init(): dataset = init_dataset() with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=2, divide=True) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, timeout=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=0, divide=True) dataloader = DataLoader(dataset) assert isinstance(dataloader.sampler, SequentialSampler) assert isinstance(dataloader.transform, PseudoTransform) assert isinstance(dataloader.collator, Collator) dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=False) ) assert len(dataloader) == 17 dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=True) ) assert len(dataloader) == 16 class MyStream(StreamDataset): def __init__(self, number, batch=False, error_foramt=False, block=False): self.number = number self.batch = batch self.error_format = error_foramt self.block = block def __iter__(self): for cnt in range(self.number): if self.block: for _ in range(10): time.sleep(1) if self.batch: data = np.random.randint(0, 256, (2, 2, 2, 3), dtype="uint8") yield (True, (data, [cnt, cnt - self.number])) else: data = np.random.randint(0, 256, (2, 2, 3), dtype="uint8") if self.error_format: yield (data, cnt) else: yield (False, (data, cnt)) raise StopIteration @pytest.mark.parametrize("batch", [True, False]) @pytest.mark.parametrize("num_workers", [0, 2]) def test_stream_dataloader(batch, num_workers): dataset = MyStream(100, batch=batch) sampler = StreamSampler(batch_size=4) dataloader = DataLoader( dataset, sampler, Compose([Normalize(mean=(103, 116, 123), std=(57, 57, 58)), ToMode("CHW")]), num_workers=num_workers, ) check_set = set() for step, data in enumerate(dataloader): if step == 10: break assert data[0].shape == (4, 3, 2, 2) assert data[1].shape == (4,) for i in data[1]: assert i not in check_set check_set.add(i) def test_stream_dataloader_error(): dataset = MyStream(100, error_foramt=True) sampler = StreamSampler(batch_size=4) dataloader = DataLoader(dataset, sampler) with pytest.raises(AssertionError, match=r".*tuple.*"): data_iter = iter(dataloader) next(data_iter) @pytest.mark.parametrize("num_workers", [0, 2]) def test_stream_dataloader_timeout(num_workers): dataset = MyStream(100, False, block=True) sampler = StreamSampler(batch_size=4) dataloader = DataLoader(dataset, sampler, num_workers=num_workers, timeout=2) with 
pytest.raises(RuntimeError, match=r".*timeout.*"): data_iter = iter(dataloader) next(data_iter) def test_dataloader_serial(): dataset = init_dataset() dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=4, drop_last=False) ) for (data, label) in dataloader: assert data.shape == (4, 1, 32, 32) assert label.shape == (4,) def test_dataloader_parallel(): # set max shared memory to 100M os.environ["MGE_PLASMA_MEMORY"] = "100000000" dataset = init_dataset() dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=4, drop_last=False), num_workers=2, divide=False, ) for (data, label) in dataloader: assert data.shape == (4, 1, 32, 32) assert label.shape == (4,) dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=4, drop_last=False), num_workers=2, divide=True, ) for (data, label) in dataloader: assert data.shape == (4, 1, 32, 32) assert label.shape == (4,) @pytest.mark.skipif( platform.system() == "Windows", reason="dataloader do not support parallel on windows", ) def test_dataloader_parallel_timeout(): dataset = init_dataset() class TimeoutTransform(Transform): def __init__(self): pass def apply(self, input): time.sleep(10) return input dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=4, drop_last=False), transform=TimeoutTransform(), num_workers=2, timeout=2, ) with pytest.raises(RuntimeError, match=r".*timeout.*"): data_iter = iter(dataloader) batch_data = next(data_iter) @pytest.mark.skipif( platform.system() == "Windows", reason="dataloader do not support parallel on windows", ) def test_dataloader_parallel_worker_exception(): dataset = init_dataset() class FakeErrorTransform(Transform): def __init__(self): pass def apply(self, input): raise RuntimeError("test raise error") return input dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=4, drop_last=False), transform=FakeErrorTransform(), num_workers=2, ) with pytest.raises(RuntimeError, match=r"worker.*died"): data_iter = iter(dataloader) batch_data = next(data_iter) def _multi_instances_parallel_dataloader_worker(): dataset = init_dataset() for divide_flag in [True, False]: train_dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=4, drop_last=False), num_workers=2, divide=divide_flag, ) val_dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=10, drop_last=False), num_workers=2, divide=divide_flag, ) for idx, (data, label) in enumerate(train_dataloader): assert data.shape == (4, 1, 32, 32) assert label.shape == (4,) if idx % 5 == 0: for val_data, val_label in val_dataloader: assert val_data.shape == (10, 1, 32, 32) assert val_label.shape == (10,) def test_dataloader_parallel_multi_instances(): # set max shared memory to 100M os.environ["MGE_PLASMA_MEMORY"] = "100000000" _multi_instances_parallel_dataloader_worker() @pytest.mark.isolated_distributed def test_dataloader_parallel_multi_instances_multiprocessing(): # set max shared memory to 100M os.environ["MGE_PLASMA_MEMORY"] = "100000000" import multiprocessing as mp # mp.set_start_method("spawn") processes = [] for i in range(4): p = mp.Process(target=_multi_instances_parallel_dataloader_worker) p.start() processes.append(p) for p in processes: p.join() assert p.exitcode == 0 @pytest.mark.parametrize("num_workers", [0, 2]) def test_timeout_event(num_workers): def cb(): return (True, (np.zeros(shape=(2, 2, 2, 3)), np.ones(shape=(2,)))) dataset = MyStream(100, block=True) sampler =
StreamSampler(batch_size=4)
megengine.data.sampler.StreamSampler
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import os import platform import time import numpy as np import pytest from megengine.data.collator import Collator from megengine.data.dataloader import DataLoader from megengine.data.dataset import ArrayDataset, StreamDataset from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler from megengine.data.transform import ( Compose, Normalize, PseudoTransform, ToMode, Transform, ) def init_dataset(): sample_num = 100 rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8) label = np.random.randint(0, 10, size=(sample_num,), dtype=int) dataset = ArrayDataset(rand_data, label) return dataset def test_dataloader_init(): dataset = init_dataset() with pytest.raises(ValueError): dataloader =
DataLoader(dataset, num_workers=2, divide=True)
megengine.data.dataloader.DataLoader
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import os import platform import time import numpy as np import pytest from megengine.data.collator import Collator from megengine.data.dataloader import DataLoader from megengine.data.dataset import ArrayDataset, StreamDataset from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler from megengine.data.transform import ( Compose, Normalize, PseudoTransform, ToMode, Transform, ) def init_dataset(): sample_num = 100 rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8) label = np.random.randint(0, 10, size=(sample_num,), dtype=int) dataset = ArrayDataset(rand_data, label) return dataset def test_dataloader_init(): dataset = init_dataset() with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=2, divide=True) with pytest.raises(ValueError): dataloader =
DataLoader(dataset, num_workers=-1)
megengine.data.dataloader.DataLoader
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import os import platform import time import numpy as np import pytest from megengine.data.collator import Collator from megengine.data.dataloader import DataLoader from megengine.data.dataset import ArrayDataset, StreamDataset from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler from megengine.data.transform import ( Compose, Normalize, PseudoTransform, ToMode, Transform, ) def init_dataset(): sample_num = 100 rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8) label = np.random.randint(0, 10, size=(sample_num,), dtype=int) dataset = ArrayDataset(rand_data, label) return dataset def test_dataloader_init(): dataset = init_dataset() with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=2, divide=True) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=-1) with pytest.raises(ValueError): dataloader =
DataLoader(dataset, timeout=-1)
megengine.data.dataloader.DataLoader
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import os import platform import time import numpy as np import pytest from megengine.data.collator import Collator from megengine.data.dataloader import DataLoader from megengine.data.dataset import ArrayDataset, StreamDataset from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler from megengine.data.transform import ( Compose, Normalize, PseudoTransform, ToMode, Transform, ) def init_dataset(): sample_num = 100 rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8) label = np.random.randint(0, 10, size=(sample_num,), dtype=int) dataset = ArrayDataset(rand_data, label) return dataset def test_dataloader_init(): dataset = init_dataset() with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=2, divide=True) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, timeout=-1) with pytest.raises(ValueError): dataloader =
DataLoader(dataset, num_workers=0, divide=True)
megengine.data.dataloader.DataLoader
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import os import platform import time import numpy as np import pytest from megengine.data.collator import Collator from megengine.data.dataloader import DataLoader from megengine.data.dataset import ArrayDataset, StreamDataset from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler from megengine.data.transform import ( Compose, Normalize, PseudoTransform, ToMode, Transform, ) def init_dataset(): sample_num = 100 rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8) label = np.random.randint(0, 10, size=(sample_num,), dtype=int) dataset = ArrayDataset(rand_data, label) return dataset def test_dataloader_init(): dataset = init_dataset() with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=2, divide=True) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, timeout=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=0, divide=True) dataloader = DataLoader(dataset) assert isinstance(dataloader.sampler, SequentialSampler) assert isinstance(dataloader.transform, PseudoTransform) assert isinstance(dataloader.collator, Collator) dataloader = DataLoader( dataset, sampler=
RandomSampler(dataset, batch_size=6, drop_last=False)
megengine.data.sampler.RandomSampler
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import os import platform import time import numpy as np import pytest from megengine.data.collator import Collator from megengine.data.dataloader import DataLoader from megengine.data.dataset import ArrayDataset, StreamDataset from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler from megengine.data.transform import ( Compose, Normalize, PseudoTransform, ToMode, Transform, ) def init_dataset(): sample_num = 100 rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8) label = np.random.randint(0, 10, size=(sample_num,), dtype=int) dataset = ArrayDataset(rand_data, label) return dataset def test_dataloader_init(): dataset = init_dataset() with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=2, divide=True) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, timeout=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=0, divide=True) dataloader = DataLoader(dataset) assert isinstance(dataloader.sampler, SequentialSampler) assert isinstance(dataloader.transform, PseudoTransform) assert isinstance(dataloader.collator, Collator) dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=False) ) assert len(dataloader) == 17 dataloader = DataLoader( dataset, sampler=
RandomSampler(dataset, batch_size=6, drop_last=True)
megengine.data.sampler.RandomSampler
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import os import platform import time import numpy as np import pytest from megengine.data.collator import Collator from megengine.data.dataloader import DataLoader from megengine.data.dataset import ArrayDataset, StreamDataset from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler from megengine.data.transform import ( Compose, Normalize, PseudoTransform, ToMode, Transform, ) def init_dataset(): sample_num = 100 rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8) label = np.random.randint(0, 10, size=(sample_num,), dtype=int) dataset = ArrayDataset(rand_data, label) return dataset def test_dataloader_init(): dataset = init_dataset() with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=2, divide=True) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, timeout=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=0, divide=True) dataloader = DataLoader(dataset) assert isinstance(dataloader.sampler, SequentialSampler) assert isinstance(dataloader.transform, PseudoTransform) assert isinstance(dataloader.collator, Collator) dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=False) ) assert len(dataloader) == 17 dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=True) ) assert len(dataloader) == 16 class MyStream(StreamDataset): def __init__(self, number, batch=False, error_foramt=False, block=False): self.number = number self.batch = batch self.error_format = error_foramt self.block = block def __iter__(self): for cnt in range(self.number): if self.block: for _ in range(10): time.sleep(1) if self.batch: data = np.random.randint(0, 256, (2, 2, 2, 3), dtype="uint8") yield (True, (data, [cnt, cnt - self.number])) else: data = np.random.randint(0, 256, (2, 2, 3), dtype="uint8") if self.error_format: yield (data, cnt) else: yield (False, (data, cnt)) raise StopIteration @pytest.mark.parametrize("batch", [True, False]) @pytest.mark.parametrize("num_workers", [0, 2]) def test_stream_dataloader(batch, num_workers): dataset = MyStream(100, batch=batch) sampler = StreamSampler(batch_size=4) dataloader = DataLoader( dataset, sampler, Compose([Normalize(mean=(103, 116, 123), std=(57, 57, 58)), ToMode("CHW")]), num_workers=num_workers, ) check_set = set() for step, data in enumerate(dataloader): if step == 10: break assert data[0].shape == (4, 3, 2, 2) assert data[1].shape == (4,) for i in data[1]: assert i not in check_set check_set.add(i) def test_stream_dataloader_error(): dataset = MyStream(100, error_foramt=True) sampler = StreamSampler(batch_size=4) dataloader = DataLoader(dataset, sampler) with pytest.raises(AssertionError, match=r".*tuple.*"): data_iter = iter(dataloader) next(data_iter) @pytest.mark.parametrize("num_workers", [0, 2]) def test_stream_dataloader_timeout(num_workers): dataset = MyStream(100, False, block=True) sampler = StreamSampler(batch_size=4) dataloader = DataLoader(dataset, sampler, num_workers=num_workers, timeout=2) with 
pytest.raises(RuntimeError, match=r".*timeout.*"): data_iter = iter(dataloader) next(data_iter) def test_dataloader_serial(): dataset = init_dataset() dataloader = DataLoader( dataset, sampler=
RandomSampler(dataset, batch_size=4, drop_last=False)
megengine.data.sampler.RandomSampler
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import os import platform import time import numpy as np import pytest from megengine.data.collator import Collator from megengine.data.dataloader import DataLoader from megengine.data.dataset import ArrayDataset, StreamDataset from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler from megengine.data.transform import ( Compose, Normalize, PseudoTransform, ToMode, Transform, ) def init_dataset(): sample_num = 100 rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8) label = np.random.randint(0, 10, size=(sample_num,), dtype=int) dataset = ArrayDataset(rand_data, label) return dataset def test_dataloader_init(): dataset = init_dataset() with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=2, divide=True) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, timeout=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=0, divide=True) dataloader = DataLoader(dataset) assert isinstance(dataloader.sampler, SequentialSampler) assert isinstance(dataloader.transform, PseudoTransform) assert isinstance(dataloader.collator, Collator) dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=False) ) assert len(dataloader) == 17 dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=True) ) assert len(dataloader) == 16 class MyStream(StreamDataset): def __init__(self, number, batch=False, error_foramt=False, block=False): self.number = number self.batch = batch self.error_format = error_foramt self.block = block def __iter__(self): for cnt in range(self.number): if self.block: for _ in range(10): time.sleep(1) if self.batch: data = np.random.randint(0, 256, (2, 2, 2, 3), dtype="uint8") yield (True, (data, [cnt, cnt - self.number])) else: data = np.random.randint(0, 256, (2, 2, 3), dtype="uint8") if self.error_format: yield (data, cnt) else: yield (False, (data, cnt)) raise StopIteration @pytest.mark.parametrize("batch", [True, False]) @pytest.mark.parametrize("num_workers", [0, 2]) def test_stream_dataloader(batch, num_workers): dataset = MyStream(100, batch=batch) sampler = StreamSampler(batch_size=4) dataloader = DataLoader( dataset, sampler, Compose([Normalize(mean=(103, 116, 123), std=(57, 57, 58)), ToMode("CHW")]), num_workers=num_workers, ) check_set = set() for step, data in enumerate(dataloader): if step == 10: break assert data[0].shape == (4, 3, 2, 2) assert data[1].shape == (4,) for i in data[1]: assert i not in check_set check_set.add(i) def test_stream_dataloader_error(): dataset = MyStream(100, error_foramt=True) sampler = StreamSampler(batch_size=4) dataloader = DataLoader(dataset, sampler) with pytest.raises(AssertionError, match=r".*tuple.*"): data_iter = iter(dataloader) next(data_iter) @pytest.mark.parametrize("num_workers", [0, 2]) def test_stream_dataloader_timeout(num_workers): dataset = MyStream(100, False, block=True) sampler = StreamSampler(batch_size=4) dataloader = DataLoader(dataset, sampler, num_workers=num_workers, timeout=2) with 
pytest.raises(RuntimeError, match=r".*timeout.*"): data_iter = iter(dataloader) next(data_iter) def test_dataloader_serial(): dataset = init_dataset() dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=4, drop_last=False) ) for (data, label) in dataloader: assert data.shape == (4, 1, 32, 32) assert label.shape == (4,) def test_dataloader_parallel(): # set max shared memory to 100M os.environ["MGE_PLASMA_MEMORY"] = "100000000" dataset = init_dataset() dataloader = DataLoader( dataset, sampler=
RandomSampler(dataset, batch_size=4, drop_last=False)
megengine.data.sampler.RandomSampler
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import os import platform import time import numpy as np import pytest from megengine.data.collator import Collator from megengine.data.dataloader import DataLoader from megengine.data.dataset import ArrayDataset, StreamDataset from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler from megengine.data.transform import ( Compose, Normalize, PseudoTransform, ToMode, Transform, ) def init_dataset(): sample_num = 100 rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8) label = np.random.randint(0, 10, size=(sample_num,), dtype=int) dataset = ArrayDataset(rand_data, label) return dataset def test_dataloader_init(): dataset = init_dataset() with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=2, divide=True) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, timeout=-1) with pytest.raises(ValueError): dataloader = DataLoader(dataset, num_workers=0, divide=True) dataloader = DataLoader(dataset) assert isinstance(dataloader.sampler, SequentialSampler) assert isinstance(dataloader.transform, PseudoTransform) assert isinstance(dataloader.collator, Collator) dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=False) ) assert len(dataloader) == 17 dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=True) ) assert len(dataloader) == 16 class MyStream(StreamDataset): def __init__(self, number, batch=False, error_foramt=False, block=False): self.number = number self.batch = batch self.error_format = error_foramt self.block = block def __iter__(self): for cnt in range(self.number): if self.block: for _ in range(10): time.sleep(1) if self.batch: data = np.random.randint(0, 256, (2, 2, 2, 3), dtype="uint8") yield (True, (data, [cnt, cnt - self.number])) else: data = np.random.randint(0, 256, (2, 2, 3), dtype="uint8") if self.error_format: yield (data, cnt) else: yield (False, (data, cnt)) raise StopIteration @pytest.mark.parametrize("batch", [True, False]) @pytest.mark.parametrize("num_workers", [0, 2]) def test_stream_dataloader(batch, num_workers): dataset = MyStream(100, batch=batch) sampler = StreamSampler(batch_size=4) dataloader = DataLoader( dataset, sampler, Compose([Normalize(mean=(103, 116, 123), std=(57, 57, 58)), ToMode("CHW")]), num_workers=num_workers, ) check_set = set() for step, data in enumerate(dataloader): if step == 10: break assert data[0].shape == (4, 3, 2, 2) assert data[1].shape == (4,) for i in data[1]: assert i not in check_set check_set.add(i) def test_stream_dataloader_error(): dataset = MyStream(100, error_foramt=True) sampler = StreamSampler(batch_size=4) dataloader = DataLoader(dataset, sampler) with pytest.raises(AssertionError, match=r".*tuple.*"): data_iter = iter(dataloader) next(data_iter) @pytest.mark.parametrize("num_workers", [0, 2]) def test_stream_dataloader_timeout(num_workers): dataset = MyStream(100, False, block=True) sampler = StreamSampler(batch_size=4) dataloader = DataLoader(dataset, sampler, num_workers=num_workers, timeout=2) with 
pytest.raises(RuntimeError, match=r".*timeout.*"): data_iter = iter(dataloader) next(data_iter) def test_dataloader_serial(): dataset = init_dataset() dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=4, drop_last=False) ) for (data, label) in dataloader: assert data.shape == (4, 1, 32, 32) assert label.shape == (4,) def test_dataloader_parallel(): # set max shared memory to 100M os.environ["MGE_PLASMA_MEMORY"] = "100000000" dataset = init_dataset() dataloader = DataLoader( dataset, sampler=RandomSampler(dataset, batch_size=4, drop_last=False), num_workers=2, divide=False, ) for (data, label) in dataloader: assert data.shape == (4, 1, 32, 32) assert label.shape == (4,) dataloader = DataLoader( dataset, sampler=
RandomSampler(dataset, batch_size=4, drop_last=False)
megengine.data.sampler.RandomSampler
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import time

import numpy as np
import pytest

from megengine.data.collator import Collator
from megengine.data.dataloader import DataLoader
from megengine.data.dataset import ArrayDataset, StreamDataset
from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler
from megengine.data.transform import (
    Compose,
    Normalize,
    PseudoTransform,
    ToMode,
    Transform,
)


def init_dataset():
    sample_num = 100
    rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)
    label = np.random.randint(0, 10, size=(sample_num,), dtype=int)
    dataset = ArrayDataset(rand_data, label)
    return dataset


def test_dataloader_init():
    dataset = init_dataset()
    with pytest.raises(ValueError):
        dataloader = DataLoader(dataset, num_workers=2, divide=True)
    with pytest.raises(ValueError):
        dataloader = DataLoader(dataset, num_workers=-1)
    with pytest.raises(ValueError):
        dataloader = DataLoader(dataset, timeout=-1)
    with pytest.raises(ValueError):
        dataloader = DataLoader(dataset, num_workers=0, divide=True)

    dataloader = DataLoader(dataset)
    assert isinstance(dataloader.sampler, SequentialSampler)
    assert isinstance(dataloader.transform, PseudoTransform)
    assert isinstance(dataloader.collator, Collator)

    dataloader = DataLoader(
        dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=False)
    )
    assert len(dataloader) == 17
    dataloader = DataLoader(
        dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=True)
    )
    assert len(dataloader) == 16


class MyStream(StreamDataset):
    def __init__(self, number, batch=False, error_format=False, block=False):
        self.number = number
        self.batch = batch
        self.error_format = error_format
        self.block = block

    def __iter__(self):
        for cnt in range(self.number):
            if self.block:
                for _ in range(10):
                    time.sleep(1)
            if self.batch:
                data = np.random.randint(0, 256, (2, 2, 2, 3), dtype="uint8")
                yield (True, (data, [cnt, cnt - self.number]))
            else:
                data = np.random.randint(0, 256, (2, 2, 3), dtype="uint8")
                if self.error_format:
                    yield (data, cnt)
                else:
                    yield (False, (data, cnt))
        raise StopIteration


@pytest.mark.parametrize("batch", [True, False])
@pytest.mark.parametrize("num_workers", [0, 2])
def test_stream_dataloader(batch, num_workers):
    dataset = MyStream(100, batch=batch)
    sampler = StreamSampler(batch_size=4)
    dataloader = DataLoader(
        dataset,
        sampler,
        Compose([Normalize(mean=(103, 116, 123), std=(57, 57, 58)), ToMode("CHW")]),
        num_workers=num_workers,
    )

    check_set = set()
    for step, data in enumerate(dataloader):
        if step == 10:
            break
        assert data[0].shape == (4, 3, 2, 2)
        assert data[1].shape == (4,)
        for i in data[1]:
            assert i not in check_set
            check_set.add(i)


def test_stream_dataloader_error():
    dataset = MyStream(100, error_format=True)
    sampler = StreamSampler(batch_size=4)
    dataloader = DataLoader(dataset, sampler)
    with pytest.raises(AssertionError, match=r".*tuple.*"):
        data_iter = iter(dataloader)
        next(data_iter)


@pytest.mark.parametrize("num_workers", [0, 2])
def test_stream_dataloader_timeout(num_workers):
    dataset = MyStream(100, False, block=True)
    sampler = StreamSampler(batch_size=4)
    dataloader = DataLoader(dataset, sampler, num_workers=num_workers, timeout=2)
    with pytest.raises(RuntimeError, match=r".*timeout.*"):
        data_iter = iter(dataloader)
        next(data_iter)


def test_dataloader_serial():
    dataset = init_dataset()
    dataloader = DataLoader(
        dataset, sampler=RandomSampler(dataset, batch_size=4, drop_last=False)
    )
    for (data, label) in dataloader:
        assert data.shape == (4, 1, 32, 32)
        assert label.shape == (4,)


def test_dataloader_parallel():
    # set max shared memory to 100M
    os.environ["MGE_PLASMA_MEMORY"] = "100000000"
    dataset = init_dataset()
    dataloader = DataLoader(
        dataset,
        sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
        num_workers=2,
        divide=False,
    )
    for (data, label) in dataloader:
        assert data.shape == (4, 1, 32, 32)
        assert label.shape == (4,)

    dataloader = DataLoader(
        dataset,
        sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
        num_workers=2,
        divide=True,
    )
    for (data, label) in dataloader:
        assert data.shape == (4, 1, 32, 32)
        assert label.shape == (4,)


@pytest.mark.skipif(
    platform.system() == "Windows",
    reason="dataloader does not support parallel on windows",
)
def test_dataloader_parallel_timeout():
    dataset = init_dataset()

    class TimeoutTransform(Transform):
        def __init__(self):
            pass

        def apply(self, input):
            time.sleep(10)
            return input

    dataloader = DataLoader(
        dataset,
        sampler=
RandomSampler(dataset, batch_size=4, drop_last=False)
megengine.data.sampler.RandomSampler
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import time

import numpy as np
import pytest

from megengine.data.collator import Collator
from megengine.data.dataloader import DataLoader
from megengine.data.dataset import ArrayDataset, StreamDataset
from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler
from megengine.data.transform import (
    Compose,
    Normalize,
    PseudoTransform,
    ToMode,
    Transform,
)


def init_dataset():
    sample_num = 100
    rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)
    label = np.random.randint(0, 10, size=(sample_num,), dtype=int)
    dataset = ArrayDataset(rand_data, label)
    return dataset


def test_dataloader_init():
    dataset = init_dataset()
    with pytest.raises(ValueError):
        dataloader = DataLoader(dataset, num_workers=2, divide=True)
    with pytest.raises(ValueError):
        dataloader = DataLoader(dataset, num_workers=-1)
    with pytest.raises(ValueError):
        dataloader = DataLoader(dataset, timeout=-1)
    with pytest.raises(ValueError):
        dataloader = DataLoader(dataset, num_workers=0, divide=True)

    dataloader = DataLoader(dataset)
    assert isinstance(dataloader.sampler, SequentialSampler)
    assert isinstance(dataloader.transform, PseudoTransform)
    assert isinstance(dataloader.collator, Collator)

    dataloader = DataLoader(
        dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=False)
    )
    assert len(dataloader) == 17
    dataloader = DataLoader(
        dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=True)
    )
    assert len(dataloader) == 16


class MyStream(StreamDataset):
    def __init__(self, number, batch=False, error_format=False, block=False):
        self.number = number
        self.batch = batch
        self.error_format = error_format
        self.block = block

    def __iter__(self):
        for cnt in range(self.number):
            if self.block:
                for _ in range(10):
                    time.sleep(1)
            if self.batch:
                data = np.random.randint(0, 256, (2, 2, 2, 3), dtype="uint8")
                yield (True, (data, [cnt, cnt - self.number]))
            else:
                data = np.random.randint(0, 256, (2, 2, 3), dtype="uint8")
                if self.error_format:
                    yield (data, cnt)
                else:
                    yield (False, (data, cnt))
        raise StopIteration


@pytest.mark.parametrize("batch", [True, False])
@pytest.mark.parametrize("num_workers", [0, 2])
def test_stream_dataloader(batch, num_workers):
    dataset = MyStream(100, batch=batch)
    sampler = StreamSampler(batch_size=4)
    dataloader = DataLoader(
        dataset,
        sampler,
        Compose([Normalize(mean=(103, 116, 123), std=(57, 57, 58)), ToMode("CHW")]),
        num_workers=num_workers,
    )

    check_set = set()
    for step, data in enumerate(dataloader):
        if step == 10:
            break
        assert data[0].shape == (4, 3, 2, 2)
        assert data[1].shape == (4,)
        for i in data[1]:
            assert i not in check_set
            check_set.add(i)


def test_stream_dataloader_error():
    dataset = MyStream(100, error_format=True)
    sampler = StreamSampler(batch_size=4)
    dataloader = DataLoader(dataset, sampler)
    with pytest.raises(AssertionError, match=r".*tuple.*"):
        data_iter = iter(dataloader)
        next(data_iter)


@pytest.mark.parametrize("num_workers", [0, 2])
def test_stream_dataloader_timeout(num_workers):
    dataset = MyStream(100, False, block=True)
    sampler = StreamSampler(batch_size=4)
    dataloader = DataLoader(dataset, sampler, num_workers=num_workers, timeout=2)
    with pytest.raises(RuntimeError, match=r".*timeout.*"):
        data_iter = iter(dataloader)
        next(data_iter)


def test_dataloader_serial():
    dataset = init_dataset()
    dataloader = DataLoader(
        dataset, sampler=RandomSampler(dataset, batch_size=4, drop_last=False)
    )
    for (data, label) in dataloader:
        assert data.shape == (4, 1, 32, 32)
        assert label.shape == (4,)


def test_dataloader_parallel():
    # set max shared memory to 100M
    os.environ["MGE_PLASMA_MEMORY"] = "100000000"
    dataset = init_dataset()
    dataloader = DataLoader(
        dataset,
        sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
        num_workers=2,
        divide=False,
    )
    for (data, label) in dataloader:
        assert data.shape == (4, 1, 32, 32)
        assert label.shape == (4,)

    dataloader = DataLoader(
        dataset,
        sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
        num_workers=2,
        divide=True,
    )
    for (data, label) in dataloader:
        assert data.shape == (4, 1, 32, 32)
        assert label.shape == (4,)


@pytest.mark.skipif(
    platform.system() == "Windows",
    reason="dataloader does not support parallel on windows",
)
def test_dataloader_parallel_timeout():
    dataset = init_dataset()

    class TimeoutTransform(Transform):
        def __init__(self):
            pass

        def apply(self, input):
            time.sleep(10)
            return input

    dataloader = DataLoader(
        dataset,
        sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
        transform=TimeoutTransform(),
        num_workers=2,
        timeout=2,
    )
    with pytest.raises(RuntimeError, match=r".*timeout.*"):
        data_iter = iter(dataloader)
        batch_data = next(data_iter)


@pytest.mark.skipif(
    platform.system() == "Windows",
    reason="dataloader does not support parallel on windows",
)
def test_dataloader_parallel_worker_exception():
    dataset = init_dataset()

    class FakeErrorTransform(Transform):
        def __init__(self):
            pass

        def apply(self, input):
            raise RuntimeError("test raise error")
            return input

    dataloader = DataLoader(
        dataset,
        sampler=
RandomSampler(dataset, batch_size=4, drop_last=False)
megengine.data.sampler.RandomSampler
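The rows above all complete the same call pattern: a RandomSampler constructed from the dataset and passed to DataLoader through the sampler= keyword, resolving to the API megengine.data.sampler.RandomSampler. The following is a minimal, self-contained sketch of that pattern, using only the megengine.data classes already imported in the prompts; the array shapes and batch size simply mirror the tests shown above.

import numpy as np

from megengine.data.dataloader import DataLoader
from megengine.data.dataset import ArrayDataset
from megengine.data.sampler import RandomSampler

# A small random dataset: 100 single-channel 32x32 images with integer labels,
# matching init_dataset() in the prompts above.
data = np.random.randint(0, 255, size=(100, 1, 32, 32), dtype=np.uint8)
label = np.random.randint(0, 10, size=(100,), dtype=int)
dataset = ArrayDataset(data, label)

# The completion in each row: build a RandomSampler and hand it to DataLoader.
sampler = RandomSampler(dataset, batch_size=4, drop_last=False)
dataloader = DataLoader(dataset, sampler=sampler)

# Each yielded batch is a (data, label) pair with a leading batch dimension of 4.
for batch_data, batch_label in dataloader:
    assert batch_data.shape == (4, 1, 32, 32)
    assert batch_label.shape == (4,)
    break  # one batch is enough for illustration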