import sys
import getopt
import numpy as np
import mindspore.context as context
from mindspore import Tensor
from mindspore import nn 
import mindspore.ops.operations as P
from mindspore.common import dtype as mstype

# Command-line flags: -d <device_id> picks the GPU, -t <test_type> picks
# how much graph-kernel optimization to enable (0 = none).
opts = dict(getopt.getopt(sys.argv[1:], "d:t:")[0])
dev_id = int(opts.get('-d', 0))
test_type = int(opts.get('-t', 0))

# Always run in graph mode on the selected GPU; test_type >= 1 turns on
# graph-kernel fusion, and test_type >= 2 additionally enables parallel fusion.
context.set_context(mode=context.GRAPH_MODE, device_target="GPU", device_id=dev_id)
if test_type >= 1:
    context.set_context(enable_graph_kernel=True)
    if test_type >= 2:
        context.set_context(graph_kernel_flags="--enable_parallel_fusion")

class Net(nn.Cell):
    """Small elementwise graph used to exercise graph-kernel fusion.

    ``construct`` chains multiplies, adds, Log/Exp, and a LessEqual mask,
    and returns two nested tuples of intermediate results so multiple
    fusion groups stay live in the compiled graph.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.cast = P.Cast()
        self.log = P.Log()
        self.less_equal = P.LessEqual()
        self.exp = P.Exp()

    def construct(self, x1, x2, x3, x4, x5, x6):
        # Masked affine branch: scale (x1*x2*x5 + x4) and zero it out
        # wherever x3 exceeds the threshold.
        prod = x1 * x2 * x5
        scaled = 1.1111111640930176 * (prod + x4)
        mask = self.cast(self.less_equal(x3, 0.8999999761581421), mstype.float32)
        masked = scaled * mask
        # Exp/Log branch: e = exp(-0.5 * log(x6 + 1)), and its cube.
        half_neg_log = self.log(x6 + 1) * -0.5
        e = self.exp(half_neg_log)
        e_cubed = e * e * e
        return (masked, mask), (e, e_cubed)

# Input shapes exercise three broadcast patterns against [16, 128, 64]:
# the full shape, the full shape with a trailing 1, and the last dim alone.
test_shape = [16, 128, 64]
shape0 = test_shape
shape1 = test_shape[:-1] + [1]
shape2 = test_shape[-1:]


def _rand(shape):
    # Standard-normal float16 tensor of the given shape.
    return Tensor(np.random.normal(0, 1, shape).astype(np.float16))


# Same draw order/shapes as before: i0..i5 with shapes
# [shape0, shape1, shape0, shape2, shape2, shape0].
inputs = [_rand(s) for s in (shape0, shape1, shape0, shape2, shape2, shape0)]

# Single forward pass; this script only drives compilation/execution under
# the selected fusion mode — outputs are not checked here.
Net()(*inputs)
