import mindspore
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf
from mindspore import log as logger
from mindspore.common import dtype_to_nptype
from mindspore.common.tensor import Tensor
from mindspore.nn import Cell
from mindspore.ops import operations as op

from ...grad import GradOfFirstInput, GradOfAllInputs
from ...meta import OpsFactory
from ...utils import allclose_nparray

# Global TensorFlow configuration: force TF1 graph-mode semantics so the
# tf.compat.v1.Session-based reference implementations below work.
# NOTE(review): lines L3/L4 import both `tensorflow` and
# `tensorflow.compat.v1` under the same name `tf` — the v1 alias wins, so
# `tf` here is already the compat.v1 module; the v2 import is shadowed.
tf.disable_v2_behavior()

# Hide all GPUs from TF so the reference runs on CPU (keeps results
# deterministic and avoids fighting MindSpore for device memory).
tf.config.experimental.set_visible_devices([], 'GPU')
# Redundant with disable_v2_behavior() but kept for safety: make sure eager
# execution is off before any Session is created.
tf.compat.v1.disable_eager_execution()


class NotEqual(Cell):
    """Minimal Cell wrapping the element-wise ``NotEqual`` primitive.

    Forwards both inputs straight to ``ops.NotEqual`` and returns the
    boolean comparison result.
    """

    def __init__(self):
        super(NotEqual, self).__init__()
        # Instantiate the primitive once; reused on every construct() call.
        self.notequal = op.NotEqual()

    def construct(self, x, y):
        # Element-wise x != y -> bool tensor.
        return self.notequal(x, y)


class NotEqualMock(OpsFactory):
    """Cross-checks MindSpore's ``NotEqual`` against TensorFlow's ``tf.raw_ops.NotEqual``.

    Runs forward, dynamic-shape forward, and gradient comparisons, asserting
    the two frameworks agree within ``self.loss`` tolerance (tolerance comes
    from the ``OpsFactory`` base, keyed by dtype).

    Args:
        x (Tensor): first MindSpore input operand.
        y (Tensor): second MindSpore input operand.
        grads (sequence of Tensor, optional): pre-supplied output gradient;
            when absent, a random one is generated lazily from the forward
            output shape.
    """

    def __init__(self, x, y, grads=None):
        # Derive the numpy dtype (and thereby the tolerance) from the input.
        self.ms_type = x.dtype
        super().__init__(dtype=dtype_to_nptype(self.ms_type))

        self.x = x
        self.y = y

        if grads is None or len(grads) == 0:
            self.output_grad_np = None
        else:
            self.output_grad_np = grads[0].asnumpy()

    def _is_complex(self):
        """Return True when the operand dtype is complex.

        BUG FIX: the original wrote ``self.dtype == np.complex64 or
        np.complex128`` which is always truthy (``np.complex128`` is a
        non-empty object), so the complex branch ran for every dtype.
        """
        return self.dtype in (np.complex64, np.complex128)

    def _dynamic_placeholder(self, tensor):
        """Build a dynamic-shape placeholder with ``tensor``'s rank and dtype.

        BUG FIX: the original passed ``[None for _ in shape][0]`` — i.e. the
        scalar ``None`` — as the shape, and passed concrete data alongside it,
        so no per-dimension dynamic shape was ever registered. A dynamic
        placeholder is a data-less Tensor whose shape is ``None`` per axis.
        """
        return Tensor(shape=[None] * len(tensor.shape), dtype=tensor.dtype)

    def _ensure_output_grad(self):
        """Lazily create a random output gradient matching the forward output shape."""
        if self.output_grad_np is None:
            self.output_grad_np = np.random.randn(
                *list(self.forward_tensorflow_impl().shape)).astype(self.dtype)

    def forward_cmp(self):
        """Compare static-shape forward results between TF and MindSpore."""
        out_tf = self.forward_tensorflow_impl()
        out_mindspore = self.forward_mindspore_impl()
        if self._is_complex():
            # Complex tensors are compared component-wise.
            allclose_nparray(np.real(out_tf), np.real(out_mindspore), self.loss, self.loss)
            allclose_nparray(np.imag(out_tf), np.imag(out_mindspore), self.loss, self.loss)
        else:
            allclose_nparray(out_tf, out_mindspore, self.loss, self.loss)

    def forward_mindspore_impl(self):
        """Run MindSpore NotEqual with static shapes; return a numpy array."""
        net = NotEqual()
        out = net(self.x, self.y)
        return out.asnumpy()

    def forward_mindspore_dynamic_shape_impl(self):
        """Run MindSpore NotEqual with dynamic-rank input placeholders."""
        net = NotEqual()
        # Register dynamic (per-axis None) shapes, then feed concrete data.
        net.set_inputs(self._dynamic_placeholder(self.x),
                       self._dynamic_placeholder(self.y))
        out = net(self.x, self.y)
        return out.asnumpy()

    def forward_tensorflow_impl(self):
        """Reference forward pass via tf.raw_ops.NotEqual in a TF1 session."""
        x = tf.Variable(self.x.asnumpy())
        y = tf.Variable(self.y.asnumpy())

        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            out = sess.run(tf.raw_ops.NotEqual(x=x, y=y))
        # Cast the bool result to the operand dtype for tolerance comparison.
        return out.astype(self.dtype)

    def forward_dynamic_shape_cmp(self):
        """Compare dynamic-shape MindSpore forward against the TF reference."""
        out_mindspore = self.forward_mindspore_dynamic_shape_impl()
        out_tf = self.forward_tensorflow_impl()
        allclose_nparray(out_tf, out_mindspore, self.loss, self.loss)

    def grad_cmp(self):
        """Compare input gradients between TF and MindSpore."""
        input_grad_mindspore = self.grad_mindspore_impl()
        input_grad_tf = self.grad_tensorflow_impl()

        if self._is_complex():
            allclose_nparray(np.real(input_grad_tf), np.real(input_grad_mindspore),
                             self.loss, self.loss)
            allclose_nparray(np.imag(input_grad_tf), np.imag(input_grad_mindspore),
                             self.loss, self.loss)
        else:
            allclose_nparray(input_grad_tf, input_grad_mindspore, self.loss, self.loss)

    def grad_mindspore_impl(self):
        """MindSpore backward pass; returns gradients w.r.t. both inputs."""
        self._ensure_output_grad()
        # NotEqual produces a bool output, so the sense gradient is fed as
        # bool too. BUG FIX: np.bool was removed in NumPy >= 1.24; use np.bool_.
        out_grad = Tensor(self.output_grad_np.astype(np.bool_))

        notequal_grad = GradOfAllInputs(NotEqual())
        notequal_grad.set_train()
        return notequal_grad(self.x, self.y, out_grad)

    def grad_tensorflow_impl(self):
        """Reference backward pass: cast the bool output and differentiate."""
        # Guard added: the MindSpore side lazily creates output_grad_np, but
        # the original TF side dereferenced it unconditionally and crashed
        # when called first with no grads supplied.
        self._ensure_output_grad()
        x_var = tf.Variable(self.x.asnumpy())
        y_var = tf.Variable(self.y.asnumpy())
        net = tf.cast(tf.raw_ops.NotEqual(x=x_var, y=y_var), dtype=self.dtype)
        grad_net = tf.gradients(net, [x_var, y_var], grad_ys=self.output_grad_np.copy())

        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            out = sess.run(grad_net)
        # Only the gradient w.r.t. x is returned, mirroring the original.
        return out[0].astype(self.dtype)

    def grad_dynamic_shape_cmp(self):
        """Compare dynamic-shape MindSpore gradients against the TF reference."""
        input_grad_mindspore = self.grad_mindspore_dynamic_shape_impl()
        input_grad_tf = self.grad_tensorflow_impl()
        allclose_nparray(input_grad_tf, input_grad_mindspore, self.loss, self.loss)

    def grad_mindspore_dynamic_shape_impl(self):
        """MindSpore backward pass with dynamic-rank input placeholders."""
        self._ensure_output_grad()
        out_grad = Tensor(self.output_grad_np.astype(np.bool_))

        notequal_grad = GradOfAllInputs(NotEqual())
        notequal_grad.set_train()
        # BUG FIX: the original rebuilt Tensor(out_grad, shape=None, ...) —
        # wrapping a Tensor in a Tensor with a scalar None shape. Register
        # proper per-axis dynamic placeholders instead.
        notequal_grad.set_inputs(self._dynamic_placeholder(self.x),
                                 self._dynamic_placeholder(self.y),
                                 self._dynamic_placeholder(out_grad))
        return notequal_grad(self.x, self.y, out_grad)
