#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from op_test import (
    OpTest,
    check_cudnn_version_and_compute_capability,
    convert_float_to_uint16,
    get_device_place,
    get_places,
    is_custom_device,
)
from utils import dygraph_guard, static_guard

import paddle
import paddle.nn.functional as F
from paddle import base, compat
from paddle.base import core

# Seed the module-level RNG so reference data is reproducible across runs.
np.random.seed(10)


def stable_softmax(x):
    """Numerically stable softmax over all elements of ``x``.

    The input is shifted by its global maximum before exponentiating;
    the shift is floored at -64 so that a later log(exp(shift)) in a
    loss computation cannot hit log(0) = -inf.
    """
    shifted = np.clip(x - np.max(x), -64.0, None)
    weights = np.exp(shifted)
    return weights / weights.sum()


def ref_softmax(x, axis=None, dtype=None):
    """NumPy reference softmax applied along ``axis`` (default: last).

    Args:
        x: input ndarray; it is never mutated.
        axis: axis to normalize over; ``None`` means the last axis.
        dtype: optional dtype to cast the input to before the softmax.

    Returns:
        ndarray of softmax values with the same shape as ``x``.
    """
    # ``astype`` already returns a copy, and ``apply_along_axis`` builds a
    # fresh output array, so no explicit ``x.copy()`` is needed.
    x_t = x if dtype is None else x.astype(dtype)
    if axis is None:
        axis = -1
    return np.apply_along_axis(stable_softmax, axis, x_t)


class TestSoftmaxOp(OpTest):
    """Operator-level test for ``softmax``.

    Default configuration: shape [10, 10], last axis, float64 (float32 on
    ROCm, since MIOpen lacks float64). Subclasses vary shape/axis/kernel by
    overriding :meth:`get_x_shape`, :meth:`get_axis`, and
    :meth:`init_kernel_type`.
    """

    def get_x_shape(self):
        # Shape of the random input; overridden by subclasses.
        return [10, 10]

    def get_axis(self):
        # Axis the softmax normalizes over; overridden by subclasses.
        return -1

    def setUp(self):
        self.op_type = "softmax"
        self.prim_op_type = "comp"
        self.python_api = F.softmax
        self.public_python_api = F.softmax
        self.use_cudnn = False
        self.use_onednn = False
        # explicitly use float32 for ROCm, as MIOpen does not yet support float64
        self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
        self.init_kernel_type()
        self.shape = self.get_x_shape()
        self.axis = self.get_axis()

        np.random.seed(0)
        # Inputs are drawn away from 0 so the reference softmax is well behaved.
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.apply_along_axis(stable_softmax, self.axis, x)

        self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {
            'axis': self.axis,
            'use_cudnn': self.use_cudnn,
            'use_onednn': self.use_onednn,
        }
        self.enable_cinn = True

    def init_kernel_type(self):
        # Hook for subclasses to switch dtype or enable the cuDNN kernel.
        pass

    def test_check_output(self):
        """Forward check; runs on the device place when cuDNN is enabled."""
        # TODO(wangzhongpu): support onednn op in dygraph mode
        if self.use_cudnn:
            place = get_device_place()
            self.check_output_with_place(
                place,
                atol=1e-5,
                check_prim=False,
                check_pir=True,
                check_prim_pir=True,
                # NOTE(review): check_pir_onednn appears to be provided by
                # the OpTest base class -- confirm.
                check_pir_onednn=self.check_pir_onednn,
                check_symbol_infer=False,
            )
        else:
            self.check_output(
                check_prim=False,
                check_pir=True,
                check_prim_pir=True,
                check_pir_onednn=self.check_pir_onednn,
                check_symbol_infer=False,
            )

    def test_check_grad(self):
        """Gradient check w.r.t. X with a 1% relative tolerance."""
        # TODO(wangzhongpu): support onednn op in dygraph mode
        if self.use_cudnn or self.dtype == np.float16:
            place = get_device_place()
            # Skipped silently if float16 is unsupported on this device.
            if core.is_float16_supported(place):
                self.check_grad_with_place(
                    place,
                    ["X"],
                    "Out",
                    max_relative_error=0.01,
                    check_dygraph=(not self.use_onednn),
                    check_pir=True,
                    check_prim_pir=True,
                    check_pir_onednn=self.check_pir_onednn,
                )
        else:
            self.check_grad(
                ["X"],
                "Out",
                max_relative_error=0.01,
                check_dygraph=(not self.use_onednn),
                check_prim=False,
                check_pir=True,
                check_prim_pir=True,
                check_pir_onednn=self.check_pir_onednn,
            )


class TestSoftmaxOpfp32(TestSoftmaxOp):
    """Base softmax test forced to float32 regardless of platform."""

    def init_kernel_type(self):
        # Override the dtype chosen in ``TestSoftmaxOp.setUp``.
        self.dtype = np.float32


class TestSoftmaxOp_ZeroDim1(TestSoftmaxOp):
    """Softmax on a 0-D (scalar) input without cuDNN.

    A softmax over a single element is always 1.0, which is used directly
    as the expected output.
    """

    def setUp(self):
        self.op_type = "softmax"
        self.prim_op_type = "comp"
        self.python_api = F.softmax
        self.public_python_api = F.softmax
        self.use_cudnn = False
        self.use_onednn = False
        # explicitly use float32 for ROCm, as MIOpen does not yet support float64
        self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
        self.init_kernel_type()

        np.random.seed(0)
        # 0-D input; the softmax of a lone scalar is exactly 1.
        x = np.random.uniform(0.1, 1, []).astype(self.dtype)
        out = np.array(1.0).astype(self.dtype)

        self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {
            'axis': -1,
            'use_cudnn': self.use_cudnn,
            'use_onednn': self.use_onednn,
        }
        # CINN disabled for the 0-D case.
        self.enable_cinn = False

    def test_check_output(self):
        # TODO(wangzhongpu): support onednn op in dygraph mode
        if self.use_cudnn:
            place = get_device_place()
            self.check_output_with_place(
                place,
                atol=1e-5,
                check_pir=True,
                check_prim_pir=True,
                check_pir_onednn=self.check_pir_onednn,
                check_symbol_infer=False,
            )
        else:
            self.check_output(
                check_prim=False,
                check_pir=True,
                check_prim_pir=True,
                check_pir_onednn=self.check_pir_onednn,
                check_symbol_infer=False,
            )


@unittest.skipIf(
    not (core.is_compiled_with_cuda() or is_custom_device()),
    "core is not compiled with CUDA",
)
class TestSoftmaxOp_ZeroDim2(TestSoftmaxOp):
    """Softmax on a 0-D (scalar) input through the cuDNN kernel."""

    def setUp(self):
        self.op_type = "softmax"
        self.python_api = F.softmax
        self.public_python_api = F.softmax
        self.prim_op_type = "comp"
        self.use_cudnn = True
        self.use_onednn = False
        # explicitly use float32 for ROCm, as MIOpen does not yet support float64
        self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64

        np.random.seed(0)
        # 0-D input; the softmax of a lone scalar is exactly 1.
        x = np.random.uniform(0.1, 1, []).astype(self.dtype)
        out = np.array(1.0).astype(self.dtype)

        self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {
            'axis': -1,
            'use_cudnn': self.use_cudnn,
            'use_onednn': self.use_onednn,
        }
        # CINN disabled for the 0-D case.
        self.enable_cinn = False

    def test_check_output(self):
        # TODO(wangzhongpu): support onednn op in dygraph mode
        if self.use_cudnn:
            place = get_device_place()
            self.check_output_with_place(
                place,
                check_prim=False,
                atol=1e-5,
                check_pir=True,
                check_prim_pir=True,
                check_pir_onednn=self.check_pir_onednn,
                check_symbol_infer=False,
            )
        else:
            self.check_output(
                check_prim=False,
                check_pir=True,
                check_prim_pir=True,
                check_pir_onednn=self.check_pir_onednn,
                check_symbol_infer=False,
            )


class TestSoftmaxOp2(TestSoftmaxOp):
    """4-D input, default (last) axis."""

    def get_x_shape(self):
        return [2, 3, 4, 5]


class TestSoftmaxOp3(TestSoftmaxOp):
    """4-D input, normalizing over axis 0."""

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 0


class TestSoftmaxOp4(TestSoftmaxOp):
    """4-D input, normalizing over axis 1."""

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 1


class TestSoftmaxOp5(TestSoftmaxOp):
    """4-D input, normalizing over axis 2."""

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 2


class TestSoftmaxOp6(TestSoftmaxOp):
    """4-D input, normalizing over axis 3 (the last axis, explicitly)."""

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 3


@unittest.skipIf(
    not core.is_compiled_with_cuda() and not is_custom_device(),
    "core is not compiled with CUDA",
)
class TestSoftmaxCUDNNOp(TestSoftmaxOp):
    """Base softmax test routed through the cuDNN kernel."""

    def init_kernel_type(self):
        self.use_cudnn = True


@unittest.skipIf(
    not core.is_compiled_with_cuda() and not is_custom_device(),
    "core is not compiled with CUDA",
)
class TestSoftmaxCUDNNOp2(TestSoftmaxCUDNNOp):
    """cuDNN softmax on a 4-D input, default (last) axis."""

    def get_x_shape(self):
        return [2, 3, 4, 5]


@unittest.skipIf(
    not core.is_compiled_with_cuda() and not is_custom_device(),
    "core is not compiled with CUDA",
)
class TestSoftmaxCUDNNOp3(TestSoftmaxCUDNNOp):
    """cuDNN softmax on a 4-D input, normalizing over axis 0."""

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 0


@unittest.skipIf(
    not core.is_compiled_with_cuda() and not is_custom_device(),
    "core is not compiled with CUDA",
)
class TestSoftmaxCUDNNOp4(TestSoftmaxCUDNNOp):
    """cuDNN softmax on a 4-D input, normalizing over axis 1."""

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 1


@unittest.skipIf(
    not core.is_compiled_with_cuda() and not is_custom_device(),
    "core is not compiled with CUDA",
)
class TestSoftmaxCUDNNOp5(TestSoftmaxCUDNNOp):
    """cuDNN softmax on a 4-D input, normalizing over axis 2."""

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 2


@unittest.skipIf(
    not core.is_compiled_with_cuda() and not is_custom_device(),
    "core is not compiled with CUDA",
)
class TestSoftmaxCUDNNOp6(TestSoftmaxCUDNNOp):
    """cuDNN softmax on a 4-D input, normalizing over axis 3."""

    def get_x_shape(self):
        return [2, 3, 4, 5]

    def get_axis(self):
        return 3


@unittest.skipIf(
    not core.is_compiled_with_cuda() and not is_custom_device(),
    "core is not compiled with CUDA",
)
class TestSoftmaxCUDNNOp7(TestSoftmaxCUDNNOp):
    """cuDNN softmax on a 5-D input, default (last) axis."""

    def get_x_shape(self):
        return [2, 3, 4, 5, 6]


@unittest.skipIf(
    not core.is_compiled_with_cuda() and not is_custom_device(),
    "core is not compiled with CUDA",
)
class TestSoftmaxCUDNNOp8(TestSoftmaxCUDNNOp):
    """cuDNN softmax on a 5-D input, normalizing over axis 0."""

    def get_x_shape(self):
        return [2, 3, 4, 5, 6]

    def get_axis(self):
        return 0


@unittest.skipIf(
    not core.is_compiled_with_cuda() and not is_custom_device(),
    "core is not compiled with CUDA",
)
class TestSoftmaxCUDNNOp9(TestSoftmaxCUDNNOp):
    """cuDNN softmax on a 5-D input, normalizing over axis 1."""

    def get_x_shape(self):
        return [2, 3, 4, 5, 6]

    def get_axis(self):
        return 1


@unittest.skipIf(
    not core.is_compiled_with_cuda() and not is_custom_device(),
    "core is not compiled with CUDA",
)
class TestSoftmaxCUDNNOp10(TestSoftmaxCUDNNOp):
    """cuDNN softmax on a 5-D input, normalizing over axis 2."""

    def get_x_shape(self):
        return [2, 3, 4, 5, 6]

    def get_axis(self):
        return 2


@unittest.skipIf(
    not core.is_compiled_with_cuda() and not is_custom_device(),
    "core is not compiled with CUDA",
)
class TestSoftmaxCUDNNOp11(TestSoftmaxCUDNNOp):
    """cuDNN softmax on a 5-D input, normalizing over axis 3."""

    def get_x_shape(self):
        return [2, 3, 4, 5, 6]

    def get_axis(self):
        return 3


@unittest.skipIf(
    not core.is_compiled_with_cuda() and not is_custom_device(),
    "core is not compiled with CUDA",
)
class TestSoftmaxCUDNNOp12(TestSoftmaxCUDNNOp):
    """cuDNN softmax on a 5-D input, normalizing over axis 4 (last)."""

    def get_x_shape(self):
        return [2, 3, 4, 5, 6]

    def get_axis(self):
        return 4


@unittest.skipIf(
    not (core.is_compiled_with_cuda() or is_custom_device()),
    "core is not compiled with CUDA",
)
class TestSoftmaxFP16Op(TestSoftmaxOp):
    """float16 softmax on the plain (non-cuDNN) device kernel."""

    def init_kernel_type(self):
        self.dtype = np.float16

    def test_check_output(self):
        # The output check only runs where float16 is actually supported;
        # otherwise this test passes vacuously.
        if core.is_compiled_with_cuda() or is_custom_device():
            place = get_device_place()
            if core.is_float16_supported(place):
                self.check_output_with_place(
                    place,
                    atol=1e-3,
                    check_prim=False,
                    check_pir=True,
                    check_prim_pir=True,
                    check_pir_onednn=self.check_pir_onednn,
                )

    # FIXME: If the x_shape is [10, 10], gradient failed.
    def test_check_grad(self):
        # Gradient check intentionally disabled (see FIXME above).
        pass


@unittest.skipIf(
    not core.is_compiled_with_cuda() and not is_custom_device(),
    "core is not compiled with CUDA",
)
class TestSoftmaxFP16Op2(TestSoftmaxFP16Op):
    """float16 softmax on a 4-D input, default (last) axis."""

    def get_x_shape(self):
        return [2, 3, 4, 10]


@unittest.skipIf(
    not (core.is_compiled_with_cuda() or is_custom_device()),
    "core is not compiled with CUDA",
)
class TestSoftmaxFP16CUDNNOp(TestSoftmaxOp):
    """float16 softmax routed through the cuDNN kernel."""

    def init_kernel_type(self):
        self.use_cudnn = True
        self.dtype = np.float16

    def test_check_output(self):
        # The output check only runs where float16 is actually supported;
        # otherwise this test passes vacuously.
        if core.is_compiled_with_cuda() or is_custom_device():
            place = get_device_place()
            if core.is_float16_supported(place):
                self.check_output_with_place(
                    place,
                    atol=1e-3,
                    check_prim=False,
                    check_pir=True,
                    check_prim_pir=True,
                    check_pir_onednn=self.check_pir_onednn,
                )


@unittest.skipIf(
    not core.is_compiled_with_cuda() and not is_custom_device(),
    "core is not compiled with CUDA",
)
class TestSoftmaxFP16CUDNNOp2(TestSoftmaxFP16CUDNNOp):
    """float16 cuDNN softmax on a 4-D input, default (last) axis."""

    def get_x_shape(self):
        return [2, 3, 4, 5]


@unittest.skipIf(
    not (core.is_compiled_with_cuda() or is_custom_device())
    or core.is_compiled_with_rocm(),
    "core is not compiled with CUDA",
)
class TestSoftmaxBF16Op(OpTest):
    """bfloat16 softmax test.

    Reference data is generated in float32 and converted to uint16
    (bfloat16 bit pattern) for both input and expected output. Skipped on
    ROCm and on builds without CUDA / a custom device.
    """

    def setUp(self):
        self.op_type = "softmax"
        self.prim_op_type = "comp"
        self.python_api = F.softmax
        self.public_python_api = F.softmax
        # Hook so the cuDNN subclass can flip this flag.
        self.use_cudnn = self.init_cudnn()
        self.use_onednn = False
        # uint16 carries the bfloat16 bit pattern in OpTest.
        self.dtype = np.uint16
        self.shape = [10, 10]
        self.axis = -1

        np.random.seed(0)
        x = np.random.uniform(0.1, 1, self.shape).astype(np.float32)
        out = np.apply_along_axis(stable_softmax, self.axis, x)

        self.inputs = {
            'X': OpTest.np_dtype_to_base_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}
        self.attrs = {
            'axis': self.axis,
            'use_cudnn': self.use_cudnn,
            'use_onednn': self.use_onednn,
        }

    def init_cudnn(self):
        # Overridden by TestSoftmaxBF16CUDNNOp to return True.
        return False

    def test_check_output(self):
        place = get_device_place()
        self.check_output_with_place(
            place,
            check_dygraph=(not self.use_onednn),
            check_prim=False,
            check_pir=(not self.use_onednn),
            check_prim_pir=(not self.use_onednn),
            check_pir_onednn=self.check_pir_onednn,
            check_symbol_infer=False,
        )

    def test_check_grad(self):
        place = get_device_place()
        self.check_grad_with_place(
            place,
            ["X"],
            "Out",
            # Looser numeric delta to accommodate bfloat16 precision.
            numeric_grad_delta=0.05,
            check_dygraph=(not self.use_onednn),
            check_prim=False,
            check_pir=(not self.use_onednn),
            check_prim_pir=(not self.use_onednn),
            check_pir_onednn=self.check_pir_onednn,
        )


@unittest.skipIf(
    not check_cudnn_version_and_compute_capability(8100, 8),
    "only support compiled with CUDA or custom device, and for CUDA cudnn version need larger than 8.1.0 and device's compute capability is at least 8.0",
)
class TestSoftmaxBF16CUDNNOp(TestSoftmaxBF16Op):
    """bfloat16 softmax routed through the cuDNN kernel."""

    def init_cudnn(self):
        # Flip the flag consumed by ``TestSoftmaxBF16Op.setUp``.
        return True


class TestSoftmaxAPI(unittest.TestCase):
    """API-level checks for ``F.softmax`` and ``paddle.nn.Softmax`` in
    static and dygraph modes, including the ``dtype`` argument and type
    error cases.
    """

    def setUp(self):
        self.place = get_device_place()
        self.x_np = np.random.uniform(-1.0, 1.0, [2, 3, 4, 5]).astype('float32')
        self.out_ref = np.apply_along_axis(stable_softmax, -1, self.x_np)
        self.executed_api()

    def executed_api(self):
        # Hook so subclasses (e.g. the inplace variant) can swap the API.
        self.softmax = F.softmax

    def test_static_check(self):
        with static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, 'float32')
                out1 = self.softmax(x)
                m = paddle.nn.Softmax()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_softmax(self.x_np, axis=-1, dtype=None)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_check(self):
        paddle.disable_static(self.place)

        # Default axis (-1): functional and layer forms must agree.
        x = paddle.to_tensor(self.x_np)
        out1 = self.softmax(x)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.Softmax()
        out2 = m(x)
        out_ref = ref_softmax(self.x_np, axis=-1, dtype=None)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        # Explicit axis=0.
        out1 = self.softmax(x, axis=0)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.Softmax(axis=0)
        out2 = m(x)
        out_ref = ref_softmax(self.x_np, axis=0, dtype=None)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        # explicitly use float32 for ROCm, as MIOpen does not yet support float64
        if core.is_compiled_with_rocm():
            out = self.softmax(x, dtype=np.float32)
            out_ref = ref_softmax(self.x_np, axis=-1, dtype=np.float32)
        else:
            out = self.softmax(x, dtype=np.float64)
            out_ref = ref_softmax(self.x_np, axis=-1, dtype=np.float64)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

        paddle.enable_static()

    def test_error(self):
        with (
            static_guard(),
            paddle.static.program_guard(paddle.static.Program()),
        ):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.softmax, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.static.data(
                name='x_int32', shape=[2, 3], dtype='int32'
            )
            self.assertRaises(TypeError, self.softmax, x_int32)

            if core.is_compiled_with_cuda() or is_custom_device():
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[2, 3], dtype='float16'
                )
                self.softmax(x_fp16)


class TestSoftmaxAPI_ZeroDim(unittest.TestCase):
    """Softmax on a 0-D tensor: shapes must stay 0-D through forward and
    backward in both dygraph and static graph modes.
    """

    def test_dygraph(self):
        paddle.disable_static()
        x = paddle.rand([])
        x.stop_gradient = False
        x.retain_grads()

        out = paddle.nn.functional.softmax(x)
        out.retain_grads()
        out.backward()
        # Input, output, and both gradients remain 0-D.
        self.assertEqual(x.shape, [])
        self.assertEqual(x.grad.shape, [])
        self.assertEqual(out.shape, [])
        self.assertEqual(out.grad.shape, [])

        paddle.enable_static()

    def test_static(self):
        with static_guard():
            main_prog = base.Program()
            with base.program_guard(main_prog, base.Program()):
                x = paddle.rand([])
                x.stop_gradient = False
                out = paddle.nn.functional.softmax(x)

                # Test compile shape
                self.assertEqual(tuple(x.shape), ())
                self.assertEqual(tuple(out.shape), ())

                exe = base.Executor()
                result = exe.run(main_prog, fetch_list=[x, out])

                # Test runtime shape
                self.assertEqual(tuple(result[0].shape), ())
                self.assertEqual(tuple(result[1].shape), ())


class TestSoftmaxInplaceAPI(TestSoftmaxAPI):
    """Re-run all the API checks using the in-place variant ``F.softmax_``."""

    def executed_api(self):
        self.softmax = F.softmax_


class TestSoftmaxAPI_ZeroSize(unittest.TestCase):
    """Softmax on a zero-size tensor must run forward and backward
    without error and preserve the input shape."""

    def test_dygraph(self):
        for place in get_places():
            paddle.disable_static(place)
            x = paddle.rand([0, 2, 3])
            x.stop_gradient = False
            x.retain_grads()
            out = paddle.nn.functional.softmax(x)
            out.retain_grads()
            out.backward()
            # Check the output shape explicitly. The previous comparison
            # against a freshly drawn np.random.random([0, 2, 3]) was
            # vacuous: both arrays are empty, so it passed regardless of
            # the values produced.
            self.assertEqual(out.shape, [0, 2, 3])
            np.testing.assert_allclose(x.grad.shape, x.shape)
            paddle.enable_static()


class TestSoftmaxCompatibility(unittest.TestCase):
    """Parameter-alias compatibility: ``x``/``input`` and ``axis``/``dim``
    must all be accepted by ``F.softmax`` and give identical results.
    """

    def setUp(self):
        self.input = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
        self.axes = [0, 1]
        self.places = [paddle.CPUPlace()]
        if paddle.base.core.is_compiled_with_cuda() or is_custom_device():
            self.places.append(get_device_place())

    def test_gather_with_param_aliases(self):
        with dygraph_guard():
            for place in self.places:
                paddle.device.set_device(place)
                for axis in self.axes:
                    input_tensor = paddle.to_tensor(self.input, dtype='float32')
                    # Try every alias combination for the two parameters.
                    for param_x in ['x', 'input']:
                        for param_axis in ['axis', 'dim']:
                            kwargs = {param_x: input_tensor, param_axis: axis}
                            result = paddle.nn.functional.softmax(**kwargs)
                            # Inline numpy reference softmax along ``axis``.
                            expected = np.exp(
                                input_tensor.numpy()
                                - np.max(
                                    input_tensor.numpy(),
                                    axis=axis,
                                    keepdims=True,
                                )
                            )
                            expected = expected / np.sum(
                                expected, axis=axis, keepdims=True
                            )
                            np.testing.assert_allclose(
                                (
                                    result.numpy()
                                    if place.is_cpu_place()
                                    else result.cpu().numpy()
                                ),
                                expected,
                                rtol=1e-5,
                                err_msg=f"Failed at axis={axis}, param_x={param_x}, param_axis={param_axis}",
                            )


class TestSoftmaxAPI_CompatibleWithTorch1(TestSoftmaxAPI):
    """Torch-compatibility checks for ``F.softmax`` across input ranks
    1 to 5, always normalizing over the last axis.
    """

    # paddle.nn.functional.softmax(x, axis=-1, dtype=None, name=None)
    def setUp(self):
        self.place = get_device_place()
        self.executed_api()
        # One input per rank 1..5, with shapes [2], [2, 3], ..., [2, 3, 4, 5, 6].
        self.x_np_list = [
            np.random.uniform(-1.0, 1.0, list(range(2, ndim + 2))).astype(
                'float32'
            )
            for ndim in range(1, 6)
        ]
        self.out_ref_list = [
            ref_softmax(x_np, axis=-1, dtype=None) for x_np in self.x_np_list
        ]

    def test_static_check(self):
        with static_guard():
            for x_np, out_ref in zip(self.x_np_list, self.out_ref_list):
                func = F.softmax
                with paddle.static.program_guard(paddle.static.Program()):
                    x = paddle.static.data('X', x_np.shape, 'float32')
                    out1 = func(x=x, axis=-1)
                    out2 = func(x)
                    exe = paddle.static.Executor(self.place)
                    res = exe.run(feed={'X': x_np}, fetch_list=[out1, out2])
                    for rr in res:
                        np.testing.assert_allclose(out_ref, rr, rtol=1e-05)

    def test_dygraph_check(self):
        paddle.disable_static(self.place)
        for x_np, out_ref in zip(self.x_np_list, self.out_ref_list):
            func = F.softmax
            x = paddle.to_tensor(x_np)
            out1 = func(x=x, axis=-1)
            x = paddle.to_tensor(x_np)
            out2 = func(x)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

            # explicitly use float32 for ROCm, as MIOpen does not yet support float64
            if core.is_compiled_with_rocm():
                out = func(x, dtype=np.float32)
                out_ref = ref_softmax(x_np, axis=-1, dtype=np.float32)
            else:
                out = func(x, dtype=np.float64)
                out_ref = ref_softmax(x_np, axis=-1, dtype=np.float64)
            np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

        paddle.enable_static()


class TestSoftmaxAPI_CompatibleWithTorch2(TestSoftmaxAPI):
    """Torch-compatibility checks for the ``dim``-style softmax entry
    points (``compat.nn.functional.softmax``, ``paddle.softmax``,
    ``paddle.special.softmax``, ``paddle.Tensor.softmax``) across input
    ranks 1 to 5, including the implicit-dim default rule.
    """

    # paddle.softmax(Tensor input, int dim, dtype = None, *, Tensor out = None)
    # paddle.Tensor.softmax(dim, dtype = None)
    # paddle.special.softmax(input, dim, *, dtype=None)
    # torch.nn.functional.softmax(input, dim=None, _stacklevel=3, dtype=None)
    # torch.softmax(Tensor input, int dim, dtype = None, *, Tensor out = None)
    # torch.Tensor.softmax(int dim, dtype = None)
    # torch.special.softmax(input, dim, *, dtype=None)
    def _get_softmax_dim(self, ndim: int) -> int:
        # Mirrors torch's legacy implicit-dim rule used when dim is None:
        # dim 0 for ndim in {0, 1, 3}, otherwise dim 1.
        if ndim == 0 or ndim == 1 or ndim == 3:
            ret = 0
        else:
            ret = 1
        return ret

    def setUp(self):
        self.place = get_device_place()
        self.executed_api()
        # One input per rank 1..5, with shapes [2], [2, 3], ..., [2, 3, 4, 5, 6].
        self.x_np_list = [
            np.random.uniform(-1.0, 1.0, list(range(2, ndim + 2))).astype(
                'float32'
            )
            for ndim in range(1, 6)
        ]
        # References use the implicit-dim rule, matching dim=None behavior.
        self.out_ref_list = [
            ref_softmax(x_np, axis=self._get_softmax_dim(x_np.ndim), dtype=None)
            for x_np in self.x_np_list
        ]

    def test_static_check(self):
        with static_guard():
            for x_np, out_ref in zip(self.x_np_list, self.out_ref_list):
                # compat.nn.functional.softmax with dim=None / _stacklevel.
                func = compat.nn.functional.softmax
                with paddle.static.program_guard(paddle.static.Program()):
                    x = paddle.static.data('X', x_np.shape, 'float32')
                    out1 = func(input=x, dim=None, _stacklevel=3)
                    out2 = func(x, None, 3)
                    exe = paddle.static.Executor(self.place)
                    res = exe.run(feed={'X': x_np}, fetch_list=[out1, out2])
                    for rr in res:
                        np.testing.assert_allclose(out_ref, rr, rtol=1e-05)

                # paddle.softmax with the ``out=`` keyword.
                func = paddle.softmax
                with paddle.static.program_guard(paddle.static.Program()):
                    x = paddle.static.data('X', x_np.shape, 'float32')
                    # pir can not support out
                    out1 = func(input=x, dim=None, out=None)
                    out2 = func(x, out=None)
                    exe = paddle.static.Executor(self.place)
                    res = exe.run(
                        feed={'X': x_np},
                        fetch_list=[out1, out2],
                    )
                    for rr in res:
                        np.testing.assert_allclose(out_ref, rr, rtol=1e-05)

                # paddle.special.softmax.
                func = paddle.special.softmax
                with paddle.static.program_guard(paddle.static.Program()):
                    x = paddle.static.data('X', x_np.shape, 'float32')
                    out1 = func(input=x, dim=None)
                    out2 = func(x)
                    exe = paddle.static.Executor(self.place)
                    res = exe.run(
                        feed={'X': x_np},
                        fetch_list=[out1, out2],
                    )
                    for rr in res:
                        np.testing.assert_allclose(out_ref, rr, rtol=1e-05)

                # paddle.Tensor.softmax called as an unbound function.
                func = paddle.Tensor.softmax
                with paddle.static.program_guard(paddle.static.Program()):
                    x = paddle.static.data('X', x_np.shape, 'float32')
                    out1 = func(input=x, dim=None)
                    out2 = func(x)
                    exe = paddle.static.Executor(self.place)
                    res = exe.run(feed={'X': x_np}, fetch_list=[out1, out2])
                    for rr in res:
                        np.testing.assert_allclose(out_ref, rr, rtol=1e-05)

    def test_dygraph_check(self):
        paddle.disable_static(self.place)
        for x_np, out_ref in zip(self.x_np_list, self.out_ref_list):
            # compat.nn.functional.softmax with dim=None / _stacklevel.
            func = compat.nn.functional.softmax
            x = paddle.to_tensor(x_np)
            out1 = func(input=x, dim=None, _stacklevel=3)
            x = paddle.to_tensor(x_np)
            out2 = func(x, None, 3)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

            # explicitly use float32 for ROCm, as MIOpen does not yet support float64
            if core.is_compiled_with_rocm():
                out = func(x, dtype=np.float32)
                out_ref = ref_softmax(
                    x_np,
                    axis=self._get_softmax_dim(x_np.ndim),
                    dtype=np.float32,
                )
            else:
                out = func(x, dtype=np.float64)
                out_ref = ref_softmax(
                    x_np,
                    axis=self._get_softmax_dim(x_np.ndim),
                    dtype=np.float64,
                )
            np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

            # paddle.softmax: ``out=`` must receive the result in place too.
            func = paddle.softmax
            x = paddle.to_tensor(x_np)
            result1 = paddle.zeros(shape=x_np.shape, dtype='float32')
            out1 = func(input=x, dim=None, out=result1)
            x = paddle.to_tensor(x_np)
            result2 = paddle.zeros(shape=x_np.shape, dtype='float32')
            out2 = func(x, out=result2)
            for r in [out1, out2, result1, result2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

            # explicitly use float32 for ROCm, as MIOpen does not yet support float64
            if core.is_compiled_with_rocm():
                out = func(x, dtype=np.float32)
                out_ref = ref_softmax(
                    x_np,
                    axis=self._get_softmax_dim(x_np.ndim),
                    dtype=np.float32,
                )
            else:
                out = func(x, dtype=np.float64)
                out_ref = ref_softmax(
                    x_np,
                    axis=self._get_softmax_dim(x_np.ndim),
                    dtype=np.float64,
                )
            np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

            # paddle.special.softmax.
            func = paddle.special.softmax
            x = paddle.to_tensor(x_np)
            out1 = func(input=x, dim=None)
            x = paddle.to_tensor(x_np)
            out2 = func(x)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

            # explicitly use float32 for ROCm, as MIOpen does not yet support float64
            if core.is_compiled_with_rocm():
                out = func(x, dtype=np.float32)
                out_ref = ref_softmax(
                    x_np,
                    axis=self._get_softmax_dim(x_np.ndim),
                    dtype=np.float32,
                )
            else:
                out = func(x, dtype=np.float64)
                out_ref = ref_softmax(
                    x_np,
                    axis=self._get_softmax_dim(x_np.ndim),
                    dtype=np.float64,
                )
            np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

            # paddle.Tensor.softmax called as an unbound function.
            func = paddle.Tensor.softmax
            x = paddle.to_tensor(x_np)
            out1 = func(input=x, dim=None)
            x = paddle.to_tensor(x_np)
            out2 = func(x)
            for r in [out1, out2]:
                np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

            # explicitly use float32 for ROCm, as MIOpen does not yet support float64
            if core.is_compiled_with_rocm():
                out = func(x, dtype=np.float32)
                out_ref = ref_softmax(
                    x_np,
                    axis=self._get_softmax_dim(x_np.ndim),
                    dtype=np.float32,
                )
            else:
                out = func(x, dtype=np.float64)
                out_ref = ref_softmax(
                    x_np,
                    axis=self._get_softmax_dim(x_np.ndim),
                    dtype=np.float64,
                )
            np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

        paddle.enable_static()

    def test_forbid_keywords(self):
        # Mixing the paddle-style and torch-style keyword sets must raise.
        with (
            static_guard(),
            paddle.static.program_guard(paddle.static.Program()),
        ):
            x = paddle.static.data('X', [2, 3], 'float32')
            self.assertRaises(
                TypeError, compat.nn.functional.softmax, x=x, axis=-1
            )
            self.assertRaises(
                TypeError, compat.nn.functional.softmax, x=x, dim=-1
            )
            self.assertRaises(
                TypeError, compat.nn.functional.softmax, input=x, axis=-1
            )

            if core.is_compiled_with_cuda() or is_custom_device():
                compat.nn.functional.softmax(input=x, dim=-1)


# Run the whole suite when executed directly.
if __name__ == "__main__":
    unittest.main()
