# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from op_test import OpTest, get_device_place, get_places, is_custom_device

import paddle
from paddle.base import Program, core, program_guard


def create_kernel_case(op_type, numpy_op_type):
    """Build and register OpTest kernel cases for one arg-min/arg-max op.

    Args:
        op_type (str): Paddle op name, ``'arg_max'`` or ``'arg_min'``.
        numpy_op_type (str): matching NumPy function name, ``'argmax'`` or
            ``'argmin'``.

    The generated classes are registered in this module's ``globals()`` under
    op-specific names (e.g. ``ArgMinMaxKernelBaseCase_arg_max``) so unittest
    discovery picks them up and the two calls do not overwrite each other.
    """
    # Resolve the callables once instead of eval()-ing strings in every setUp.
    python_api = (
        paddle.Tensor.argmin if op_type == 'arg_min' else paddle.Tensor.argmax
    )
    numpy_op = getattr(np, numpy_op_type)

    class ArgMinMaxKernelBaseCase(OpTest):
        def initTestCase(self):
            # Subclasses override only this hook to change the reduced axis.
            self.op_type = op_type
            self.numpy_op_type = numpy_op_type
            self.axis = 0

        def setUp(self):
            np.random.seed(123)
            self.initTestCase()
            self.python_api = python_api
            self.dims = (4, 5, 6)
            self.dtype = "float64"
            self.x = 1000 * np.random.random(self.dims).astype(self.dtype)
            self.inputs = {'X': self.x}
            self.attrs = {"axis": self.axis}
            self.numpy_op = numpy_op
            self.outputs = {'Out': self.numpy_op(self.x, axis=self.axis)}

        def test_check_output(self):
            paddle.enable_static()
            self.check_output()

    class ArgMinMaxKernelCase0(ArgMinMaxKernelBaseCase):
        def initTestCase(self):
            self.op_type = op_type
            self.numpy_op_type = numpy_op_type
            self.axis = 1

    class ArgMinMaxKernelCase1(ArgMinMaxKernelBaseCase):
        def initTestCase(self):
            self.op_type = op_type
            self.numpy_op_type = numpy_op_type
            self.axis = 2

    class ArgMinMaxKernelCase2(ArgMinMaxKernelBaseCase):
        def initTestCase(self):
            self.op_type = op_type
            self.numpy_op_type = numpy_op_type
            self.axis = -1

    class ArgMinMaxKernelCase3(ArgMinMaxKernelBaseCase):
        def initTestCase(self):
            self.op_type = op_type
            self.numpy_op_type = numpy_op_type
            self.axis = -2

    class ArgMinMaxKernelCase4(ArgMinMaxKernelBaseCase):
        # keepdims=True: the reduced axis is kept with extent 1.
        def setUp(self):
            # Seed added for consistency with the base case (the original
            # override omitted it); expectations are computed from the same
            # random data, so the assertion is unaffected.
            np.random.seed(123)
            self.initTestCase()
            self.python_api = python_api
            self.dims = (4, 5, 6)
            self.dtype = "float64"
            self.x = 1000 * np.random.random(self.dims).astype(self.dtype)
            self.inputs = {'X': self.x}
            self.attrs = {"axis": self.axis, "keepdims": True}
            self.numpy_op = numpy_op
            self.outputs = {
                'Out': self.numpy_op(self.x, axis=self.axis).reshape((1, 5, 6))
            }

    class ArgMinMaxKernelCase5(ArgMinMaxKernelBaseCase):
        # flatten=True on a 1-D input.
        def setUp(self):
            np.random.seed(123)
            self.initTestCase()
            self.python_api = python_api
            self.dims = 4
            self.dtype = "float64"
            self.x = 1000 * np.random.random(self.dims).astype(self.dtype)
            self.inputs = {'X': self.x}
            self.attrs = {"axis": self.axis, "flatten": True}
            self.numpy_op = numpy_op
            self.outputs = {
                'Out': self.numpy_op(self.x.flatten(), axis=self.axis)
            }

    class ArgMinMaxKernelCase6(ArgMinMaxKernelBaseCase):
        # flatten=True with an explicit keepdims=False.
        def setUp(self):
            np.random.seed(123)
            self.initTestCase()
            self.python_api = python_api
            self.dims = 4
            self.dtype = "float64"
            self.x = 1000 * np.random.random(self.dims).astype(self.dtype)
            self.inputs = {'X': self.x}
            self.attrs = {"axis": self.axis, "flatten": True, "keepdims": False}
            self.numpy_op = numpy_op
            self.outputs = {
                'Out': np.array(self.numpy_op(self.x.flatten(), axis=self.axis))
            }

    # Register every generated case under a unique, op-specific name.
    for case_cls in (
        ArgMinMaxKernelBaseCase,
        ArgMinMaxKernelCase0,
        ArgMinMaxKernelCase1,
        ArgMinMaxKernelCase2,
        ArgMinMaxKernelCase3,
        ArgMinMaxKernelCase4,
        ArgMinMaxKernelCase5,
        ArgMinMaxKernelCase6,
    ):
        cls_name = f"{case_cls.__name__}_{op_type}"
        case_cls.__name__ = cls_name
        globals()[cls_name] = case_cls


# Instantiate the kernel cases for both ops.
for _op_name, _np_name in (('arg_max', 'argmax'), ('arg_min', 'argmin')):
    create_kernel_case(_op_name, _np_name)


def create_test_case(op_type):
    """Build and register a functional test case for ``paddle.<op_type>``.

    Args:
        op_type (str): ``'argmax'`` or ``'argmin'`` (also a valid NumPy
            function name).
    """

    class ArgMaxMinTestCase(unittest.TestCase):
        def setUp(self):
            np.random.seed(123)
            self.input_data = np.random.rand(10, 10).astype("float32")
            self.places = get_places()
            # getattr avoids eval() on string-built expressions.
            self.op = getattr(paddle, op_type)
            self.numpy_op = getattr(np, op_type)

        def _check_static(self, place, expected, **kwargs):
            # Build a fresh program, run self.op with the given kwargs and
            # compare the fetched result against the NumPy reference.
            with paddle.static.program_guard(paddle.static.Program()):
                data_var = paddle.static.data(
                    name="data", shape=[10, 10], dtype="float32"
                )
                result = self.op(data_var, **kwargs)
                exe = paddle.static.Executor(place)
                (result_data,) = exe.run(
                    feed={"data": self.input_data}, fetch_list=[result]
                )
                np.testing.assert_array_equal(result_data, np.asarray(expected))

        def run_static(self, place):
            paddle.enable_static()
            # Default (flattened) reduction, explicit axes, and keepdim.
            self._check_static(place, self.numpy_op(self.input_data))
            self._check_static(
                place, self.numpy_op(self.input_data, axis=1), axis=1
            )
            self._check_static(
                place, self.numpy_op(self.input_data, axis=-1), axis=-1
            )
            self._check_static(
                place,
                self.numpy_op(self.input_data, axis=-1).reshape((10, 1)),
                axis=-1,
                keepdim=True,
            )

            # The user-supplied name should appear in the output variable's
            # name (not applicable under the PIR program representation).
            with paddle.static.program_guard(paddle.static.Program()):
                data_var = paddle.static.data(
                    name="data", shape=[10, 10], dtype="float32"
                )
                result = self.op(data_var, axis=-1, name="test_arg_api")
                if paddle.framework.use_pir_api():
                    return
                self.assertIn("test_arg_api", result.name)

        def run_dygraph(self, place):
            paddle.disable_static(place)
            data_tensor = paddle.to_tensor(self.input_data)

            # case 1: default call (flattened reduction)
            expected = self.numpy_op(self.input_data)
            np.testing.assert_array_equal(
                self.op(data_tensor).numpy(), expected
            )

            # cases 2 and 3: positive and negative axis
            for axis in (1, -1):
                expected = self.numpy_op(self.input_data, axis=axis)
                np.testing.assert_array_equal(
                    self.op(data_tensor, axis=axis).numpy(), expected
                )

            # case 4: keepdim keeps the reduced axis with extent 1
            expected = self.numpy_op(self.input_data, axis=-1).reshape((10, 1))
            np.testing.assert_array_equal(
                self.op(data_tensor, axis=-1, keepdim=True).numpy(), expected
            )

            # case 5: explicit int32 output dtype
            result_data = self.op(
                data_tensor, axis=-1, keepdim=True, dtype="int32"
            )
            self.assertEqual(result_data.numpy().dtype, np.int32)

            # higher-rank inputs (4-D .. 6-D) for kernel test coverage
            for dims in ((5, 5, 5, 5), (4, 4, 4, 4, 4), (3, 3, 3, 3, 3, 3)):
                input_data = np.random.rand(*dims)
                expected = self.numpy_op(input_data, axis=0)
                result = self.op(paddle.to_tensor(input_data), axis=0)
                np.testing.assert_array_equal(result.numpy(), expected)

        def test_case(self):
            for place in self.places:
                self.run_static(place)
                self.run_dygraph(place)

    cls_name = f"ArgMaxMinTestCase_{op_type}"
    ArgMaxMinTestCase.__name__ = cls_name
    globals()[cls_name] = ArgMaxMinTestCase


# Instantiate the functional API cases for both ops.
for _api_name in ('argmin', 'argmax'):
    create_test_case(_api_name)


class TestArgMinMaxOpError(unittest.TestCase):
    """Negative tests: argmax/argmin must reject bad inputs and attributes."""

    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            # The same four invalid-argument checks apply symmetrically to
            # both APIs, so run them in a loop instead of eight copy-pasted
            # closures.  assertRaises' callable form defers each call.
            for api_name in ('argmax', 'argmin'):
                api = getattr(paddle, api_name)
                # x must be a Tensor/Variable, not a plain Python list.
                self.assertRaises(TypeError, api, x=[1, 2, 3])

                # Unique, api-specific variable name (the original argmin
                # attr test reused the name "test_argmax" by copy-paste).
                data = paddle.static.data(
                    name=f"test_{api_name}", shape=[10], dtype="float32"
                )
                # dtype must be an integer index type, not float32.
                self.assertRaises(ValueError, api, x=data, dtype="float32")
                # axis must be an int, not a float.
                self.assertRaises(TypeError, api, x=data, axis=1.2)
                # dtype=None is rejected as well.
                self.assertRaises(ValueError, api, x=data, dtype=None)


class TestArgMaxOpFp16(unittest.TestCase):
    """Smoke test: static-graph argmax accepts float16 input on a device."""

    def test_fp16(self):
        # Only meaningful on CUDA or a custom device; silently no-op on CPU,
        # matching the original guard.
        if not (core.is_compiled_with_cuda() or is_custom_device()):
            return
        x_np = np.random.random((10, 16)).astype('float16')
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data(shape=[10, 16], name='x', dtype='float16')
            out = paddle.argmax(x)
            exe = paddle.static.Executor(get_device_place())
            exe.run(paddle.static.default_startup_program())
            exe.run(feed={'x': x_np}, fetch_list=[out])


class TestArgMinOpFp16(unittest.TestCase):
    """Smoke test: static-graph argmin accepts float16 input on a device."""

    def test_fp16(self):
        # Only meaningful on CUDA or a custom device; silently no-op on CPU,
        # matching the original guard.
        if not (core.is_compiled_with_cuda() or is_custom_device()):
            return
        x_np = np.random.random((10, 16)).astype('float16')
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data(shape=[10, 16], name='x', dtype='float16')
            out = paddle.argmin(x)
            exe = paddle.static.Executor(get_device_place())
            exe.run(paddle.static.default_startup_program())
            exe.run(feed={'x': x_np}, fetch_list=[out])


class TestArgmaxAPI_Compatibility(unittest.TestCase):
    """Check all supported argument spellings of argmax/argmin.

    Covers positional args, paddle keywords (``x``/``axis``), torch-style
    alias keywords (``input``/``dim``), mixed forms, and the Tensor methods,
    in both dygraph and static modes.
    """

    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = get_places()
        self.shape = [5, 6]
        self.dtype = 'float32'
        self.init_data()

    def init_data(self):
        # Fixed seed makes the small integer input deterministic.
        self.np_input = np.random.randint(0, 8, self.shape).astype(self.dtype)

    def _test_dygraph_Compatibility(self, api_name):
        paddle.disable_static()
        x = paddle.to_tensor(self.np_input)
        paddle_api = getattr(paddle, api_name)  # avoids eval()
        # getattr dispatch replaces the original if/elif, which left
        # out5/out6 unbound for any other api_name.
        tensor_method = getattr(x, api_name)
        paddle_dygraph_out = [
            paddle_api(x, 1),  # positional args
            paddle_api(x=x, axis=1),  # paddle keyword args
            paddle_api(input=x, dim=1),  # torch-style keyword aliases
            paddle_api(x, dim=1),  # mixed positional + alias keyword
            tensor_method(1),  # Tensor method, positional
            tensor_method(dim=1),  # Tensor method, alias keyword
        ]
        # NumPy reference output
        ref_out = getattr(np, api_name)(self.np_input, 1)
        for out in paddle_dygraph_out:
            np.testing.assert_allclose(ref_out, out.numpy())
        paddle.enable_static()

    def _test_static_Compatibility(self, api_name):
        main = paddle.static.Program()
        startup = paddle.static.Program()
        with paddle.base.program_guard(main, startup):
            x = paddle.static.data(name="x", shape=self.shape, dtype=self.dtype)
            paddle_api = getattr(paddle, api_name)
            tensor_method = getattr(x, api_name)
            outs = [
                paddle_api(x, 1),  # positional args
                paddle_api(x=x, axis=1),  # paddle keyword args
                paddle_api(input=x, dim=1),  # torch-style keyword aliases
                paddle_api(x, dim=1),  # mixed positional + alias keyword
                tensor_method(1),  # Tensor method, positional
                tensor_method(dim=1),  # Tensor method, alias keyword
            ]
            # `out=` is not supported in static mode, so it is not tested.
            exe = paddle.base.Executor(paddle.CPUPlace())
            fetches = exe.run(
                main, feed={"x": self.np_input}, fetch_list=outs
            )
            ref_out = getattr(np, api_name)(self.np_input, 1)
            for out in fetches:
                np.testing.assert_allclose(out, ref_out)

    def test(self):
        for api in ("argmax", "argmin"):
            self._test_dygraph_Compatibility(api)
            self._test_static_Compatibility(api)


# Run all registered test cases when executed as a script.
if __name__ == '__main__':
    unittest.main()
