# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from op_test import check_out_dtype, get_device_place, is_custom_device

import paddle
import paddle.nn.functional as F
from paddle import base
from paddle.base import core


def adaptive_start_index(index, input_size, output_size):
    """Left (inclusive) edge of adaptive-pooling window ``index``: floor(i*L/n)."""
    scaled = index * input_size / output_size
    return int(np.floor(scaled))

def adaptive_end_index(index, input_size, output_size):
    """Right (exclusive) edge of adaptive-pooling window ``index``: ceil((i+1)*L/n)."""
    scaled = (index + 1) * input_size / output_size
    return int(np.ceil(scaled))

def adaptive_pool2d_forward(
    x, output_size, data_format='NCHW', pool_type="max"
):
    """Numpy reference implementation of adaptive 2-D pooling.

    Args:
        x (np.ndarray): 4-D input laid out according to ``data_format``.
        output_size (int | list | tuple | None): target spatial size. An
            int (or None) is broadcast to both dimensions; a None entry
            keeps the corresponding input dimension unchanged.
        data_format (str): 'NCHW' or 'NHWC'.
        pool_type (str): 'max' or 'avg'.

    Returns:
        np.ndarray: float64 pooled result in the same layout as ``x``;
        all zeros (and possibly empty) when ``x`` has no elements.
    """

    def _start(idx, in_size, out_size):
        # Left (inclusive) edge of the adaptive window: floor(idx * L / n).
        return int(np.floor(idx * in_size / out_size))

    def _end(idx, in_size, out_size):
        # Right (exclusive) edge of the adaptive window: ceil((idx+1) * L / n).
        return int(np.ceil((idx + 1) * in_size / out_size))

    N = x.shape[0]
    C, H, W = (
        [x.shape[1], x.shape[2], x.shape[3]]
        if data_format == 'NCHW'
        else [x.shape[3], x.shape[1], x.shape[2]]
    )

    # Normalize output_size into a fresh local list. The previous code
    # assigned into the caller-supplied list (mutating the argument in
    # place) and would have raised on a tuple.
    if isinstance(output_size, int) or output_size is None:
        output_size = [output_size, output_size]
    else:
        output_size = list(output_size)

    # A None entry means "keep this input dimension unchanged".
    if output_size[0] is None:
        output_size[0] = H
    if output_size[1] is None:
        output_size[1] = W
    H_out, W_out = output_size

    out = (
        np.zeros((N, C, H_out, W_out))
        if data_format == 'NCHW'
        else np.zeros((N, H_out, W_out, C))
    )
    # Zero-size input: the shape math above still applies, but there is
    # nothing to pool.
    if x.size == 0:
        return out

    for i in range(H_out):
        in_h_start = _start(i, H, H_out)
        in_h_end = _end(i, H, H_out)

        for j in range(W_out):
            in_w_start = _start(j, W, W_out)
            in_w_end = _end(j, W, W_out)

            if data_format == 'NCHW':
                x_masked = x[:, :, in_h_start:in_h_end, in_w_start:in_w_end]
                if pool_type == 'avg':
                    field_size = (in_h_end - in_h_start) * (
                        in_w_end - in_w_start
                    )
                    out[:, :, i, j] = np.sum(x_masked, axis=(2, 3)) / field_size
                elif pool_type == 'max':
                    out[:, :, i, j] = np.max(x_masked, axis=(2, 3))
            elif data_format == 'NHWC':
                x_masked = x[:, in_h_start:in_h_end, in_w_start:in_w_end, :]
                if pool_type == 'avg':
                    field_size = (in_h_end - in_h_start) * (
                        in_w_end - in_w_start
                    )
                    out[:, i, j, :] = np.sum(x_masked, axis=(1, 2)) / field_size
                elif pool_type == 'max':
                    out[:, i, j, :] = np.max(x_masked, axis=(1, 2))
    return out


class TestAdaptiveMaxPool2DAPI(unittest.TestCase):
    """Tests paddle.nn.functional.adaptive_max_pool2d against the numpy
    reference ``adaptive_pool2d_forward`` for several output_size forms:
    a pair, a scalar, an asymmetric pair, and a pair containing None.
    """

    def setUp(self):
        # Random NCHW input plus the expected results from the numpy
        # reference implementation defined above.
        self.x_np = np.random.random([2, 3, 7, 7]).astype("float32")
        self.res_1_np = adaptive_pool2d_forward(
            x=self.x_np, output_size=[3, 3], pool_type="max"
        )

        self.res_2_np = adaptive_pool2d_forward(
            x=self.x_np, output_size=5, pool_type="max"
        )

        self.res_3_np = adaptive_pool2d_forward(
            x=self.x_np, output_size=[2, 5], pool_type="max"
        )
        # NHWC reference result: disabled along with the NHWC API calls below.
        """
        self.res_4_np = adaptive_pool2d_forward(
            x=self.x_np,
            output_size=[3, 3],
            pool_type="max",
            data_format="NHWC")
        """
        self.res_5_np = adaptive_pool2d_forward(
            x=self.x_np, output_size=[None, 3], pool_type="max"
        )

    def test_static_graph(self):
        """Static-graph execution on CPU (and device when available)."""
        for use_cuda in (
            [False, True]
            if (core.is_compiled_with_cuda() or is_custom_device())
            else [False]
        ):
            place = get_device_place() if use_cuda else paddle.CPUPlace()
            paddle.enable_static()
            x = paddle.static.data(
                name="x", shape=[2, 3, 7, 7], dtype="float32"
            )

            out_1 = paddle.nn.functional.adaptive_max_pool2d(
                x=x, output_size=[3, 3]
            )

            out_2 = paddle.nn.functional.adaptive_max_pool2d(x=x, output_size=5)

            out_3 = paddle.nn.functional.adaptive_max_pool2d(
                x=x, output_size=[2, 5]
            )

            # out_4 = paddle.nn.functional.adaptive_max_pool2d(
            #    x=x, output_size=[3, 3], data_format="NHWC")

            # [None, 3] keeps the input height and pools the width to 3.
            out_5 = paddle.nn.functional.adaptive_max_pool2d(
                x=x, output_size=[None, 3]
            )

            exe = paddle.static.Executor(place=place)
            [res_1, res_2, res_3, res_5] = exe.run(
                base.default_main_program(),
                feed={"x": self.x_np},
                fetch_list=[out_1, out_2, out_3, out_5],
            )

            np.testing.assert_allclose(res_1, self.res_1_np)

            np.testing.assert_allclose(res_2, self.res_2_np)

            np.testing.assert_allclose(res_3, self.res_3_np)

            # np.testing.assert_allclose(res_4, self.res_4_np)

            np.testing.assert_allclose(res_5, self.res_5_np)

    def test_static_graph_return_mask(self):
        """Static graph with return_mask=True: each result is an
        (output, mask) pair, so the 4-entry fetch_list flattens into 8
        fetched arrays; each mask must match its output's shape.
        """
        for use_cuda in (
            [False, True]
            if (core.is_compiled_with_cuda() or is_custom_device())
            else [False]
        ):
            place = get_device_place() if use_cuda else paddle.CPUPlace()
            paddle.enable_static()
            x = paddle.static.data(
                name="x", shape=[2, 3, 7, 7], dtype="float32"
            )

            out_1 = paddle.nn.functional.adaptive_max_pool2d(
                x=x, output_size=[3, 3], return_mask=True
            )

            out_2 = paddle.nn.functional.adaptive_max_pool2d(
                x=x, output_size=5, return_mask=True
            )

            out_3 = paddle.nn.functional.adaptive_max_pool2d(
                x=x, output_size=[2, 5], return_mask=True
            )

            # out_4 = paddle.nn.functional.adaptive_max_pool2d(
            #    x=x, output_size=[3, 3], data_format="NHWC"), return_mask=True

            out_5 = paddle.nn.functional.adaptive_max_pool2d(
                x=x, output_size=[None, 3], return_mask=True
            )

            exe = paddle.static.Executor(place=place)
            [
                res_1,
                mask_1,
                res_2,
                mask_2,
                res_3,
                mask_3,
                res_5,
                mask_5,
            ] = exe.run(
                base.default_main_program(),
                feed={"x": self.x_np},
                fetch_list=[out_1, out_2, out_3, out_5],
            )

            self.assertEqual(res_1.shape, mask_1.shape)

            self.assertEqual(res_2.shape, mask_2.shape)

            self.assertEqual(res_3.shape, mask_3.shape)

            # self.assertEqual(res_4.shape, mask_4.shape)

            self.assertEqual(res_5.shape, mask_5.shape)

    def test_dynamic_graph(self):
        """Dynamic-graph execution; also covers the torch-style
        ``input``/``return_indices`` argument aliases (out_6).
        """
        for use_cuda in (
            [False, True]
            if (core.is_compiled_with_cuda() or is_custom_device())
            else [False]
        ):
            place = get_device_place() if use_cuda else paddle.CPUPlace()
            paddle.disable_static(place=place)
            x = paddle.to_tensor(self.x_np)

            out_1 = paddle.nn.functional.adaptive_max_pool2d(
                x=x, return_mask=False, output_size=[3, 3]
            )

            out_2 = paddle.nn.functional.adaptive_max_pool2d(x=x, output_size=5)

            out_3 = paddle.nn.functional.adaptive_max_pool2d(
                x=x, output_size=[2, 5]
            )

            # out_4 = paddle.nn.functional.adaptive_max_pool2d(
            #    x=x, output_size=[3, 3], data_format="NHWC")

            out_5 = paddle.nn.functional.adaptive_max_pool2d(
                x=x, output_size=[None, 3]
            )

            # test @param_two_alias(["x", "input"], ["return_mask", "return_indices"])
            out_6 = paddle.nn.functional.adaptive_max_pool2d(
                input=x, output_size=[None, 3], return_indices=False
            )
            np.testing.assert_allclose(out_1.numpy(), self.res_1_np)

            np.testing.assert_allclose(out_2.numpy(), self.res_2_np)

            np.testing.assert_allclose(out_3.numpy(), self.res_3_np)

            # np.testing.assert_allclose(out_4.numpy(), self.res_4_np)

            np.testing.assert_allclose(out_5.numpy(), self.res_5_np)
            # Alias call must produce the same result as the canonical one.
            np.testing.assert_allclose(out_6.numpy(), self.res_5_np)


class TestAdaptiveMaxPool2DClassAPI(unittest.TestCase):
    """Tests the paddle.nn.AdaptiveMaxPool2D layer class against the
    numpy reference ``adaptive_pool2d_forward`` for several output_size
    forms (pair, scalar, asymmetric pair, pair containing None).
    """

    def setUp(self):
        # Random NCHW input plus expected results from the numpy reference.
        self.x_np = np.random.random([2, 3, 7, 7]).astype("float32")
        self.res_1_np = adaptive_pool2d_forward(
            x=self.x_np, output_size=[3, 3], pool_type="max"
        )

        self.res_2_np = adaptive_pool2d_forward(
            x=self.x_np, output_size=5, pool_type="max"
        )

        self.res_3_np = adaptive_pool2d_forward(
            x=self.x_np, output_size=[2, 5], pool_type="max"
        )

        # NHWC reference result: disabled along with the NHWC cases below.
        # self.res_4_np = adaptive_pool2d_forward(
        #    x=self.x_np,
        #    output_size=[3, 3],
        #    pool_type="max",
        #    data_format="NHWC")

        self.res_5_np = adaptive_pool2d_forward(
            x=self.x_np, output_size=[None, 3], pool_type="max"
        )

    def test_static_graph(self):
        """Static-graph execution inside a fresh program_guard scope."""
        for use_cuda in (
            [False, True]
            if (core.is_compiled_with_cuda() or is_custom_device())
            else [False]
        ):
            # Fresh programs per iteration so graphs don't accumulate.
            with paddle.static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                place = get_device_place() if use_cuda else paddle.CPUPlace()
                paddle.enable_static()
                x = paddle.static.data(
                    name="x", shape=[2, 3, 7, 7], dtype="float32"
                )

                adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
                    output_size=[3, 3]
                )
                out_1 = adaptive_max_pool(x=x)

                adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=5)
                out_2 = adaptive_max_pool(x=x)

                adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
                    output_size=[2, 5]
                )
                out_3 = adaptive_max_pool(x=x)

                #    adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
                #        output_size=[3, 3], data_format="NHWC")
                #    out_4 = adaptive_max_pool(x=x)

                adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
                    output_size=[None, 3]
                )
                out_5 = adaptive_max_pool(x=x)

                exe = paddle.static.Executor(place=place)
                [res_1, res_2, res_3, res_5] = exe.run(
                    base.default_main_program(),
                    feed={"x": self.x_np},
                    fetch_list=[out_1, out_2, out_3, out_5],
                )

                np.testing.assert_allclose(res_1, self.res_1_np)

                np.testing.assert_allclose(res_2, self.res_2_np)

                np.testing.assert_allclose(res_3, self.res_3_np)

                # np.testing.assert_allclose(res_4, self.res_4_np)

                np.testing.assert_allclose(res_5, self.res_5_np)

    def test_dynamic_graph(self):
        """Dynamic-graph execution; also covers the torch-style
        ``AdaptiveMaxPool2d`` alias and its ``return_indices``/``input``
        argument names (out_6).
        """
        for use_cuda in (
            [False, True]
            if (core.is_compiled_with_cuda() or is_custom_device())
            else [False]
        ):
            place = get_device_place() if use_cuda else paddle.CPUPlace()
            paddle.disable_static(place=place)
            x = paddle.to_tensor(self.x_np)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=[3, 3])
            out_1 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=5)
            out_2 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=[2, 5])
            out_3 = adaptive_max_pool(x=x)

            # adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
            #    output_size=[3, 3], data_format="NHWC")
            # out_4 = adaptive_max_pool(x=x)

            adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
                output_size=[None, 3]
            )
            out_5 = adaptive_max_pool(x=x)

            # NOTE(review): lowercase-'d' class with torch-style
            # 'return_indices'/'input' names — presumably an intentional
            # compatibility alias; verify it exists in this paddle build.
            adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(
                output_size=[None, 3], return_indices=True
            )
            self.assertEqual(adaptive_max_pool.return_indices, True)
            # Flip the flag so the call below returns only the output tensor.
            adaptive_max_pool.return_indices = False
            out_6 = adaptive_max_pool(input=x)

            np.testing.assert_allclose(out_1.numpy(), self.res_1_np)

            np.testing.assert_allclose(out_2.numpy(), self.res_2_np)

            np.testing.assert_allclose(out_3.numpy(), self.res_3_np)

            # np.testing.assert_allclose(out_4.numpy(), self.res_4_np)

            np.testing.assert_allclose(out_5.numpy(), self.res_5_np)

            # Alias layer must match the canonical layer's result.
            np.testing.assert_allclose(out_6.numpy(), self.res_5_np)


class TestOutDtype(unittest.TestCase):
    """Verify adaptive_max_pool2d preserves the input dtype."""

    def test_max_pool(self):
        # check_out_dtype runs the op for each listed dtype and asserts
        # the output dtype matches the input dtype.
        check_out_dtype(
            F.adaptive_max_pool2d,
            in_specs=[([1, 3, 32, 32],)],
            expect_dtypes=['float32', 'float64'],
            output_size=16,
        )


class TestAdaptiveMaxPool2D_ZeroSize(unittest.TestCase):
    """adaptive_max_pool2d on an input with a zero-sized batch dimension."""

    def setUp(self):
        # Batch dimension is 0; the numpy reference returns an empty,
        # correctly-shaped result for this case.
        self.x_np = np.random.random([0, 3, 7, 7]).astype("float32")
        self.res_1_np = adaptive_pool2d_forward(
            x=self.x_np, output_size=[3, 3], pool_type="max"
        )

    def test_static_graph(self):
        """Static graph: empty input yields the (empty) reference output."""
        for use_cuda in (
            [False, True]
            if (core.is_compiled_with_cuda() or is_custom_device())
            else [False]
        ):
            place = get_device_place() if use_cuda else paddle.CPUPlace()
            paddle.enable_static()
            x = paddle.static.data(
                name="x", shape=[0, 3, 7, 7], dtype="float32"
            )

            out_1 = paddle.nn.functional.adaptive_max_pool2d(
                x=x, output_size=[3, 3]
            )

            exe = paddle.static.Executor(place=place)
            [
                res_1,
            ] = exe.run(
                base.default_main_program(),
                feed={"x": self.x_np},
                fetch_list=[
                    out_1,
                ],
            )

            np.testing.assert_allclose(res_1, self.res_1_np)

    def test_static_graph_return_mask(self):
        """Static graph with return_mask=True: mask matches output shape."""
        for use_cuda in (
            [False, True]
            if (core.is_compiled_with_cuda() or is_custom_device())
            else [False]
        ):
            place = get_device_place() if use_cuda else paddle.CPUPlace()
            paddle.enable_static()
            x = paddle.static.data(
                name="x", shape=[0, 3, 7, 7], dtype="float32"
            )

            out_1 = paddle.nn.functional.adaptive_max_pool2d(
                x=x, output_size=[3, 3], return_mask=True
            )

            exe = paddle.static.Executor(place=place)
            # out_1 is an (output, mask) pair, so fetching it yields two
            # arrays.
            [
                res_1,
                mask_1,
            ] = exe.run(
                base.default_main_program(),
                feed={"x": self.x_np},
                fetch_list=[
                    out_1,
                ],
            )

            self.assertEqual(res_1.shape, mask_1.shape)

    def test_dynamic_graph(self):
        """Dynamic graph: empty input yields the (empty) reference output."""
        for use_cuda in (
            [False, True]
            if (core.is_compiled_with_cuda() or is_custom_device())
            else [False]
        ):
            place = get_device_place() if use_cuda else paddle.CPUPlace()
            paddle.disable_static(place=place)
            x = paddle.to_tensor(self.x_np)

            out_1 = paddle.nn.functional.adaptive_max_pool2d(
                x=x, return_mask=False, output_size=[3, 3]
            )

            np.testing.assert_allclose(out_1.numpy(), self.res_1_np)

    def test_grad(self):
        """Backward through an empty input yields a gradient whose shape
        matches the input's."""
        for use_cuda in (
            [False, True]
            if (core.is_compiled_with_cuda() or is_custom_device())
            else [False]
        ):
            place = get_device_place() if use_cuda else paddle.CPUPlace()
            paddle.disable_static(place=place)
            x = paddle.to_tensor(self.x_np)
            x.stop_gradient = False
            out_1 = paddle.nn.functional.adaptive_max_pool2d(
                x=x, return_mask=False, output_size=[3, 3]
            )
            loss = paddle.sum(out_1)
            loss.backward()
            # Shapes are integer sequences; compare them exactly instead of
            # with the float-tolerance np.testing.assert_allclose.
            self.assertEqual(tuple(x.grad.shape), tuple(x.shape))


# Allow running this test file directly as a script.
if __name__ == '__main__':
    unittest.main()
