# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import pytest
import torch
from torch.nn import Linear

from sparseml.pytorch.sparsification.pruning import (
    PowerpropagationModifier,
    PowerpropagationWrapper,
)
from tests.sparseml.pytorch.helpers import LinearNet
from tests.sparseml.pytorch.sparsification.pruning.helpers import (
    state_dict_save_load_test,
)
from tests.sparseml.pytorch.sparsification.test_modifier import (
    ScheduledModifierTest,
    create_optim_adam,
    create_optim_sgd,
)


from tests.sparseml.pytorch.helpers import (  # noqa isort:skip
    test_epoch,
    test_loss,
    test_steps_per_epoch,
)


@pytest.mark.skipif(
    os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False),
    reason="Skipping pytorch tests",
)
@pytest.mark.parametrize(
    "modifier_lambda",
    [
        lambda: PowerpropagationModifier(
            start_epoch=0,
            end_epoch=10,
            params=["re:.*weight"],
            alpha=2.0,
        ),
        lambda: PowerpropagationModifier(
            params=["seq.fc1.weight"],
            start_epoch=10.0,
            end_epoch=25.0,
            alpha=3.0,
        ),
    ],
    scope="function",
)
@pytest.mark.parametrize("model_lambda", [LinearNet], scope="function")
@pytest.mark.parametrize(
    "optim_lambda",
    [create_optim_sgd, create_optim_adam],
    scope="function",
)
class TestPowerpropagationModifier(ScheduledModifierTest):
    """
    Lifecycle, (de)serialization, and mechanism tests for
    :class:`PowerpropagationModifier`, run over the cross product of the
    parametrized modifier configs, models, and optimizers above.
    """

    def test_lifecycle(
        self,
        modifier_lambda,
        model_lambda,
        optim_lambda,
        test_steps_per_epoch,  # noqa: F811
    ):
        """
        Check that the modifier reports update_ready only within its
        [start_epoch, end_epoch] window and accepts scheduled updates there.
        """
        modifier = modifier_lambda()
        model = model_lambda()
        optimizer = optim_lambda(model)
        self.initialize_helper(modifier, model)

        # Before start_epoch, no update should be pending
        if modifier.start_epoch >= 0:
            for epoch in range(int(modifier.start_epoch)):
                assert not modifier.update_ready(epoch, test_steps_per_epoch)

        # At start_epoch the modifier must fire (activating powerpropagation)
        epoch = int(modifier.start_epoch) if modifier.start_epoch >= 0 else 0.0
        assert modifier.update_ready(epoch, test_steps_per_epoch)
        modifier.scheduled_update(model, optimizer, epoch, test_steps_per_epoch)

        if modifier.end_epoch >= 0:
            # At end_epoch the modifier must fire again (deactivating)
            epoch = int(modifier.end_epoch)
            assert modifier.update_ready(epoch, test_steps_per_epoch)
            modifier.scheduled_update(model, optimizer, epoch, test_steps_per_epoch)

            # After end_epoch no further updates should be pending
            for epoch in range(
                int(modifier.end_epoch) + 1, int(modifier.end_epoch) + 6
            ):
                assert not modifier.update_ready(epoch, test_steps_per_epoch)

    def test_state_dict_save_load(
        self,
        modifier_lambda,
        model_lambda,
        optim_lambda,
        test_steps_per_epoch,  # noqa: F811
    ):
        """
        Verify modifier state survives a state_dict save/load round trip
        (delegates to the shared pruning test helper).
        """
        state_dict_save_load_test(
            self,
            modifier_lambda,
            model_lambda,
            optim_lambda,
            test_steps_per_epoch,
            False,
        )

    def test_powerprop_mechanism(
        self,
        modifier_lambda,
        model_lambda,
        optim_lambda,
        test_steps_per_epoch,  # noqa: F811
    ):
        """
        Verify the architectural and weight changes of powerpropagation.

        On activation, linear layers must be wrapped in
        PowerpropagationWrapper and reparameterized so that
        new_weight * |new_weight|^(alpha - 1) == previous_weight; on
        deactivation, the wrapper is removed and the weights are
        exponentiated in place so the effective weights are unchanged.
        """
        modifier = modifier_lambda()
        model = model_lambda()
        optimizer = optim_lambda(model)
        self.initialize_helper(modifier, model)

        batch_shape = 10
        input_shape = model_lambda.layer_descs()[0].input_size
        epoch = int(modifier.start_epoch)

        # We check that when the powerpropagation modifier is activated,
        # the model architecture changes to wrap the linear layers in
        # PowerpropagationWrapper modules. Further, the weights of the layer
        # are adjusted so that new_weight^alpha = previous_weight.
        # Then, once the modifier is deactivated, the architecture should
        # revert, and the weights are exponentiated in-place to reflect
        # the fact that they are no longer exponentiated in the forward pass.
        while epoch < modifier.end_epoch:
            optimizer.zero_grad()
            model(torch.randn(batch_shape, *input_shape)).mean().backward()
            optimizer.step()

            # Before activation the first module is a bare layer with
            # .weight; afterwards it is a wrapper exposing .layer.weight
            first_module = next(next(model.children()).children())
            if epoch == modifier.start_epoch:
                layer_weights_pre = first_module.weight.clone()
            else:
                layer_weights_pre = first_module.layer.weight.clone()

            if modifier.update_ready(epoch, test_steps_per_epoch):
                modifier.scheduled_update(model, optimizer, epoch, test_steps_per_epoch)

            assert isinstance(
                next(next(model.children()).children()), PowerpropagationWrapper
            )
            first_powerpropagated_module = next(next(model.children()).children())
            layer_weights_post = first_powerpropagated_module.layer.weight.clone()

            if epoch == modifier.start_epoch:
                # Activation epoch: stored weights were re-rooted so that
                # w_new * |w_new|^(alpha-1) reproduces the original weights
                assert torch.allclose(
                    layer_weights_post
                    * pow(
                        abs(layer_weights_post), first_powerpropagated_module.alpha - 1
                    ),
                    layer_weights_pre,
                )
            else:
                # Mid-window epochs: scheduled updates must not touch weights
                assert torch.allclose(layer_weights_pre, layer_weights_post)

            epoch += 1

        # Now check what happens when the powerpropagation is removed,
        # i.e., the epoch is the modifier end_epoch.
        optimizer.zero_grad()
        model(torch.randn(batch_shape, *input_shape)).mean().backward()
        optimizer.step()
        first_module = next(next(model.children()).children())
        layer_weights_pre = first_module.layer.weight.clone()
        alpha = first_module.alpha

        if modifier.update_ready(epoch, test_steps_per_epoch):
            modifier.scheduled_update(model, optimizer, epoch, test_steps_per_epoch)
        first_unpowerpropagated_module = next(next(model.children()).children())
        layer_weights_post = first_unpowerpropagated_module.weight.clone()

        # Wrapper removed and weights exponentiated in place, so the
        # effective (forward-pass) weights are unchanged by deactivation
        assert isinstance(next(next(model.children()).children()), Linear)
        assert torch.allclose(
            layer_weights_pre * pow(abs(layer_weights_pre), alpha - 1),
            layer_weights_post,
        )


@pytest.mark.skipif(
    os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False),
    reason="Skipping pytorch tests",
)
def test_powerpropagation_yaml():
    """
    Check that a PowerpropagationModifier built from yaml, one re-serialized
    from that object, and one built directly in Python all agree on every
    configured field (start_epoch, end_epoch, params, alpha).
    """
    start_epoch = 5.0
    end_epoch = 15.0
    params = ["re:.*weight"]
    alpha = 1.5
    yaml_str = f"""
    !PowerpropagationModifier
        start_epoch: {start_epoch}
        end_epoch: {end_epoch}
        params: {params}
        alpha: {alpha}
    """
    yaml_modifier = PowerpropagationModifier.load_obj(
        yaml_str
    )  # type: PowerpropagationModifier
    # round trip: serialize the yaml-loaded modifier and load it again
    serialized_modifier = PowerpropagationModifier.load_obj(
        str(yaml_modifier)
    )  # type: PowerpropagationModifier
    obj_modifier = PowerpropagationModifier(
        start_epoch=start_epoch, end_epoch=end_epoch, params=params, alpha=alpha
    )

    assert isinstance(yaml_modifier, PowerpropagationModifier)
    assert (
        yaml_modifier.start_epoch
        == serialized_modifier.start_epoch
        == obj_modifier.start_epoch
    )
    assert (
        yaml_modifier.end_epoch
        == serialized_modifier.end_epoch
        == obj_modifier.end_epoch
    )
    assert yaml_modifier.params == serialized_modifier.params == obj_modifier.params
    # alpha was configured above but previously never verified; without this
    # a serialization bug in alpha would pass silently
    assert yaml_modifier.alpha == serialized_modifier.alpha == obj_modifier.alpha