import numpy as np
import deepxde as dde
import deepxde.backend as bkd
from deepxde import gradients as grad
from deepxde import optimizers
from deepxde.backend import backend_name, tf, torch, jax, paddle
from deepxde import config
from deepxde import losses as losses_module
from deepxde import metrics as metrics_module
from deepxde import utils

from deepxde import Model
from AI4XDE.algorithm.PINN import PINN


class PINN_adam(PINN):
    """PINN solver trained with the Adam optimizer."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.name = "PINN_adam"

    def train_step(self, lr=1e-3, iterations=15000, callbacks=None, eval=False):
        """Compile the model with Adam and run one training stage.

        Args:
            lr (float): Learning rate for Adam.
            iterations (int): Number of training iterations.
            callbacks: Optional deepxde callbacks forwarded to ``train``.
            eval (bool): If True, run ``self.eval()`` after training.
        """
        self.PDECase.compile(self.model, "adam", lr=lr)
        history = self.model.train(iterations=iterations, callbacks=callbacks)
        self.losshistory, self.train_state = history
        if eval:
            self.eval()


class LossModel(Model):
    """deepxde ``Model`` that aggregates losses into a PDE term and an IC/BC term.

    The per-condition IC/BC losses are summed into one "net" loss and rescaled
    through a trainable scalar ``trainable_lambda`` via
    ``log1p(sigmoid(lambda) * loss)``, so the balance between the PDE residual
    loss and the boundary/initial-condition loss is learned during training.
    Only the PyTorch backend compile path is overridden; the loss
    transformation uses ``torch`` directly and therefore assumes the pytorch
    backend is active.
    """

    def __init__(self, PDECase, trainable_lambda):
        super().__init__(PDECase.data, PDECase.net)
        self.PDECase = PDECase
        # Trainable scalar controlling the IC/BC loss weight. It is registered
        # with the optimizer in compile() via external_trainable_variables.
        self.trainable_lambda = trainable_lambda

    def transfer_loss_net(self, loss):
        """Rescale the aggregated IC/BC loss with the trainable lambda.

        Returns ``log1p(sigmoid(trainable_lambda) * loss)``.
        """
        # Keep lambda from dropping below 0.01. BUGFIX: the original code
        # rebound self.trainable_lambda to a fresh dde.Variable(0.01), which
        # detaches it from the optimizer created in _compile_pytorch (the
        # optimizer holds a reference to the old object), freezing lambda
        # forever. Clamping in place yields the same value while keeping the
        # variable tracked by the optimizer.
        with torch.no_grad():
            self.trainable_lambda.clamp_(min=0.01)

        # log1p(x) is the numerically stable form of log(1 + x).
        return torch.log1p(bkd.sigmoid(self.trainable_lambda) * loss)

    def transfer_loss_to_pde_and_net(self, loss_list):
        """Collapse the per-term loss list into ``[pde_loss, net_loss]``.

        By deepxde convention the trailing ``len(data.bcs)`` entries of
        ``loss_list`` are the IC/BC losses and the leading entries are the PDE
        residual losses. The IC/BC sum is passed through
        ``transfer_loss_net``. A single-element list is returned unchanged.
        """
        if len(loss_list) == 1:
            return loss_list
        icbc_len = len(self.PDECase.data.bcs)
        pde_len = len(loss_list) - icbc_len
        loss_pde = bkd.sum(bkd.stack(loss_list[:pde_len], axis=0), dim=0)
        loss_net = bkd.sum(bkd.stack(loss_list[pde_len:], axis=0), dim=0)
        loss_net = self.transfer_loss_net(loss_net)
        return [loss_pde, loss_net]

    @utils.timing
    def compile(
        self,
        optimizer,
        lr=None,
        loss="MSE",
        metrics=None,
        decay=None,
        loss_weights=None,
        external_trainable_variables=None,
    ):
        """Configures the model for training.

        ``self.trainable_lambda`` is always added to the external trainable
        variables so the loss-weighting parameter is updated by the optimizer.

        Args:
            optimizer: String name of an optimizer, or a backend optimizer class
                instance.
            lr (float): The learning rate. For L-BFGS, use
                ``dde.optimizers.set_LBFGS_options`` to set the hyperparameters.
            loss: If the same loss is used for all errors, then `loss` is a String name
                of a loss function or a loss function. If different errors use
                different losses, then `loss` is a list whose size is equal to the
                number of errors.
            metrics: List of metrics to be evaluated by the model during training.
            decay (tuple): Name and parameters of decay to the initial learning rate.
                One of the following options:

                - For backend TensorFlow 1.x:

                    - `inverse_time_decay <https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/inverse_time_decay>`_: ("inverse time", decay_steps, decay_rate)
                    - `cosine_decay <https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/cosine_decay>`_: ("cosine", decay_steps, alpha)

                - For backend TensorFlow 2.x:

                    - `InverseTimeDecay <https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/schedules/InverseTimeDecay>`_: ("inverse time", decay_steps, decay_rate)
                    - `CosineDecay <https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/schedules/CosineDecay>`_: ("cosine", decay_steps, alpha)

                - For backend PyTorch:

                    - `StepLR <https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.StepLR.html>`_: ("step", step_size, gamma)

                - For backend PaddlePaddle:

                    - `InverseTimeDecay
                      <https://www.paddlepaddle.org.cn/documentation/docs/en/develop/api/paddle/optimizer/lr/InverseTimeDecay_en.html>`_:
                      ("inverse time", gamma)

            loss_weights: A list specifying scalar coefficients (Python floats) to
                weight the loss contributions. The loss value that will be minimized by
                the model will then be the weighted sum of all individual losses,
                weighted by the `loss_weights` coefficients.
            external_trainable_variables: A trainable ``dde.Variable`` object or a list
                of trainable ``dde.Variable`` objects. The unknown parameters in the
                physics systems that need to be recovered. If the backend is
                tensorflow.compat.v1, `external_trainable_variables` is ignored, and all
                trainable ``dde.Variable`` objects are automatically collected.
        """
        if config.rank == 0:
            print("Compiling model...")
        self.opt_name = optimizer
        loss_fn = losses_module.get(loss)
        self.losshistory.set_loss_weights(loss_weights)
        if external_trainable_variables is None:
            external_trainable_variables = []
        else:
            if backend_name == "tensorflow.compat.v1":
                print(
                    "Warning: For the backend tensorflow.compat.v1, "
                    "`external_trainable_variables` is ignored, and all trainable "
                    "``tf.Variable`` objects are automatically collected."
                )
            if not isinstance(external_trainable_variables, list):
                external_trainable_variables = [external_trainable_variables]
            else:
                # Copy so appending below does not mutate the caller's list.
                external_trainable_variables = list(external_trainable_variables)
        # BUGFIX: register trainable_lambda unconditionally. The original code
        # only appended it when external variables were supplied, so in the
        # default call (external_trainable_variables=None) lambda was never
        # handed to the optimizer and stayed at its initial value.
        external_trainable_variables.append(self.trainable_lambda)
        self.external_trainable_variables = external_trainable_variables

        # Dispatch to the backend-specific compile; only the pytorch path is
        # overridden in this class, the rest come from the base Model.
        if backend_name == "tensorflow.compat.v1":
            self._compile_tensorflow_compat_v1(lr, loss_fn, decay, loss_weights)
        elif backend_name == "tensorflow":
            self._compile_tensorflow(lr, loss_fn, decay, loss_weights)
        elif backend_name == "pytorch":
            self._compile_pytorch(lr, loss_fn, decay, loss_weights)
        elif backend_name == "jax":
            self._compile_jax(lr, loss_fn, decay, loss_weights)
        elif backend_name == "paddle":
            self._compile_paddle(lr, loss_fn, decay, loss_weights)
        # metrics may use model variables such as self.net, and thus are instantiated
        # after backend compile.
        metrics = metrics or []
        self.metrics = [metrics_module.get(m) for m in metrics]

    def _compile_pytorch(self, lr, loss_fn, decay, loss_weights):
        """pytorch backend compile; identical to the base Model except that the
        raw loss list is collapsed via ``transfer_loss_to_pde_and_net``."""

        def outputs(training, inputs):
            self.net.train(mode=training)
            with torch.no_grad():
                if isinstance(inputs, tuple):
                    inputs = tuple(
                        map(lambda x: torch.as_tensor(x).requires_grad_(), inputs)
                    )
                else:
                    inputs = torch.as_tensor(inputs)
                    inputs.requires_grad_()
            # Clear cached Jacobians and Hessians.
            grad.clear()
            return self.net(inputs)

        def outputs_losses(training, inputs, targets, losses_fn):
            self.net.train(mode=training)
            if isinstance(inputs, tuple):
                inputs = tuple(
                    map(lambda x: torch.as_tensor(x).requires_grad_(), inputs)
                )
            else:
                inputs = torch.as_tensor(inputs)
                inputs.requires_grad_()
            outputs_ = self.net(inputs)
            # Data losses
            if targets is not None:
                targets = torch.as_tensor(targets)
            losses = losses_fn(targets, outputs_, loss_fn, inputs, self)
            if not isinstance(losses, list):
                losses = [losses]
            # Collapse into [pde_loss, transformed IC/BC loss].
            losses = self.transfer_loss_to_pde_and_net(losses)
            losses = torch.stack(losses)
            # Weighted losses
            if loss_weights is not None:
                losses *= torch.as_tensor(loss_weights)
            # Clear cached Jacobians and Hessians.
            grad.clear()
            return outputs_, losses

        def outputs_losses_train(inputs, targets):
            return outputs_losses(True, inputs, targets, self.data.losses_train)

        def outputs_losses_test(inputs, targets):
            return outputs_losses(False, inputs, targets, self.data.losses_test)

        # Another way is using per-parameter options
        # https://pytorch.org/docs/stable/optim.html#per-parameter-options,
        # but not all optimizers (such as L-BFGS) support this.
        trainable_variables = (
            list(self.net.parameters()) + self.external_trainable_variables
        )
        if self.net.regularizer is None:
            self.opt, self.lr_scheduler = optimizers.get(
                trainable_variables, self.opt_name, learning_rate=lr, decay=decay
            )
        else:
            if self.net.regularizer[0] == "l2":
                self.opt, self.lr_scheduler = optimizers.get(
                    trainable_variables,
                    self.opt_name,
                    learning_rate=lr,
                    decay=decay,
                    weight_decay=self.net.regularizer[1],
                )
            else:
                raise NotImplementedError(
                    f"{self.net.regularizer[0]} regularizaiton to be implemented for "
                    "backend pytorch."
                )

        def train_step(inputs, targets):
            def closure():
                losses = outputs_losses_train(inputs, targets)[1]
                total_loss = torch.sum(losses)
                self.opt.zero_grad()
                total_loss.backward()
                return total_loss

            self.opt.step(closure)
            if self.lr_scheduler is not None:
                self.lr_scheduler.step()

        # Callables
        self.outputs = outputs
        self.outputs_losses_train = outputs_losses_train
        self.outputs_losses_test = outputs_losses_test
        self.train_step = train_step


class LossPINN(PINN_adam):
    """Adam-trained PINN that uses ``LossModel`` with a trainable loss weight.

    The underlying model balances PDE and IC/BC losses through a trainable
    lambda variable initialized at 10.0.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.name = "LossPINN"
        self.model = LossModel(self.PDECase, trainable_lambda=dde.Variable(10.0))
