from functools import partial

import jax.numpy as jnp
from flax import nnx

from model.data_processing.data_loaders import KinematicBatch
from model.diffusion.diffusion_ctrl import DiffusionIKModel, DiffusionPlanningModel
from model.mlp_ctrl import CtrlModel
from model.networks.mlp import MLP, EnsembleMLP, LipschitzMLP


class MlpPartialIKModel(CtrlModel):
    """
    Map end-effector positions to joints: (x, y, z) -> (j1, j2, j3, j4, j5, j6).
    The rotation is not considered and is learned from the data.

    If used as an FK model, the input is (j1, ..., j6) and the output is (x, y, z).
    """

    def __init__(
        self,
        in_dim: int = 3,
        out_dim: int = 6,
        hidden_dims=(512, 512, 512),
        activation_type: str = "Mish",
        use_layernorm: bool = False,
        dropout_rate: float | None = None,  # fixed annotation: None is a valid value
        **kwargs
    ):
        """
        Args:
            in_dim: input dimension (xyz target position by default).
            out_dim: output dimension (6 joint values by default).
            hidden_dims: widths of the hidden MLP layers.
            activation_type: activation-function name understood by MLP.
            use_layernorm: whether the MLP applies layer normalization.
            dropout_rate: dropout probability; None disables dropout.
            **kwargs: forwarded to CtrlModel; may contain "seed" (default 0)
                used to initialize the network RNGs.
        """
        seed = kwargs.get("seed", 0)
        model = MLP(
            rngs=nnx.Rngs(seed),
            layer_dims=(in_dim, *hidden_dims, out_dim),
            activation_type=activation_type,
            use_layernorm=use_layernorm,
            dropout_rate=dropout_rate,
        )
        super().__init__(model, **kwargs)


class MlpPartialFKModel(MlpPartialIKModel):
    """Forward-kinematics variant: maps joint positions to end-effector xyz.

    Unlike the IK parent, input/output dimensions must be given explicitly
    (joints in, xyz out).
    """

    def __init__(self, in_dim: int, out_dim: int, **kwargs):
        super().__init__(in_dim, out_dim, **kwargs)

    def update(self, batch: KinematicBatch):
        """Run one training step: predict end-effector xyz from joints.

        Returns the info dict produced by the jitted train step.
        """
        self.model.train()
        return self.jit_train_step(self.model, self.opt, batch.joint, batch.xyz)


class LipschitzIKModel(CtrlModel):
    """
    IK model backed by a LipschitzMLP: training adds a penalty on the product
    of the per-layer (softplus-ed) Lipschitz constants to encourage a smooth
    mapping from positions to joints.
    """

    def __init__(
        self,
        in_dim: int = 3,
        out_dim: int = 6,
        hidden_dims=(512, 512, 512),
        activation_type: str = "Mish",
        use_layernorm: bool = False,
        dropout_rate: float | None = None,  # fixed annotation: None is a valid value
        lip_w: float = 1e-3,
        **kwargs
    ):
        """
        Args:
            in_dim: input dimension (xyz target position by default).
            out_dim: output dimension (6 joint values by default).
            hidden_dims: widths of the hidden MLP layers.
            activation_type: activation-function name understood by LipschitzMLP.
            use_layernorm: whether the MLP applies layer normalization.
            dropout_rate: dropout probability; None disables dropout.
            lip_w: weight of the Lipschitz regularization term in the loss.
            **kwargs: forwarded to CtrlModel; may contain "seed" (default 0).
        """
        seed = kwargs.get("seed", 0)
        model = LipschitzMLP(
            rngs=nnx.Rngs(seed),
            layer_dims=(in_dim, *hidden_dims, out_dim),
            activation_type=activation_type,
            use_layernorm=use_layernorm,
            dropout_rate=dropout_rate,
        )
        # set before super().__init__ so the attribute exists when jitted
        # methods close over self
        self.lip_w = lip_w

        super().__init__(model, **kwargs)

    @partial(nnx.jit, static_argnums=(0,))
    def jit_train_step(self, model: nnx.Module, optimizer: nnx.Optimizer, batch):
        """
        One jitted optimization step.

        batch.x: network inputs (e.g. target xyz), shape (batch_size, in_dim)
        batch.y: regression targets (e.g. joints), shape (batch_size, out_dim)
        -- NOTE(review): shapes assumed from layer_dims; confirm against loader.

        Returns an info dict with "loss", "l2_loss" and "lip_loss" (the latter
        is a shape-(1,) array).
        """
        # TODO: importance re-weighted L2-loss

        # BUGFIX: annotation was `nnx.module` (the submodule), not the
        # `nnx.Module` class.
        def loss_fn(model: nnx.Module):
            pred_joints = model(batch.x)

            # Lipschitz bound of the whole network = product of the per-layer
            # softplus-ed constants; penalizing it encourages smoothness.
            lip_loss = jnp.ones((1,))
            for layer in model.linear_layers:
                lip_loss = lip_loss * nnx.softplus(layer.c.value)

            l2_loss = jnp.mean((pred_joints - batch.y) ** 2)
            loss = l2_loss + self.lip_w * lip_loss[0]
            return loss, {"loss": loss, "l2_loss": l2_loss, "lip_loss": lip_loss}

        (_, info), grads = nnx.value_and_grad(loss_fn, has_aux=True)(model)
        optimizer.update(grads)  # inplace update
        return info


class FKGradientIK(LipschitzIKModel):
    """
    Solve IK by gradient descent through the learned (Lipschitz) FK network:
    the model maps joints -> xyz, and `get_delta_joint` returns a joint update
    that follows the gradient reducing the squared distance to the target.
    """

    def __init__(
        self,
        in_dim: int,
        out_dim: int,
        hidden_dims=(512, 512, 512),
        activation_type="Mish",
        use_layernorm=False,
        dropout_rate: float | None = None,
        lip_w: float = 1e-3,
        step_size: float = 0.1,
        **kwargs
    ):
        # Scale applied to the joint-space gradient in `jit_get_delta_joint`.
        # Set before super().__init__ so the attribute exists when the jitted
        # method is traced.
        self.step_size = step_size
        super().__init__(
            in_dim=in_dim,
            out_dim=out_dim,
            hidden_dims=hidden_dims,
            activation_type=activation_type,
            use_layernorm=use_layernorm,
            dropout_rate=dropout_rate,
            lip_w=lip_w,
            **kwargs
        )

    def get_delta_joint(self, joints, pos):
        """
        Compute a joint-space update step toward the target position `pos`.

        joints in [-1, 1]
        pos in [-1, 1]
        are all in normalized values

        Unbatched (1-D) inputs are promoted to a batch of one; the returned
        delta then keeps the batch dimension.
        """
        self.model.eval()
        if pos.ndim == 1:
            # add batch dim
            pos = pos[None, :]
        if joints.ndim == 1:
            joints = joints[None, :]
        delta_j = self.jit_get_delta_joint(self.model, joints, pos)
        return delta_j

    @partial(nnx.jit, static_argnums=(0,))
    def jit_get_delta_joint(self, model: nnx.Module, current_joints: jnp.ndarray, tar_xyz: jnp.ndarray):
        # Gradient of the NEGATIVE mean squared error w.r.t. the joints, so
        # stepping ALONG the gradient moves the predicted xyz toward tar_xyz.
        # vmap works for batch of actions -> grad = (B, dim_j)
        grad = nnx.vmap(nnx.grad(lambda j, m: -jnp.mean((m(j) - tar_xyz) ** 2)), in_axes=(0, None))(
            current_joints, model
        )

        # Scale the ascent direction by the configured step size.
        return self.step_size * grad


class EnsembleMLPIKModel(CtrlModel):
    """
    IK model backed by an ensemble of MLP heads; predictions are averaged
    across heads at call time.

    Dropout is currently not supported, since it requires rngs.
    """

    def __init__(
        self,
        in_dim: int,
        out_dim: int,
        num_heads: int = 1,
        hidden_dims=(512, 512, 512),
        activation_type: str = "Mish",
        use_layernorm: bool = False,
        dropout_rate: float | None = None,  # fixed annotation: None is a valid value
        **kwargs
    ):
        """
        Args:
            in_dim: input dimension.
            out_dim: output dimension per head.
            num_heads: number of ensemble members.
            hidden_dims: widths of the hidden MLP layers.
            activation_type: activation-function name understood by EnsembleMLP.
            use_layernorm: whether the MLP applies layer normalization.
            dropout_rate: dropout probability; None disables dropout.
            **kwargs: forwarded to CtrlModel; may contain "seed" (default 0).
        """
        seed = kwargs.get("seed", 0)
        # fixed local-variable typo: was `netowrk`
        network = EnsembleMLP(
            rngs=nnx.Rngs(seed),
            num_heads=num_heads,
            layer_dims=(in_dim, *hidden_dims, out_dim),
            activation_type=activation_type,
            use_layernorm=use_layernorm,
            dropout_rate=dropout_rate,
        )
        super().__init__(network, **kwargs)

    def __call__(self, pos):
        # Average the per-head predictions (axis 0 is the ensemble axis).
        return super().__call__(pos).mean(axis=0)
