
import jax.numpy as jnp
from flax import linen as nn
from model.networks.mlp import MLP, CondResMLP
from ..utils.jax_types import Sequence, Optional, PRNGKey



class ValueNet(nn.Module):
    """State-value network V(s): maps observations to one scalar per batch element."""

    hidden_dims: Sequence[int]  # widths of the hidden layers
    layer_norm: bool = False  # forwarded to MLP as use_layernorm
    dropout_rate: Optional[float] = None  # forwarded to MLP; None disables dropout

    @nn.compact
    def __call__(self, observations: jnp.ndarray) -> jnp.ndarray:
        # Follow the MLP(output_dim, hidden_dims, ...) call convention used by
        # QNet and DiscreteEnsembleQ in this file. The previous version passed
        # the whole (*hidden_dims, 1) tuple into the output_dim slot, which is
        # inconsistent with every other MLP call site here.
        state_value = MLP(
            1,
            self.hidden_dims,
            use_layernorm=self.layer_norm,
            dropout_rate=self.dropout_rate,
        )(observations)  # (..., 1)
        # Drop only the trailing singleton output axis -> (...,)
        return jnp.squeeze(state_value, -1)


class QNet(nn.Module):
    """
    For continuous actions
    """

    # input concatenate(s, a) -> scalar
    hidden_dims: Sequence[int]
    activation_type: str = "Mish"
    use_layernorm: bool = False
    dropout_rate: Optional[float] = None
    # When True, use a conditional-residual trunk (actions as main input,
    # states as the conditioning signal) instead of plain concat -> MLP.
    cond_residual: bool = False

    @nn.compact
    def __call__(self, states: jnp.ndarray, actions: jnp.ndarray) -> jnp.ndarray:
        """Return Q(s, a) with the trailing singleton output axis removed."""
        if self.cond_residual:
            q = CondResMLP(
                1,
                self.hidden_dims,
                use_layernorm=self.use_layernorm,
                dropout_rate=self.dropout_rate,
                activation_type=self.activation_type,
            )(actions, states)
        else:
            q = MLP(
                1,
                self.hidden_dims,
                use_layernorm=self.use_layernorm,
                dropout_rate=self.dropout_rate,
                activation_type=self.activation_type,
            )(jnp.concatenate([states, actions], -1))
        # Squeeze only the last axis: a bare .squeeze() also removes a batch
        # axis of size 1, silently collapsing (1, 1) -> scalar. Matches the
        # jnp.squeeze(..., -1) convention used by ValueNet above.
        return jnp.squeeze(q, -1)


class EnsembleQ(nn.Module):
    """Ensemble of `num_qs` independent QNet critics evaluated on the same (s, a)."""

    hidden_dims: Sequence[int]
    activation_type: str = "Mish"
    use_layernorm: bool = False
    dropout_rate: Optional[float] = None
    num_qs: int = 2
    cond_residual: bool = False

    @nn.compact
    def __call__(self, states: jnp.ndarray, actions: jnp.ndarray):
        # Lift QNet over a new leading ensemble axis: each member gets its own
        # parameters (variable_axes / split_rngs) while both inputs are shared
        # across members (in_axes=None). Outputs stack on axis 0.
        ensemble_cls = nn.vmap(
            QNet,
            variable_axes={"params": 0},
            split_rngs={"params": True},
            in_axes=None,
            out_axes=0,
            axis_size=self.num_qs,
        )
        critic = ensemble_cls(
            hidden_dims=self.hidden_dims,
            activation_type=self.activation_type,
            dropout_rate=self.dropout_rate,
            use_layernorm=self.use_layernorm,
            cond_residual=self.cond_residual,
        )
        return critic(states, actions)  # (num_qs, ...)


class DiscreteEnsembleQ(nn.Module):
    """Ensemble of `num_qs` MLP critics over discrete actions: states -> Q(s, .)."""

    hidden_dims: Sequence[int]
    act_dim: int  # number of discrete actions; one Q-value per action
    activation_type: str = "Mish"
    use_layernorm: bool = False
    dropout_rate: Optional[float] = None
    num_qs: int = 2

    @nn.compact
    def __call__(self, states: jnp.ndarray):
        # Per-member parameters, shared input, results stacked on a new
        # leading ensemble axis (same lifting scheme as EnsembleQ).
        ensemble_cls = nn.vmap(
            MLP,
            variable_axes={"params": 0},
            split_rngs={"params": True},
            in_axes=None,
            out_axes=0,
            axis_size=self.num_qs,
        )
        critic = ensemble_cls(
            output_dim=self.act_dim,
            hidden_dims=self.hidden_dims,
            activation_type=self.activation_type,
            dropout_rate=self.dropout_rate,
            use_layernorm=self.use_layernorm,
        )
        return critic(states)  # (num_qs, B, act_dim)


if __name__ == "__main__":
    # Smoke test: build a small ensemble critic, run a forward pass, and wrap
    # the parameters in a TrainState as the training loop would.
    import jax
    import optax
    from flax.training.train_state import TrainState

    obs_dim, act_dim = 3, 2

    qs_fn = EnsembleQ((5, 5))
    # The previous version passed a dict {"state": ...} with incompatible
    # shapes as `actions`; QNet concatenates states and actions along the last
    # axis, so both must be plain arrays with matching leading dimensions.
    params = qs_fn.init(jax.random.PRNGKey(0), jnp.zeros((1, obs_dim)), jnp.zeros((1, act_dim)))

    key_ = jax.random.PRNGKey(1)

    qs_ = qs_fn.apply(
        params,
        jax.random.normal(key_, (256, obs_dim)),
        jax.random.normal(key_, (256, act_dim)),
    )  # (num_qs, 256)

    critic = TrainState.create(apply_fn=qs_fn.apply, params=params, tx=optax.adam(learning_rate=3e-4))  # consistent with the loss function
