from typing import Optional, Sequence

import jax
import jax.numpy as jnp
from flax import linen, nnx

# from .initialization import default_init


# Maps config-friendly activation names to their callable implementations.
# Values are plain functions (not modules), so they can be appended directly
# into an nnx.Sequential. "Mish" comes from jax.nn; the rest from flax.nnx.
activation_dict = {
    "ReLU": nnx.relu,
    "ELU": nnx.elu,
    "GELU": nnx.gelu,
    "Tanh": nnx.tanh,
    "Mish": jax.nn.mish,
    "Softplus": nnx.softplus,
}


class SinusoidalPosEmb(nnx.Module):
    """Sinusoidal positional embedding for (diffusion) timesteps.

    Maps a timestep tensor of shape (batch_size, 1) to an embedding of shape
    (batch_size, 2 * (output_size // 2)) — equal to ``output_size`` when it is
    even. Frequencies are log-spaced from 1 down to 1/10000, and the result is
    the concatenation ``[cos, sin]`` along the last axis.
    """

    def __init__(self, output_size: int = 16):
        # Number of embedding channels; should be even so the cos/sin halves
        # together fill exactly output_size channels.
        self.output_size = output_size

    def __call__(self, x: jnp.ndarray):
        half_dim = self.output_size // 2
        # Log-spaced frequencies: exp(-i * log(10000) / (half_dim - 1)) for
        # i = 0..half_dim-1. The max() guards the division when
        # output_size < 4 (half_dim <= 1), which previously produced
        # inf -> 0 * -inf = NaN embeddings.
        f = jnp.log(10000) / max(half_dim - 1, 1)
        f = jnp.exp(jnp.arange(half_dim) * -f)
        # Broadcast: (batch, 1) * (half_dim,) -> (batch, half_dim).
        f = x * f
        return jnp.concatenate([jnp.cos(f), jnp.sin(f)], axis=-1)


class LipschitzLinear(nnx.Linear):
    """Linear layer with a learnable per-layer Lipschitz bound.

    A scalar parameter ``c`` controls the bound: on each forward pass every
    kernel row is rescaled so its absolute row-sum does not exceed
    ``softplus(c)``. ``get_softplus_c`` exposes the bound for use as a
    regularization term in the loss.
    """

    def __init__(self, in_features: int, out_features: int, **kwargs):
        super().__init__(in_features, out_features, **kwargs)

        # initialize the constant
        # Start c at the current max absolute row-sum so the rescaling in
        # __call__ is initially a no-op (scale == 1 for every row).
        # NOTE(review): flax kernels have shape (in_features, out_features),
        # so axis=-1 sums over *output* features for each input row; the
        # reference lipmlp formulation sums over the input axis — confirm
        # which norm is intended here.
        init_c = jnp.max(jnp.abs(self.kernel.value).sum(axis=-1))
        self.c = nnx.Param(jnp.array((init_c,), self.param_dtype))

    def __call__(self, inputs):
        # re-scale the kernel by the softplus_c
        # NOTE(review): this writes the scaled kernel back into
        # self.kernel.value, mutating the parameter on every call rather than
        # scaling a local copy — verify this is intended under jit/grad.
        w = self.kernel.value
        c = self.c.value
        absrowsum = jnp.sum(jnp.abs(w), axis=-1)
        # Per-row scale capped at 1: rows already within the bound are untouched.
        scale = jnp.minimum(1.0, nnx.softplus(c) / absrowsum)
        self.kernel.value = w * scale[:, None]
        return super().__call__(inputs)

    def get_softplus_c(self):
        # Effective Lipschitz bound softplus(c) as a scalar.
        return nnx.softplus(self.c.value[0])  # scalar, used in the loss


class LipschitzMLP(nnx.Module):
    """MLP built from LipschitzLinear layers.

    ``layer_dims`` includes input and output dimensions:
    [in_dim, h_dim1, h_dim2, ..., h_dimN, o_dim]. Hidden layers get
    (optional LayerNorm +) activation; the final linear layer gets neither.
    ``self.linear_layers`` keeps direct references to every LipschitzLinear
    so callers can collect each layer's softplus(c) for the loss.
    """

    # including in and out: [in_dim, h_dim1, h_dim2, ..., h_dimN, o_dim]
    def __init__(
        self,
        rngs: nnx.Rngs,
        layer_dims: Sequence[int],
        activation_type: str = "Mish",
        # Fixed annotation: a bare `str = None` default is invalid typing.
        output_activation: Optional[str] = None,
        use_layernorm: bool = False,
        dropout_rate: Optional[float] = None,
    ):
        layers = []
        linear_layers = []
        for i, (in_dim, out_dim) in enumerate(zip(layer_dims[:-1], layer_dims[1:])):
            layers.append(LipschitzLinear(in_dim, out_dim, rngs=rngs))
            linear_layers.append(layers[-1])
            if i < len(layer_dims) - 2:
                if use_layernorm:  # not used for the final layer
                    layers.append(nnx.LayerNorm(out_dim, rngs=rngs))
                layers.append(activation_dict[activation_type])
            # NOTE(review): dropout is appended after *every* linear layer,
            # including the final one — confirm output dropout is intended.
            if dropout_rate is not None and dropout_rate > 0:
                layers.append(nnx.Dropout(dropout_rate, rngs=rngs))

        if output_activation is not None:
            layers.append(activation_dict[output_activation])

        self.layers = nnx.Sequential(*layers)
        self.linear_layers = linear_layers

    def __call__(self, x):
        return self.layers(x)


class MLP(nnx.Module):
    """Standard feed-forward MLP.

    ``layer_dims`` includes input and output dimensions:
    [in_dim, h_dim1, h_dim2, ..., h_dimN, o_dim]. Hidden layers get
    (optional LayerNorm +) activation; the final linear layer gets neither
    unless ``output_activation`` is set.
    """

    # including in and out: [in_dim, h_dim1, h_dim2, ..., h_dimN, o_dim]
    def __init__(
        self,
        rngs: nnx.Rngs,
        layer_dims: Sequence[int],
        activation_type: str = "Mish",
        # Fixed annotation: a bare `str = None` default is invalid typing.
        output_activation: Optional[str] = None,
        use_layernorm: bool = False,
        dropout_rate: Optional[float] = None,
    ):
        layers = []
        for i, (in_dim, out_dim) in enumerate(zip(layer_dims[:-1], layer_dims[1:])):
            layers.append(nnx.Linear(in_dim, out_dim, rngs=rngs))
            if i < len(layer_dims) - 2:
                if use_layernorm:  # not used for the final layer
                    layers.append(nnx.LayerNorm(out_dim, rngs=rngs))
                layers.append(activation_dict[activation_type])
            # NOTE(review): dropout is appended after *every* linear layer,
            # including the final one — confirm output dropout is intended.
            if dropout_rate is not None and dropout_rate > 0:
                layers.append(nnx.Dropout(dropout_rate, rngs=rngs))

        if output_activation is not None:
            layers.append(activation_dict[output_activation])

        self.layers = nnx.Sequential(*layers)

    def __call__(self, x):
        return self.layers(x)


class EnsembleMLP(nnx.Module):
    """Ensemble of ``num_heads`` MLPs with identical architecture.

    Input:  (batch_size, in_dim), shared by all heads.
    Output: (num_heads, batch_size, out_dim) — one slice per head.

    NOTE(review): the original docstring stated "this cannot calculate the
    gradient"; kept as a caution pending confirmation of gradient flow
    through the per-call nnx.vmap in __call__.
    """

    def __init__(self, rngs: nnx.Rngs, num_heads: int, **kwargs):

        self.num_heads = num_heads

        # Construct all heads in one vmapped call so their parameters are
        # stacked along a leading axis; split_rngs gives each head its own
        # independent RNG stream (distinct initializations).
        @nnx.split_rngs(splits=num_heads)
        @nnx.vmap
        def make_model(rngs_):
            return MLP(rngs=rngs_, **kwargs)

        self.vmap_mlp = make_model(rngs)

    def __call__(self, x):
        # Map over the stacked-model axis (0) while broadcasting the same x
        # (in_axes=None) to every head.
        @nnx.vmap(in_axes=(None, 0), out_axes=0)
        def forward(x, model):
            return model(x)

        return forward(x, self.vmap_mlp)


class DiffusionMLP(nnx.Module):
    """Noise-prediction network for diffusion models.

    Flattens the sample ``x`` and conditioning ``cond``, concatenates them
    with a learned embedding of the timestep, runs the result through an MLP,
    and reshapes the output back to the sample's original shape.
    """

    def __init__(
        self,
        rngs: nnx.Rngs,
        output_dim: int,
        # [sic] "hiddien_dims" typo kept: it is a keyword argument callers
        # already pass by name. (dim(xt) + dim(t_emb) + dim(cond) ->
        # hiddien_dims... -> dim(xt))
        hiddien_dims: Sequence[int],
        cond_dim: int = 0,
        activation_type: str = "Mish",
        use_layernorm: bool = False,
        dropout_rate: Optional[float] = None,
        time_dim: int = 16,
    ):
        # Timestep encoder: sinusoidal features refined by a small 2-layer net.
        self.time_embedding = nnx.Sequential(
            SinusoidalPosEmb(time_dim),
            nnx.Linear(time_dim, time_dim * 2, rngs=rngs),
            jax.nn.mish,
            nnx.Linear(time_dim * 2, time_dim, rngs=rngs),
        )
        # Core network: (x_t ++ t_emb ++ cond) -> hidden dims -> x_t-sized output.
        self.mlp = MLP(
            rngs=rngs,
            layer_dims=(
                (output_dim + time_dim + cond_dim,)
                + tuple(hiddien_dims)
                + (output_dim,)
            ),
            activation_type=activation_type,
            use_layernorm=use_layernorm,
            dropout_rate=dropout_rate,
        )

    def __call__(self, x: jnp.ndarray, time: jnp.ndarray, cond: jnp.ndarray):
        original_shape = x.shape
        batch = original_shape[0]
        # Flatten everything after the batch axis before concatenation.
        flat_x = x.reshape(batch, -1)
        flat_cond = cond.reshape(batch, -1)
        t_emb = self.time_embedding(time)
        features = jnp.concatenate([flat_x, t_emb, flat_cond], axis=-1)  # (xt, t, cond)
        return self.mlp(features).reshape(original_shape)
