import flax.linen as nn
import jax.numpy as jnp
from typing import Optional, Sequence
from .initialization import default_init


def mish(x):
    """Mish activation: ``x * tanh(softplus(x))``.

    softplus is computed as ``logaddexp(x, 0)`` — the numerically stable
    form used by ``jax.nn.softplus`` (which ``flax.linen.softplus``
    re-exports) — so this helper only needs ``jax.numpy`` and can be used
    both as a plain function and inside ``nn.Sequential``.
    """
    return x * jnp.tanh(jnp.logaddexp(x, 0.0))


# Registry mapping config-friendly activation names to their callables.
# Looked up by the MLP modules below via their `activation_type` field.
activation_dict = {
    "ReLU": nn.relu,
    "ELU": nn.elu,
    "GELU": nn.gelu,
    "Tanh": nn.tanh,
    "Mish": mish,
    "Softplus": nn.softplus,
}


class SinusoidalPosEmb(nn.Module):
    """Sinusoidal embedding for diffusion timesteps.

    Maps a timestep array of shape (B, 1) to an embedding of shape
    (B, 2 * (output_size // 2)) made of a cosine half and a sine half
    over a geometric ladder of frequencies from 1 down to 1/10000.
    NOTE(review): assumes ``output_size`` is even and >= 4; an odd value
    silently yields a narrower embedding — confirm with callers.
    """

    output_size: int = 16  # embedding width (effective width is 2 * (output_size // 2))

    @nn.compact
    def __call__(self, x: jnp.ndarray):
        half = self.output_size // 2
        # Per-dimension log-spaced frequencies, as in the Transformer
        # positional encoding / DDPM timestep embedding.
        scale = -(jnp.log(10000) / (half - 1))
        freqs = jnp.exp(jnp.arange(half) * scale)
        angles = x * freqs  # broadcast (B, 1) * (half,) -> (B, half)
        return jnp.concatenate([jnp.cos(angles), jnp.sin(angles)], axis=-1)
    

class MLP(nn.Module):
    """Plain feed-forward network.

    Applies Dense layers of sizes ``hidden_dims`` followed by a final
    Dense of size ``output_dim``. Each hidden layer is optionally
    layer-normalized, activated, and dropped out; the output layer is
    left linear unless ``activate_final`` is set.
    """

    output_dim: int
    hidden_dims: Sequence[int]
    activation_type: str = "Mish"  # key into activation_dict
    activate_final: bool = False
    use_layernorm: bool = False
    dropout_rate: Optional[float] = None

    @nn.compact
    def __call__(self, x: jnp.ndarray, train: bool = False) -> jnp.ndarray:
        """Forward pass; ``train=True`` enables dropout."""
        # Unpack into a fresh tuple so list-typed hidden_dims (allowed by
        # the Sequence annotation) also works — list + tuple raises.
        layer_sizes = (*self.hidden_dims, self.output_dim)
        num_layers = len(layer_sizes)
        for i, size in enumerate(layer_sizes):
            x = nn.Dense(size, kernel_init=nn.initializers.lecun_uniform())(x)
            # Bug fix: the condition previously compared against
            # len(self.hidden_dims) while iterating over one extra (output)
            # layer, leaving the LAST hidden layer without an activation
            # (two consecutive linear layers). Every hidden layer gets the
            # norm/activation/dropout stack; the output layer only does if
            # activate_final is set.
            if i + 1 < num_layers or self.activate_final:
                if self.use_layernorm:
                    x = nn.LayerNorm()(x)
                x = activation_dict[self.activation_type](x)
                if self.dropout_rate is not None and self.dropout_rate > 0:
                    x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=not train)
        return x


class DiffusionMLP(nn.Module):
    """Diffusion denoising head.

    Flattens a noisy action chunk, concatenates a learned sinusoidal time
    embedding and the flattened observation condition, runs an MLP, and
    reshapes the result back to the input chunk shape.
    """

    output_dim: int  # e.g. action_dim * horizon_steps
    time_dim: int = 16
    mlp_dims: tuple = (256, 256)
    activation_type: str = "Mish"
    activate_final: bool = False
    use_layernorm: bool = False
    dropout_rate: Optional[float] = None

    def setup(self):
        # Sinusoidal features followed by a small 2-layer projection.
        self.time_embedding = nn.Sequential(
            [
                SinusoidalPosEmb(self.time_dim),
                nn.Dense(self.time_dim * 2),
                mish,
                nn.Dense(self.time_dim),
            ]
        )
        self.mlp_mean = MLP(
            output_dim=self.output_dim,
            hidden_dims=self.mlp_dims,
            activation_type=self.activation_type,
            # Bug fix: activate_final was declared on this module but never
            # forwarded, so setting it had no effect.
            activate_final=self.activate_final,
            use_layernorm=self.use_layernorm,
            dropout_rate=self.dropout_rate,
        )

    def __call__(self, x: jnp.ndarray, time: jnp.ndarray, cond: jnp.ndarray, train: bool = False) -> jnp.ndarray:
        """Predict the denoised chunk.

        Args:
            x: noisy action chunk, e.g. (batch_size, horizon_steps, act_dim).
            time: diffusion timestep, shape (B, 1).
            cond: observation/history tensor; flattened per batch element.
            train: enables dropout in the MLP head.

        Returns:
            Array with the same shape as ``x``; assumes ``output_dim``
            equals the flattened chunk size so the reshape is valid.
        """
        input_shape = x.shape
        x = x.reshape(input_shape[0], -1)          # flatten chunk
        state = cond.reshape(input_shape[0], -1)   # flatten history
        time_emb = self.time_embedding(time)
        x = jnp.concatenate([x, time_emb, state], axis=-1)
        return self.mlp_mean(x, train=train).reshape(input_shape)
    

class DiffusionPlanningMLP(DiffusionMLP):
    """Unconditional variant of DiffusionMLP for trajectory planning: the
    denoised sequence itself carries the state, so there is no separate
    ``cond`` argument."""

    def __call__(self, x: jnp.ndarray, time: jnp.ndarray, train: bool = False) -> jnp.ndarray:
        # x: e.g. (batch_size, horizon_step+1, state_dim)
        orig_shape = x.shape
        batch = orig_shape[0]

        flat = x.reshape(batch, -1)  # flatten chunk
        emb = self.time_embedding(time)
        features = jnp.concatenate([flat, emb], axis=-1)

        out = self.mlp_mean(features, train=train)
        return out.reshape(orig_shape)


class CondResMLP(nn.Module):
    """MLP that re-injects a conditioning vector at every layer.

    The condition ``cond`` is concatenated to the layer input before each
    Dense, so every layer sees the raw condition in addition to the
    previous layer's features.
    (The original placed this docstring after the fields, where it was a
    no-op string expression rather than the class docstring.)
    """

    output_dim: int
    hidden_dims: Sequence[int]
    activation_type: str = "Mish"  # key into activation_dict
    activate_final: bool = False
    use_layernorm: bool = False
    dropout_rate: Optional[float] = None

    @nn.compact
    def __call__(self, x: jnp.ndarray, cond: jnp.ndarray, train: bool = False) -> jnp.ndarray:
        """Forward pass; ``train=True`` enables dropout."""
        # Unpack into a fresh tuple so list-typed hidden_dims also works.
        layer_sizes = (*self.hidden_dims, self.output_dim)
        num_layers = len(layer_sizes)
        for i, size in enumerate(layer_sizes):
            # jnp.concatenate (not the newer jnp.concat alias) for
            # consistency with the rest of the file and older jax versions.
            h = jnp.concatenate([x, cond], axis=-1)
            x = nn.Dense(size, kernel_init=default_init())(h)
            # Bug fix: the condition previously compared against
            # len(self.hidden_dims) while iterating over one extra (output)
            # layer, leaving the LAST hidden layer without an activation.
            if i + 1 < num_layers or self.activate_final:
                if self.use_layernorm:
                    x = nn.LayerNorm()(x)
                x = activation_dict[self.activation_type](x)
                if self.dropout_rate is not None and self.dropout_rate > 0:
                    x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=not train)
        return x
