from typing import Callable, List, Union

import jax.numpy as jnp
from flax import linen as nn


class ContinuousPositionEmbedding(nn.Module):
    """Sinusoidal positional embedding adapted for a continuous signal.

    Periods are geometrically spaced between ``period_min`` and ``period_max``;
    each period contributes one sine and one cosine channel.
    """

    size_emb: int  # output embedding size; must be even (half sin, half cos)
    period_min: float  # smallest sinusoid period
    period_max: float  # largest sinusoid period

    def setup(self):
        # Fail fast: the sin/cos interleaving below only fills size_emb
        # channels exactly when size_emb is even; an odd value would
        # otherwise surface as an obscure reshape error in __call__.
        if self.size_emb % 2 != 0:
            raise ValueError(f"size_emb must be even, got {self.size_emb}")
        size_half = self.size_emb // 2
        # Geometric progression of periods: period_min * (max/min) ** ratio.
        tensor_period_ratio = jnp.linspace(0.0, 1.0, size_half)
        self.periods = self.period_min * jnp.power(
            self.period_max / self.period_min, tensor_period_ratio
        )

    def __call__(self, tensor_time: jnp.ndarray) -> jnp.ndarray:
        """Forward.
        Args:
            tensor_time: Input time (size_batch, 1).
        Returns:
            Generated embedding (size_batch, size_emb).
        """
        size_batch = tensor_time.shape[0]
        # Broadcast: (size_batch, 1) / (size_half,) -> (size_batch, size_half)
        tensor_phase = tensor_time / self.periods

        # Stack along a new trailing axis -> (size_batch, size_half, 2),
        # interleaving sin/cos per period once flattened.
        tensor_value = jnp.stack(
            [
                jnp.sin(2.0 * jnp.pi * tensor_phase),
                jnp.cos(2.0 * jnp.pi * tensor_phase),
            ],
            axis=-1,
        )

        tensor_value = tensor_value.reshape(size_batch, self.size_emb)
        return tensor_value


class BlockConv1d(nn.Module):
    """1d convolution keeping same temporal length with non-linearity and normalization"""

    size_channel_in: int
    size_channel_out: int
    size_kernel: int
    size_group_norm: int

    @nn.compact
    def __call__(self, tensor_in: jnp.ndarray) -> jnp.ndarray:
        """SAME-padded 1d conv, then GroupNorm, then SiLU activation."""
        conv = nn.Conv(
            features=self.size_channel_out,
            kernel_size=(self.size_kernel,),
            strides=(1,),
            padding="SAME",
        )
        norm = nn.GroupNorm(num_groups=self.size_group_norm)
        return nn.silu(norm(conv(tensor_in)))


class BlockDownsample(nn.Module):
    """Downscale the sequence by 2"""

    size_channel: int

    @nn.compact
    def __call__(self, tensor_in: jnp.ndarray) -> jnp.ndarray:
        # A stride-2, kernel-2 convolution halves the temporal length.
        downsample = nn.Conv(
            features=self.size_channel,
            kernel_size=(2,),
            strides=(2,),
            padding="VALID",
        )
        return downsample(tensor_in)


class BlockUpsample(nn.Module):
    """Upscale the sequence by 2"""

    size_channel: int

    @nn.compact
    def __call__(self, tensor_in: jnp.ndarray) -> jnp.ndarray:
        # A stride-2, kernel-2 transposed convolution doubles the length.
        upsample = nn.ConvTranspose(
            features=self.size_channel,
            kernel_size=(2,),
            strides=(2,),
            padding="VALID",
        )
        return upsample(tensor_in)


class BlockConv1dResidualConditional(nn.Module):
    """Convolutional block with residual connection and conditioning using FiLM"""

    size_channel_in: int
    size_channel_out: int
    size_cond: int
    size_kernel: int
    size_group_norm: int

    @nn.compact
    def __call__(self, tensor_in: jnp.ndarray, tensor_cond: jnp.ndarray) -> jnp.ndarray:
        """Two conv sub-blocks with a FiLM modulation in between, plus a 1x1-conv skip."""
        # FiLM parameters: one scale and one bias per output channel.
        film = nn.Dense(features=2 * self.size_channel_out)(tensor_cond)
        scale, bias = jnp.split(film, 2, axis=-1)

        # Insert a length axis so (batch, channel) broadcasts over (batch, length, channel).
        scale = scale[..., None, :]
        bias = bias[..., None, :]

        hidden = BlockConv1d(
            size_channel_in=self.size_channel_in,
            size_channel_out=self.size_channel_out,
            size_kernel=self.size_kernel,
            size_group_norm=self.size_group_norm,
        )(tensor_in)

        hidden = scale * hidden + bias

        hidden = BlockConv1d(
            size_channel_in=self.size_channel_out,
            size_channel_out=self.size_channel_out,
            size_kernel=self.size_kernel,
            size_group_norm=self.size_group_norm,
        )(hidden)

        # 1x1 conv projects the input so the skip matches the output channels.
        skip = nn.Conv(features=self.size_channel_out, kernel_size=(1,))(tensor_in)
        return hidden + skip


class ModelUnetResidualConditional(nn.Module):
    """Implement a 1d convolutional Unet architecture with
    residual block, group normalization and conditional vector.
    Uses position sinusoidal embedding.
    """

    size_channel: int  # input/output channel count of the trajectory
    size_emb_transport: int  # size of the transport (time) embedding
    size_cond: int  # size of the optional external conditioning vector
    size_channel_hidden: List[int]  # channel sizes of the hidden Unet levels
    period_min: float  # smallest sinusoid period for the time embedding
    period_max: float  # largest sinusoid period for the time embedding
    size_kernel: int  # temporal kernel size of the convolutions
    size_group_norm: int  # number of groups for GroupNorm

    @nn.compact
    def __call__(
        self,
        tensor_traj: jnp.ndarray,
        tensor_transport: jnp.ndarray,
        tensor_cond: Union[jnp.ndarray, None] = None,
    ) -> jnp.ndarray:
        """Forward.
        Args:
            tensor_traj: Trajectory features, last axis is channels
                (assumed (batch, length, size_channel); length presumably
                divisible by 2 per down/up level -- TODO confirm with caller).
            tensor_transport: Transport/time value fed to the sinusoidal
                embedding (assumed (batch, 1) -- see ContinuousPositionEmbedding).
            tensor_cond: Optional conditioning vector concatenated with the
                time embedding.
        Returns:
            Tensor with size_channel channels on the last axis.
        """
        # Sinusoidal embedding of the transport value, refined by a small MLP.
        transport_embedded = ContinuousPositionEmbedding(
            size_emb=self.size_emb_transport,
            period_min=self.period_min,
            period_max=self.period_max,
        )(tensor_transport)

        transport_embedded = nn.Dense(features=self.size_emb_transport * 4)(
            transport_embedded
        )
        transport_embedded = nn.silu(transport_embedded)
        transport_embedded = nn.Dense(features=self.size_emb_transport)(
            transport_embedded
        )
        transport_embedded = nn.silu(transport_embedded)

        # Full conditioning vector = time embedding (+ external condition).
        if tensor_cond is not None:
            cond_all = jnp.concatenate([transport_embedded, tensor_cond], axis=-1)
        else:
            cond_all = transport_embedded

        # Flax Conv operates on the last axis, so data is expected as
        # (batch, length, channel) and no transposition is needed here.

        list_residuals = []
        x = tensor_traj

        # Downsample path: conditional residual block, store skip, halve length.
        list_size_channel = [self.size_channel] + list(self.size_channel_hidden)
        for i in range(len(list_size_channel) - 1):
            size_in = list_size_channel[i]
            size_out = list_size_channel[i + 1]

            x = BlockConv1dResidualConditional(
                size_channel_in=size_in,
                size_channel_out=size_out,
                size_cond=cond_all.shape[-1],
                size_kernel=self.size_kernel,
                size_group_norm=self.size_group_norm,
            )(x, cond_all)

            list_residuals.append(x)
            x = BlockDownsample(size_channel=size_out)(x)

        # Middle path: one block at the bottleneck channel count.
        size_last = list_size_channel[-1]
        x = BlockConv1dResidualConditional(
            size_channel_in=size_last,
            size_channel_out=size_last,
            size_cond=cond_all.shape[-1],
            size_kernel=self.size_kernel,
            size_group_norm=self.size_group_norm,
        )(x, cond_all)

        # Upsample path: double length, fuse skip connection, process.
        for i in range(len(list_size_channel) - 1, 0, -1):
            size_in = list_size_channel[i]
            if i == 1:
                # Top level keeps the first hidden width rather than dropping
                # to size_channel; the final 1x1 conv does that projection.
                size_out = list(self.size_channel_hidden)[0]
            else:
                size_out = list_size_channel[i - 1]

            x = BlockUpsample(size_channel=size_in)(x)
            x = jnp.concatenate([x, list_residuals.pop()], axis=-1)

            x = BlockConv1dResidualConditional(
                size_channel_in=x.shape[-1],  # Adjusted for concatenation
                size_channel_out=size_out,
                size_cond=cond_all.shape[-1],
                size_kernel=self.size_kernel,
                size_group_norm=self.size_group_norm,
            )(x, cond_all)

        # Final convolution projects back to the trajectory channel count.
        return nn.Conv(features=self.size_channel, kernel_size=(1,))(x)


class ModelDenseSimple(nn.Module):
    """Simple MLP model concatenating all features as input layer"""

    size_channel: int
    size_length: int
    size_cond: int
    size_hidden_list: List[int]

    @nn.compact
    def __call__(
        self,
        tensor_traj: jnp.ndarray,
        tensor_transport: jnp.ndarray,
        tensor_cond: Union[jnp.ndarray, None] = None,
    ) -> jnp.ndarray:
        """Flatten the trajectory, concatenate conditioning, run an MLP head."""
        size_batch = tensor_traj.shape[0]
        features = [tensor_traj.reshape(size_batch, -1), tensor_transport]
        if tensor_cond is not None:
            features.append(tensor_cond)
        x = jnp.concatenate(features, axis=-1)

        # Hidden stack: Dense -> LayerNorm -> SiLU for each hidden size.
        for size_hidden in self.size_hidden_list:
            x = nn.silu(nn.LayerNorm()(nn.Dense(features=size_hidden)(x)))

        # Output head, reshaped back to (batch, length, channel).
        x = nn.Dense(features=self.size_channel * self.size_length)(x)
        return x.reshape(size_batch, self.size_length, self.size_channel)


class MLPNet(nn.Module):
    """Multi layer perceptron implementation.

    Attributes:
        dim_in: Expected feature size of the concatenated inputs.
        dim_hidden: Hidden layer sizes (must contain at least one entry).
        dim_out: Output feature size.
        activation: Activation callable applied after each hidden layer.
        norm_layer: If True, apply LayerNorm before the activation on every
            hidden layer after the first.
    """

    dim_in: int
    dim_hidden: List[int]
    dim_out: int
    # Annotated as Callable: nn.silu is a plain activation function,
    # not an nn.Module (the previous annotation was misleading).
    activation: Callable = nn.silu
    norm_layer: bool = False

    @nn.compact
    def __call__(self, *tensors):
        # All inputs are concatenated on the feature axis (axis=1),
        # so every input is expected to be 2d (batch, features).
        x = jnp.concatenate(tensors, axis=1)
        assert x.shape[1] == self.dim_in, (
            f"Invalid input tensor dimension: {x.shape[1]} != {self.dim_in}"
        )

        x = nn.Dense(features=self.dim_hidden[0])(x)
        x = self.activation(x)

        # Remaining hidden layers, optionally normalized before activation.
        for i in range(1, len(self.dim_hidden)):
            x = nn.Dense(features=self.dim_hidden[i])(x)
            if self.norm_layer:
                x = nn.LayerNorm()(x)
            x = self.activation(x)

        # Final linear projection (no activation).
        x = nn.Dense(features=self.dim_out)(x)
        return x


class ResnetBlock(nn.Module):
    """Pre-activation residual block: two SiLU + 3x3 conv passes plus identity skip."""

    size_channel: int

    @nn.compact
    def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
        out = x
        for _ in range(2):
            out = nn.silu(out)
            out = nn.Conv(
                features=self.size_channel, kernel_size=(3, 3), padding="SAME"
            )(out)
        return x + out


class ResnetStack(nn.Module):
    """Conv + max-pool downsample followed by a stack of residual blocks."""

    size_channel_in: int
    size_channel_out: int
    size_block: int

    @nn.compact
    def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
        h = nn.Conv(
            features=self.size_channel_out, kernel_size=(3, 3), padding="SAME"
        )(x)
        # Spatial downsampling by 2.
        h = nn.max_pool(h, window_shape=(3, 3), strides=(2, 2), padding="SAME")
        for _ in range(self.size_block):
            h = ResnetBlock(size_channel=self.size_channel_out)(h)
        return h


class ImpalaEncoder(nn.Module):
    """IMPALA-style convolutional image encoder followed by an MLP head."""

    size_stacks_channel: List[int]
    size_fc_in: int
    size_fc_hidden: List[int]
    size_fc_out: int
    size_block: int
    use_layer_norm: bool = True
    dropout_rate: float = 0.0

    @nn.compact
    def __call__(self, x: jnp.ndarray, training: bool) -> jnp.ndarray:
        # Normalize pixel values from [0, 255] to [-0.5, 0.5].
        h = x.astype(jnp.float32) / 255.0 - 0.5

        # First entry of size_stacks_channel is the input channel count;
        # each adjacent pair defines one ResnetStack.
        for size_prev, size_next in zip(
            self.size_stacks_channel[:-1], self.size_stacks_channel[1:]
        ):
            h = ResnetStack(
                size_channel_in=size_prev,
                size_channel_out=size_next,
                size_block=self.size_block,
            )(h)
            if self.dropout_rate > 0.0:
                h = nn.Dropout(rate=self.dropout_rate)(h, deterministic=not training)

        # Flatten spatial dimensions before the fully-connected head.
        h = h.reshape((h.shape[0], -1))
        h = nn.silu(h)
        if self.use_layer_norm:
            h = nn.LayerNorm()(h)

        h = MLPNet(
            dim_in=self.size_fc_in,
            dim_hidden=self.size_fc_hidden,
            dim_out=self.size_fc_out,
            norm_layer=self.use_layer_norm,
        )(h)

        h = nn.silu(h)
        if self.use_layer_norm:
            h = nn.LayerNorm()(h)

        return h


class FiLMResNetBlock(nn.Module):
    """A FiLM-modulated residual block for MLPs."""

    hidden_dim: int
    cond_dim: int

    @nn.compact
    def __call__(self, x: jnp.ndarray, cond: jnp.ndarray) -> jnp.ndarray:
        # Project the skip path only when the width changes.
        if x.shape[-1] != self.hidden_dim:
            skip = nn.Dense(features=self.hidden_dim)(x)
        else:
            skip = x

        # Two-layer main path.
        h = nn.silu(nn.Dense(features=self.hidden_dim)(x))
        h = nn.silu(nn.Dense(features=self.hidden_dim)(h))

        # FiLM: the condition produces per-feature scale (gamma) and shift (beta).
        film = nn.Dense(features=2 * self.hidden_dim)(cond)
        gamma, beta = jnp.split(film, 2, axis=-1)
        h = gamma * h + beta

        return h + skip


class ModelUnetTDFlow(nn.Module):
    """U-Net-style MLP for TD-Flow, operating on feature dimensions."""

    state_dim: int
    size_emb_transport: int
    hidden_dims: List[int]  # e.g., [256, 128, 64]
    period_min: float
    period_max: float

    @nn.compact
    def __call__(
        self,
        x_t: jnp.ndarray,
        t: jnp.ndarray,
        cond_state: jnp.ndarray,
        cond_action: jnp.ndarray,
        cond_goal: jnp.ndarray,
    ) -> jnp.ndarray:
        """Denoise/flow network: FiLM-conditioned MLP with U-Net skip fusion."""
        # Embed the transport time and build the full conditioning vector.
        t_emb = ContinuousPositionEmbedding(
            size_emb=self.size_emb_transport,
            period_min=self.period_min,
            period_max=self.period_max,
        )(t)
        cond = jnp.concatenate([t_emb, cond_state, cond_action, cond_goal], axis=-1)
        size_cond = cond.shape[-1]

        dims_encoder = list(self.hidden_dims[:-1])
        dim_bottleneck = self.hidden_dims[-1]

        # Initial projection into the first hidden width.
        h = nn.Dense(features=self.hidden_dims[0])(x_t)

        # Encoder: FiLM block, remember the skip, shrink the feature dim.
        skips = []
        for index, dim in enumerate(dims_encoder):
            h = FiLMResNetBlock(hidden_dim=dim, cond_dim=size_cond)(h, cond)
            skips.append(h)
            h = nn.Dense(features=self.hidden_dims[index + 1])(h)

        # Bottleneck.
        h = FiLMResNetBlock(hidden_dim=dim_bottleneck, cond_dim=size_cond)(h, cond)

        # Decoder: grow the feature dim, fuse the skip, FiLM block.
        for dim in reversed(dims_encoder):
            h = nn.Dense(features=dim)(h)
            h = jnp.concatenate([h, skips.pop()], axis=-1)
            h = FiLMResNetBlock(hidden_dim=dim, cond_dim=size_cond)(h, cond)

        # Project back to the state dimension.
        return nn.Dense(features=self.state_dim)(h)


class Identity(nn.Module):
    """An identity layer that returns the input as it is."""

    @nn.compact
    def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
        return x
