"""Token Learner implementation from https://arxiv.org/abs/2106.11297.

Implementation from github.com/google-research/scenic/projects/token_learner/.
"""

from typing import Callable, Optional, Sequence

import torch
from torch import nn
# import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np

# JAX/Flax-style parameter-initializer signature: (rng, shape, dtype) -> array.
# Retained from the original Flax implementation; not referenced in this
# file's visible code — TODO(review): confirm it is still needed.
Initializer = Callable[[np.ndarray, Sequence[int], torch.dtype], np.ndarray]


class IdentityLayer(nn.Module):
  """Identity layer, convenient for giving a name to an array.

  Implemented via ``forward`` (not by overriding ``__call__``) so that
  PyTorch's module machinery — hooks, ``.to()``, tracing — still applies.
  """

  def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Returns ``x`` unchanged."""
    return x


class MlpBlock(nn.Module):
  """Transformer MLP / feed-forward block (PyTorch port of the Flax original).

  The two dense layers are built lazily on the first forward pass (mirroring
  Flax's input-shape inference) and then cached as sub-modules, so their
  weights persist and can train instead of being re-randomized on every call.
  """

  def __init__(
      self,
      mlp_dim: int,
      out_dim: Optional[int] = None,
      dropout_rate: float = 0.1,
      use_bias: bool = True,
      precision=None,  # Unused; kept for signature compatibility with the Flax code.
      dtype: torch.dtype = torch.float32,
  ):
    """Creates the block.

    Args:
      mlp_dim: Hidden width of the first dense layer.
      out_dim: Output width; defaults to the input's last-axis size.
      dropout_rate: Dropout probability applied after each dense layer.
      use_bias: Whether the dense layers carry bias terms.
      precision: Ignored in the torch port (Flax matmul precision hint).
      dtype: Ignored in the torch port; kept for compatibility.
    """
    super().__init__()  # Required before any sub-module registration.
    self.mlp_dim = mlp_dim
    self.out_dim = out_dim
    self.dropout_rate = dropout_rate
    self.use_bias = use_bias
    self.precision = precision
    self.dtype = dtype
    # Built on the first call, once the input feature size is known.
    self.fc1: Optional[nn.Linear] = None
    self.fc2: Optional[nn.Linear] = None

  def forward(self, inputs: torch.Tensor, *, deterministic: bool) -> torch.Tensor:
    """Applies the Transformer MlpBlock module.

    Args:
      inputs: Tensor of shape `[..., d]`; the MLP acts on the last axis.
      deterministic: If True (e.g. inference), dropout is disabled.

    Returns:
      Tensor of shape `[..., out_dim]` (or `[..., d]` when out_dim is None).
    """
    actual_out_dim = inputs.shape[-1] if self.out_dim is None else self.out_dim
    if self.fc1 is None:
      self.fc1 = nn.Linear(inputs.shape[-1], self.mlp_dim, bias=self.use_bias)
      self.fc2 = nn.Linear(self.mlp_dim, actual_out_dim, bias=self.use_bias)
    x = self.fc1(inputs)
    # gelu activation between the dense layers, as in the Flax original
    # (activation_fn=nn.gelu); the earlier port dropped it.
    x = nn.functional.gelu(x)
    x = nn.functional.dropout(
        x, p=self.dropout_rate, training=not deterministic)
    output = self.fc2(x)
    output = nn.functional.dropout(
        output, p=self.dropout_rate, training=not deterministic)
    return output


class TokenLearnerModuleV11(nn.Module):
  """TokenLearner module Version 1.1, using slightly different conv. layers.

  Instead of using 4 conv. layers with small channels to implement spatial
  attention, this version uses a MLP with gelu inbetween. It also uses softmax
  instead of sigmoid. We confirmed that this version works better in general.

  Attributes:
    num_tokens: Number of tokens.
    bottleneck_dim: The size of hidden units in the MLP for spatial attention.
    dropout_rate: Dropout rate.
  """

  def __init__(self, num_tokens: int, bottleneck_dim: int = 64,
               dropout_rate: float = 0):
    super().__init__()  # Required before registering sub-modules.
    self.num_tokens = num_tokens
    self.bottleneck_dim = bottleneck_dim
    self.dropout_rate = dropout_rate
    # Built lazily on the first forward pass, once the channel size is known,
    # so the weights persist across calls instead of being re-randomized
    # (the earlier port rebuilt LayerNorm/MlpBlock on every call and
    # hard-coded LayerNorm(512)).
    self.norm: Optional[nn.LayerNorm] = None
    self.token_masking = None

  def forward(self, inputs: torch.Tensor, deterministic: bool) -> torch.Tensor:
    """Applies learnable tokenization to the 2D inputs.

    Args:
      inputs: Inputs of shape `[bs, c, h, w]` (channels-first torch layout)
        or already-flattened `[bs, h*w, c]`.
      deterministic: Whether we are in the deterministic mode (e.g. inference
        time) or not.

    Returns:
      Output of shape `[bs, n_token, c]`.
    """
    if inputs.ndim == 4:
      n, c, h, w = inputs.shape
      inputs = torch.reshape(inputs, [n, c, h * w])
      inputs = torch.transpose(inputs, 1, 2)  # -> [bs, h*w, c]
    feature_shape = inputs.shape

    if self.norm is None:
      # LayerNorm over the channel axis, sized from the actual input.
      self.norm = nn.LayerNorm(feature_shape[-1])
      self.token_masking = MlpBlock(
          mlp_dim=self.bottleneck_dim,
          out_dim=self.num_tokens,
          dropout_rate=self.dropout_rate,
      )

    selected = self.norm(inputs)
    # Per-position token scores.  Shape: [bs, h*w, n_token].
    selected = self.token_masking(selected, deterministic=deterministic)
    selected = torch.reshape(
        selected, [feature_shape[0], -1, self.num_tokens])
    selected = torch.transpose(selected, 1, 2)  # Shape: [bs, n_token, h*w].
    # Softmax over the spatial axis: each token is a convex combination
    # of the h*w positions.
    selected = torch.softmax(selected, dim=-1)

    feat = torch.reshape(
        inputs, [feature_shape[0], -1, feature_shape[-1]]
    )  # Shape: [bs, h*w, c].

    # Weighted spatial pooling of the features by the learned attention maps.
    return torch.einsum('...si,...id->...sd', selected, feat)
