import equinox as eqx
import jax.numpy as jnp
from jaxtyping import Float, Array, Bool, PyTree, Int
import jax

class ScaledDotProductAttention(eqx.Module):
    """Single-head scaled dot-product attention with dropout on the attention weights."""

    temperature: float  # scaling divisor applied to the queries (typically sqrt(d_k))
    dropout: eqx.Module

    def __init__(self, temperature: float, dropout_rate: float):
        self.temperature = temperature
        self.dropout = eqx.nn.Dropout(dropout_rate)

    @eqx.filter_jit
    def __call__(self, q: Float[Array, "T_q key_dim"], k: Float[Array, "T_k key_dim"],
                 v: Float[Array, "T_k val_dim"], mask: Bool[Array, "T_q T_k"], dropout_key: Array):
        """Return attention-weighted values, shape (T_q, val_dim).

        Positions where `mask` is False receive a large negative score and
        thus ~zero weight after the softmax.
        """
        scores = jnp.matmul(q / self.temperature, k.T)   # (T_q, T_k)
        masked = jnp.where(mask, scores, -1e9)
        weights = self.dropout(jax.nn.softmax(masked, -1), key=dropout_key)
        return jnp.matmul(weights, v)
    
class MultiheadAttention(eqx.Module):
    """Multi-head attention with residual connection, dropout, and post-LayerNorm."""

    wq: eqx.Module
    wk: eqx.Module
    wv: eqx.Module
    attn: ScaledDotProductAttention
    dropout: eqx.Module
    fc: eqx.Module
    nhead: int
    d_k: int
    d_v: int
    norm: eqx.Module

    def __init__(self, input_dim: int, nhead: int, d_k: int, d_v: int, dropout_rate: float, key: Array):
        self.nhead = nhead
        self.d_k = d_k
        self.d_v = d_v
        kq, kk, kv, kf = jax.random.split(key, 4)
        # All heads share one fused (input_dim -> nhead*d) projection, bias-free.
        self.wq = eqx.nn.Linear(input_dim, nhead * d_k, False, key=kq)
        self.wk = eqx.nn.Linear(input_dim, nhead * d_k, False, key=kk)
        self.wv = eqx.nn.Linear(input_dim, nhead * d_v, False, key=kv)
        self.attn = ScaledDotProductAttention(d_k ** 0.5, dropout_rate)
        self.fc = eqx.nn.Linear(d_v * nhead, input_dim, key=kf)
        self.norm = eqx.nn.LayerNorm(input_dim, 1e-6)
        self.dropout = eqx.nn.Dropout(dropout_rate)

    @eqx.filter_jit
    def __call__(self, q: Float[Array, "T_q key_dim"], k: Float[Array, "T_k key_dim"],
                 v: Float[Array, "T_k val_dim"], mask: Bool[Array, "T_q T_k"], key: Array):
        """Attend q over (k, v) per head; returns (T_q, input_dim)."""
        T_q, T_k = q.shape[0], k.shape[0]
        residual = q
        # Project and split into heads: (T, nhead*d) -> (nhead, T, d).
        heads_q = jax.vmap(self.wq)(q).reshape(T_q, self.nhead, self.d_k).swapaxes(0, 1)
        heads_k = jax.vmap(self.wk)(k).reshape(T_k, self.nhead, self.d_k).swapaxes(0, 1)
        heads_v = jax.vmap(self.wv)(v).reshape(T_k, self.nhead, self.d_v).swapaxes(0, 1)
        attn_key, drop_key = jax.random.split(key)
        # One independent dropout key per head; the mask is shared across heads.
        per_head = jax.vmap(self.attn, in_axes=(0, 0, 0, None, 0))(
            heads_q, heads_k, heads_v, mask, jax.random.split(attn_key, self.nhead)
        )  # (nhead, T_q, d_v)
        # Merge heads back: (nhead, T_q, d_v) -> (T_q, nhead*d_v).
        merged = per_head.swapaxes(0, 1).reshape(T_q, self.nhead * self.d_v)
        out = self.dropout(jax.vmap(self.fc)(merged), key=drop_key)
        return jax.vmap(self.norm)(out + residual)
    
class AdaptiveMultiheadAttention(eqx.Module):
    """Multi-head attention with one extra "adaptive" head whose queries and keys
    are learned embeddings (E2 / E1) rather than projections of the inputs.

    wv produces values for nhead+1 heads; all nhead+1 head outputs are merged
    and projected back to input_dim, followed by dropout, residual, LayerNorm.
    """

    wq: eqx.Module
    wk: eqx.Module
    wv: eqx.Module
    attn: ScaledDotProductAttention
    dropout: eqx.Module
    fc: eqx.Module
    nhead: int
    d_k: int
    d_v: int
    norm: eqx.Module
    E1: Array  # learned keys for the adaptive head, (num_keys, d_k)
    E2: Array  # learned queries for the adaptive head, (num_queries, d_k)

    def __init__(self, input_dim: int, nhead: int, d_k: int, d_v: int, num_keys: int, num_queries: int, dropout_rate: float, key: Array):
        self.nhead = nhead
        self.d_k = d_k
        self.d_v = d_v
        key1, key2, key3, key4, key5, key6 = jax.random.split(key, 6)
        self.wq = eqx.nn.Linear(input_dim, nhead*d_k, False, key=key1)
        self.wk = eqx.nn.Linear(input_dim, nhead*d_k, False, key=key2)
        # Values are produced for the nhead regular heads PLUS the adaptive head.
        self.wv = eqx.nn.Linear(input_dim, (nhead+1)*d_v, False, key=key3)
        self.attn = ScaledDotProductAttention(d_k**0.5, dropout_rate)
        # FIX: fc consumes the concatenation of all nhead+1 heads
        # (was d_v*nhead, which mismatched the nhead+1 attended heads).
        self.fc = eqx.nn.Linear(d_v*(nhead+1), input_dim, key=key4)
        self.norm = eqx.nn.LayerNorm(input_dim, 1e-6)
        self.dropout = eqx.nn.Dropout(dropout_rate)
        # NOTE(review): at call time the head-axis concatenation below requires
        # num_keys == T_k and num_queries == T_q — confirm against callers
        # (AdaptiveSelfAttentionLayer passes num_types for both).
        self.E1 = jax.random.normal(key5, (num_keys, d_k)) * 0.01
        self.E2 = jax.random.normal(key6, (num_queries, d_k)) * 0.01

    @eqx.filter_jit
    def __call__(self, q: Float[Array, "T_q key_dim"], k: Float[Array, "T_k key_dim"],
                 v: Float[Array, "T_k val_dim"], mask: Bool[Array, "T_q T_k"], key: Array):
        """Attend q over (k, v) with nhead projected heads plus the adaptive head;
        returns (T_q, input_dim)."""
        T_q = q.shape[0]
        T_k = k.shape[0]
        residual = q
        q = jax.vmap(self.wq)(q).reshape((T_q, self.nhead, self.d_k)) # (T_q, nhead, d_k)
        k = jax.vmap(self.wk)(k).reshape((T_k, self.nhead, self.d_k)) # (T_k, nhead, d_k)
        # FIX: wv emits (nhead+1)*d_v features, so split into nhead+1 heads
        # (previous reshape used nhead and failed at trace time).
        v = jax.vmap(self.wv)(v).reshape((T_k, self.nhead+1, self.d_v)) # (T_k, nhead+1, d_v)
        q = q.transpose(1, 0, 2) # (nhead, T_q, d_k)
        k = k.transpose(1, 0, 2) # (nhead, T_k, d_k)
        v = v.transpose(1, 0, 2) # (nhead+1, T_k, d_v)
        attn_key, dropout_key = jax.random.split(key)
        # Append the learned embeddings as the (nhead+1)-th query/key head.
        k = jnp.concatenate((k, self.E1[None, :, :]), 0) # (nhead+1, T_k, d_k)
        q = jnp.concatenate((q, self.E2[None, :, :]), 0) # (nhead+1, T_q, d_k)
        output = jax.vmap(self.attn, (0, 0, 0, None, 0))(q, k, v, mask, jax.random.split(attn_key, self.nhead+1)) # (nhead+1, T_q, d_v)
        # FIX: merge all nhead+1 heads (was nhead*d_v, a size mismatch).
        output = output.transpose(1, 0, 2).reshape(T_q, (self.nhead+1)*self.d_v)
        output = jax.vmap(self.fc)(output) # (T_q, input_dim)
        output = self.dropout(output, key=dropout_key)
        output = output + residual
        output = jax.vmap(self.norm)(output)
        return output

    
class PositionWiseFeedForward(eqx.Module):
    """Two-layer GELU MLP with dropout, residual connection, and post-LayerNorm."""

    norm: eqx.Module
    dropout: eqx.Module
    linear1: eqx.Module
    linear2: eqx.Module

    def __init__(self, input_dim: int, d_inner: int, dropout_rate: float, key: Array):
        k_in, k_out = jax.random.split(key)
        self.linear1 = eqx.nn.Linear(input_dim, d_inner, key=k_in)
        self.linear2 = eqx.nn.Linear(d_inner, input_dim, key=k_out)
        self.norm = eqx.nn.LayerNorm(input_dim, 1e-6)
        self.dropout = eqx.nn.Dropout(dropout_rate)

    @eqx.filter_jit
    def __call__(self, x: Float[Array, "input_dim"], key: Array):
        """Apply the feed-forward block to a single token vector; returns (input_dim,)."""
        drop_a, drop_b = jax.random.split(key)
        hidden = self.dropout(jax.nn.gelu(self.linear1(x)), key=drop_a)
        projected = self.dropout(self.linear2(hidden), key=drop_b)
        return self.norm(projected + x)
    
class AttnLayer(eqx.Module):
    """Abstract base layer: subclasses must define `attn` (an attention module)
    and `ff` (a per-token feed-forward module)."""

    @eqx.filter_jit
    def __call__(self, X: Float[Array, "T input_dim"], mask: Bool[Array, "T T"], key: Array):
        """Self-attention over X, then the feed-forward block applied token-wise."""
        attn_key, ff_key = jax.random.split(key, 2)
        attended = self.attn(X, X, X, mask, attn_key)
        per_token_keys = jax.random.split(ff_key, attended.shape[0])
        return jax.vmap(self.ff)(attended, per_token_keys)

class SelfAttentionLayer(AttnLayer):
    """AttnLayer built from standard multi-head attention plus a feed-forward block."""

    ff: PositionWiseFeedForward
    attn: eqx.Module

    def __init__(self, input_dim: int, nhead: int, d_k: int, d_v: int, d_inner: int, dropout_rate: float, key: Array):
        attn_key, ff_key = jax.random.split(key, 2)
        self.attn = MultiheadAttention(input_dim, nhead, d_k, d_v, dropout_rate, attn_key)
        self.ff = PositionWiseFeedForward(input_dim, d_inner, dropout_rate, ff_key)

class AdaptiveSelfAttentionLayer(AttnLayer):
    """AttnLayer built from adaptive multi-head attention plus a feed-forward block."""

    attn: AdaptiveMultiheadAttention
    ff: PositionWiseFeedForward

    def __init__(self, input_dim: int, nhead: int, d_k: int, d_v: int, d_inner: int, num_types: int, dropout_rate: float, key: Array):
        attn_key, ff_key = jax.random.split(key, 2)
        # num_types is used for both num_keys and num_queries — presumably the
        # attended sequence length equals num_types; confirm against callers.
        self.attn = AdaptiveMultiheadAttention(input_dim, nhead, d_k, d_v, num_types, num_types, dropout_rate, attn_key)
        self.ff = PositionWiseFeedForward(input_dim, d_inner, dropout_rate, ff_key)

    
class Transformer(eqx.Module):
    """Stack of causal self-attention layers applied to embeddings plus a
    per-position encoding."""

    stack: list  # SelfAttentionLayer instances, applied in order

    def __init__(self, embed_dim: int, nlayers: int, nhead: int, d_k: int, d_v: int, d_inner: int, dropout_rate: float, key: Array):
        self.stack = [
            SelfAttentionLayer(embed_dim, nhead, d_k, d_v, d_inner, dropout_rate, layer_key)
            for layer_key in jax.random.split(key, nlayers)
        ]

    @eqx.filter_jit
    def __call__(self, X: Float[Array, "T embed_dim"], t_enc: Float[Array, "T embed_dim"], key: Array):
        """Run the layer stack under a lower-triangular (causal) mask; returns (T, embed_dim)."""
        hidden = X + t_enc
        seq_len = hidden.shape[0]
        causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))
        layer_keys = jax.random.split(key, len(self.stack))
        for layer, layer_key in zip(self.stack, layer_keys):
            hidden = layer(hidden, causal_mask, layer_key)
        return hidden