import jax
import jax.numpy as jnp
import equinox as eqx
from jaxtyping import Array, Float, Int, Bool, PyTree
from .modules.utils import forward_pass, get_dts # , pad_to_len
from .modules.ode import integrate

class RNN(eqx.Module):
    """Minimal recurrent cell: one linear layer + ReLU over [h, embed, dt].

    The hidden state is updated per event from the previous state, the
    event-mark embedding, and the inter-event time.
    """
    hidden_layer: list  # [Linear(embed_dim+hdim+1 -> hdim), relu]
    hdim: int           # hidden-state dimensionality

    def __init__(self, hdim: int, embed_dim: int, key: Array):
        # ReLU rather than exp as the nonlinearity, so activations can't overflow.
        linear = eqx.nn.Linear(embed_dim + hdim + 1, hdim, key=key)
        self.hidden_layer = [linear, jax.nn.relu]
        self.hdim = hdim

    def step(self, h: Float[Array, "hdim"], embed: Float[Array, "embed_dim"], dt: Float[Array, ""]):
        """Advance the hidden state by one event; returns the new state (hdim,)."""
        features = jnp.concatenate((h, embed, dt[None]), -1)  # (hdim+embed_dim+1,)
        return forward_pass(self.hidden_layer, features)

    def __call__(self, dts: Float[Array, "T"], embeds: Float[Array, "T embed_dim"]):
        """Roll the cell over a whole sequence with lax.scan.

        Returns (final_state, all_states) where all_states has shape (T, hdim).
        """
        def scan_fn(state, inputs):
            # scan contract: (carry, x) -> (carry, output); we emit each new state.
            dt, embed = inputs
            new_state = self.step(state, embed, dt)
            return new_state, new_state

        init = jnp.zeros(self.hdim)
        final_state, states = jax.lax.scan(scan_fn, init, (dts, embeds))
        return final_state, states
    
class RMTPP(eqx.Module):
    """Recurrent Marked Temporal Point Process.

    An RNN summarizes the event history into a hidden state h; from h the
    model scores the next mark (categorical logits) and the next inter-event
    time via the intensity parameterization
        log f(dt | h) = (b_h + w*dt) + (1/w) * e^{b_h} * (1 - e^{w*dt})
    where b_h is a learned linear readout of h and w = self.omega.
    """
    rnn: RNN                    # history encoder
    h_stack: eqx.nn.Linear      # hdim -> num_types+1: [mark logits..., time bias]
    omega: jax.Array            # scalar decay parameter w of the intensity
    embed: eqx.nn.Embedding     # mark -> embed_dim vector

    def __init__(self, hdim: int, num_types: int, embed_dim: int, key: Array):
        # BUG FIX: previously key1 was reused for both the RNN and the embedding
        # table, correlating their random initializations. Split into four
        # independent keys so every component gets its own randomness.
        key1, key2, key3, key4 = jax.random.split(key, 4)
        self.rnn = RNN(hdim, embed_dim, key1)
        self.embed = eqx.nn.Embedding(num_types, embed_dim, key=key2)
        self.h_stack = eqx.nn.Linear(hdim, num_types + 1, key=key3)
        self.omega = jax.random.normal(key4, ())

    def __call__(self, ts: Float[Array, "T"], marks: Int[Array, "T"], mask: Bool[Array, "T"], key=None):
        """Total, time, and mark log-likelihood of a (padded) event sequence.

        Each state hs[i] predicts event i+1, so states are paired with the
        one-step-shifted targets. Returns scalar (ll, dt_ll, mark_ll) sums.
        """
        dts = get_dts(ts)                 # inter-event times (project utils)
        dts = jnp.where(mask, dts, 0.)    # zero out padded positions
        embeds = jax.vmap(self.embed)(marks)
        _, hs = self.rnn(dts, embeds)
        ll, dt_ll, mark_ll = jax.vmap(self._get_ll)\
                    (hs[:-1],
                     dts[1:],
                     marks[1:],
                     mask[1:])            # each (T-1,)
        ll = ll.sum()
        dt_ll = dt_ll.sum()
        mark_ll = mark_ll.sum()
        return ll, dt_ll, mark_ll

    @eqx.filter_jit
    def _get_ll(self, h: Float[Array, "hdim"], dt: Float[Array, ""], mark: Int[Array, ""], mask: Bool[Array, ""]):
        """Per-event log-likelihood given the preceding hidden state.

        Returns (ll, dt_ll, mark_ll), each zeroed when mask is False.
        """
        temp = self.h_stack(h)            # (num_types+1,): mark logits + time bias
        t_temp = self.omega * dt
        h_temp = temp[-1]                 # time bias b_h
        term1 = h_temp + t_temp
        # (1/w) e^{b_h} (1 - e^{w*dt}); NOTE(review): numerically fragile when
        # omega is near 0 or t_temp is large — confirm inputs keep this stable.
        term2 = 1 / self.omega * jnp.exp(h_temp)*(1-jnp.exp(t_temp))
        dt_ll = term1 + term2             # log f(dt | h)
        type_logits = jax.nn.log_softmax(temp[:-1]) # (num_types,)
        mark_ll = type_logits[mark]       # log p(mark | h)
        dt_ll = jnp.where(mask, dt_ll, 0.)
        mark_ll = jnp.where(mask, mark_ll, 0.)
        ll = dt_ll + mark_ll
        return ll, dt_ll, mark_ll


    def _predict(self, h: Float[Array, "hdim"], dt_max: float):
        """Predict the next inter-event time and mark from a hidden state.

        Time prediction is E[dt] ≈ ∫_0^dt_max t * f(t|h) dt, computed with the
        project `integrate` helper; mark prediction is the argmax logit.
        """
        int_tf0 = jnp.asarray(0., dtype=jnp.float32)
        def func(t, int_tf, args=None):
            # Integrand t * f(t|h); `int_tf`/`args` are unused but presumably
            # required by the `integrate` callback signature — TODO confirm.
            temp = self.h_stack(h) # (num_types+1,)
            t_temp = self.omega * t
            h_temp = temp[-1]
            term1 = h_temp + t_temp
            term2 = 1 / self.omega * jnp.exp(h_temp)*(1-jnp.exp(t_temp))
            dt_ll = term1 + term2
            f_dt = jnp.exp(dt_ll)          # density f(t|h)
            tf = t * f_dt
            return tf
        Efdt = integrate(func, 0., dt_max, int_tf0, None)
        mark_val = self.h_stack(h)[:-1]    # mark logits only
        mark_predict = mark_val.argmax(-1)
        return Efdt, mark_predict

    @eqx.filter_jit
    def rolling_predict(self, ts: Float[Array, "T"], marks: Int[Array, "T"], mask: Bool[Array, "T"], dt_max: float, key=None):
        """One-step-ahead predictions over a sequence.

        Returns ((dt_pred, mark_pred), (dt_true, mark_true), mask), all
        aligned to targets 1..T-1 (state hs[i] predicts event i+1).
        """
        dts = get_dts(ts)
        dts = jnp.where(mask, dts, 0.)
        embeds = jax.vmap(self.embed)(marks)
        _, hs = self.rnn(dts, embeds) # (T, hdim)
        dt_predict, mark_predict = jax.vmap(self._predict, (0, None))(hs[:-1], dt_max) # (T-1,)
        return (dt_predict, mark_predict), (dts[1:], marks[1:]), mask[1:]