import jax
import jax.numpy as jnp
import equinox as eqx
from jaxtyping import Array, Float, Int, Bool, PyTree
from .modules.utils import forward_pass, get_dts # , pad_to_len
from .modules.transformer import Transformer
from .modules.temporal_enc import ShiftedTemporalEnc

class RNN(eqx.Module):
    """Simple recurrent encoder: folds (dt, embedding) pairs into hidden states."""

    hidden_layer: list  # [Linear(embed_dim+hdim+1 -> hdim), activation]
    hdim: int           # hidden-state width

    def __init__(self, hdim: int, embed_dim: int, key: Array):
        """Build the single recurrent cell.

        Args:
            hdim: hidden-state dimensionality.
            embed_dim: event-embedding dimensionality.
            key: PRNG key for the linear layer's initialization.
        """
        # ReLU rather than exp keeps activations from overflowing.
        self.hidden_layer = [
            eqx.nn.Linear(embed_dim + hdim + 1, hdim, key=key),
            jax.nn.relu,
        ]
        self.hdim = hdim

    @eqx.filter_jit
    def step(self, h: Float[Array, "hdim"], embed: Float[Array, "embed_dim"], dt: Float[Array, ""]):
        """One recurrence: h' = f([h; embed; dt])."""
        stacked = jnp.concatenate((h, embed, dt[None]), -1)  # (hdim+embed_dim+1,)
        return forward_pass(self.hidden_layer, stacked)

    def __call__(self, dts: Float[Array, "T"], embeds: Float[Array, "T embed_dim"]):
        """Scan the cell over the sequence; returns (final_h, all_hidden_states)."""
        def scan_fn(carry, inputs):
            # inputs = (dt: (), embed: (embed_dim,)); carry doubles as the per-step output.
            dt, embed = inputs
            new_h = self.step(carry, embed, dt)
            return new_h, new_h

        init = jnp.zeros(self.hdim)
        return jax.lax.scan(scan_fn, init, (dts, embeds))

def init_linear_positive(in_dim, out_dim, key):
    """Create an eqx.nn.Linear whose weight matrix is elementwise non-negative.

    Taking |W| at initialization keeps the layer monotone non-decreasing in its
    input — presumably required so the cumulative-hazard network stays
    monotone in dt (TODO confirm against the model derivation).
    """
    base = eqx.nn.Linear(in_dim, out_dim, key=key)
    return eqx.tree_at(lambda mod: mod.weight, base, replace_fn=jnp.abs)

class CumulHazardFunctionNetwork(eqx.Module):
    """Computes the total cumulative hazard Lambda(dt | h), summed over event types.

    Every linear layer is built by ``init_linear_positive`` (non-negative
    weights), and the activations used (tanh, softplus) are monotone
    non-decreasing, so the output is non-decreasing in ``dt``; its gradient
    w.r.t. ``dt`` can therefore serve as a non-negative intensity.
    """

    layer1: eqx.Module  # embeds the scalar dt into hdim
    layer2: eqx.Module  # mixes [h; dt-embedding] (2*hdim) down to hdim
    mlp_layers: list    # (layers-1) positive-weight hidden layers
    layer3: list        # projection to per-type hazards, then softplus

    def __init__(self, hdim: int, layers: int, num_types: int, key: Array):
        """Initialize the hazard network.

        Args:
            hdim: hidden width.
            layers: hidden depth; must be >= 1 (layers-1 extra MLP layers).
            num_types: number of event types (per-type hazards before summing).
            key: PRNG key.
        """
        # layers+2 independent keys: dt-embed, mixer, (layers-1) MLP, output.
        # The loop consumes keys[0 .. layers-2]; keys[-3] (= keys[layers-1])
        # is untouched by it, so all layers receive distinct keys.
        keys = jax.random.split(key, layers + 2)
        self.layer1 = init_linear_positive(1, hdim, key=keys[-1])
        self.layer2 = init_linear_positive(hdim * 2, hdim, key=keys[-2])
        self.mlp_layers = [
            init_linear_positive(hdim, hdim, key=keys[i])
            for i in range(layers - 1)
        ]
        self.layer3 = [
            init_linear_positive(hdim, num_types, key=keys[-3]),
            jax.nn.softplus
        ]

    @eqx.filter_jit
    def __call__(self, dt: Float[Array, ""], h: Float[Array, "hdim"]):
        """Return the scalar total cumulative hazard for elapsed time ``dt``.

        Args:
            dt: scalar elapsed time since the last event. (Annotation fixed
                from "[]" to "" — jaxtyping's spelling for a 0-d array,
                consistent with the rest of the file.)
            h: (hdim,) history embedding from the encoder.

        Returns:
            () — per-type hazards summed into a single scalar.
        """
        temp = self.layer1(dt[None])           # (hdim,)
        temp = jnp.concatenate((h, temp), -1)  # (2*hdim,)
        temp = jax.nn.tanh(self.layer2(temp))
        for layer in self.mlp_layers:
            temp = jax.nn.tanh(layer(temp))
        Lambda = forward_pass(self.layer3, temp)  # (num_types,)
        sum_Lambda = Lambda.sum()  # ()
        return sum_Lambda

class FullyNN(eqx.Module):
    """Fully-neural temporal point process: an RNN history encoder feeding a
    monotone cumulative-hazard network; log-likelihood is computed via the
    gradient of the cumulative hazard w.r.t. the inter-event time.
    """

    enc: RNN                             # history encoder over (dt, embed) pairs
    net: CumulHazardFunctionNetwork      # Lambda(dt | h), total cumulative hazard
    embed: eqx.Module                    # event-type embedding table

    def __init__(self, hdim: int, num_types: int, embed_dim: int, nlayers: int, key: Array):
        """Initialize encoder, hazard network, and mark-embedding table.

        Args:
            hdim: hidden width shared by encoder and hazard net.
            num_types: number of event types.
            embed_dim: mark-embedding dimensionality.
            nlayers: hidden depth of the hazard network.
            key: PRNG key, split three ways.
        """
        key1, key2, key3 = jax.random.split(key, 3)
        self.enc = RNN(hdim, embed_dim, key1)
        self.net = CumulHazardFunctionNetwork(hdim, nlayers, num_types, key2)
        self.embed = eqx.nn.Embedding(num_types, embed_dim, key=key3)

    def __call__(self, ts: Float[Array, "T"], marks: Int[Array, "T"], mask: Bool[Array, "T"], key=None):
        """Return the sequence log-likelihood (summed over valid steps).

        Args:
            ts: (T,) absolute event times.
            marks: (T,) integer event types.
            mask: (T,) validity mask; padded steps contribute 0.
            key: unused; accepted for interface compatibility with callers
                that pass a PRNG key (e.g. stochastic models).

        Returns:
            (dt_ll, dt_ll, 0.) — NOTE(review): the same scalar is returned
            twice with a 0. placeholder; presumably the caller expects a
            (time_ll, total_ll, mark_ll) triple — confirm against the
            training loop.
        """
        dts = get_dts(ts)
        # Zero out dts at masked (padded) positions so they don't affect the encoder.
        dts = jnp.where(mask, dts, 0.)
        embeds = jax.vmap(self.embed)(marks)
        _, hs = self.enc(dts, embeds)
        # Predict step i+1 from the state after step i: align hs[:-1] with
        # the next step's dt/mark/mask.
        dt_ll, _, _ = jax.vmap(self._get_ll)\
                    (hs[:-1], 
                     dts[1:], 
                     marks[1:], 
                     mask[1:]) # (T-1, )
        dt_ll = dt_ll.sum()
        return dt_ll, dt_ll, 0.

    def _get_ll(self, h: Float[Array, "hdim"], dt: Float[Array, ""], mark: Int[Array, ""], mask: Bool[Array, ""]):
        """Per-step log-likelihood: log lambda(dt|h) - Lambda(dt|h).

        The intensity lambda is obtained as d(Lambda)/d(dt) via autodiff;
        1e-6 guards the log against a zero gradient.

        NOTE(review): ``mark`` is accepted but never used — the likelihood is
        built from the hazard summed over all types, so mark identity does
        not enter the loss. Confirm this is intended (a marked TPP would use
        the mark-specific intensity here).
        """
        Lambda, grad = eqx.filter_value_and_grad(self.net)(dt, h)
        dt_ll = jnp.log(grad+1e-6) - Lambda
        # Masked steps contribute exactly zero to the summed likelihood.
        dt_ll = jnp.where(mask, dt_ll, 0.)
        return dt_ll, dt_ll, 0.
    
    def intensity_at(self, h: Float[Array, "hdim"], dt: Float[Array, ""]):
        """Total intensity at elapsed time dt: d(Lambda)/d(dt), summed over types."""
        _, grad = eqx.filter_value_and_grad(self.net)(dt, h)
        return grad