import jax
import jax.numpy as jnp
import equinox as eqx
from jaxtyping import Array, Float, Int, Bool, PyTree
from .modules.utils import forward_pass, get_dts # , pad_to_len
 
def llm_logp(x: Float[Array, ""],
            locs: Float[Array, "num_components"],
            log_scales: Float[Array, "num_components"],
            log_weights: Float[Array, "num_components"]):
    """Log-density at x of a mixture of log-normal distributions.

    Component k is LogNormal(locs[k], exp(log_scales[k])); mixture weights
    are exp(log_weights). A small 1e-6 is added before the log so x == 0
    does not produce -inf.
    """
    y = jnp.log(x + 1e-6)
    # per-component normal log-density of log x, weighted in log space
    component_ll = jax.scipy.stats.norm.logpdf(y, locs, jnp.exp(log_scales))
    mixture_ll = jax.nn.logsumexp(component_ll + log_weights)
    # change of variables: p(x) = p_normal(log x) / x
    return mixture_ll - y

def llm_sample(locs: Float[Array, "num_components"],
               log_scales: Float[Array, "num_components"],
               log_weights: Float[Array, "num_components"],
               key: Array,
               num_samples: int):
    """Draw num_samples from the log-normal mixture via reparameterization.

    Returns a (num_samples,) array of strictly positive samples.
    """
    noise_key, component_key = jax.random.split(key)
    z = jax.random.normal(noise_key, (num_samples, ))
    # pick a mixture component per sample, then gather its parameters
    component = jax.random.categorical(component_key, log_weights, shape=(num_samples, ))
    mu = locs[component]            # (num_samples, )
    sigma = jnp.exp(log_scales[component])  # (num_samples, )
    return jnp.exp(mu + sigma * z)  # (num_samples, )

def siren_forward(linear: eqx.nn.Linear, x: Array, omega: float):
    """One SIREN layer: sin(omega * linear(x))."""
    return jnp.sin(omega * linear(x))

def get_subsequent_mask(length: int):
    """Boolean causal mask of shape (length, length): True at (i, j) iff j <= i."""
    ones = jnp.ones((length, length), dtype=bool)
    return jnp.tril(ones)

class SIREN(eqx.Module):
    """Sinusoidal MLP mapping a scalar input to num_channels outputs."""
    in_mapping: eqx.nn.Linear
    hiddens: list
    out_mapping: eqx.nn.Linear
    omega: float

    def __init__(self, d_hidden: int, hid_num: int, num_channels: int, key: Array, omega: float=1.):
        k_in, k_hid, k_out = jax.random.split(key, 3)
        hid_keys = jax.random.split(k_hid, hid_num)
        self.in_mapping = eqx.nn.Linear(1, d_hidden, key=k_in)
        self.hiddens = [eqx.nn.Linear(d_hidden, d_hidden, key=k) for k in hid_keys]
        self.out_mapping = eqx.nn.Linear(d_hidden, num_channels, key=k_out)
        self.omega = omega

    def __call__(self, tau: Float[Array, "[]"]):
        """Evaluate the network at scalar tau; returns a (num_channels,) array."""
        h = siren_forward(self.in_mapping, jnp.broadcast_to(tau, (1, )), self.omega)
        for layer in self.hiddens:
            h = siren_forward(layer, h, self.omega)
        # note: the output projection also passes through the sine nonlinearity
        return siren_forward(self.out_mapping, h, self.omega)
    
class ConvLayer(eqx.Module):
    """Causal continuous-kernel convolution over event times, with residual + LayerNorm.

    A SIREN evaluates one kernel value per channel at every pairwise time lag;
    each channel only attends within its own horizon.
    """
    siren: SIREN
    horizon: list
    out_mapping: eqx.nn.Linear
    norm: eqx.nn.LayerNorm

    def __init__(self, d_model: int, d_hid: int, hid_num: int, horizon: list, key: Array, omega: float=1.):
        k_siren, k_out = jax.random.split(key)
        num_channel = len(horizon)
        self.siren = SIREN(d_hid, hid_num, num_channel, k_siren, omega)
        self.horizon = horizon
        self.out_mapping = eqx.nn.Linear(num_channel*d_model, d_model, key=k_out)
        self.norm = eqx.nn.LayerNorm((d_model, ))

    def __call__(self, feature_seq: Float[Array, "T d_model"], time_seq: Float[Array, "T"]):
        T = feature_seq.shape[0]
        # pairwise lags: rel_tau[i, j] = t_i - t_j
        rel_tau = time_seq[:, None] - time_seq[None, :]  # (T, T)
        horizon = jnp.asarray(self.horizon)  # (num_channels, )
        # causal mask AND per-channel window 0 <= lag <= horizon_c
        causal = get_subsequent_mask(T)  # (T, T)
        in_window = jnp.logical_and(rel_tau >= 0, rel_tau <= horizon[:, None, None])
        mask = jnp.logical_and(causal, in_window)  # (num_channels, T, T)
        # evaluate the continuous kernel at every lag, one value per channel
        kernel = jax.vmap(self.siren)(rel_tau.flatten())  # (T*T, num_channel)
        kernel = kernel.reshape(T, T, kernel.shape[-1]).transpose((2, 0, 1))
        kernel = jnp.where(mask, kernel, 0.)  # (num_channels, T, T)
        # kernel = normalize(kernel)
        # per-channel convolution, then fold channels into the feature axis
        mixed = jnp.matmul(kernel, feature_seq[None, :, :])  # (num_channels, T, d_model)
        mixed = mixed.transpose((1, 0, 2)).reshape(T, horizon.shape[0]*feature_seq.shape[-1])
        projected = jax.vmap(self.out_mapping)(mixed)  # (T, d_model)
        # residual connection followed by per-position LayerNorm
        return jax.vmap(self.norm)(projected + feature_seq)
    
class SIREN_Conv(eqx.Module):
    """A stack of ConvLayers applied sequentially, one per horizon entry."""
    layers: list

    def __init__(self, d_model: int, siren_hid: int, siren_hid_num: int, horizon: list, key:Array, omega: float=1.):
        keys = jax.random.split(key, len(horizon))
        self.layers = [
            ConvLayer(d_model, siren_hid, siren_hid_num, h, k, omega)
            for h, k in zip(horizon, keys)
        ]

    def __call__(self, feature_seq: Float[Array, "T d_model"], time_seq: Float[Array, "T"]):
        """Run the feature sequence through every layer in order."""
        out = feature_seq
        for conv in self.layers:
            out = conv(out, time_seq)
        return out
    
class ConvGRU(eqx.Module):
    """Encoder: SIREN convolution over the event sequence, then a GRU scan."""
    conv: SIREN_Conv
    cell: eqx.nn.GRUCell
    hdim: int

    def __init__(self, hdim: int, embed_dim: int, horizon: list, omega: float, siren_hid_layers: int, key: Array):
        k_cell, k_conv = jax.random.split(key)
        # GRU input is [conv feature, log dt] -> embed_dim + 1
        self.cell = eqx.nn.GRUCell(embed_dim+1, hdim, key=k_cell)
        self.conv = SIREN_Conv(hdim, hdim, siren_hid_layers, horizon, k_conv, omega)
        self.hdim = hdim

    def __call__(self, dts: Float[Array, "T"], embeds: Float[Array, "T embed_dim"]):
        """Encode a whole sequence; returns (final hidden, all hiddens)."""
        ts = jnp.cumsum(dts, -1)  # (T, )
        X = self.conv(embeds, ts)

        def scan_fn(h, inp):
            # scan step: (carry, input) -> (carry, output)
            dt, embed = inp
            h_next = self.rnn_step(h, embed, dt)
            return h_next, h_next

        h0 = jnp.zeros(self.hdim)
        carry, hs = jax.lax.scan(scan_fn, h0, (dts, X[:dts.shape[0]]))
        return carry, hs

    def rnn_step(self, h: Float[Array, "hdim"], embed: Float[Array, "embed_dim"], dt: Float[Array, ""]):
        """One GRU update on the concatenation [embed, log dt]."""
        # clip keeps log well-defined for padded / zero inter-event times
        log_dt = jnp.log(dt.clip(1e-6))
        return self.cell(jnp.concatenate((embed, log_dt[None])), h)

    def step(self, h: Float[Array, "hdim"], embed: Float[Array, "embed_dim"], dt: Float[Array, ""], history: tuple):
        """Online update: append (dt, embed) to history, re-run conv, step the GRU."""
        past_dts, past_embeds = history
        all_dts = jnp.concatenate((past_dts, dt[None]))  # (T+1, )
        all_embeds = jnp.concatenate((past_embeds, embed[None, :]), 0)  # (T+1, embed_dim)
        X = self.conv(all_embeds, jnp.cumsum(all_dts, -1))
        return self.rnn_step(h, X[-1], dt)
    
class CTPP(eqx.Module):
    """Continuous-time point process: ConvGRU encoder with a log-normal-mixture
    head for inter-event times and a categorical head for marks."""
    enc: ConvGRU
    stack: eqx.Module
    embed: eqx.Module
    num_component: int
    num_types: int
    hdim: int

    def __init__(self, hdim: int, num_types: int, embed_dim: int, num_components: int, horizon: list,
                 omega: float, siren_hid_layers: int, key: Array):
        k_enc, k_stack, k_embed = jax.random.split(key, 3)
        self.enc = ConvGRU(hdim, embed_dim, horizon, omega, siren_hid_layers, k_enc)
        # one linear head emits all distribution parameters at once
        self.stack = eqx.nn.Linear(hdim, 3*num_components+num_types, key=k_stack)
        self.embed = eqx.nn.Embedding(num_types, embed_dim, key=k_embed)
        self.num_component = num_components
        self.num_types = num_types
        self.hdim = hdim

    def __call__(self, ts: Float[Array, "T"], marks: Int[Array, "T"], mask: Bool[Array, "T"], key=None):
        """Summed (total, time, mark) log-likelihoods of one event sequence."""
        dts = jnp.where(mask, get_dts(ts), 0.)
        embeds = jax.vmap(self.embed)(marks)
        _, hs = self.enc(dts, embeds)
        # predict event i+1 from the hidden state after event i -> (T-1, ) each
        per_event = jax.vmap(self._get_ll)(hs[:-1], dts[1:], marks[1:], mask[1:])
        ll, dt_ll, mark_ll = (v.sum() for v in per_event)
        return ll, dt_ll, mark_ll

    def _get_ll(self, h: Float[Array, "hdim"], dt: Float[Array, ""], mark: Int[Array, ""], mask: Bool[Array, ""]):
        """Per-event log-likelihoods, zeroed out where mask is False."""
        locs, log_scales, log_weights, mark_logits = self._get_dist(h)
        dt_ll = jnp.where(mask, llm_logp(dt, locs, log_scales, log_weights), 0.)
        mark_ll = jnp.where(mask, mark_logits[mark], 0.)
        return dt_ll + mark_ll, dt_ll, mark_ll

    def _get_dist(self, h: Float[Array, 'hdim']):
        """Slice the head output into mixture params and mark log-probs."""
        params = self.stack(h)  # (3*num_components+num_types, )
        n = self.num_component
        locs = params[:n]
        log_scales = params[n:2*n]
        log_weights = jax.nn.log_softmax(params[2*n:3*n], axis=-1)  # (num_components, )
        mark_logits = jax.nn.log_softmax(params[3*n:], -1)
        return locs, log_scales, log_weights, mark_logits

    def _predict(self, h: Float[Array, "hdim"], dt_max: Float[Array, ""]):
        """Point predictions: mixture mean of dt (capped at dt_max) and argmax mark."""
        locs, log_scales, log_weights, mark_logits = self._get_dist(h)  # (num_components, )
        # mean of LogNormal(mu, sigma): exp(mu + sigma^2 / 2), weighted per component
        component_means = jnp.exp(locs + jnp.exp(log_scales)**2/2)
        Efdt = (component_means * jnp.exp(log_weights)).sum()  # ()
        Efdt = jnp.clip(Efdt, max=dt_max)
        return Efdt, mark_logits.argmax(-1)

    @eqx.filter_jit
    def rolling_predict(self, ts: Float[Array, "T"], marks: Int[Array, "T"], mask: Bool[Array, "T"], dt_max: float, key=None):
        """Teacher-forced one-step-ahead predictions plus the aligned targets."""
        dts = jnp.where(mask, get_dts(ts), 0.)
        embeds = jax.vmap(self.embed)(marks)
        _, hs = self.enc(dts, embeds)
        dt_predict, mark_predict = jax.vmap(self._predict, (0, None))(hs[:-1], dt_max)
        return (dt_predict, mark_predict), (dts[1:], marks[1:]), mask[1:]