import equinox as eqx
from jaxtyping import Array, Float, Int, Bool, PyTree
import jax.numpy as jnp
import jax
from .modules.utils import forward_pass, get_dts
from .modules.ode import integrate_sde

def init_linear(linear: eqx.nn.Linear, sigma: float, key):
    """Return a copy of `linear` re-initialized in place of its default init.

    The weight is drawn from N(0, sigma^2) and the bias from U(-sigma, sigma);
    the original module is not mutated (a new pytree is returned).
    """
    w_key, b_key = jax.random.split(key)
    new_weight = jax.random.normal(w_key, linear.weight.shape) * sigma
    new_bias = jax.random.uniform(b_key, linear.bias.shape, minval=-sigma, maxval=sigma)
    updated = eqx.tree_at(lambda mod: mod.weight, linear, new_weight)
    updated = eqx.tree_at(lambda mod: mod.bias, updated, new_bias)
    return updated

class MLP(eqx.Module):
    """Fully-connected tanh network built from `eqx.nn.Linear` layers.

    Architecture: dim_in -> dim_hidden (x num_hidden) -> dim_out, with tanh
    after every linear layer — including the output layer, so outputs lie in
    (-1, 1). Each layer is re-initialized via `init_linear` with sigma=.01.
    """
    layers: list

    def __init__(self, dim_in, dim_out, dim_hidden, num_hidden, key):
        keys = jax.random.split(key, num_hidden + 1)
        # (fan_in, fan_out, key) per linear layer; key assignment mirrors the
        # original scheme: keys[-1] for the input layer, keys[0..num_hidden-2]
        # for the hidden layers, keys[-2] for the output layer.
        specs = [(dim_in, dim_hidden, keys[-1])]
        specs.extend((dim_hidden, dim_hidden, keys[i]) for i in range(num_hidden - 1))
        specs.append((dim_hidden, dim_out, keys[-2]))
        stack = []
        for fan_in, fan_out, layer_key in specs:
            layer = init_linear(eqx.nn.Linear(fan_in, fan_out, key=layer_key), .01, layer_key)
            stack.append(layer)
            stack.append(jax.nn.tanh)
        self.layers = stack

    def __call__(self, x: Float[Array, "dim_in"]):
        """Apply the layer stack to a single (unbatched) input vector."""
        return forward_pass(self.layers, x)
    
class Func(eqx.Module):
    """Time-homogeneous vector field (t, y) -> net(y) for the latent SDE.

    Input and output dimension are both `num_types`, so this can serve as
    either a drift or a diffusion/control term on the latent state.
    """
    net: MLP

    def __init__(self, num_types, dim_hidden, num_hidden, key):
        # Square field: maps R^num_types -> R^num_types.
        self.net = MLP(num_types, num_types, dim_hidden, num_hidden, key)

    def __call__(self, t, y, args=None):
        # `t` and `args` exist only to match the (t, y, args) solver signature;
        # the field itself does not depend on time.
        del t, args
        return self.net(y)
    
# class IntensityFunc(eqx.Module):
#     func: Func

#     def __init__(self, num_types, dim_hidden, num_hidden, key):
#         self.func = Func(num_types, num_types, dim_hidden, num_hidden, key)

#     def __call__(self, t, state, args):
#         Lambda, log_intensities = state
#         intensity = jnp.exp(log_intensities).sum()
#         d_log_intensities = self.func(t, log_intensities, args)
#         return intensity, d_log_intensities    

class Encoder(eqx.Module):
    """Latent log-intensity SDE encoder for a marked temporal point process.

    The latent state `eta` holds per-type log-intensities. Between events it
    evolves under an Euler–Maruyama SDE with drift `f` and control (diffusion)
    `g`; at each observed event it jumps by the mark-selected row of `h(eta)`.
    A running scalar `Lambda` accumulates the total-intensity integral
    (compensator) along the way.
    """
    f: Func            # drift network of the latent SDE
    g: Func            # diffusion/control network of the latent SDE
    h: MLP             # jump network: eta -> flattened (num_types, num_types) update table
    init_state: Array  # learned-ish initial latent state (random at construction)
    num_types: int     # number of event types == latent dimension
    num_steps: int     # Euler sub-steps per inter-event interval

    def __init__(self, num_types, dim_hidden, num_hidden, num_steps: int, key):
        key1, key2, key3, key4 = jax.random.split(key, 4)
        self.f = Func(num_types, dim_hidden, num_hidden, key1)
        self.g = Func(num_types, dim_hidden, num_hidden, key2)
        self.h = MLP(num_types, num_types * num_types, dim_hidden, num_hidden, key3)
        self.init_state = jax.random.normal(key4, (num_types,)) * 0.1
        self.num_types = num_types
        self.num_steps = num_steps

    def step(self, state0: PyTree, dt: Float[Array, ""], mark: Int[Array, ""], key):
        """Advance one inter-event interval, then apply the jump for `mark`.

        state0: (Lambda0, eta0) — compensator so far and pre-interval latent state.
        Returns (Lambda1, eta1, eta_post, sub_key): updated compensator, the
        pre-jump state at the event time, the post-jump state, and the leftover
        PRNG key.
        """
        Lambda0, eta0 = state0
        eta1, etas, sub_key = EulerSolver(self.f, self.g, eta0, dt, self.num_steps, key)
        # Mark-conditioned jump: row `mark` of the (num_types, num_types) table.
        jump_table = self.h(eta1).reshape(self.num_types, self.num_types)
        eta_post = eta1 + jump_table[mark]
        # Left-Riemann estimate of the compensator increment over [0, dt]:
        # `etas` holds the state at the START of each sub-interval.
        all_intensities = jnp.exp(etas)  # (steps, num_types)
        all_intensity = all_intensities.sum(-1)  # (steps, )
        mean_intensity = all_intensity.mean()
        Lambda1 = Lambda0 + mean_intensity * dt
        return Lambda1, eta1, eta_post, sub_key

    def extrapolate_multiple(self, eta0: Float[Array, "num_types"], dts: Float[Array, "num_samples"], key, num_steps: int = 10):
        """Roll the SDE forward through successive intervals without jumps.

        num_steps: Euler sub-steps per interval. Defaults to 10, the value
        previously hard-coded here (note: independent of `self.num_steps`).
        Returns the latent state after each interval, shape (num_samples, num_types).
        """
        def body(carry, dt):
            eta, cur_key = carry
            eta1, _, sub_key = EulerSolver(self.f, self.g, eta, dt, num_steps, cur_key)
            return (eta1, sub_key), eta1
        _, etas = jax.lax.scan(body, (eta0, key), dts)  # (num_samples, num_types)
        return etas

    def __call__(self, dts: Float[Array, "T"], marks: Int[Array, "T"], key):
        """Encode a whole sequence of inter-event times and marks.

        Returns (eta_posts, eta_priors, Lambda, eta_end): post-jump and
        pre-jump latent states per event (each (T, num_types)), the total
        accumulated compensator, and the final post-jump state.
        """
        state0 = (jnp.asarray(0.), self.init_state, key)
        def body(carry, x):
            dt, mark = x
            Lambda0, eta0, cur_key = carry
            Lambda1, eta1, eta_post, sub_key = self.step((Lambda0, eta0), dt, mark, cur_key)
            # Carry the POST-jump state forward; emit both prior and post.
            return (Lambda1, eta_post, sub_key), (eta1, eta_post)
        (Lambda, eta_end, _), (eta_priors, eta_posts) = jax.lax.scan(body, state0, (dts, marks))
        return eta_posts, eta_priors, Lambda, eta_end

def EulerSolver(drift, control, eta0: Float[Array, "num_types"], dt: Float[Array, ""], steps: int, key):
    """Euler–Maruyama integration of d(eta) = drift*dt + control*dW over [0, dt].

    drift, control: callables (t, eta) -> array of eta's shape.
    Returns (eta_end, path, leftover_key) where `path` has shape
    (steps, num_types) and holds the state at the START of each sub-interval
    (so path[0] == eta0); `eta_end` is the state at time dt.
    """
    grid = jnp.linspace(0., dt, steps + 1)[1:]  # right endpoint of each sub-interval

    def body(carry, t_next):
        t_cur, eta_cur, rng = carry
        interval = t_next - t_cur
        # Use the first split for this step's noise; carry the second forward.
        rng, rng_next = jax.random.split(rng)
        eta_next = eta_cur + drift(t_cur, eta_cur) * interval \
            + control(t_cur, eta_cur) * jax.random.normal(rng, eta_cur.shape) * jnp.sqrt(interval)
        return (t_next, eta_next, rng_next), eta_cur

    (_, eta_end, leftover_key), path = jax.lax.scan(body, (jnp.asarray(0.), eta0, key), grid)
    return eta_end, path, leftover_key
    
class NJSDE(eqx.Module):
    """Neural jump SDE point-process model: wraps an `Encoder` and exposes
    log-likelihood, encoding and next-event-prediction entry points."""
    encoder: Encoder

    def __init__(self, num_types, dim_hidden, num_hidden, num_steps: int, key):
        self.encoder = Encoder(num_types, dim_hidden, num_hidden, num_steps, key)


    @eqx.filter_jit
    def __call__(self, ts: Float[Array, "T"], marks: Int[Array, "T"], mask: Bool[Array, "T"], key):
        """Log-likelihood of one (padded) event sequence.

        Returns (ll, time_ll, mark_ll): total log-likelihood, its timing
        component (ll - mark_ll), and the mark component.
        """
        dts = get_dts(ts)
        # Zero out padded intervals so they contribute nothing downstream.
        dts = jnp.where(mask, dts, 0.) # (T)
        log_intensities_posts, log_intensities_priors, Lambda, log_intensities_end = self.encoder(dts, marks, key)
        # log_intensities = jnp.where(mask[1:], log_intensities_priors[1:], 0.) # (T-1, num_types)
        # Event terms use the PRE-jump (prior) latent state at each event time.
        log_intensity, mark_ll = jax.vmap(get_ll)(log_intensities_priors[1:], marks[1:], mask[1:]) # (T-1, ), (T-1, )
        mark_ll = mark_ll.sum()
        # Point-process log-likelihood: event log-intensities minus compensator.
        ll = log_intensity.sum() - Lambda
        time_ll = ll - mark_ll
        return ll, time_ll, mark_ll
    
    def _predict(self, eta0: Float[Array, "num_types"], dt_max: Float[Array, ""], key):
        """Predict the next event's expected delay and most likely mark.

        Simulates one SDE path on [0, dt_max], builds the next-event-time
        density f(t) = intensity(t) * exp(-Lambda(t)) on the simulated grid,
        integrates t*f(t) by the trapezoid rule to get the expected delay,
        then re-simulates up to that delay and takes the argmax log-intensity
        as the predicted mark.
        NOTE(review): the expectation is truncated at dt_max and the density
        comes from a single stochastic path.
        """
        eta1, etas, sub_key = EulerSolver(self.encoder.f, self.encoder.g, eta0, dt_max, self.encoder.num_steps, key)
        # Append the endpoint so the grid covers all steps+1 time points.
        etas = jnp.concatenate((etas, eta1[None]))
        intensities = jnp.exp(etas) # (steps+1, num_types) 0~steps
        intensity = intensities.sum(-1) # (steps+1, ) 0~steps
        mid_intensity = (intensity[1:] + intensity[:-1]) / 2 # (steps, ) segments before 1~steps
        step_size = dt_max / self.encoder.num_steps # ()
        Lambda_segments = mid_intensity * step_size # (steps,) segments before 1~steps
        # Cumulative compensator via trapezoid rule, anchored at Lambda(0) = 0.
        Lambda = Lambda_segments.cumsum(-1) # (steps, ) 1~steps
        Lambda = jnp.concatenate((jnp.asarray([0,]), Lambda)) # (steps+1, ) 0~steps
        ft = intensity * jnp.exp(-Lambda) # (steps+1, ) 0~steps
        t = jnp.linspace(0., dt_max, self.encoder.num_steps+1)
        t_ft = t * ft # (steps+1, ) 0~steps
        mid_t_ft = (t_ft[1:] + t_ft[:-1]) / 2 # (steps, ) segments before 1~steps
        # Expected delay E[t] ≈ ∫ t f(t) dt over [0, dt_max].
        Efdt = (mid_t_ft * step_size).sum()
        # Re-simulate to the expected delay; mark = type with highest log-intensity.
        eta1, etas, sub_key = EulerSolver(self.encoder.f, self.encoder.g, eta0, Efdt, self.encoder.num_steps, sub_key)

        mark_predict = eta1.argmax(-1)
        return Efdt, mark_predict
    
    @eqx.filter_jit
    def encode(self, ts: Float[Array, "T"], marks: Int[Array, "T"], mask: Bool[Array, "T"], key):
        """Return the post-jump latent states after each event, (T, num_types)."""
        dts = get_dts(ts)
        dts = jnp.where(mask, dts, 0.) # (T)
        eta_posts, _, _, _ = self.encoder(dts, marks, key)
        return eta_posts

    @eqx.filter_jit
    def intensities_at(self, eta0: Float[Array, "num_types"], dts: Float[Array, "num_samples"], key):
        """Extrapolate from eta0 through `dts` and return per-type intensities."""
        etas = self.encoder.extrapolate_multiple(eta0, dts, key)
        intensities = jnp.exp(etas) # (num_samples, num_types)
        return intensities

    @eqx.filter_jit
    def rolling_predict(self, ts: Float[Array, "T"], marks: Int[Array, "T"], mask: Bool[Array, "T"], dt_max: float, key=None):
        """One-step-ahead prediction after every observed event.

        Returns ((dt_predicts, mark_predicts), (dts[1:], marks[1:]), mask[1:])
        so predictions can be scored against the next observed event.
        NOTE(review): the `key=None` default will fail inside
        `jax.random.split`; callers must supply a PRNG key.
        """
        dts = get_dts(ts)
        # NOTE(review): `.clip(min=...)` requires a JAX version with the
        # array-API keyword names — confirm against the pinned jax version.
        dts = jnp.where(mask, dts, 0.).clip(min=0.) # (T)
        eta_posts, eta_priors, Lambda, eta_end = self.encoder(dts, marks, key)
        # Predict event i+1 from the post-jump state after event i.
        dt_predicts, mark_predicts = jax.vmap(self._predict, (0, None, 0))(eta_posts[:-1], dt_max, jax.random.split(key, ts.shape[0]-1))
        return (dt_predicts, mark_predicts), (dts[1:], marks[1:]), mask[1:]
       
def get_ll(log_intensities: Float[Array, "num_types"], mark: Int[Array, ""], mask: Bool[Array, ""]):
    """Per-event likelihood terms for one event.

    Returns (log_intensity, mark_ll): the log-intensity of the observed mark
    and the log-probability of the mark under a softmax over all types. Both
    are zeroed when `mask` is False so padded events contribute nothing.
    """
    event_term = log_intensities[mark]                        # ()
    mark_term = jax.nn.log_softmax(log_intensities, -1)[mark] # ()
    log_intensity = jnp.where(mask, event_term, 0.)
    mark_ll = jnp.where(mask, mark_term, 0.)
    return log_intensity, mark_ll