from functools import partial
from typing import Callable, List, Union

import jax
import jax.numpy as jnp

# In JAX, models are often represented as a combination of a parameter structure (a PyTree)
# and functions that operate on those parameters. We'll define a generic `ModelApply`
# function type to represent the model's forward pass.
from flax.core import FrozenDict
from jax import random
from jax.lax import scan

# Generic parameter container: Flax modules produce FrozenDict, but plain dicts work too.
PyTree = Union[FrozenDict, dict]
# Forward-pass signature shared by all models in this module:
# (params, trajectory, transport_ratio, conditioning) -> prediction.
ModelApply = Callable[[PyTree, jnp.ndarray, jnp.ndarray, jnp.ndarray], jnp.ndarray]


def func_transport_ratio_linear(ratio: jnp.ndarray) -> jnp.ndarray:
    """Identity schedule: the transport ratio is used unchanged."""
    return ratio


def func_transport_ratio_cosine_1(ratio: jnp.ndarray) -> jnp.ndarray:
    """Cosine easing schedule mapping [0, 1] onto itself (slow start/end, fast middle)."""
    return (1.0 - jnp.cos(jnp.pi * ratio)) * 0.5


def func_transport_ratio_cosine_2(ratio: jnp.ndarray) -> jnp.ndarray:
    """Cosine easing applied twice, for a steeper ease-in/ease-out than cosine_1."""
    eased_once = 0.5 * (1.0 - jnp.cos(jnp.pi * ratio))
    return 0.5 * (1.0 - jnp.cos(jnp.pi * eased_once))


# Registry mapping schedule names to transport-ratio transformation functions.
TRANSPORT_FUNCS = dict(
    linear=func_transport_ratio_linear,
    cosine_1=func_transport_ratio_cosine_1,
    cosine_2=func_transport_ratio_cosine_2,
)


def transport_interpolate(
    tensor_src: jnp.ndarray,
    tensor_dst: jnp.ndarray,
    tensor_ratio: jnp.ndarray,
    transport_func: Callable[[jnp.ndarray], jnp.ndarray] = func_transport_ratio_linear,
) -> jnp.ndarray:
    """Blend source and target samples at the (transformed) transport ratio.

    Args:
        tensor_src: Source samples (size_batch, size_length, size_channel).
        tensor_dst: Target samples (size_batch, size_length, size_channel).
        tensor_ratio: Transport ratio in [0:1] (size_batch, 1).
        transport_func: Transformation function applied to the transport ratio.
    Returns:
        Convex combination of source and target at the transformed ratio
        (size_batch, size_length, size_channel).
    """
    # Transform the ratio, then add a trailing axis so the (size_batch, 1)
    # weights broadcast over the (length, channel) dimensions.
    blend_weight = transport_func(tensor_ratio)[:, :, None]
    return (1.0 - blend_weight) * tensor_src + blend_weight * tensor_dst


def train_loss(
    model_apply: ModelApply,
    model_params: PyTree,
    rng: jnp.ndarray,
    traj_src: jnp.ndarray,
    tensor_cond: Union[jnp.ndarray, None],
    traj_dst: jnp.ndarray,
    weights: Union[jnp.ndarray, None] = None,
    transport_func: Callable[[jnp.ndarray], jnp.ndarray] = func_transport_ratio_linear,
) -> jnp.ndarray:
    """Compute the transport-flow training loss for one batch.

    A per-sample transport ratio is drawn uniformly, source and target are
    blended at that ratio, and the model is regressed onto the displacement
    (traj_dst - traj_src) at the blended point.

    Args:
        model_apply: The model's apply function (forward pass).
        model_params: The model's parameters.
        rng: JAX random key.
        traj_src: Source trajectory samples (size_batch, size_length, size_channel).
        tensor_cond: Optional conditioning tensor (size_batch, size_cond), or None.
        traj_dst: Target trajectory samples (size_batch, size_length, size_channel).
        weights: Optional weights applied to batched loss errors (size_batch).
        transport_func: Transformation function for the transport ratio.
    Returns:
        Scalar loss tensor (1).
    """
    size_batch = traj_src.shape[0]

    sample_weights = jnp.ones(size_batch) if weights is None else weights
    # Shape (size_batch, 1, 1) so the weights broadcast over (length, channel).
    sample_weights = sample_weights[:, None, None]

    _, ratio_key = random.split(rng)
    tensor_ratio = random.uniform(ratio_key, (size_batch, 1))

    traj_blend = transport_interpolate(traj_src, traj_dst, tensor_ratio, transport_func)

    # Forward pass on the blended trajectory: the model predicts the
    # source-to-target displacement field.
    traj_delta = model_apply(model_params, traj_blend, tensor_ratio, tensor_cond)

    residual = traj_delta - (traj_dst - traj_src)
    return jnp.mean(sample_weights * jnp.power(residual, 2))


@partial(
    jax.jit,
    static_argnames=("model_apply", "transport_func", "func_postprocess"),
)
def _transport_forward_scan(
    model_apply: ModelApply,
    model_params: PyTree,
    traj_src: jnp.ndarray,
    tensor_cond: Union[jnp.ndarray, None],
    ratio_steps: jnp.ndarray,
    transport_func: Callable[[jnp.ndarray], jnp.ndarray],
    func_postprocess: Union[Callable[[jnp.ndarray], jnp.ndarray], None],
) -> jnp.ndarray:
    """JIT-compiled integration loop for `transport_forward`.

    Args:
        ratio_steps: Per-batch transport ratios (size_batch, steps + 1)
            increasing from 0.0 to 1.0.
    Returns:
        Stacked chain (steps + 1, size_batch, size_length, size_channel),
        including the initial trajectory as the first entry.
    """

    def step_fn(carry, k):
        traj_k = carry
        ratio = ratio_steps[:, k].reshape(-1, 1)

        # Transformed ratios at the current and next step, expanded to
        # (size_batch, 1, 1) so they broadcast over (length, channel).
        alpha_1 = jnp.expand_dims(transport_func(ratio), axis=2)
        alpha_2 = jnp.expand_dims(
            transport_func(ratio_steps[:, k + 1].reshape(-1, 1)), axis=2
        )

        # One Euler step along the model-predicted transport direction.
        delta = model_apply(model_params, traj_k, ratio, tensor_cond)
        traj_next = traj_k + (alpha_2 - alpha_1) * delta

        if func_postprocess is not None:
            traj_next = func_postprocess(traj_next)

        return traj_next, traj_next

    _, chain = scan(step_fn, traj_src, jnp.arange(ratio_steps.shape[1] - 1))

    # Prepend the initial state so the chain covers every visited trajectory.
    return jnp.concatenate([jnp.expand_dims(traj_src, axis=0), chain], axis=0)


def transport_forward(
    model_apply: ModelApply,
    model_params: PyTree,
    traj_src: jnp.ndarray,
    tensor_cond: Union[jnp.ndarray, None],
    steps: Union[int, jnp.ndarray],
    transport_func: Callable[[jnp.ndarray], jnp.ndarray] = func_transport_ratio_linear,
    func_postprocess: Union[Callable[[jnp.ndarray], jnp.ndarray], None] = None,
) -> List[jnp.ndarray]:
    # TODO: clean up the confusing/unused parameters and simplify the interface.
    """Compute forward flow transportation from source to target distribution.

    NOTE: The integration loop is JIT-compiled (`_transport_forward_scan`) and
    serves as a pure computational backend. For architectural consistency,
    high-level agent methods should handle state and logic, then call this
    function.

    Args:
        model_apply: The model's apply function.
        model_params: The model's parameters.
        traj_src: Initial source trajectory sample (size_batch, size_length, size_channel).
        tensor_cond: Optional conditioning tensor (size_batch, size_cond), or None.
        steps: If an integer, the number of uniformly spaced transport steps.
            If a 2d tensor (size_batch, steps + 1), provides non-uniformly
            spaced ratios between 0.0 and 1.0.
        transport_func: Transformation function for the transport ratio.
        func_postprocess: Callback function used to post-process and update
            the batched trajectory (size_batch, size_length, size_channel)
            at the end of each transport step.
    Returns:
        The chain of all intermediate trajectory tensors
        (size_batch, size_length, size_channel) in a list of length steps + 1,
        starting with `traj_src`.
    """
    size_batch = traj_src.shape[0]

    # Normalize `steps` into a ratio schedule OUTSIDE the JIT boundary.
    # Previously `steps` itself was a static jit argument, which rejected the
    # documented array form: JAX static arguments must be hashable and jax
    # arrays are not.
    if isinstance(steps, int):
        ratio_steps = jnp.tile(jnp.linspace(0.0, 1.0, steps + 1), (size_batch, 1))
    else:
        ratio_steps = steps

    full_chain = _transport_forward_scan(
        model_apply=model_apply,
        model_params=model_params,
        traj_src=traj_src,
        tensor_cond=tensor_cond,
        ratio_steps=ratio_steps,
        transport_func=transport_func,
        func_postprocess=func_postprocess,
    )

    # Unstack to a list of (size_batch, size_length, size_channel) arrays.
    return [full_chain[i] for i in range(full_chain.shape[0])]


def _guidance_step(
    carry: jnp.ndarray,
    iter_idx: int,
    *,
    model_apply: ModelApply,
    model_params: PyTree,
    cond: jnp.ndarray,
    prev_chunk: jnp.ndarray,
    W: jnp.ndarray,
    beta: float,
    n_steps: int,
) -> tuple[jnp.ndarray, jnp.ndarray]:
    """
    Performs a single guidance step for Real-Time Correction (RTC).

    This function is designed to be used with `jax.lax.scan`. It takes the
    current state of the action chunk (`A_tau`) and applies one step of
    pseudo-inverse guided denoising.

    Args:
        carry: The current action chunk being denoised, A_tau.
               Shape: (B, H, M).
        iter_idx: The current step index of the ODE solve (from 0 to n_steps-1).
        model_apply: The forward pass function of the underlying model.
        model_params: The parameters of the model.
        cond: The conditioning vector.
        prev_chunk: The action chunk from the previous inference call, used for guidance.
        W: The soft-mask weights for guidance. Shape: (1, H, 1).
        beta: The guidance weight clipping value.
        n_steps: The total number of denoising steps.

    Returns:
        A tuple (A_next, A_next), where A_next is the action chunk after one
        guidance step. The format is for `jax.lax.scan`.
    """
    A_tau = carry
    # Current time step for the ODE, from 0 to 1.
    tau = iter_idx / n_steps
    tau_jax = jnp.ones((A_tau.shape[0], 1)) * tau

    # 1. Define the denoising function and compute its VJP.
    # This simultaneously gives us the denoised estimate `A1_hat` and the function `vjp_fn`
    # to compute the guidance gradient. This avoids a redundant `model_apply` call.
    def denoising_fn(x):
        v = model_apply(model_params, x, tau_jax, cond)
        return x + (1 - tau) * v

    A1_hat, vjp_fn = jax.vjp(denoising_fn, A_tau)

    # 2. Recover the original velocity prediction `v_tau` from the denoised estimate.
    # (1 - tau) is guaranteed to be non-zero because tau is always < 1 (iter_idx < n_steps).
    v_tau = (A1_hat - A_tau) / (1 - tau)

    # 3. Calculate the guidance error.
    # The error is masked by W, which applies stronger guidance to the start
    # of the chunk for smooth transitions.
    err = (prev_chunk - A1_hat) * W  # Element-wise, with broadcasting

    # 4. Calculate the VJP to get the guidance gradient.
    # This computes `(err^T * J)` where `J` is the Jacobian of the denoising function.
    # It tells us how to change A_tau to reduce the error.
    (grad_guidance,) = vjp_fn(err)

    # 5. Calculate the guidance weight `w_guided` with clipping, as proposed in the paper.
    # Clipping with beta is crucial for stability with few denoising steps.
    r2 = (1 - tau) ** 2 / (tau**2 + (1 - tau) ** 2)
    # Add a small epsilon to avoid division by zero at tau=0.
    w_guided = jnp.minimum(beta, (1 - tau) / (tau * r2 + 1e-8))

    # 6. Apply the update: one Euler step with the corrected velocity.
    A_next = A_tau + (v_tau + w_guided * grad_guidance) * (1 / n_steps)

    return A_next, A_next


@partial(jax.jit, static_argnames=("model_apply", "n_steps"))
def transport_guidance(
    *,
    model_apply: ModelApply,
    model_params: PyTree,
    noise: jnp.ndarray,
    cond: jnp.ndarray,
    prev_chunk: jnp.ndarray,
    W: jnp.ndarray,
    n_steps: int = 5,
    beta: float = 5.0,
) -> jnp.ndarray:
    """
    Performs a full, multi-step forward flow process with pseudo-inverse guidance
    for Real-Time Correction (RTC).

    This function is JIT-compiled for performance.

    Args:
        model_apply: The model's apply function (static).
        model_params: The model's parameters.
        noise: The initial random noise. Shape: (B, H, M).
        cond: The conditioning vector.
        prev_chunk: The guidance target chunk. Shape: (B, H, M).
        W: The guidance weight mask. Shape: (1, H, 1).
        n_steps: The number of denoising steps (static).
        beta: The guidance weight clipping value.

    Returns:
        The final denoised action chunk. Shape: (B, H, M).
    """
    # Prepare the step function for jax.lax.scan by partially applying the static arguments.
    step_fn = partial(
        _guidance_step,
        model_apply=model_apply,
        model_params=model_params,
        cond=cond,
        prev_chunk=prev_chunk,
        W=W,
        beta=beta,
        n_steps=n_steps,
    )

    # Run the scan loop over the number of steps.
    # The carry starts as the initial noise A0.
    final_chunk, _ = scan(step_fn, noise, jnp.arange(n_steps))

    return final_chunk


@partial(jax.jit, static_argnames=("v_field_apply", "num_steps"))
def solve_ode_midpoint(
    v_field_apply: Callable,
    v_field_params: PyTree,
    x_0: jnp.ndarray,
    s_latent: jnp.ndarray,
    a: jnp.ndarray,
    g_latent: jnp.ndarray,
    num_steps: int,
) -> jnp.ndarray:
    """
    Solves the ODE dx/dt = v(x, t, s, a, g) from t=0 to t=1 using the Midpoint Method.

    This function is JIT-compiled for performance and is designed to be called
    by the TDFlowAgent for waypoint planning.

    Args:
        v_field_apply: The apply function of the learned vector field model.
        v_field_params: The parameters of the vector field model.
        x_0: The starting point of the integration (typically random noise).
        s_latent: The latent starting state (conditioning).
        a: The action taken from the start state (conditioning).
        g_latent: The latent goal state (conditioning).
        num_steps: The number of integration steps.

    Returns:
        The final state x_1, which is the result of the integration at t=1.
    """
    dt = 1.0 / num_steps

    def scan_body(xt, i):
        t_curr = jnp.ones((xt.shape[0], 1)) * (i * dt)

        # k1 calculation (velocity at the start of the step)
        k1 = v_field_apply(
            {"params": v_field_params}, xt, t_curr, s_latent, a, g_latent
        )

        # x_mid calculation (position at the midpoint of the step)
        x_mid = xt + 0.5 * dt * k1
        t_mid = t_curr + 0.5 * dt

        # k2 calculation (velocity at the midpoint)
        k2 = v_field_apply(
            {"params": v_field_params}, x_mid, t_mid, s_latent, a, g_latent
        )

        # Final step update
        xt_new = xt + dt * k2
        return xt_new, None  # Return new state, no scan output needed

    # Use jax.lax.scan for an efficient, JIT-compatible loop.
    # The initial carry is x_0.
    final_xt, _ = scan(scan_body, x_0, jnp.arange(num_steps))
    return final_xt
