from functools import partial
from typing import Any, Dict, List, Optional, Tuple

import jax
import jax.numpy as jnp
import optax
from flax import linen as nn
from flax.training import train_state

from . import flow_transport, models


class TrainState(train_state.TrainState):
    """Flax train state extended with a slot for EMA / target-network params.

    NOTE(review): call sites populate ``ema_params`` with different
    structures — a raw params pytree in most places, but an
    ``optax.EmaState`` in ``GCRLAgentBase._init_train_state``. Worth
    unifying before ``ema_params`` is consumed generically.
    """

    # EMA / target-network parameters; structure depends on the creator.
    ema_params: Any


class GCRLAgentBase:
    """Base implementation for Flow Matching
    Goal Conditioned Reinforcement Learning (JAX version).

    Holds shared configuration, the PRNG key, and factory/initialization
    helpers for flow models. Subclasses create the concrete models and
    implement `train` / `inference`.
    """

    def __init__(
        self,
        config: Dict,
        size_latent_obs: int,
        size_act: int,
        rng: jnp.ndarray,
    ):
        """
        Args:
            config: Hyper-parameter dictionary (keys accessed lazily below).
            size_latent_obs: Dimensionality of the latent observation space.
            size_act: Action dimensionality.
            rng: JAX PRNG key; consumed and re-split for all initialization.
        """
        self._config = config
        self._size_act = size_act
        self._rng = rng
        self._size_latent_obs = size_latent_obs

        # Placeholders only; subclasses define the actual models/states.
        self.actor_state: Optional[TrainState] = None
        self.critic1_state: Optional[TrainState] = None
        self.critic2_state: Optional[TrainState] = None
        self.planner_state: Optional[TrainState] = None
        self.world_state: Optional[TrainState] = None

        # Interpolation schedule used by all flow-matching losses/samplers.
        self._flow_transport_func = flow_transport.TRANSPORT_FUNCS["linear"]

    # _create_model_encoder method removed from here, will be in GCRLAgent0

    def _create_model_flow(
        self, name, size_channel: int, size_length: int, size_cond: int
    ) -> nn.Module:
        """Builds the conditional flow (vector-field) model.

        Args:
            name: Architecture selector, "mlp" or "unet".
            size_channel: Per-step channel size of the trajectory.
            size_length: Trajectory length (time axis).
            size_cond: Size of the conditioning vector.

        Raises:
            NotImplementedError: For unknown architecture names.
        """
        if name == "mlp":
            return models.ModelDenseSimple(
                size_channel=size_channel,
                size_length=size_length,
                size_cond=size_cond,
                size_hidden_list=self._config["model_flow_mlp_hidden"],
            )
        elif name == "unet":
            return models.ModelUnetResidualConditional(
                size_channel=size_channel,
                size_emb_transport=self._config["model_flow_unet_emb_size"],
                size_cond=size_cond,
                size_channel_hidden=self._config["model_flow_unet_hidden"],
                period_min=self._config["model_flow_unet_emb_period_min"],
                period_max=self._config["model_flow_unet_emb_period_max"],
                size_kernel=3,
                size_group_norm=8,
            )
        else:
            raise NotImplementedError(f"Flow model {name} not implemented")

    def _init_train_state(
        self, model: nn.Module, dummy_input: Tuple[jnp.ndarray, ...]
    ) -> TrainState:
        """Initializes a TrainState (params + AdamW optimizer + EMA copy).

        Args:
            model: The Flax module to initialize.
            dummy_input: Positional arguments for `model.init`, shaped like
                real inputs.

        Returns:
            A TrainState whose `ema_params` starts identical to `params`.
        """
        self._rng, init_rng = jax.random.split(self._rng)
        variables = model.init(init_rng, *dummy_input)

        # Handle models with no parameters (like Identity).
        params = variables.get("params", {})

        tx = optax.adamw(
            learning_rate=self._config["learning_rate"],
            weight_decay=self._config["weight_decay"],
        )

        # BUGFIX: previously this stored `optax.ema(...).init(params)` (an
        # optax.EmaState wrapper) in `ema_params`, while every other state in
        # this module stores a raw params pytree and updates it with
        # `jax.tree.map(ema, params)`. Store the params pytree directly so
        # `ema_params` has a uniform structure everywhere.
        return TrainState.create(
            apply_fn=model.apply,
            params=params,
            tx=tx,
            ema_params=params,
        )

    # The following methods need to be implemented by subclasses
    def train(self, *args, **kwargs):
        raise NotImplementedError

    def inference(self, *args, **kwargs):
        raise NotImplementedError


class GCRLAgent0(GCRLAgentBase):
    """Flow-matching behavior-cloning agent with a single observation encoder.

    Owns one encoder and one flow "actor" model, trained jointly through a
    single combined optimizer state, and provides three inference pathways:
    plain flow sampling (`inference`), Real-Time Correction guidance
    (`inference_rtc`), and temporal ensembling
    (`inference_temporal_ensemble`).
    """

    def __init__(
        self,
        config: Dict,
        encoder_config: List[Tuple[str, str, int, int]],
        size_latent_obs: int,
        size_act: int,
        rng: jnp.ndarray,
    ):
        """
        Args:
            config: Hyper-parameter dictionary.
            encoder_config: List of (label, name, size_in, size_out) tuples;
                exactly one entry is currently supported.
            size_latent_obs: Latent observation size; must equal the
                encoder's output size.
            size_act: Action dimensionality.
            rng: JAX PRNG key consumed for model initialization.
        """
        super().__init__(
            config, size_latent_obs, size_act, rng
        )  # Pass size_latent_obs to base

        # Create and initialize encoder model and its state.
        # Assuming only one encoder for simplicity, as per original config.
        if len(encoder_config) != 1:
            raise ValueError("GCRLAgent0 currently supports only one encoder.")

        label, name, size_in, size_out = encoder_config[0]
        self.encoder_model = self._create_model_encoder(
            name=name, size_in=size_in, size_out=size_out
        )
        # NOTE(review): a flat (1, size_in) dummy assumes vector observations;
        # confirm this matches the input expected by e.g. the impala encoder.
        # (`_init_train_state` splits `self._rng` internally for model.init.)
        dummy_input = jnp.ones((1, size_in))
        self.encoder_state = self._init_train_state(self.encoder_model, (dummy_input,))
        # Ensure the size_latent_obs passed matches the encoder's output.
        if size_out != self._size_latent_obs:
            raise ValueError(
                f"Encoder output size ({size_out}) does not match size_latent_obs ({self._size_latent_obs})."
            )

        # Create the actor model, conditioned on [latent_obs, latent_goal].
        self.actor_model = self._create_model_flow(
            name="unet",
            size_channel=self._size_act,
            size_length=self._config["trajs_act_len"],
            size_cond=2 * self._size_latent_obs,  # Use the correct latent size
        )

        # Initialize the combined state for actor and encoder.
        self._init_combined_state()

        # --- Initialize state for different inference strategies ---
        # For temporal ensembling: the number of past chunks whose horizon
        # can still overlap the current step, i.e.
        # ceil(trajs_act_len / inference_horizon).
        trajs_act_len = self._config["trajs_act_len"]
        inference_horizon = self._config["inference_horizon"]
        self._ensemble_size = (
            trajs_act_len + inference_horizon - 1
        ) // inference_horizon
        # Circular buffer of the last `_ensemble_size` predicted chunks.
        self.temporal_agg_chunk_history = jnp.zeros(
            (
                self._ensemble_size,
                trajs_act_len,
                self._size_act,
            )
        )
        self.temporal_agg_planning_step_counter = 0

        # For RTC (Real-Time Correction): the previously emitted chunk.
        self._prev_chunk = None

    def _create_model_encoder(
        self, name: str, size_in: int, size_out: int
    ) -> nn.Module:
        """Builds the observation encoder.

        Args:
            name: One of "identity", "mlp", "impala".
            size_in: Input feature size.
            size_out: Latent output size.

        Raises:
            NotImplementedError: For unknown encoder names.
        """
        if name == "identity":
            assert size_in == size_out
            return models.Identity()
        elif name == "mlp":
            return models.MLPNet(
                dim_in=size_in,
                dim_hidden=self._config["model_encoder_mlp_hidden"],
                dim_out=size_out,
            )
        elif name == "impala":
            return models.ImpalaEncoder(
                size_stacks_channel=self._config["model_encoder_impala_stacks"],
                size_fc_in=size_in,
                size_fc_hidden=self._config["model_encoder_impala_hidden"],
                size_fc_out=size_out,
                size_block=1,
                use_layer_norm=True,
                dropout_rate=self._config.get("model_encoder_impala_dropout", 0.0),
            )
        # ... other encoder types
        else:
            raise NotImplementedError(f"Encoder model {name} not implemented")

    def _init_combined_state(self):
        """Creates one TrainState jointly holding actor and encoder params.

        A single AdamW optimizer updates both sub-trees; `ema_params` starts
        as a copy of the live params.
        """
        # Dummy inputs for initialization (batch of 1).
        dummy_traj = jnp.ones((1, self._config["trajs_act_len"], self._size_act))
        dummy_ratio = jnp.ones((1, 1))
        dummy_cond = jnp.ones((1, 2 * self._size_latent_obs))

        # Init actor params.
        self._rng, actor_rng = jax.random.split(self._rng)
        actor_params = self.actor_model.init(
            actor_rng, dummy_traj, dummy_ratio, dummy_cond
        )["params"]

        # Combine params under named sub-trees.
        combined_params = {
            "actor": actor_params,
            "encoder": self.encoder_state.params,  # Now a single encoder
        }

        # Create a single optimizer for all parameters.
        tx = optax.adamw(
            learning_rate=self._config["learning_rate"],
            weight_decay=self._config["weight_decay"],
        )

        # Create a single TrainState.
        self.combined_state = TrainState.create(
            apply_fn=None,  # Not used directly
            params=combined_params,
            tx=tx,
            ema_params=combined_params,  # Initialize ema with the same params
        )

    @partial(jax.jit, static_argnums=(0,))
    def _update_step(
        self, state: TrainState, rng: jnp.ndarray, batch: Dict
    ) -> Tuple[TrainState, jnp.ndarray]:
        """One JIT-compiled gradient step on the flow-matching loss.

        Args:
            state: Combined actor+encoder TrainState.
            rng: PRNG key forwarded to the flow-matching loss.
            batch: Dict with "obs"/"goal" (each containing "proprio"),
                "traj_src" (noise) and "traj_dst" (target actions).

        Returns:
            (updated state with refreshed EMA params, scalar loss).
        """
        def loss_fn(params):
            # Encode observations and goals with the shared encoder.
            latent_obs = self.encoder_model.apply(
                {"params": params["encoder"]}, batch["obs"]["proprio"]
            )
            latent_goal = self.encoder_model.apply(
                {"params": params["encoder"]}, batch["goal"]["proprio"]
            )

            # Conditioning vector is the concatenation [obs, goal].
            tensor_cond = jnp.concatenate([latent_obs, latent_goal], axis=-1)

            loss = flow_transport.train_loss(
                model_apply=lambda p, *args: self.actor_model.apply(
                    {"params": p}, *args
                ),
                model_params=params["actor"],
                rng=rng,
                traj_src=batch["traj_src"],
                tensor_cond=tensor_cond,
                traj_dst=batch["traj_dst"],
                transport_func=self._flow_transport_func,
            )
            return loss

        loss, grads = jax.value_and_grad(loss_fn)(state.params)
        state = state.apply_gradients(grads=grads)

        # Update EMA of all parameters.
        ema_decay = self._config.get("ema_decay", 0.999)
        new_ema_params = jax.tree.map(
            lambda ema, p: ema * ema_decay + p * (1 - ema_decay),
            state.ema_params,
            state.params,
        )
        state = state.replace(ema_params=new_ema_params)

        return state, loss

    def train(
        self,
        rng: jnp.ndarray,
        batch_obs: Dict,
        batch_goal: Dict,
        batch_dist: jnp.ndarray,
        batch_traj_obs: Dict,
        batch_traj_act: jnp.ndarray,
        batch_other_obs: Dict,
    ) -> float:
        """Runs one training step and returns the loss as a Python float.

        Only `batch_obs`, `batch_goal` and `batch_traj_act` are consumed;
        the remaining arguments are kept for interface compatibility with
        other agents.
        """
        size_batch = batch_traj_act.shape[0]

        # BUGFIX: use independent keys for sampling the source noise and for
        # the randomness inside the loss. Previously the SAME key was used
        # for both `jax.random.normal` and `_update_step`, producing
        # correlated draws (JAX keys must never be reused).
        noise_rng, update_rng = jax.random.split(rng)

        train_batch = {
            "obs": batch_obs,
            "goal": batch_goal,
            "traj_src": jax.random.normal(
                noise_rng, (size_batch, self._config["trajs_act_len"], self._size_act)
            ),
            "traj_dst": batch_traj_act,
        }

        self.combined_state, loss = self._update_step(
            self.combined_state, update_rng, train_batch
        )
        # .item() blocks until the device computation finishes.
        return loss.item()

    # TODO: (NE-Flow-Architecture): The current inference pathway performs an
    # internal encoding step. This conflicts with the target architecture
    # where the encoder is pre-positioned and the agent should operate purely
    # on latent vectors. In a future refactoring cycle, this method should be
    # replaced by a version that operates directly on latent inputs.
    @partial(jax.jit, static_argnums=(0,))
    def _inference_step(
        self, params: Dict, batch_obs: Dict, batch_goal: Dict, noise: jnp.ndarray
    ) -> jnp.ndarray:
        """
        Performs a standard, unguided inference step within a single
        JIT-compiled function. Encodes observations and goals, then calls
        the `transport_forward` function.

        Args:
            params: Combined params dict with "encoder" and "actor" sub-trees.
            batch_obs / batch_goal: Dicts containing a "proprio" array.
            noise: Source noise, (B, H, D) or (H, D) (from vmap callers).

        Returns:
            The final element of the transport chain (the sampled chunk).
        """
        # Encode observations.
        latent_obs = self.encoder_model.apply(
            {"params": params["encoder"]}, jnp.atleast_2d(batch_obs["proprio"])
        )

        # Encode goals.
        latent_goal = self.encoder_model.apply(
            {"params": params["encoder"]}, jnp.atleast_2d(batch_goal["proprio"])
        )

        tensor_cond = jnp.concatenate([latent_obs, latent_goal], axis=-1)

        # If noise is 2D (H, D), expand it to (1, H, D) for consistency.
        # This handles calls from vmap where the batch dimension is stripped.
        # (ndim is static under jit, so this branch is resolved at trace time.)
        if noise.ndim == 2:
            noise = jnp.expand_dims(noise, axis=0)

        # Call the JIT-compiled transport function.
        chain = flow_transport.transport_forward(
            model_apply=lambda p, *args: self.actor_model.apply({"params": p}, *args),
            model_params=params["actor"],
            traj_src=noise,
            tensor_cond=tensor_cond,
            steps=self._config["forward_step"],
            transport_func=self._flow_transport_func,
        )
        return chain[-1]

    def inference(
        self, rng: jnp.ndarray, batch_obs: Dict, batch_goal: Dict
    ) -> jnp.ndarray:
        """
        Public method for standard, unguided inference.
        Generates noise and calls the JIT-compiled `_inference_step` with the
        current EMA model parameters.

        This method is defensive: if a 1D observation is passed, it will
        automatically be expanded to a 2D array with a leading batch
        dimension of 1.
        """
        # BUGFIX: normalize to batched inputs WITHOUT mutating the caller's
        # dicts (the previous version wrote the reshaped arrays back into
        # the arguments, an observable side effect).
        batch_obs = {**batch_obs, "proprio": jnp.atleast_2d(batch_obs["proprio"])}
        batch_goal = {**batch_goal, "proprio": jnp.atleast_2d(batch_goal["proprio"])}

        batch_size = batch_obs["proprio"].shape[0]
        H = self._config["trajs_act_len"]
        noise = jax.random.normal(rng, (batch_size, H, self._size_act))

        return self._inference_step(
            self.combined_state.ema_params, batch_obs, batch_goal, noise
        )

    def reset_history(self):
        """Resets the state for all stateful inference strategies."""
        # For temporal ensembling
        self.temporal_agg_chunk_history = jnp.zeros_like(
            self.temporal_agg_chunk_history
        )
        self.temporal_agg_planning_step_counter = 0
        # For RTC
        self._prev_chunk = None

    @partial(jax.jit, static_argnums=(0,))
    def _inference_rtc_step(
        self,
        params: Dict,
        batch_obs: Dict,
        batch_goal: Dict,
        guidance_target: jnp.ndarray,
        W: jnp.ndarray,
        noise: jnp.ndarray,
    ) -> jnp.ndarray:
        """
        Performs a single RTC inference step within a JIT-compiled function.
        Encodes observations and goals, then calls `transport_guidance` with
        the provided guidance target and soft mask W.
        """
        # Encode observations and goals.
        # NOTE(review): unlike `_inference_step`, inputs are assumed to be
        # already batched here (no atleast_2d) — confirm callers guarantee it.
        latent_obs = self.encoder_model.apply(
            {"params": params["encoder"]}, batch_obs["proprio"]
        )
        latent_goal = self.encoder_model.apply(
            {"params": params["encoder"]}, batch_goal["proprio"]
        )

        cond = jnp.concatenate([latent_obs, latent_goal], axis=-1)

        # Call JIT-compiled guided flow matching transport.
        new_chunk = flow_transport.transport_guidance(
            model_apply=lambda p, *args: self.actor_model.apply({"params": p}, *args),
            model_params=params["actor"],
            noise=noise,
            cond=cond,
            prev_chunk=guidance_target,
            W=W,
            n_steps=self._config.get("forward_step", 5),
            beta=self._config.get("beta", 5.0),
        )
        return new_chunk

    def inference_rtc(
        self, rng: jnp.ndarray, batch_obs: Dict, batch_goal: Dict
    ) -> jnp.ndarray:
        """
        Performs inference using Real-Time Correction (RTC) with
        pseudo-inverse guidance.

        This method manages the state (`_prev_chunk`) and calls the
        JIT-compiled `_inference_rtc_step` for the actual computation.

        Returns:
            The first `inference_horizon` actions of the new chunk,
            shape (B, inference_horizon, action_dim).
        """
        # --- 1. Get parameters and config values ---
        params = self.combined_state.ema_params
        d = self._config.get("delay_steps", 0)  # Inference delay
        s = self._config["inference_horizon"]  # Execution horizon
        H = self._config["trajs_act_len"]  # Prediction horizon
        batch_size = batch_obs["proprio"].shape[0]

        # --- 2. Prepare noise, Guidance Target (Y), and Mask (W) ---
        noise = jax.random.normal(rng, (batch_size, H, self._size_act))

        if self._prev_chunk is None:
            # First step: No guidance. Use a zero target and a zero mask.
            guidance_target = jnp.zeros_like(noise)
            W = jnp.zeros((1, H, 1))
        else:
            # Subsequent steps: Apply sliding window and calculate the mask.
            # 2a. Create guidance_target with a sliding window: the first
            # H - s steps are the tail of the previous chunk.
            guidance_target = jnp.zeros_like(self._prev_chunk)
            guidance_target = guidance_target.at[:, : H - s, :].set(
                self._prev_chunk[:, s:, :]
            )

            # 2b. Create the three-part soft mask W from the paper's formula:
            # weight 1 for steps still inside the inference delay, 0 for
            # steps beyond the previous chunk's overlap, and an exponential
            # decay in between.
            i = jnp.arange(H)
            denom = H - s - d + 1.0
            c_i = (H - s - i) / jnp.maximum(denom, 1e-5)
            w_decay = c_i * (jnp.exp(c_i) - 1) / (jnp.e - 1)
            W_1d = jnp.where(i < d, 1.0, jnp.where(i >= H - s, 0.0, w_decay))
            W = W_1d.reshape(1, H, 1)

        # --- 3. Call JIT-compiled guided flow matching transport ---
        new_chunk = self._inference_rtc_step(
            params,
            batch_obs,
            batch_goal,
            guidance_target,
            W,
            noise,
        )

        # --- 4. Update state and return actions ---
        self._prev_chunk = new_chunk

        # Return the first `s` actions from the new chunk.
        return new_chunk[:, :s, :]

    def inference_temporal_ensemble(
        self, rng: jnp.ndarray, batch_obs: Dict, batch_goal: Dict
    ) -> jnp.ndarray:
        """
        Performs a planning step with temporal aggregation.

        This method implements a memory-efficient temporal aggregation
        strategy where only the last `E` (ensemble_window_size) predicted
        action chunks are stored in a circular buffer.

        Returns:
            A sequence of `inference_horizon` actions to be executed, with a
            batch dimension of 1, i.e. shape (1, inference_horizon, action_dim).
        """
        # --- 0. Get config parameters ---
        ensemble_size = self._ensemble_size
        inference_horizon = self._config["inference_horizon"]
        traj_act_len = self._config["trajs_act_len"]
        decay_rate = self._config.get("decay_rate", 0.1)

        # --- 1. Get a new action chunk prediction from the model ---
        new_chunk = self.inference(rng, batch_obs, batch_goal)  # Shape (1, H, D)
        # Squeeze the batch dimension; evaluation is assumed to run with
        # batch_size=1 (jnp.squeeze raises if the leading dim is not 1).
        new_chunk = jnp.squeeze(new_chunk, axis=0)  # Shape (H, D)

        # --- 2. Update history circular buffer ---
        # The pointer is the number of planning steps modulo the buffer size.
        history_ptr = self.temporal_agg_planning_step_counter % ensemble_size
        self.temporal_agg_chunk_history = self.temporal_agg_chunk_history.at[
            history_ptr
        ].set(new_chunk)
        self.temporal_agg_planning_step_counter += 1

        # --- 3. Generate actions for the execution horizon ---
        final_actions = []
        for j in range(inference_horizon):
            # For each step `j` in the execution horizon, aggregate an action.

            # --- 3a. Collect candidate actions for this step `j` ---
            # This uses a "gapped anti-diagonal" slice through the history.
            candidate_actions_list = []
            num_valid_chunks = min(
                self.temporal_agg_planning_step_counter, ensemble_size
            )

            for i in range(num_valid_chunks):  # i is the "age" of the chunk, 0=newest
                # The action for the current global step is at an index in the
                # chunk that depends on the chunk's age (`i`) and the step
                # within the execution horizon (`j`).
                action_idx = i * inference_horizon + j

                if action_idx < traj_act_len:
                    # Read from the circular buffer. The newest chunk is at
                    # `history_ptr`, the next newest at `history_ptr-1`, etc.
                    chunk_read_ptr = (history_ptr - i + ensemble_size) % ensemble_size
                    chunk = self.temporal_agg_chunk_history[chunk_read_ptr]
                    candidate = chunk[action_idx]
                    candidate_actions_list.append(candidate)

            # --- 3b. Aggregate the collected candidates ---
            if not candidate_actions_list:
                # No chunk covers this step yet; emit a zero action.
                final_action = jnp.zeros((self._size_act,))
            else:
                # The list is ordered from newest to oldest.
                candidates = jnp.stack(candidate_actions_list)

                # Weighted average where older predictions get higher weights.
                num_candidates = candidates.shape[0]
                # To give older actions (at the end of the list) higher
                # weight, reverse the arange for the exponential decay.
                reversed_arange = jnp.arange(num_candidates - 1, -1, -1)
                weights = jnp.exp(-decay_rate * reversed_arange)
                weights /= jnp.sum(weights)
                weights = jnp.expand_dims(weights, axis=-1)

                final_action = jnp.sum(candidates * weights, axis=0)

            final_actions.append(final_action)

        # --- 4. Finalize output ---
        # Add batch dimension back to keep a consistent API with other
        # inference methods.
        final_actions_2d = jnp.stack(final_actions)  # Shape (H_inf, D)
        return jnp.expand_dims(final_actions_2d, axis=0)  # Shape (1, H_inf, D)


class TDFlowAgent(GCRLAgentBase):
    """TD-Flow agent: learns a goal-conditioned vector field over latent
    states with a one-step flow-matching loss plus a bootstrapped TD loss,
    and plans subgoals by integrating that field. Actions come from a
    pre-trained low-level `GCRLAgent0`.
    """

    def __init__(
        self,
        config: Dict,
        size_latent_obs: int,
        size_act: int,
        rng: jnp.ndarray,
        agent0: GCRLAgent0,  # Low-level policy used for bootstrap actions
    ):
        """
        Args:
            config: Hyper-parameter dictionary.
            size_latent_obs: Latent state dimensionality (the field predicts
                latent states, not actions).
            size_act: Action dimensionality.
            rng: JAX PRNG key consumed for model initialization.
            agent0: Trained low-level agent supplying bootstrap actions.
        """
        super().__init__(config, size_latent_obs, size_act, rng)
        self.agent0 = agent0  # Store the agent0 instance

        # Create the vector field model (U-Net for TD-Flow).
        self.v_field_model = models.ModelUnetTDFlow(
            state_dim=self._size_latent_obs,  # Predicts state, not action
            size_emb_transport=self._config["model_flow_unet_emb_size"],
            hidden_dims=self._config.get(
                "model_flow_tdflow_hidden_dims", [256, 128, 64]
            ),
            period_min=self._config["model_flow_unet_emb_period_min"],
            period_max=self._config["model_flow_unet_emb_period_max"],
        )

        # Initialize TrainState for the v_field model.
        self._init_v_field_state()

    def _init_v_field_state(self):
        """Creates the TrainState for the vector field, with `ema_params`
        acting as the target network (initialized identical to params)."""
        # Dummy inputs for initialization (batch of 1).
        dummy_x_t = jnp.ones((1, self._size_latent_obs))
        dummy_t = jnp.ones((1, 1))
        dummy_cond_s = jnp.ones((1, self._size_latent_obs))
        dummy_cond_a = jnp.ones((1, self._size_act))
        dummy_cond_g = jnp.ones((1, self._size_latent_obs))

        # Init v_field params.
        self._rng, v_field_rng = jax.random.split(self._rng)
        v_field_params = self.v_field_model.init(
            v_field_rng,
            dummy_x_t,
            dummy_t,
            dummy_cond_s,
            dummy_cond_a,
            dummy_cond_g,
        )["params"]

        # Combine params for a single TrainState.
        # TDFlowAgent no longer manages encoders.
        combined_params = {
            "v_field": v_field_params,
        }

        tx = optax.adamw(
            learning_rate=self._config["learning_rate"],
            weight_decay=self._config["weight_decay"],
        )

        # Create the TrainState, including target network parameters.
        self.v_field_state = TrainState.create(
            apply_fn=self.v_field_model.apply,
            params=combined_params,
            tx=tx,
            ema_params=combined_params,  # Target network starts identical to main network
        )

    @staticmethod
    def _is_goal_achieved(
        state: jnp.ndarray, goal: jnp.ndarray, tol: float = 0.04
    ) -> jnp.ndarray:
        """Check if state is within (Euclidean) tolerance of goal."""
        return jnp.linalg.norm(state - goal, axis=-1) < tol

    @partial(jax.jit, static_argnums=(0,))
    def _update_step(
        self, state: TrainState, rng: jnp.ndarray, batch: Dict, agent0_params: Dict
    ) -> Tuple[TrainState, jnp.ndarray]:
        """One JIT-compiled TD-Flow gradient step.

        Args:
            state: The v_field TrainState (`ema_params` = target network).
            rng: PRNG key for time/noise sampling.
            batch: Dict of latent transitions: "s", "a", "s_prime", "g".
            agent0_params: Combined encoder+actor params of the low-level
                agent, used (without gradients) for bootstrap actions.

        Returns:
            (updated state, dict of losses {"total", "onestep", "bootstrap"}).
        """
        def loss_fn(params):
            # 1. HER and Absorbing Goal Preprocessing
            # s, a, s_prime, g are expected to be latent vectors.
            s_latent, a, s_prime_latent, g_latent = (
                batch["s"],
                batch["a"],
                batch["s_prime"],
                batch["g"],
            )
            # Transitions that reach the goal are absorbed onto the goal.
            at_goal_mask = self._is_goal_achieved(s_prime_latent, g_latent)
            s_prime_abs_latent = jnp.where(
                at_goal_mask[:, None], g_latent, s_prime_latent
            )

            # 2. All relevant states and goals are already in latent space
            # (No encoder calls here)

            # 3. Sample time and noise (noise shape matches latent dim).
            # BUGFIX: split into FOUR independent keys. Previously a single
            # key was reused for both the agent0 action noise and the
            # bootstrap ODE starting point, which makes those two "random"
            # draws deterministically correlated (JAX keys must not be
            # reused).
            key_t, key_noise_onestep, key_action, key_ode = jax.random.split(rng, 4)
            t = jax.random.uniform(key_t, (s_latent.shape[0], 1))
            noise_onestep = jax.random.normal(
                key_noise_onestep, s_prime_abs_latent.shape
            )

            # 4. One-step Loss Calculation (using latent vectors):
            # linear interpolation between noise and target, matching the
            # constant velocity field (target - noise).
            x_t_onestep = t * s_prime_abs_latent + (1 - t) * noise_onestep
            u_t_target = s_prime_abs_latent - noise_onestep

            v_pred_onestep = state.apply_fn(
                {"params": params["v_field"]}, x_t_onestep, t, s_latent, a, g_latent
            )
            loss_onestep = jnp.mean((v_pred_onestep - u_t_target) ** 2)

            # 5. Bootstrap Loss Calculation
            # Get next action from agent0 for the (absorbed) next state.
            batch_size = s_prime_abs_latent.shape[0]
            H = self._config["trajs_act_len"]
            D = self._size_act
            noise_batch = jax.random.normal(key_action, (batch_size, H, D))

            # Use jax.vmap to apply agent0._inference_step to the entire
            # batch. agent0_params contains both encoder and actor params.
            # The batch_obs and batch_goal for agent0 are latent states.
            vmapped_agent0_inference = jax.vmap(
                lambda p, obs, goal, noise_val: self.agent0._inference_step(
                    p, {"proprio": obs}, {"proprio": goal}, noise_val
                ),
                in_axes=(
                    None,
                    0,  # obs (latent)
                    0,  # goal (latent)
                    0,  # noise
                ),
                out_axes=0,
            )

            a_prime_batched = vmapped_agent0_inference(
                agent0_params,
                s_prime_abs_latent,  # Pass latent s_prime_abs
                g_latent,  # Pass latent g
                noise_batch,
            )

            # vmap output is (B, 1, H, D); take the first action of each
            # sequence and block gradients through the low-level policy.
            a_prime = jax.lax.stop_gradient(a_prime_batched[:, 0, 0, :])

            # Define the batched Midpoint (RK2) ODE solver.
            def midpoint_ode_solver_batch(
                x0_batch,
                t_end_batch,
                num_steps,
                s_p_latent_batch,
                a_p_batch,
                g_p_latent_batch,
            ):
                """Integrates the TARGET vector field from 0 to t_end."""
                dt_batch = t_end_batch / num_steps

                # TODO: parallelize a higher-order RK ODE solver with vmap.
                def scan_body(xt_batch, i):
                    t_curr_batch = i * dt_batch

                    # k1: slope at the start of the step.
                    k1 = state.apply_fn(
                        {
                            "params": state.ema_params["v_field"]
                        },  # Use target v_field params
                        xt_batch,
                        t_curr_batch,
                        s_p_latent_batch,
                        a_p_batch,
                        g_p_latent_batch,
                    )

                    # Midpoint estimate.
                    x_mid_batch = xt_batch + 0.5 * dt_batch * k1
                    t_mid_batch = t_curr_batch + 0.5 * dt_batch

                    # k2: slope at the midpoint, used for the full step.
                    k2 = state.apply_fn(
                        {
                            "params": state.ema_params["v_field"]
                        },  # Use target v_field params
                        x_mid_batch,
                        t_mid_batch,
                        s_p_latent_batch,
                        a_p_batch,
                        g_p_latent_batch,
                    )

                    # Step update.
                    xt_new_batch = xt_batch + dt_batch * k2
                    return xt_new_batch, None

                final_xt, _ = jax.lax.scan(scan_body, x0_batch, jnp.arange(num_steps))
                return final_xt

            # Generate x_t_bootstrap by solving the ODE from fresh noise.
            num_ode_steps = self._config.get("num_ode_steps", 10)
            x0_bootstrap = jax.random.normal(key_ode, s_prime_abs_latent.shape)
            x_t_bootstrap = midpoint_ode_solver_batch(
                x0_bootstrap, t, num_ode_steps, s_prime_abs_latent, a_prime, g_latent
            )

            # Get the target vector field value at the generated x_t
            # (stop_gradient: the target network provides a fixed regression
            # target).
            v_t_bootstrap_target = jax.lax.stop_gradient(
                state.apply_fn(
                    {
                        "params": state.ema_params["v_field"]
                    },  # Use target v_field params
                    x_t_bootstrap,
                    t,
                    s_prime_abs_latent,
                    a_prime,
                    g_latent,
                )
            )

            v_pred_bootstrap = state.apply_fn(
                {"params": params["v_field"]}, x_t_bootstrap, t, s_latent, a, g_latent
            )
            loss_bootstrap = jnp.mean((v_pred_bootstrap - v_t_bootstrap_target) ** 2)

            # 6. Total Loss: (1 - gamma) one-step + gamma bootstrap.
            gamma = self._config.get("gamma", 0.99)
            total_loss = (1 - gamma) * loss_onestep + gamma * loss_bootstrap
            return total_loss, {
                "onestep": loss_onestep,
                "bootstrap": loss_bootstrap,
            }

        (loss, aux_losses), grads = jax.value_and_grad(loss_fn, has_aux=True)(
            state.params
        )
        state = state.apply_gradients(grads=grads)

        # Update target network (EMA / Polyak averaging with rate tau).
        tau = self._config.get("tau", 0.005)
        new_target_params = jax.tree.map(
            lambda p, tp: p * tau + tp * (1 - tau),
            state.params,
            state.ema_params,
        )
        state = state.replace(ema_params=new_target_params)

        # Reconstruct the full loss dictionary for logging.
        all_losses = {"total": loss, **aux_losses}

        return state, all_losses

    def train(
        self,
        rng: jnp.ndarray,
        s_latent: jnp.ndarray,
        a: jnp.ndarray,
        s_prime_latent: jnp.ndarray,
        g_latent: jnp.ndarray,
        agent0_params: Dict,  # agent0's combined params
    ) -> Dict:
        """Runs one TD-Flow update step.

        Returns:
            Dict of losses {"total", "onestep", "bootstrap"} (device arrays).
            (BUGFIX: annotation corrected from `float` — `_update_step`
            returns a loss dictionary, not a scalar.)
        """
        train_batch = {
            "s": s_latent,
            "a": a,
            "s_prime": s_prime_latent,
            "g": g_latent,
        }

        self.v_field_state, loss = self._update_step(
            self.v_field_state, rng, train_batch, agent0_params
        )
        return loss

    def plan_next_waypoint(
        self,
        s_curr_latent: jnp.ndarray,
        g_final_latent: jnp.ndarray,
        rng: jnp.ndarray,
    ) -> jnp.ndarray:
        """
        Plans the next waypoint by solving the conditioned ODE.

        This acts as the high-level policy, generating a subgoal by
        integrating the learned vector field from a random starting point
        to t=1.

        Args:
            s_curr_latent: The current state (unbatched), in latent space.
            g_final_latent: The final goal (unbatched), in latent space.
            rng: JAX random key.

        Returns:
            The next waypoint (subgoal) in latent space, unbatched.
        """
        # 1. Get action from agent0. `inference` is robust to unbatched inputs.
        rng, act_rng, ode_rng = jax.random.split(rng, 3)
        a_curr_chunk = self.agent0.inference(
            act_rng, {"proprio": s_curr_latent}, {"proprio": g_final_latent}
        )
        # a_curr_chunk is (1, H, D), so a_curr is (1, D).
        a_curr = a_curr_chunk[:, 0, :]

        # 2. Prepare batched inputs for the ODE solver.
        s_curr_batched = s_curr_latent[None, :]
        g_final_batched = g_final_latent[None, :]
        # The starting point for the ODE is random noise of the same batched shape.
        x_0 = jax.random.normal(ode_rng, s_curr_batched.shape)

        # 3. Solve the ODE to get the next waypoint (uses the LIVE params,
        # not the target network).
        num_ode_steps = self._config.get("num_ode_steps_planning", 20)
        next_waypoint_batched = flow_transport.solve_ode_midpoint(
            v_field_apply=self.v_field_model.apply,
            v_field_params=self.v_field_state.params["v_field"],
            x_0=x_0,
            s_latent=s_curr_batched,
            a=a_curr,
            g_latent=g_final_batched,
            num_steps=num_ode_steps,
        )

        # 4. Return the result, removing the batch dimension for the caller.
        return next_waypoint_batched.squeeze(axis=0)
