"""
This module provides a grain-based data loading pipeline for goal-conditioned
offline reinforcement learning, designed for performance and determinism.
"""

import dataclasses
from typing import Dict, Tuple

import grain.python as grain
import numba
import numpy as np


@numba.jit(nopython=True)
def _generate_sample_numba(
    index: int,
    valid_start_indices: np.ndarray,
    observations: np.ndarray,
    actions: np.ndarray,
    episode_length: int,
    total_timesteps: int,
    max_dist_goal: int,
    trajs_obs_len: int,
    trajs_act_len: int,
    is_act_vel: bool,
) -> Tuple[np.ndarray, np.ndarray, float, np.ndarray, np.ndarray, np.ndarray]:
    """
    Numba-jitted function to generate a single, processed sample.
    This contains the performance-critical logic.

    Fully deterministic: the same `index` always produces the same sample,
    which keeps the pipeline reproducible and checkpointable.

    Args:
        index: Index into `valid_start_indices`; also seeds the per-sample RNG.
        valid_start_indices: Precomputed timesteps that may start a trajectory.
        observations: Time-major observation array (first axis is the timestep).
        actions: Time-major action array (first axis is the timestep).
        episode_length: Constant length of every episode; episodes are assumed
            to tile the dataset contiguously starting at timestep 0.
        total_timesteps: Total number of timesteps in the dataset.
        max_dist_goal: Maximum (and normalizing) timestep distance to the goal.
        trajs_obs_len: Length of the returned observation trajectory.
        trajs_act_len: Length of the returned action trajectory.
        is_act_vel: If True, zero out actions at/after the goal offset.

    Returns:
        (obs_t, obs_g, norm_dist, traj_obs, traj_act, other_obs)
    """
    # Simple LCG pseudo-random number generator for deterministic, seedable RNG inside Numba
    # Parameters from Numerical Recipes (a=1664525, c=1013904223, m=2**32)
    rand_int1 = (1664525 * index + 1013904223) % 4294967296
    rand_int2 = (1664525 * rand_int1 + 1013904223) % 4294967296
    # BUGFIX: for a power-of-two-modulus LCG the low-order bits have very short
    # periods (bit k cycles with period 2**(k+1); the lowest bit simply
    # alternates), so taking `state % n` directly makes the drawn offsets
    # strongly correlated with the parity of `index`. Temper each state with an
    # xorshift so the modulo below sees well-mixed bits. Still deterministic.
    rand_int1 ^= rand_int1 >> 16
    rand_int2 ^= rand_int2 >> 16

    # 1. Get a valid starting timestep `t`
    t = valid_start_indices[index]

    # 2. The goal must stay inside t's episode and within max_dist_goal steps
    episode_idx = t // episode_length
    episode_end = (episode_idx + 1) * episode_length - 1
    goal_upper_bound = min(episode_end, t + max_dist_goal)

    # 3. Randomly select goal `g` in [t, goal_upper_bound] using the tempered LCG
    goal_range = goal_upper_bound - t
    if goal_range > 0:
        g = t + (rand_int1 % (goal_range + 1))
    else:
        g = t

    # 4. Extract current and goal observations
    obs_t = observations[t]
    obs_g = observations[g]

    # 5. Distance to goal, normalized to [0, 1] by the maximum possible distance
    dist = g - t
    norm_dist = dist / max_dist_goal

    # 6. Observation trajectory: timesteps t..t+len-1, clamped at the goal so
    #    the trajectory "freezes" once the goal is reached.
    obs_traj_indices = t + np.arange(trajs_obs_len)
    obs_traj_indices_clipped = np.minimum(obs_traj_indices, g)
    traj_obs = observations[obs_traj_indices_clipped]

    # 7. Action trajectory (copy so zeroing below cannot mutate the dataset)
    act_traj_offsets = np.arange(trajs_act_len)
    traj_act = actions[t + act_traj_offsets].copy()

    if is_act_vel:
        # Zero out actions for timesteps at/after the goal is reached
        for i in range(trajs_act_len):
            if act_traj_offsets[i] >= dist:
                traj_act[i] = 0.0  # Broadcast 0.0 to the action vector

    # 8. Sample a random other observation from the entire dataset
    #    (presumably used as a negative/contrastive sample — confirm with consumer)
    other_obs_idx = rand_int2 % total_timesteps
    other_obs = observations[other_obs_idx]

    return obs_t, obs_g, norm_dist, traj_obs, traj_act, other_obs


class TrajectoryData:
    """
    A data container that loads, verifies, and holds trajectory data from a .npz file.

    This class is responsible for all disk I/O. It loads the entire dataset
    into memory and verifies that all episodes within the dataset have a
    consistent length, which is a critical assumption for the sampling process.
    """

    def __init__(self, npz_path: str):
        """
        Initializes the data loader.

        Args:
            npz_path: Path to the .npz file containing the trajectory data.
        """
        self.npz_path = npz_path
        # Populated by load_and_verify(); empty/zero until then.
        self.data: Dict[str, np.ndarray] = {}
        self.episode_length: int = 0
        self.total_timesteps: int = 0
        self.num_episodes: int = 0

    def load_and_verify(self):
        """
        Loads data from the .npz file and verifies episode length consistency.

        Raises:
            AssertionError: If episodes have inconsistent lengths.
            FileNotFoundError: If the .npz file does not exist.
            KeyError: If essential keys ('terminals', 'observations', 'actions')
                      are missing from the .npz file.
        """
        print(f"Loading data from {self.npz_path}...")
        with np.load(self.npz_path) as loaded_data:
            # Copy auxiliary keys first, then the three essential arrays
            # (accessing them inside the `with` block materializes them in memory).
            self.data = {
                key: loaded_data[key]
                for key in loaded_data
                if key not in ["observations", "actions", "terminals"]
            }
            self.data["observations"] = loaded_data["observations"]
            self.data["actions"] = loaded_data["actions"]
            self.data["terminals"] = loaded_data["terminals"]
            terminals = self.data["terminals"]

        print("Verifying episode length consistency...")
        terminal_indices = np.where(terminals == 1)[0]
        if len(terminal_indices) < 2:
            print("Warning: Not enough terminal points to verify episode length.")
            # Assume the whole dataset is one episode
            self.episode_length = len(terminals)
            self.num_episodes = 1
        else:
            distances = np.diff(terminal_indices)
            min_dist, max_dist = np.min(distances), np.max(distances)
            # Raise explicitly instead of using `assert`, which is stripped
            # under `python -O`; the exception type stays AssertionError so
            # existing callers keep working.
            if min_dist != max_dist:
                raise AssertionError(
                    f"Inconsistent episode lengths detected: min={min_dist}, max={max_dist}"
                )
            # BUGFIX: np.diff only covers gaps BETWEEN terminals, so the first
            # episode (timesteps 0..terminal_indices[0]) was never checked.
            if terminal_indices[0] + 1 != min_dist:
                raise AssertionError(
                    f"Inconsistent episode lengths detected: first episode has "
                    f"length {terminal_indices[0] + 1}, expected {min_dist}"
                )
            self.episode_length = int(min_dist)
            # BUGFIX: k terminal flags delimit k episodes, not k-1; the old
            # `len(distances)` undercounted the episode count by one.
            self.num_episodes = len(terminal_indices)

        self.total_timesteps = len(terminals)
        print(f"Data loaded successfully. Total timesteps: {self.total_timesteps}")
        print(
            f"Found {self.num_episodes} episodes with a consistent length of {self.episode_length}."
        )

@dataclasses.dataclass(frozen=True)
class GCSamplingConfig:
    """Configuration for the goal-conditioned sampling data source.

    Attributes:
        trajs_obs_len: Number of timesteps in the sampled observation trajectory.
        trajs_act_len: Number of timesteps in the sampled action trajectory.
        max_dist_goal: Maximum timestep distance between a state and its goal;
            also used as the normalization constant for the returned distance,
            so it must be positive.
        is_act_vel: If True, actions at/after the goal offset are zeroed out.
    """

    trajs_obs_len: int
    trajs_act_len: int
    max_dist_goal: int
    is_act_vel: bool = True

    def __post_init__(self):
        """Validate sampling parameters at construction time.

        `max_dist_goal` is a divisor (distance normalization) and both
        trajectory lengths size `np.arange` windows, so non-positive values
        would silently produce broken samples much later in the pipeline.

        Raises:
            ValueError: If any length/distance parameter is not positive.
        """
        if self.trajs_obs_len <= 0:
            raise ValueError(f"trajs_obs_len must be positive, got {self.trajs_obs_len}")
        if self.trajs_act_len <= 0:
            raise ValueError(f"trajs_act_len must be positive, got {self.trajs_act_len}")
        if self.max_dist_goal <= 0:
            raise ValueError(f"max_dist_goal must be positive, got {self.max_dist_goal}")


class GCSamplingSource(grain.RandomAccessDataSource):
    """
    A grain DataSource for goal-conditioned sub-trajectory sampling.

    The dataset is exposed as a virtual collection whose items are the valid
    starting timesteps. Given an index into that collection, __getitem__
    deterministically builds one complete sample tuple (current observation,
    goal observation, normalized distance, observation trajectory, action
    trajectory, random other observation), ready for batching.
    """

    def __init__(self, trajectory_data: TrajectoryData, config: GCSamplingConfig):
        """
        Initializes the sampling data source.

        Args:
            trajectory_data: A TrajectoryData object holding the dataset in memory.
            config: A GCSamplingConfig object with sampling parameters.
        """
        self.config = config
        self.data = trajectory_data.data
        self.episode_length = trajectory_data.episode_length
        self.num_episodes = trajectory_data.num_episodes
        self.total_timesteps = trajectory_data.total_timesteps

        # Hold direct references to the raw arrays; the jitted sampler wants
        # plain ndarrays rather than dict lookups.
        self.observations = self.data["observations"]
        self.actions = self.data["actions"]

        # Enumerate every timestep that can start a full sub-trajectory.
        self._precompute_valid_indices()

    def _precompute_valid_indices(self):
        """
        Calculates all indices `t` that are valid starting points for a trajectory.

        A starting point is rejected when it sits so close to its episode's end
        that a full observation or action trajectory would run past it.
        """
        print("Pre-computing valid sampling indices...")
        episode_ends = np.where(self.data["terminals"] == 1)[0]

        # Longest window any sample reads forward from its starting timestep.
        lookahead = max(self.config.trajs_obs_len, self.config.trajs_act_len)

        # Start with every timestep allowed, then knock out the tail of each episode.
        allowed = np.ones(self.total_timesteps, dtype=np.bool_)
        for end in episode_ends:
            window_start = end - lookahead + 1
            if window_start < 0:
                window_start = 0
            allowed[window_start : end + 1] = False

        self.valid_start_indices = np.where(allowed)[0]
        print(f"Found {len(self.valid_start_indices)} valid starting points.")

    def __len__(self) -> int:
        """Returns the total number of valid starting points we can sample from."""
        return self.valid_start_indices.shape[0]

    def __getitem__(
        self, index: int
    ) -> Tuple[np.ndarray, np.ndarray, float, np.ndarray, np.ndarray, np.ndarray]:
        """
        Generates a single, processed sample for the given index by delegating
        to the Numba-jitted sampler.
        """
        cfg = self.config
        # The very first call pays the JIT compilation cost; later calls are fast.
        return _generate_sample_numba(
            index,
            self.valid_start_indices,
            self.observations,
            self.actions,
            self.episode_length,
            self.total_timesteps,
            cfg.max_dist_goal,
            cfg.trajs_obs_len,
            cfg.trajs_act_len,
            cfg.is_act_vel,
        )
