import dataclasses
from functools import partial
from typing import Any

import jax
import jax.numpy as jnp
import numpy as np
from flax.core.frozen_dict import FrozenDict


def get_size(data):
    """Return the size of the dataset (the largest leaf length in the tree)."""
    lengths = jax.tree_util.tree_map(len, data)
    return max(jax.tree_util.tree_leaves(lengths))


@partial(jax.jit, static_argnames=('padding',))
def random_crop(img, crop_from, padding):
    """Crop an image back to its original shape out of an edge-padded copy.

    Args:
        img: Image to crop, with spatial dimensions first and channels last.
        crop_from: Start coordinates of the crop within the padded image.
        padding: Number of edge-replicated pixels added on each spatial side.
    """
    pad_widths = ((padding, padding), (padding, padding), (0, 0))
    padded = jnp.pad(img, pad_widths, mode='edge')
    return jax.lax.dynamic_slice(padded, crop_from, img.shape)


@partial(jax.jit, static_argnames=('padding',))
def batched_random_crop(imgs, crop_froms, padding):
    """Apply `random_crop` to a batch of images with per-image crop offsets."""
    cropper = jax.vmap(random_crop, in_axes=(0, 0, None))
    return cropper(imgs, crop_froms, padding)


class Dataset(FrozenDict):
    """Dataset class.

    This class supports both regular datasets (i.e., storing both observations and next_observations) and
    compact datasets (i.e., storing only observations). It assumes 'observations' is always present in the keys. If
    'next_observations' is not present, it will be inferred from 'observations' by shifting the indices by 1. In this
    case, set 'valids' appropriately to mask out the last state of each trajectory.
    """

    @classmethod
    def create(cls, freeze=True, **fields):
        """Create a dataset from keyword fields.

        Args:
            freeze: Whether to mark the underlying arrays read-only.
            **fields: Keys and values of the dataset.
        """
        assert 'observations' in fields
        if freeze:
            for leaf in jax.tree_util.tree_leaves(fields):
                leaf.setflags(write=False)
        return cls(fields)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.size = get_size(self._dict)
        if 'valids' in self._dict:
            # Indices of transitions that are allowed to be sampled (valids > 0).
            self.valid_idxs = np.nonzero(self['valids'] > 0)[0]

    def get_random_idxs(self, num_idxs):
        """Return `num_idxs` random indices."""
        if 'valids' not in self._dict:
            return np.random.randint(self.size, size=num_idxs)
        picks = np.random.randint(len(self.valid_idxs), size=num_idxs)
        return self.valid_idxs[picks]

    def sample(self, batch_size, idxs=None):
        """Sample a batch of transitions."""
        return self.get_subset(self.get_random_idxs(batch_size) if idxs is None else idxs)

    def get_subset(self, idxs):
        """Return a subset of the dataset given the indices."""
        subset = jax.tree_util.tree_map(lambda leaf: leaf[idxs], self._dict)
        if 'next_observations' not in subset:
            # Infer next observations by shifting indices, clamped at the dataset end.
            subset['next_observations'] = self._dict['observations'][np.minimum(idxs + 1, self.size - 1)]
        return subset


class ReplayBuffer(Dataset):
    """Replay buffer class.

    This class extends Dataset with a write pointer so transitions can be
    appended in place (FIFO, wrapping around at `max_size`).
    """

    @classmethod
    def create(cls, transition, size):
        """Create a replay buffer from an example transition.

        Args:
            transition: Example transition (dict).
            size: Size of the replay buffer.
        """

        def allocate(example):
            example = np.array(example)
            return np.zeros((size, *example.shape), dtype=example.dtype)

        return cls(jax.tree_util.tree_map(allocate, transition))

    @classmethod
    def create_from_initial_dataset(cls, init_dataset, size):
        """Create a replay buffer pre-filled with an initial dataset.

        Args:
            init_dataset: Initial dataset.
            size: Size of the replay buffer.
        """

        def allocate(init_buffer):
            buffer = np.zeros((size, *init_buffer.shape[1:]), dtype=init_buffer.dtype)
            buffer[: len(init_buffer)] = init_buffer
            return buffer

        dataset = cls(jax.tree_util.tree_map(allocate, init_dataset))
        # Start appending right after the pre-filled prefix.
        dataset.size = dataset.pointer = get_size(init_dataset)
        return dataset

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.max_size = get_size(self._dict)
        self.size = 0
        self.pointer = 0

    def add_transition(self, transition):
        """Add a transition to the replay buffer."""

        def write(buffer, new_element):
            buffer[self.pointer] = new_element

        jax.tree_util.tree_map(write, self._dict, transition)
        self.pointer = (self.pointer + 1) % self.max_size
        # Once the pointer wraps, `size` stays pinned at `max_size`.
        self.size = max(self.pointer, self.size)

    def clear(self):
        """Clear the replay buffer."""
        self.size = self.pointer = 0


@dataclasses.dataclass
class GCDataset:
    """Dataset class for goal-conditioned RL.

    This class provides a method to sample a batch of transitions with goals (value_goals and actor_goals) from the
    dataset. The goals are sampled from the current state, future states in the same trajectory, and random states.
    It also supports frame stacking and random-cropping image augmentation.

    It reads the following keys from the config:
    - discount: Discount factor for geometric sampling.
    - value_p_curgoal: Probability of using the current state as the value goal.
    - value_p_trajgoal: Probability of using a future state in the same trajectory as the value goal.
    - value_p_randomgoal: Probability of using a random state as the value goal.
    - value_geom_sample: Whether to use geometric sampling for future value goals.
    - actor_p_curgoal: Probability of using the current state as the actor goal.
    - actor_p_trajgoal: Probability of using a future state in the same trajectory as the actor goal.
    - actor_p_randomgoal: Probability of using a random state as the actor goal.
    - actor_geom_sample: Whether to use geometric sampling for future actor goals.
    - gc_negative: Whether to use '0 if s == g else -1' (True) or '1 if s == g else 0' (False) as the reward.
    - p_aug: Probability of applying image augmentation.
    - frame_stack: Number of frames to stack.

    Attributes:
        dataset: Dataset object.
        config: Configuration dictionary.
        preprocess_frame_stack: Whether to preprocess frame stacks. If False, frame stacks are computed on-the-fly. This
            saves memory but may slow down training.
    """

    dataset: Dataset
    config: Any
    preprocess_frame_stack: bool = True

    def __post_init__(self):
        self.size = self.dataset.size

        # Pre-compute trajectory boundaries.
        (self.terminal_locs,) = np.nonzero(self.dataset['terminals'] > 0)
        self.initial_locs = np.concatenate([[0], self.terminal_locs[:-1] + 1])
        # The dataset must end with a terminal transition so every index has a
        # well-defined final state in its trajectory.
        assert self.terminal_locs[-1] == self.size - 1

        # Assert probabilities sum to 1.
        assert np.isclose(
            self.config['value_p_curgoal'] + self.config['value_p_trajgoal'] + self.config['value_p_randomgoal'], 1.0
        )
        assert np.isclose(
            self.config['actor_p_curgoal'] + self.config['actor_p_trajgoal'] + self.config['actor_p_randomgoal'], 1.0
        )

        if self.config['frame_stack'] is not None:
            # Only support compact (observation-only) datasets.
            assert 'next_observations' not in self.dataset
            if self.preprocess_frame_stack:
                # Precompute stacked frames for all indices once, trading
                # memory for faster sampling later.
                stacked_observations = self.get_stacked_observations(np.arange(self.size))
                self.dataset = Dataset(self.dataset.copy(dict(observations=stacked_observations)))

    def sample(self, batch_size, idxs=None, evaluation=False):
        """Sample a batch of transitions with goals.

        This method samples a batch of transitions with goals (value_goals and actor_goals) from the dataset. They are
        stored in the keys 'value_goals' and 'actor_goals', respectively. It also computes the 'rewards' and 'masks'
        based on the indices of the goals.

        Args:
            batch_size: Batch size.
            idxs: Indices of the transitions to sample. If None, random indices are sampled.
            evaluation: Whether to sample for evaluation. If True, image augmentation is not applied.
        """
        if idxs is None:
            idxs = self.dataset.get_random_idxs(batch_size)

        batch = self.dataset.sample(batch_size, idxs)
        if self.config['frame_stack'] is not None:
            # Recompute (or re-fetch) observations so they carry the frame stack.
            batch['observations'] = self.get_observations(idxs)
            batch['next_observations'] = self.get_observations(idxs + 1)

        value_goal_idxs = self.sample_goals(
            idxs,
            self.config['value_p_curgoal'],
            self.config['value_p_trajgoal'],
            self.config['value_p_randomgoal'],
            self.config['value_geom_sample'],
        )
        actor_goal_idxs = self.sample_goals(
            idxs,
            self.config['actor_p_curgoal'],
            self.config['actor_p_trajgoal'],
            self.config['actor_p_randomgoal'],
            self.config['actor_geom_sample'],
        )

        batch['value_goals'] = self.get_observations(value_goal_idxs)
        batch['actor_goals'] = self.get_observations(actor_goal_idxs)
        # "Success" means the sampled goal is the current state itself.
        successes = (idxs == value_goal_idxs).astype(float)
        batch['masks'] = 1.0 - successes
        batch['rewards'] = successes - (1.0 if self.config['gc_negative'] else 0.0)

        if self.config['p_aug'] is not None and not evaluation:
            # Augmentation is applied to the whole batch at once with
            # probability p_aug (not independently per sample).
            if np.random.rand() < self.config['p_aug']:
                self.augment(batch, ['observations', 'next_observations', 'value_goals', 'actor_goals'])

        return batch

    def sample_goals(self, idxs, p_curgoal, p_trajgoal, p_randomgoal, geom_sample):
        """Sample goals for the given indices."""
        batch_size = len(idxs)

        # Random goals.
        random_goal_idxs = self.dataset.get_random_idxs(batch_size)

        # Goals from the same trajectory (excluding the current state, unless it is the final state).
        final_state_idxs = self.terminal_locs[np.searchsorted(self.terminal_locs, idxs)]
        if geom_sample:
            # Geometric sampling.
            offsets = np.random.geometric(p=1 - self.config['discount'], size=batch_size)  # in [1, inf)
            traj_goal_idxs = np.minimum(idxs + offsets, final_state_idxs)
        else:
            # Uniform sampling: interpolate between the next state (idxs + 1,
            # clamped to the final state) and the final state of the trajectory.
            distances = np.random.rand(batch_size)  # in [0, 1)
            traj_goal_idxs = np.round(
                (np.minimum(idxs + 1, final_state_idxs) * distances + final_state_idxs * (1 - distances))
            ).astype(int)
        if p_curgoal == 1.0:
            # Special-case to avoid dividing by zero (1 - p_curgoal) below.
            goal_idxs = idxs
        else:
            # Choose trajectory vs. random goals with probability renormalized
            # over the non-current-goal mass (p_trajgoal + p_randomgoal).
            goal_idxs = np.where(
                np.random.rand(batch_size) < p_trajgoal / (1.0 - p_curgoal), traj_goal_idxs, random_goal_idxs
            )

            # Goals at the current state.
            goal_idxs = np.where(np.random.rand(batch_size) < p_curgoal, idxs, goal_idxs)

        return goal_idxs

    def augment(self, batch, keys):
        """Apply image augmentation to the given keys."""
        padding = 3
        batch_size = len(batch[keys[0]])
        # Crop offsets in [0, 2 * padding] per spatial axis; the channel axis
        # offset is always 0. The same offsets are shared across all keys so
        # observation/goal pairs stay spatially aligned.
        crop_froms = np.random.randint(0, 2 * padding + 1, (batch_size, 2))
        crop_froms = np.concatenate([crop_froms, np.zeros((batch_size, 1), dtype=np.int64)], axis=1)
        for key in keys:
            # Only 4-D leaves (batched H x W x C images) are cropped; other
            # leaves (e.g., state vectors) pass through unchanged.
            batch[key] = jax.tree_util.tree_map(
                lambda arr: np.array(batched_random_crop(arr, crop_froms, padding)) if len(arr.shape) == 4 else arr,
                batch[key],
            )

    def get_observations(self, idxs):
        """Return the observations for the given indices."""
        if self.config['frame_stack'] is None or self.preprocess_frame_stack:
            return jax.tree_util.tree_map(lambda arr: arr[idxs], self.dataset['observations'])
        else:
            return self.get_stacked_observations(idxs)

    def get_stacked_observations(self, idxs):
        """Return the frame-stacked observations for the given indices."""
        # Clamp look-back indices at the start of each trajectory so early
        # frames are repeated instead of leaking from the previous trajectory.
        initial_state_idxs = self.initial_locs[np.searchsorted(self.initial_locs, idxs, side='right') - 1]
        rets = []
        for i in reversed(range(self.config['frame_stack'])):
            cur_idxs = np.maximum(idxs - i, initial_state_idxs)
            rets.append(jax.tree_util.tree_map(lambda arr: arr[cur_idxs], self.dataset['observations']))
        # Frames are stacked along the last (channel) axis, oldest first.
        return jax.tree_util.tree_map(lambda *args: np.concatenate(args, axis=-1), *rets)


@dataclasses.dataclass
class HGCDataset(GCDataset):
    """Dataset class for hierarchical goal-conditioned RL.

    This class extends GCDataset to support high-level actor goals and prediction targets. It reads the following
    additional key from the config:
    - subgoal_steps: Subgoal steps (i.e., the number of steps to reach the low-level goal).
    """

    def sample(self, batch_size, idxs=None, evaluation=False):
        """Sample a batch of transitions with goals.

        This method samples a batch of transitions with goals from the dataset. The goals are stored in the keys
        'value_goals', 'low_actor_goals', 'high_actor_goals', and 'high_actor_targets'. It also computes the 'rewards'
        and 'masks' based on the indices of the goals.

        Args:
            batch_size: Batch size.
            idxs: Indices of the transitions to sample. If None, random indices are sampled.
            evaluation: Whether to sample for evaluation. If True, image augmentation is not applied.
        """
        if idxs is None:
            idxs = self.dataset.get_random_idxs(batch_size)

        batch = self.dataset.sample(batch_size, idxs)
        if self.config['frame_stack'] is not None:
            # Recompute (or re-fetch) observations so they carry the frame stack.
            batch['observations'] = self.get_observations(idxs)
            batch['next_observations'] = self.get_observations(idxs + 1)

        # Sample value goals.
        value_goal_idxs = self.sample_goals(
            idxs,
            self.config['value_p_curgoal'],
            self.config['value_p_trajgoal'],
            self.config['value_p_randomgoal'],
            self.config['value_geom_sample'],
        )
        batch['value_goals'] = self.get_observations(value_goal_idxs)

        # "Success" means the sampled value goal is the current state itself.
        successes = (idxs == value_goal_idxs).astype(float)
        batch['masks'] = 1.0 - successes
        batch['rewards'] = successes - (1.0 if self.config['gc_negative'] else 0.0)

        # Set low-level actor goals: exactly `subgoal_steps` ahead, clamped to
        # the final state of the trajectory.
        final_state_idxs = self.terminal_locs[np.searchsorted(self.terminal_locs, idxs)]
        low_goal_idxs = np.minimum(idxs + self.config['subgoal_steps'], final_state_idxs)
        batch['low_actor_goals'] = self.get_observations(low_goal_idxs)

        # Sample high-level actor goals and set prediction targets.
        # High-level future goals.
        if self.config['actor_geom_sample']:
            # Geometric sampling.
            offsets = np.random.geometric(p=1 - self.config['discount'], size=batch_size)  # in [1, inf)
            high_traj_goal_idxs = np.minimum(idxs + offsets, final_state_idxs)
        else:
            # Uniform sampling between the next state and the trajectory end.
            distances = np.random.rand(batch_size)  # in [0, 1)
            high_traj_goal_idxs = np.round(
                (np.minimum(idxs + 1, final_state_idxs) * distances + final_state_idxs * (1 - distances))
            ).astype(int)
        # The target is `subgoal_steps` ahead but never beyond the goal itself.
        high_traj_target_idxs = np.minimum(idxs + self.config['subgoal_steps'], high_traj_goal_idxs)

        # High-level random goals.
        high_random_goal_idxs = self.dataset.get_random_idxs(batch_size)
        # For random goals the target is simply `subgoal_steps` ahead in the
        # current trajectory (clamped to its final state).
        high_random_target_idxs = np.minimum(idxs + self.config['subgoal_steps'], final_state_idxs)

        # Pick between high-level future goals and random goals.
        pick_random = np.random.rand(batch_size) < self.config['actor_p_randomgoal']
        high_goal_idxs = np.where(pick_random, high_random_goal_idxs, high_traj_goal_idxs)
        high_target_idxs = np.where(pick_random, high_random_target_idxs, high_traj_target_idxs)

        batch['high_actor_goals'] = self.get_observations(high_goal_idxs)
        batch['high_actor_targets'] = self.get_observations(high_target_idxs)

        if self.config['p_aug'] is not None and not evaluation:
            # Augmentation is applied to the whole batch at once with
            # probability p_aug (not independently per sample).
            if np.random.rand() < self.config['p_aug']:
                self.augment(
                    batch,
                    [
                        'observations',
                        'next_observations',
                        'value_goals',
                        'low_actor_goals',
                        'high_actor_goals',
                        'high_actor_targets',
                    ],
                )

        return batch


# ===========================================================================
#  JSON-driven key-state relabel for Point-Maze (and similar) datasets
#  Place *after* the existing Dataset / GCDataset / HGCDataset definitions
# ===========================================================================

import json
import textwrap
from pathlib import Path
from typing import Dict, Callable

import numpy as np


# ---------------------------------------------------------------------------
#  1) Mixin: precompute key-state lookup tables (incl. the ks_any union) —
#     uses a -1 sentinel for "no key state ahead"
# ---------------------------------------------------------------------------
from typing import Dict, Callable
import numpy as np
import textwrap, json
from pathlib import Path

def _load_keystate_json(json_path: str):
    js = json.loads(Path(json_path).read_text())
    ks_funcs = {}
    for name, src in js["discriminators"].items():
        ks_name = f"ks_{name}"
        func_code = textwrap.dedent(
            src.replace("\\n", "\n")
               .replace("&&", " and ")
               .replace("||", " or ")
        )
        local_ns = {}
        exec(func_code, {}, local_ns)
        ks_funcs[ks_name] = next(iter(local_ns.values()))
    return ks_funcs


class _KeyStateMixin:
    ks_funcs: Dict[str, Callable]
    ks_tables: Dict[str, np.ndarray]
    default_ks: str

    # ---------- public ----------
    def _init_keystate(self, json_path: str, default_ks: str):
        self.ks_funcs = _load_keystate_json(json_path)
        self.default_ks = default_ks
        self._build_keystate_tables()

    def _nearest_keystate(self, idxs: np.ndarray, ks_name: str) -> np.ndarray:
        """Return nearest future key-state index for each idx (or -1 if none)."""
        return self.ks_tables[ks_name][idxs]

    # ---------- internal ----------
    def _build_keystate_tables(self):
        obs = self.dataset["observations"]      # shape (N, obs_dim)
        terminals = self.dataset["terminals"]
        N = len(obs)

        # 以 -1 作为未命中哨兵，避免误把段尾当关键状态
        self.ks_tables = {k: np.full(N, -1, dtype=np.int32) for k in self.ks_funcs}
        self.ks_tables["ks_any"] = np.full(N, -1, dtype=np.int32)

        term_idxs = np.nonzero(terminals > 0)[0]
        seg_start = 0
        for seg_end in term_idxs:               # inclusive
            latest = {k: -1 for k in self.ks_funcs}
            latest_any = -1
            # 逆序扫描，记录最近关键状态
            for i in range(seg_end, seg_start - 1, -1):
                cur  = obs[i].tolist()
                prev = obs[i - 1].tolist() if i - 1 >= seg_start else None
                nxt  = obs[i + 1].tolist() if i + 1 <= seg_end   else None

                hit_any = False
                for ks, func in self.ks_funcs.items():
                    try:
                        if func(cur, prev, nxt):
                            latest[ks] = i
                            hit_any = True
                    except Exception:
                        # 判别器异常视为未命中
                        pass
                if hit_any:
                    latest_any = i

                # 写回所有表（若仍为 -1，表示未来都未命中）
                for ks in self.ks_funcs:
                    self.ks_tables[ks][i] = latest[ks]
                self.ks_tables["ks_any"][i] = latest_any
            seg_start = seg_end + 1


# ---------------------------------------------------------------------------
#  2) KeyJsonGCDataset: on the same-trajectory-future branch, relabel goals
#     per ks_prob/ks_horizon — with the -1 sentinel check
# ---------------------------------------------------------------------------
import dataclasses
from typing import Any

@dataclasses.dataclass
class KeyJsonGCDataset(_KeyStateMixin, GCDataset):
    """GCDataset with key-state goal relabeling driven by a JSON file.

    Tables are built with a -1 sentinel; sampling only relabels a goal when
    the nearest future key state exists (>= 0) and lies within `ks_horizon`.
    """

    json_path: str = "./key_states/PointMaze.json"
    use_keystate: bool = True
    keystate_name: str = "ks_any"
    ks_prob: float = 0.3
    ks_horizon: int = 20

    def __post_init__(self):
        super().__post_init__()  # GCDataset initialization.
        if self.use_keystate:
            self._init_keystate(self.json_path, self.keystate_name)

    def sample_goals(self, idxs, p_curgoal, p_trajgoal, p_randomgoal, geom_sample):
        """Same interface as GCDataset.sample_goals.

        With probability `ks_prob`, a goal is replaced by the nearest future
        key state, provided it was hit (>= 0) and is within `ks_horizon`
        steps; otherwise the parent (future/random) strategy is used.
        """
        if not self.use_keystate or self.ks_prob <= 0:
            return super().sample_goals(idxs, p_curgoal, p_trajgoal, p_randomgoal, geom_sample)

        # Parent strategy first, as the fallback for every index.
        fallback_idxs = GCDataset.sample_goals(self, idxs, p_curgoal, p_trajgoal, p_randomgoal, geom_sample)

        # Nearest key-state index per anchor (-1 means no future hit).
        ks_future = self._nearest_keystate(idxs, self.keystate_name).astype(np.int32)

        # Relabel only hits within the horizon.
        eligible = (ks_future >= 0) & ((ks_future - idxs) <= self.ks_horizon)
        use_ks = (np.random.rand(len(idxs)) < self.ks_prob) & eligible

        return np.where(use_ks, ks_future, fallback_idxs).astype(np.int32)


# ---------------------------------------------------------------------------
#  3) KeyJsonHGCDataset: same sentinel check (affects value_goals)
# ---------------------------------------------------------------------------
@dataclasses.dataclass
class KeyJsonHGCDataset(_KeyStateMixin, HGCDataset):
    """HGCDataset with JSON-driven key-state relabeling.

    Tables are built with a -1 sentinel; sampling only relabels a goal when
    the nearest future key state exists (>= 0) and lies within `ks_horizon`.
    """

    json_path: str = "./key_states/PointMaze.json"
    use_keystate: bool = True
    keystate_name: str = "ks_any"
    ks_prob: float = 0.5
    ks_horizon: int = 20

    def __post_init__(self):
        super().__post_init__()  # HGCDataset initialization.
        if self.use_keystate:
            self._init_keystate(self.json_path, self.keystate_name)

    def sample_goals(self, idxs, p_curgoal, p_trajgoal, p_randomgoal, geom_sample):
        """Relabel goals with nearby key states; fall back to the parent strategy."""
        if not self.use_keystate or self.ks_prob <= 0:
            return super().sample_goals(idxs, p_curgoal, p_trajgoal, p_randomgoal, geom_sample)

        # Parent fallback (HGCDataset's value-goal sampling).
        fallback = HGCDataset.sample_goals(self, idxs, p_curgoal, p_trajgoal, p_randomgoal, geom_sample)

        ks_future = self._nearest_keystate(idxs, self.keystate_name).astype(np.int32)
        # Require a hit (>= 0) within the horizon.
        eligible = (ks_future >= 0) & ((ks_future - idxs) <= self.ks_horizon)
        use_ks = (np.random.rand(len(idxs)) < self.ks_prob) & eligible

        return np.where(use_ks, ks_future, fallback).astype(np.int32)



# ===========================================================================
#  Cross-trajectory data-augmentation mixin & concrete datasets
# ===========================================================================

import dataclasses
from typing import Any
import numpy as np

# ---------------------------------------------------------------------------
# 1) Generic mixin: sample with the parent strategy first, then replace goals
#    across trajectories with probability cross_p
# ---------------------------------------------------------------------------
class CrossTrajectoryMixin:
    """Cross-trajectory goal augmentation on top of GCDataset/HGCDataset.

    Subclasses must implement `_sample_cross_goal(idx) -> int`, returning an
    index belonging to a different trajectory than `idx`.
    """

    # Probability of replacing a sampled goal with a cross-trajectory one
    # (0 disables the augmentation). Inherited by the final dataclass and
    # overridable, e.g. from YAML.
    cross_p: float = 0.0

    # ---------- override sample_goals ----------
    def sample_goals(self, idxs, p_curgoal, p_trajgoal, p_randomgoal, geom_sample):
        """Sample goals with the parent strategy, then swap in cross-trajectory
        goals with probability `cross_p` per sample."""
        goal_idxs = super().sample_goals(idxs, p_curgoal, p_trajgoal, p_randomgoal, geom_sample).copy()

        if self.cross_p <= 0:
            return goal_idxs

        # Replace each flagged goal with a cross-trajectory one.
        replace_mask = np.random.rand(len(idxs)) < self.cross_p
        for b in np.flatnonzero(replace_mask):
            goal_idxs[b] = self._sample_cross_goal(idxs[b])
        return goal_idxs.astype(np.int32)

    # ---------- subclass hook ----------
    def _sample_cross_goal(self, idx: int) -> int:  # pylint: disable=unused-argument
        """Return an index in a different trajectory than `idx`."""
        raise NotImplementedError


import dataclasses
import numpy as np
from sklearn.cluster import MiniBatchKMeans

# CrossTrajectoryMixin, GCDataset, and HGCDataset are assumed to be defined
# earlier in this file.

# ---------------------------
# 1) Random cross-trajectory stitching (Rand-Stitch)
# ---------------------------
@dataclasses.dataclass
class RandStitchGCDataset(CrossTrajectoryMixin, GCDataset):
    """GCDataset with random cross-trajectory goal stitching (Rand-Stitch)."""

    cross_p: float = 0.5  # Probability of replacing a goal across trajectories.

    def __post_init__(self):
        super().__post_init__()
        # Trajectory id of every transition: index i belongs to trajectory t
        # iff terminal_locs[t - 1] < i <= terminal_locs[t], which is exactly
        # what searchsorted computes. This replaces the original O(N) Python
        # loop with a single vectorized call.
        self.traj_id = np.searchsorted(self.terminal_locs, np.arange(self.size)).astype(np.int32)
        # The dataset ends with a terminal, so the trajectory count equals the
        # number of terminal states.
        self.num_trajs = len(self.terminal_locs)

    def _sample_cross_goal(self, idx: int) -> int:
        """Return a uniformly random index from a different trajectory."""
        my_tid = self.traj_id[idx]
        # With a single trajectory no cross-trajectory index exists; fall back
        # to a uniform index instead of looping forever.
        if self.num_trajs <= 1:
            return int(np.random.randint(self.size))
        while True:
            j = np.random.randint(self.size)
            if self.traj_id[j] != my_tid:
                return j


@dataclasses.dataclass
class RandStitchHGCDataset(CrossTrajectoryMixin, HGCDataset):
    """HGCDataset with random cross-trajectory goal stitching (Rand-Stitch)."""

    cross_p: float = 0.5  # Probability of replacing a goal across trajectories.

    def __post_init__(self):
        super().__post_init__()
        # Vectorized trajectory ids: index i belongs to trajectory t iff
        # terminal_locs[t - 1] < i <= terminal_locs[t] (replaces the original
        # O(N) Python loop).
        self.traj_id = np.searchsorted(self.terminal_locs, np.arange(self.size)).astype(np.int32)
        # The dataset ends with a terminal, so the trajectory count equals the
        # number of terminal states.
        self.num_trajs = len(self.terminal_locs)

    def _sample_cross_goal(self, idx: int) -> int:
        """Return a uniformly random index from a different trajectory."""
        my_tid = self.traj_id[idx]
        # Guard against an infinite loop when only one trajectory exists.
        if self.num_trajs <= 1:
            return int(np.random.randint(self.size))
        while True:
            j = np.random.randint(self.size)
            if self.traj_id[j] != my_tid:
                return j


# ---------------------------------
# 2) Cluster-based cross-trajectory stitching (Cluster-Stitch/OCBC)
# ---------------------------------
@dataclasses.dataclass
class ClusterStitchGCDataset(CrossTrajectoryMixin, GCDataset):
    """GCDataset with cluster-based cross-trajectory stitching (Cluster-Stitch/OCBC).

    States are clustered with MiniBatchKMeans. A cross goal is drawn by first
    picking a waypoint in the same cluster as the anchor state (possibly from
    another trajectory) and then sampling a future state of the waypoint's
    trajectory.
    """

    cross_p: float = 0.6
    n_clusters: int = 50
    cluster_seed: int = 0
    geom_sample_cross: bool = False  # True: geometric future offsets; False: uniform.

    def __post_init__(self):
        super().__post_init__()

        # Vectorized trajectory ids (replaces the original O(N) Python loop).
        self.traj_id = np.searchsorted(self.terminal_locs, np.arange(self.size)).astype(np.int32)
        self.num_trajs = len(self.terminal_locs)
        # The last index of trajectory t is exactly terminal_locs[t]; no need
        # for the original O(N * num_trajs) flatnonzero scan per trajectory.
        self.traj_last = {t: int(self.terminal_locs[t]) for t in range(self.num_trajs)}

        # Cluster the flattened observations. NOTE(review): assumes
        # 'observations' is a flat array, not a nested dict of arrays.
        obs = self.dataset["observations"].reshape(self.size, -1).astype(np.float32)
        km = MiniBatchKMeans(
            n_clusters=self.n_clusters,
            random_state=self.cluster_seed,
            batch_size=1024,
            n_init="auto",
        )
        self.labels = km.fit_predict(obs)
        self.cluster2idx = {c: np.flatnonzero(self.labels == c) for c in range(self.n_clusters)}

    def _sample_cross_goal(self, idx: int) -> int:
        """Pick a same-cluster waypoint, then a future state of its trajectory."""
        # 1) A waypoint in the same cluster as the anchor (may cross trajectories).
        cluster = int(self.labels[idx])
        members = self.cluster2idx[cluster]
        # Guard against an empty cluster (KMeans may leave clusters unused).
        if members.size == 0:
            waypoint = np.random.randint(self.size)
        else:
            waypoint = np.random.choice(members)

        traj_end = int(self.traj_last[int(self.traj_id[waypoint])])

        # 2) A state between the waypoint and its trajectory's final state.
        if self.geom_sample_cross:
            offset = np.random.geometric(p=1 - self.config["discount"])
            goal = min(waypoint + offset, traj_end)
        else:
            goal = np.random.randint(waypoint, traj_end + 1)
        return int(goal)


@dataclasses.dataclass
class ClusterStitchHGCDataset(CrossTrajectoryMixin, HGCDataset):
    """HGCDataset with cluster-based cross-trajectory stitching (Cluster-Stitch/OCBC)."""

    cross_p: float = 0.6
    n_clusters: int = 50
    cluster_seed: int = 0
    geom_sample_cross: bool = False  # True: geometric future offsets; False: uniform.

    def __post_init__(self):
        super().__post_init__()

        # Vectorized trajectory ids (replaces the original O(N) Python loop).
        self.traj_id = np.searchsorted(self.terminal_locs, np.arange(self.size)).astype(np.int32)
        self.num_trajs = len(self.terminal_locs)
        # The last index of trajectory t is exactly terminal_locs[t].
        self.traj_last = {t: int(self.terminal_locs[t]) for t in range(self.num_trajs)}

        # Cluster the flattened observations. NOTE(review): assumes
        # 'observations' is a flat array, not a nested dict of arrays.
        obs = self.dataset["observations"].reshape(self.size, -1).astype(np.float32)
        km = MiniBatchKMeans(
            n_clusters=self.n_clusters,
            random_state=self.cluster_seed,
            batch_size=1024,
            n_init="auto",
        )
        self.labels = km.fit_predict(obs)
        self.cluster2idx = {c: np.flatnonzero(self.labels == c) for c in range(self.n_clusters)}

    def _sample_cross_goal(self, idx: int) -> int:
        """Pick a same-cluster waypoint, then a future state of its trajectory."""
        cluster = int(self.labels[idx])
        members = self.cluster2idx[cluster]
        # Guard against an empty cluster (KMeans may leave clusters unused).
        if members.size == 0:
            waypoint = np.random.randint(self.size)
        else:
            waypoint = np.random.choice(members)

        traj_end = int(self.traj_last[int(self.traj_id[waypoint])])

        if self.geom_sample_cross:
            offset = np.random.geometric(p=1 - self.config["discount"])
            goal = min(waypoint + offset, traj_end)
        else:
            goal = np.random.randint(waypoint, traj_end + 1)
        return int(goal)



import dataclasses
import numpy as np

# _KeyStateMixin, GCDataset, HGCDataset, KeyJsonGCDataset, and
# KeyJsonHGCDataset are assumed to be defined earlier in this file.

import dataclasses
import numpy as np

# Make sure the following are defined in (or imported into) this file:
# _KeyStateMixin, GCDataset, KeyJsonGCDataset

@dataclasses.dataclass
class DecoupledKeyJsonGCDataset(_KeyStateMixin, GCDataset):
    """GC dataset with decoupled goal sources.

    Critic goals (value_goals) use the uniform-future scheme from
    GCDataset.sample_goals, while actor goals use the JSON key-state
    relabeling from KeyJsonGCDataset.sample_goals.
    """

    json_path:     str   = "./key_states/PointMaze.json"  # key-state definition file
    use_keystate:  bool  = True       # whether to compile/use the key-state predicate
    keystate_name: str   = "ks_any"   # which predicate in the JSON to use
    ks_prob:       float = 0.5        # probability of key-state relabeling
    ks_horizon:    int   = 20         # max look-ahead (in steps) for a key state

    def __post_init__(self):
        # GCDataset initialization first (terminal_locs, frame stacking, ...),
        # then compile the JSON key-state predicate and build its tables.
        super().__post_init__()
        if self.use_keystate:
            self._init_keystate(self.json_path, self.keystate_name)

    def sample(self, batch_size, idxs=None, evaluation=False):
        """Sample a batch with uniform-future value goals and key-state actor goals."""
        if idxs is None:
            idxs = self.dataset.get_random_idxs(batch_size)

        # Base transitions; with frame stacking, rebuild (next_)observations.
        batch = self.dataset.sample(batch_size, idxs)
        if self.config['frame_stack'] is not None:
            batch['observations'] = self.get_observations(idxs)
            batch['next_observations'] = self.get_observations(idxs + 1)

        # Critic value goals: uniform-future sampling from plain GCDataset.
        value_idxs = GCDataset.sample_goals(
            self,
            idxs,
            self.config['value_p_curgoal'],
            self.config['value_p_trajgoal'],
            self.config['value_p_randomgoal'],
            self.config['value_geom_sample'],
        )
        batch['value_goals'] = self.get_observations(value_idxs)

        # Actor goals: JSON key-state relabeling.
        actor_idxs = KeyJsonGCDataset.sample_goals(
            self,
            idxs,
            self.config['actor_p_curgoal'],
            self.config['actor_p_trajgoal'],
            self.config['actor_p_randomgoal'],
            self.config['actor_geom_sample'],
        )
        batch['actor_goals'] = self.get_observations(actor_idxs)

        # Masks & rewards derive from the *value* goals.
        hits = (idxs == value_idxs).astype(float)
        batch['masks'] = 1.0 - hits
        batch['rewards'] = hits - (1.0 if self.config['gc_negative'] else 0.0)

        # Optional image augmentation (training only).
        if self.config.get('p_aug') is not None and not evaluation:
            if np.random.rand() < self.config['p_aug']:
                self.augment(
                    batch,
                    ['observations', 'next_observations', 'value_goals', 'actor_goals']
                )

        return batch


@dataclasses.dataclass
class DecoupledKeyJsonHGCDataset(_KeyStateMixin, HGCDataset):
    """HGCDataset with JSON key-state relabeling of both actor goal levels.

    - Critic (value_goals): uniform-future, inherited from HGCDataset.sample.
    - Low-level actor goals: JSON key-state relabel via KeyJsonHGCDataset.sample_goals.
    - High-level actor goals: JSON key-state relabel via KeyJsonHGCDataset.sample_goals.
    - High-level targets (high_actor_targets): unchanged from HGCDataset.
    """

    json_path:     str   = "./key_states/PointMaze.json"  # key-state definition file
    use_keystate:  bool  = True       # whether to compile/use the key-state predicate
    keystate_name: str   = "ks_any"   # which predicate in the JSON to use
    ks_prob:       float = 0.5        # probability of key-state relabeling
    ks_horizon:    int   = 20         # max look-ahead (in steps) for a key state

    def __post_init__(self):
        # HGCDataset init builds the value/low/high goal machinery; then
        # compile the key-state predicate and its lookup tables.
        super().__post_init__()
        if self.use_keystate:
            self._init_keystate(self.json_path, self.keystate_name)

    def sample(self, batch_size: int, idxs=None, evaluation: bool = False):
        """Sample an HGC batch, then overwrite both actor goal levels with key-state relabels."""
        if idxs is None:
            idxs = self.dataset.get_random_idxs(batch_size)

        # Parent sample yields observations/next_observations, value_goals,
        # low_actor_goals, high_actor_goals, and high_actor_targets.
        batch = super().sample(batch_size, idxs, evaluation)

        # Low-level actor goals -> JSON key-state relabel.
        low_goal_idxs = KeyJsonHGCDataset.sample_goals(
            self, idxs,
            self.config['actor_p_curgoal'],
            self.config['actor_p_trajgoal'],
            self.config['actor_p_randomgoal'],
            self.config['actor_geom_sample'],
        )
        batch['low_actor_goals'] = self.get_observations(low_goal_idxs)

        # High-level actor goals -> JSON key-state relabel (independent draw).
        high_goal_idxs = KeyJsonHGCDataset.sample_goals(
            self, idxs,
            self.config['actor_p_curgoal'],
            self.config['actor_p_trajgoal'],
            self.config['actor_p_randomgoal'],
            self.config['actor_geom_sample'],
        )
        batch['high_actor_goals'] = self.get_observations(high_goal_idxs)

        # high_actor_targets intentionally left as produced by the parent.
        return batch
    
# =========================================================
# LLM stitchers + LGDA (language-rule cross-trajectory goals + optional keystate)
# =========================================================
import textwrap, json
from pathlib import Path
from typing import Dict, Callable, Optional, Tuple

def _load_stitchers_json(json_path: str) -> Dict[str, Callable]:
    """从 JSON['stitchers'] 读取并编译函数。若不存在则返回空字典。"""
    try:
        js = json.loads(Path(json_path).read_text())
    except Exception:
        return {}
    st = {}
    # 创建共享命名空间
    shared_ns = {}
    
    # 先编译所有基础缝合器
    for name, src in js.get("stitchers", {}).items():
        if name == "stitch_score":
            continue  # 最后处理聚合函数
            
        code = textwrap.dedent(src.replace("\\n", "\n"))
        try:
            exec(code, shared_ns, shared_ns)
            fn = next((v for v in shared_ns.values() if callable(v)), None)
            if fn is not None:
                st[name] = fn
                # 添加到共享命名空间
                shared_ns[name] = fn
        except Exception as e:
            print(f"Error compiling stitcher {name}: {e}")
    
    # 最后编译聚合函数
    if "stitch_score" in js.get("stitchers", {}):
        code = textwrap.dedent(js["stitchers"]["stitch_score"].replace("\\n", "\n"))
        try:
            exec(code, shared_ns, shared_ns)
            fn = next((v for v in shared_ns.values() if callable(v)), None)
            if fn is not None:
                st["stitch_score"] = fn
        except Exception as e:
            print(f"Error compiling stitch_score: {e}")
    
    return st


class _LLMStitcherMixin:
    """
    通用 LLM 拼接器：
      1) 先调用父类 sample_goals 得到 base goals；
      2) 调用 _keystate_patch 做同轨迹关键状态替换（由 LGDA* 实现；默认无操作）；
      3) 以 stitch_prob 的概率，用 LLM 规则在随机候选里挑分最高的“跨轨迹”目标替换。
    """
    # —— 可调参数（可被 dataclass 字段/外部构造覆盖）——
    json_path: str = "./key_states/AntMaze.json"

    stitch_prob: float = 0.3            # 跨轨迹替换概率
    stitch_threshold: float = 0.6       # 接受替换的最小分数
    stitch_k_candidates: int = 64       # 候选集大小（随机采样）
    stitch_require_diff_traj: bool = True
    stitch_use_action: bool = True      # 调用 stitcher 时是否传入动作
    _stitch_mode: str = "random"        # 目前仅用 random

    # 运行期对象
    _stitchers: Dict[str, Callable] = None
    _stitch_agg: Optional[Callable] = None
    _map_info: Optional[dict] = None

    def set_map_info(self, d: Optional[dict]):
        self._map_info = d

    def __post_init__(self):
        # 父类 (GC/HGC) 初始化
        super().__post_init__()

        # 轨迹编号
        self.traj_id = np.zeros(self.size, dtype=np.int32)
        tid = 0
        term_set = set(self.terminal_locs.tolist())
        for i in range(self.size):
            self.traj_id[i] = tid
            if i in term_set:
                tid += 1
        self.num_trajs = tid

        # 载入 stitchers 与聚合器
        self._stitchers = _load_stitchers_json(self.json_path)
        self._stitch_agg = self._build_stitch_aggregator(self._stitchers)

    # —— 聚合不同 stitchers 的分数 —— 
    # 修改 _build_stitch_aggregator 函数
    def _build_stitch_aggregator(self, st: Dict[str, Callable]) -> Callable:
        if not st:
            def _none(si, ai, sj, map_info=None):
                return 0.0, "no_stitchers"
            return _none

        if "stitch_score" in st:
            agg = st["stitch_score"]
            # 创建闭包捕获所有缝合器函数
            stitchers = {k: v for k, v in st.items() if k != "stitch_score"}
            
            def _agg(si, ai, sj, map_info=None):
                try:
                    # 将缝合器函数注入作用域
                    local_ns = {"__builtins__": __builtins__}
                    local_ns.update(stitchers)
                    local_ns.update({
                        "si": si, "ai": ai, "sj": sj, "map_info": map_info
                    })
                    
                    # 使用安全的方式执行
                    return agg(si, ai, sj, map_info=map_info)
                except TypeError:
                    try:
                        return agg(si, ai, sj) if ai is not None else agg(si, sj)
                    except Exception as e:
                        print(f"Error in stitch_score: {e}")
                        return 0.0, f"error: {str(e)}"
                except Exception as e:
                    print(f"Error in stitch_score: {e}")
                    return 0.0, f"error: {str(e)}"
            return _agg

        # 如果没有聚合函数，使用平均值
        def _avg(si, ai, sj, map_info=None):
            scores = []
            for _, fn in st.items():
                try:
                    out = fn(si, ai, sj, map_info=map_info)
                except TypeError:
                    out = fn(si, ai, sj) if ai is not None else fn(si, sj)
                s = out[0] if isinstance(out, tuple) else out
                scores.append(float(np.clip(float(s), 0.0, 1.0)))
            return (float(np.mean(scores)) if scores else 0.0), "mean"
        return _avg

    # —— 计算单对 (si, sj) 的分数 —— 
    def _score(self, si, ai, sj) -> Tuple[float, str]:
        try:
            return self._stitch_agg(si, ai, sj, map_info=self._map_info)
        except Exception as e:
            print(f"Error in stitching score: {e}")
            return 0.0, f"error: {str(e)}"

    # —— 从全局随机候选里挑最佳（lgda 用）——
    def _choose_cross_goal_random(self, i: int) -> int:
        K = int(self.stitch_k_candidates)
        if K <= 0:
            return -1
        cand = np.random.randint(0, self.size, size=K)
        if self.stitch_require_diff_traj:
            cand = cand[self.traj_id[cand] != self.traj_id[i]]
            if cand.size == 0:
                return -1

        si = self.dataset["observations"][i]
        ai = self.dataset["actions"][i] if (self.stitch_use_action and "actions" in self.dataset) else None

        best_j, best_s = -1, -1.0
        for j in cand:
            try:
                sj = self.dataset["observations"][j]
                s, _ = self._score(si, ai, sj)
                if s > best_s:
                    best_s, best_j = s, int(j)
            except Exception as e:
                print(f"Error scoring candidate {j}: {e}")
        return best_j if best_s >= float(self.stitch_threshold) else -1

    # —— keystate 钩子（LGDA* 中实现，默认不改）——
    def _keystate_patch(self, base_goals: np.ndarray, idxs: np.ndarray) -> np.ndarray:
        return base_goals

    # —— 覆写 sample_goals：父类 → keystate（可选）→ LLM 跨轨迹 —— 
    def sample_goals(self, idxs, p_curgoal, p_trajgoal, p_randomgoal, geom_sample):
        goals = super().sample_goals(idxs, p_curgoal, p_trajgoal, p_randomgoal, geom_sample).copy()

        # 先做 keystate 同轨迹替换
        goals = self._keystate_patch(goals, idxs)

        # 再做 LLM 跨轨迹替换
        if self._stitchers and self.stitch_prob > 0:
            mask = (np.random.rand(len(idxs)) < float(self.stitch_prob))
            for b, do in enumerate(mask):
                if not do:
                    continue
                i = int(idxs[b])
                j = self._choose_cross_goal_random(i)  # lgda 使用随机候选
                if j >= 0:
                    goals[b] = j

        return goals.astype(np.int32)


# -------------------------
# LGDA x GCDataset
# -------------------------
@dataclasses.dataclass
class LGDAGCDataset(_LLMStitcherMixin, _KeyStateMixin, GCDataset):
    """LGDA for GC datasets (language-rule cross-trajectory + optional keystate).

    Affects both value_goals and actor_goals, since GCDataset routes both
    through sample_goals.
    """

    # LLM stitcher / cross-trajectory fields (declared as dataclass fields so
    # they can be passed to __init__).
    json_path: str = "./key_states/AntMaze.json"   # used for both stitchers and keystates
    stitch_prob: float = 0.05                      # initial cross-trajectory probability (annealed in main)
    stitch_threshold: float = 0.6
    stitch_k_candidates: int = 64
    stitch_require_diff_traj: bool = True
    stitch_use_action: bool = True

    # Keystate fields (aligned with --llm; enabled by default for lgda).
    use_keystate:  bool  = True
    keystate_name: str   = "ks_any"
    ks_prob:       float = 0.3
    ks_horizon:    int   = 20

    def __post_init__(self):
        # LLM stitcher init first (loads stitchers, builds trajectory ids),
        # then the optional keystate tables.
        _LLMStitcherMixin.__post_init__(self)
        if self.use_keystate:
            self._init_keystate(self.json_path, self.keystate_name)

    def _keystate_patch(self, base_goals: np.ndarray, idxs: np.ndarray) -> np.ndarray:
        """Replace some goals with the nearest future key state on the same trajectory."""
        if not getattr(self, "use_keystate", False) or self.ks_prob <= 0:
            return base_goals
        # -1 marks "no key state ahead".
        nearest = self._nearest_keystate(idxs, self.keystate_name).astype(np.int32)
        within = (nearest >= 0) & ((nearest - idxs) <= int(self.ks_horizon))
        pick = (np.random.rand(len(idxs)) < float(self.ks_prob)) & within
        patched = base_goals.copy()
        patched[pick] = nearest[pick]
        return patched


# -------------------------
# LGDA x HGCDataset
# -------------------------
@dataclasses.dataclass
class LGDAHGCDataset(_LLMStitcherMixin, _KeyStateMixin, HGCDataset):
    """LGDA for HGC datasets (language-rule cross-trajectory + optional keystate).

    Only value_goals are affected (HGC samples them via sample_goals); the
    low/high actor goals keep the original HGCDataset logic.
    """

    # LLM stitcher / cross-trajectory fields.
    json_path: str = "./key_states/AntMaze.json"
    stitch_prob: float = 0.05
    stitch_threshold: float = 0.6
    stitch_k_candidates: int = 64
    stitch_require_diff_traj: bool = True
    stitch_use_action: bool = True

    # Keystate fields.
    use_keystate:  bool  = True
    keystate_name: str   = "ks_any"
    ks_prob:       float = 0.3
    ks_horizon:    int   = 20

    def __post_init__(self):
        # Stitcher init first, then the optional keystate tables.
        _LLMStitcherMixin.__post_init__(self)
        if self.use_keystate:
            self._init_keystate(self.json_path, self.keystate_name)

    def _keystate_patch(self, base_goals: np.ndarray, idxs: np.ndarray) -> np.ndarray:
        """Replace some goals with the nearest future key state on the same trajectory."""
        if not getattr(self, "use_keystate", False) or self.ks_prob <= 0:
            return base_goals
        # -1 marks "no key state ahead".
        nearest = self._nearest_keystate(idxs, self.keystate_name).astype(np.int32)
        within = (nearest >= 0) & ((nearest - idxs) <= int(self.ks_horizon))
        pick = (np.random.rand(len(idxs)) < float(self.ks_prob)) & within
        patched = base_goals.copy()
        patched[pick] = nearest[pick]
        return patched